metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "model.py",
"repo_name": "andreicuceu/vega",
"repo_path": "vega_extracted/vega-master/vega/model.py",
"type": "Python"
}
|
from . import power_spectrum
from . import pktoxi
from . import correlation_func as corr_func
from . import metals
from . import broadband_poly
class Model:
    """
    Class for computing Lyman-alpha forest correlation function models.

    Orchestrates the model pipeline: P(k) computation (Pk_core), the
    Hankel-type transform to configuration space (PktoXi), the correlation
    function (Xi_core), optional metal contamination (metals), optional
    broadband polynomials, and the data distortion matrix.
    """
    def __init__(self, corr_item, fiducial, scale_params, data=None):
        """
        Parameters
        ----------
        corr_item : CorrelationItem
            Item object with the component config
        fiducial : dict
            fiducial config
        scale_params : ScaleParameters
            ScaleParameters object
        data : Data, optional
            data object corresponding to the cf component, by default None
        """
        self._corr_item = corr_item
        self._model_pk = corr_item.model_pk
        assert corr_item.model_coordinates is not None
        self._data = data
        # The distortion matrix is only applied when both the config requests
        # it AND the data object actually carries one.
        data_has_distortion = False
        if self._data is not None:
            data_has_distortion = self._data.has_distortion
        self._has_distortion_mat = corr_item.has_distortion and data_has_distortion
        # Propagate the data bin sizes into the model config (stored as strings
        # because this is a configparser section).
        self._corr_item.config['model']['bin_size_rp'] = str(corr_item.data_coordinates.rp_binsize)
        self._corr_item.config['model']['bin_size_rt'] = str(corr_item.data_coordinates.rt_binsize)
        # Optionally keep intermediate pk/xi products for inspection, keyed by
        # component ('peak' / 'smooth' / 'full') and then by contribution name.
        self.save_components = fiducial.get('save-components', False)
        if self.save_components:
            self.pk = {'peak': {}, 'smooth': {}, 'full': {}}
            self.xi = {'peak': {}, 'smooth': {}, 'full': {}}
            self.xi_distorted = {'peak': {}, 'smooth': {}, 'full': {}}
        # Initialize Broadband
        self.broadband = None
        if 'broadband' in self._corr_item.config:
            self.broadband = broadband_poly.BroadbandPolynomials(
                self._corr_item.config['broadband'], self._corr_item.name,
                corr_item.model_coordinates, corr_item.dist_model_coordinates
            )
        # Initialize main Power Spectrum object
        self.Pk_core = power_spectrum.PowerSpectrum(
            self._corr_item.config['model'], fiducial, self._corr_item.tracer1,
            self._corr_item.tracer2, self._corr_item.name
        )
        # Initialize the Pk to Xi transform
        self.PktoXi = pktoxi.PktoXi.init_from_Pk(self.Pk_core, self._corr_item.config['model'])
        # Initialize main Correlation function object
        self.Xi_core = corr_func.CorrelationFunction(
            self._corr_item.config['model'], fiducial, corr_item.model_coordinates,
            scale_params, self._corr_item.tracer1, self._corr_item.tracer2
        )
        # Initialize metals if needed
        self.metals = None
        if self._corr_item.has_metals:
            self.metals = metals.Metals(corr_item, fiducial, scale_params, data)
        # When True, metal correlations are computed once from the full pk
        # instead of being decomposed into peak/smooth parts.
        self.no_metal_decomp = corr_item.config['model'].getboolean('no-metal-decomp', True)
        self._instrumental_systematics_flag = corr_item.config['model'].getboolean(
            'desi-instrumental-systematics', False)
    def _compute_model(self, pars, pk_lin, component='smooth', xi_metals=None):
        """Compute a model correlation function given the input pars
        and a fiducial linear power spectrum.
        This is used internally for computing the peak and smooth
        components separately.
        Parameters
        ----------
        pars : dict
            Computation parameters
        pk_lin : 1D Array
            Linear power spectrum
        component : str, optional
            Name of pk component, used as key for dictionary of saved
            components ('peak' or 'smooth' or 'full'), by default 'smooth'
        xi_metals : 1D Array, optional
            Metal correlation functions, by default None
        Returns
        -------
        1D Array
            Model correlation function for the specified component
        """
        # Compute core model correlation function
        pk_model = self.Pk_core.compute(pk_lin, pars)
        # In pk-only mode, skip the transform to configuration space entirely
        # and return multipoles of the power spectrum instead.
        if self._model_pk:
            return self.PktoXi.compute_pk_ells(pk_model)
        # Protect against old caches that have not been cleaned
        self.PktoXi.cache_pars = None
        xi_model = self.Xi_core.compute(pk_model, pk_lin, self.PktoXi, pars)
        # Save the components
        if self.save_components:
            self.pk[component]['core'] = pk_model.copy()
            self.xi[component]['core'] = xi_model.copy()
        # Compute metal correlations
        if self._corr_item.has_metals:
            # Either add the pre-computed full metal term passed in by the
            # caller, or compute the per-component metal contribution here.
            if self.no_metal_decomp and xi_metals is not None:
                xi_model += xi_metals
            elif not self.no_metal_decomp:
                xi_model += self.metals.compute(pars, pk_lin, component)
                # Merge saved metal components into the member dictionaries
                if self.save_components:
                    self.pk[component] = {**self.pk[component], **self.metals.pk[component]}
                    self.xi[component] = {**self.xi[component], **self.metals.xi[component]}
                    self.xi_distorted[component] = {**self.xi_distorted[component],
                                                    **self.metals.xi_distorted[component]}
        # Add DESI instrumental systematics model
        # (applied to the smooth/full part only, never to the BAO peak)
        if self._instrumental_systematics_flag and component != 'peak':
            xi_model += self.Xi_core.compute_desi_instrumental_systematics(
                pars, self._corr_item.data_coordinates.rp_binsize)
        # Apply pre distortion broadband
        if self.broadband is not None:
            xi_model *= self.broadband.compute(pars, 'pre-mul')
            xi_model += self.broadband.compute(pars, 'pre-add')
        # Apply the distortion matrix
        if self._has_distortion_mat:
            xi_model = self._data.distortion_mat.dot(xi_model)
        # Apply post distortion broadband
        if self.broadband is not None:
            xi_model *= self.broadband.compute(pars, 'post-mul')
            xi_model += self.broadband.compute(pars, 'post-add')
        # Save final xi
        if self.save_components:
            self.xi_distorted[component]['core'] = xi_model.copy()
        return xi_model
    def compute(self, pars, pk_full, pk_smooth):
        """Compute correlation function model using the peak/smooth
        (wiggles/no-wiggles) decomposition.

        NOTE: mutates ``pars`` in place (sets/overwrites the 'peak' key) and
        reads ``pars['bao_amp']`` to rescale the peak component.
        Parameters
        ----------
        pars : dict
            Computation parameters
        pk_full : 1D Array
            Full fiducial linear power spectrum
        pk_smooth : 1D Array
            Smooth component of the fiducial linear power spectrum
        Returns
        -------
        1D Array
            Full correlation function
        """
        pars['peak'] = True
        # The peak component is modeled from the wiggles-only pk.
        xi_peak = self._compute_model(pars, pk_full - pk_smooth, 'peak')
        pars['peak'] = False
        xi_metals = None
        if self._corr_item.has_metals and self.no_metal_decomp:
            # Without decomposition, metals are computed once from the full pk
            # and added onto the smooth component.
            xi_metals = self.metals.compute(pars, pk_full, 'full')
        xi_smooth = self._compute_model(pars, pk_smooth, 'smooth', xi_metals=xi_metals)
        xi_full = pars['bao_amp'] * xi_peak + xi_smooth
        return xi_full
    def compute_direct(self, pars, pk_full):
        """Compute full correlation function model directly from the full
        power spectrum.

        NOTE: mutates ``pars`` in place (sets ``pars['peak'] = False``).
        Parameters
        ----------
        pars : dict
            Computation parameters
        pk_full : 1D Array
            Full fiducial linear power spectrum
        Returns
        -------
        1D Array
            Full correlation function
        """
        pars['peak'] = False
        xi_full = self._compute_model(pars, pk_full, 'full')
        return xi_full
|
andreicuceuREPO_NAMEvegaPATH_START.@vega_extracted@vega-master@vega@model.py@.PATH_END.py
|
{
"filename": "lahey.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/numpy/py3/numpy/distutils/fcompiler/lahey.py",
"type": "Python"
}
|
import os
from numpy.distutils.fcompiler import FCompiler
# Compiler class names this module exposes to numpy.distutils discovery.
compilers = ['LaheyFCompiler']
class LaheyFCompiler(FCompiler):
    """numpy.distutils wrapper for the Lahey/Fujitsu Fortran 95 compiler (lf95)."""

    compiler_type = 'lahey'
    description = 'Lahey/Fujitsu Fortran 95 Compiler'
    version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P<version>[^\s*]*)'
    executables = {
        'version_cmd'  : ["<F90>", "--version"],
        'compiler_f77' : ["lf95", "--fix"],
        'compiler_fix' : ["lf95", "--fix"],
        'compiler_f90' : ["lf95"],
        'linker_so'    : ["lf95", "-shared"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }
    module_dir_switch = None  #XXX Fix me
    module_include_switch = None  #XXX Fix me

    def get_flags_opt(self):
        """Return the optimization flags."""
        return ['-O']

    def get_flags_debug(self):
        """Return debugging flags (local and global runtime checks enabled)."""
        return ['-g', '--chk', '--chkglobal']

    def get_library_dirs(self):
        """Return library directories derived from the LAHEY environment variable.

        If LAHEY is unset or empty, no extra directories are added.
        """
        lahey_root = os.environ.get('LAHEY')
        return [os.path.join(lahey_root, 'lib')] if lahey_root else []

    def get_libraries(self):
        """Return the Fujitsu runtime libraries to link against."""
        return ['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6']
if __name__ == '__main__':
    # Smoke test: detect the Lahey compiler on this machine and print its
    # version string (requires lf95 to be installed and on PATH).
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils import customized_fcompiler
    print(customized_fcompiler(compiler='lahey').get_version())
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@numpy@py3@numpy@distutils@fcompiler@lahey.py@.PATH_END.py
|
{
"filename": "_line.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/waterfall/connector/_line.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
    """Style of the line connecting waterfall bars (waterfall.connector.line)."""

    # Location of this node in the plotly figure hierarchy.
    _parent_path_str = "waterfall.connector"
    _path_str = "waterfall.connector.line"
    _valid_props = {"color", "dash", "width"}

    @property
    def color(self):
        """
        Sets the line color.

        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    @property
    def dash(self):
        """
        Sets the dash style of lines. Set to a dash type string
        ("solid", "dot", "dash", "longdash", "dashdot", or
        "longdashdot") or a dash length list in px (eg
        "5px,10px,2px,2px").

        The 'dash' property is an enumeration that may be specified as:
          - One of the following dash styles:
                ['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
          - A string containing a dash length list in pixels or percentages
                (e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)

        Returns
        -------
        str
        """
        return self["dash"]

    @dash.setter
    def dash(self, val):
        self["dash"] = val

    @property
    def width(self):
        """
        Sets the line width (in px).

        The 'width' property is a number and may be specified as:
          - An int or float in the interval [0, inf]

        Returns
        -------
        int|float
        """
        return self["width"]

    @width.setter
    def width(self, val):
        self["width"] = val

    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the line color.
        dash
            Sets the dash style of lines. Set to a dash type string
            ("solid", "dot", "dash", "longdash", "dashdot", or
            "longdashdot") or a dash length list in px (eg
            "5px,10px,2px,2px").
        width
            Sets the line width (in px).
        """

    def __init__(self, arg=None, color=None, dash=None, width=None, **kwargs):
        """
        Construct a new Line object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.waterfall.connector.Line`
        color
            Sets the line color.
        dash
            Sets the dash style of lines ("solid", "dot", "dash",
            "longdash", "dashdot", "longdashdot", or a dash length
            list in px, eg "5px,10px,2px,2px").
        width
            Sets the line width (in px).

        Returns
        -------
        Line
        """
        super(Line, self).__init__("line")

        # Internal construction path used when attaching to a parent node.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize ``arg`` into a mutable dict of starting property values.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.waterfall.connector.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.waterfall.connector.Line`"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate properties: explicit keyword arguments take precedence
        # over values carried in ``arg``; None values are not assigned.
        for prop_name, explicit in (("color", color), ("dash", dash), ("width", width)):
            from_arg = arg.pop(prop_name, None)
            chosen = explicit if explicit is not None else from_arg
            if chosen is not None:
                self[prop_name] = chosen

        # Forward any leftover entries (unknown keys) for validation/handling.
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@waterfall@connector@_line.py@.PATH_END.py
|
{
"filename": "test_boundary.py",
"repo_name": "hannorein/REBOUND",
"repo_path": "REBOUND_extracted/REBOUND-main/rebound/tests/test_boundary.py",
"type": "Python"
}
|
import rebound
import unittest
class TestBoundary(unittest.TestCase):
    """Tests for REBOUND simulation boundary conditions (open and periodic)."""
    def test_open(self):
        # With an "open" boundary, particles leaving the box are removed
        # from the simulation.
        sim = rebound.Simulation()
        sim.boundary = "open"
        sim.configure_box(10.)
        # Three of these four particles move fast enough to exit the box
        # within t=1; the slow diagonal one (speed ~4.2) stays inside.
        sim.add(m=0.1,x=1., vx=5.0)
        sim.add(m=0.1,x=-1., vx=-5.0)
        sim.add(m=0.1,y=1., vx=6.0)
        sim.add(m=0.1,x=-1., y=-1., vx=-3., vy=-3.)
        self.assertEqual(sim.N,4)
        sim.integrate(1.)
        self.assertEqual(sim.N,1)
        with self.assertRaises(rebound.NoParticles):
            # Integrating again removes the last particle too, which raises.
            sim.integrate(2.)
    def test_periodic(self):
        # With a "periodic" boundary, particles wrap around the box edges.
        sim = rebound.Simulation()
        sim.boundary = "periodic"
        sim.configure_box(10.)
        sim.add(m=0.1,x=1., vx=5.0, vy=15.1, vz=26.)
        sim.integrate(1.)
        # x = 1 + 5*1 = 6 wraps to -4 in a box of width 10.
        self.assertAlmostEqual(sim.particles[0].x,-4,delta=1e-16)
        sim.integrate(2.)
        # After another unit of time x wraps back to 1.
        self.assertAlmostEqual(sim.particles[0].x,1,delta=1e-16)
        # Wrapping must not remove the particle.
        self.assertEqual(sim.N,1)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
hannoreinREPO_NAMEREBOUNDPATH_START.@REBOUND_extracted@REBOUND-main@rebound@tests@test_boundary.py@.PATH_END.py
|
{
"filename": "kinetica.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/vectorstores/kinetica.py",
"type": "Python"
}
|
from __future__ import annotations
import asyncio
import enum
import json
import logging
import struct
import uuid
from collections import OrderedDict
from enum import Enum
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type
import numpy as np
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore
from pydantic_settings import BaseSettings, SettingsConfigDict
from langchain_community.vectorstores.utils import maximal_marginal_relevance
class DistanceStrategy(str, enum.Enum):
    """Enumerator of the Distance strategies."""

    EUCLIDEAN = "l2"  # L2 / Euclidean distance
    COSINE = "cosine"  # cosine distance
    MAX_INNER_PRODUCT = "inner"  # inner (dot) product
def _results_to_docs(docs_and_scores: Any) -> List[Document]:
"""Return docs from docs and scores."""
return [doc for doc, _ in docs_and_scores]
class Dimension(int, Enum):
    """Some default dimensions for known embeddings."""

    # Dimensionality of OpenAI's text embedding vectors.
    OPENAI = 1536
# Distance metric used when the caller does not specify one.
DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.EUCLIDEAN
_LANGCHAIN_DEFAULT_SCHEMA_NAME = "langchain"  ## Default Kinetica schema name
_LANGCHAIN_DEFAULT_COLLECTION_NAME = (
    "langchain_kinetica_embeddings"  ## Default Kinetica table name
)
class KineticaSettings(BaseSettings):
    """`Kinetica` client configuration.

    Values can also be supplied via a ``.env`` file or environment
    variables prefixed with ``kinetica_`` (see ``model_config``).

    Attribute:
        host (str) : An URL to connect to the Kinetica backend.
            Defaults to 'http://127.0.0.1'.
        port (int) : URL port to connect with HTTP. Defaults to 9191.
        username (str) : Username to login. Defaults to None.
        password (str) : Password to login. Defaults to None.
        database (str) : Schema name to find the table.
            Defaults to 'langchain'.
        table (str) : Table name to operate on.
            Defaults to 'langchain_kinetica_embeddings'.
        metric (str) : Metric to compute distance; one of the
            `DistanceStrategy` values ('l2', 'cosine', 'inner').
            Defaults to 'l2'.
    """

    host: str = "http://127.0.0.1"
    port: int = 9191
    username: Optional[str] = None
    password: Optional[str] = None
    database: str = _LANGCHAIN_DEFAULT_SCHEMA_NAME
    table: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME
    metric: str = DEFAULT_DISTANCE_STRATEGY.value
    def __getitem__(self, item: str) -> Any:
        # Allow dict-style access to settings fields.
        return getattr(self, item)
    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        env_prefix="kinetica_",
        extra="ignore",
    )
class Kinetica(VectorStore):
"""`Kinetica` vector store.
To use, you should have the ``gpudb`` python package installed.
Args:
config: Kinetica connection settings class.
embedding_function: Any embedding function implementing
`langchain.embeddings.base.Embeddings` interface.
collection_name: The name of the collection to use. (default: langchain)
NOTE: This is not the name of the table, but the name of the collection.
The tables will be created when initializing the store (if not exists)
So, make sure the user has the right permissions to create tables.
        distance_strategy: The distance strategy to use. (default: EUCLIDEAN)
pre_delete_collection: If True, will delete the collection if it exists.
(default: False). Useful for testing.
engine_args: SQLAlchemy's create engine arguments.
Example:
.. code-block:: python
from langchain_community.vectorstores import Kinetica, KineticaSettings
from langchain_community.embeddings.openai import OpenAIEmbeddings
kinetica_settings = KineticaSettings(
host="http://127.0.0.1", username="", password=""
)
COLLECTION_NAME = "kinetica_store"
embeddings = OpenAIEmbeddings()
vectorstore = Kinetica.from_documents(
documents=docs,
embedding=embeddings,
collection_name=COLLECTION_NAME,
config=kinetica_settings,
)
"""
    def __init__(
        self,
        config: KineticaSettings,
        embedding_function: Embeddings,
        collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
        schema_name: str = _LANGCHAIN_DEFAULT_SCHEMA_NAME,
        distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
        pre_delete_collection: bool = False,
        logger: Optional[logging.Logger] = None,
        relevance_score_fn: Optional[Callable[[float], float]] = None,
    ) -> None:
        """Constructor for the Kinetica class

        Only the database connection is established here; the table and
        schema are created lazily in ``__post_init__`` once the embedding
        dimensionality is known.

        Args:
            config (KineticaSettings): a `KineticaSettings` instance
            embedding_function (Embeddings): embedding function to use
            collection_name (str, optional): the Kinetica table name.
                Defaults to _LANGCHAIN_DEFAULT_COLLECTION_NAME.
            schema_name (str, optional): the Kinetica schema name.
                Defaults to _LANGCHAIN_DEFAULT_SCHEMA_NAME.
            distance_strategy (DistanceStrategy, optional): distance metric
                used for similarity search.
                Defaults to DEFAULT_DISTANCE_STRATEGY.
            pre_delete_collection (bool, optional): whether to drop the schema
                before use. Defaults to False.
            logger (Optional[logging.Logger], optional): logger to use; a
                module-level logger is created when None. Defaults to None.
            relevance_score_fn (Optional[Callable], optional): override for
                the relevance-score normalization function. Defaults to None.
        """
        self._config = config
        self.embedding_function = embedding_function
        self.collection_name = collection_name
        self.schema_name = schema_name
        self._distance_strategy = distance_strategy
        self.pre_delete_collection = pre_delete_collection
        self.logger = logger or logging.getLogger(__name__)
        self.override_relevance_score_fn = relevance_score_fn
        # Open the database connection immediately.
        self._db = self.__get_db(self._config)
    def __post_init__(self, dimensions: int) -> None:
        """
        Initialize the store.

        Creates the schema (optionally dropping it first) and the embeddings
        table, once the embedding dimensionality is known.

        Args:
            dimensions: dimensionality of the embedding vectors; fixes the
                width of the table's ``vector(...)`` column.
        """
        try:
            # Imported here (and used only as an annotation below) so the
            # module can be imported without gpudb installed.
            from gpudb import GPUdbTable
        except ImportError:
            raise ImportError(
                "Could not import Kinetica python API. "
                "Please install it with `pip install gpudb>=7.2.2.0`."
            )
        self.dimensions = dimensions
        dimension_field = f"vector({dimensions})"
        if self.pre_delete_collection:
            self.delete_schema()
        # Qualify the table name with the schema when one is configured.
        self.table_name = self.collection_name
        if self.schema_name is not None and len(self.schema_name) > 0:
            self.table_name = f"{self.schema_name}.{self.collection_name}"
        # Column layout: raw text, packed float bytes, JSON metadata, UUID id.
        self.table_schema = [
            ["text", "string"],
            ["embedding", "bytes", dimension_field],
            ["metadata", "string", "json"],
            ["id", "string", "uuid"],
        ]
        self.create_schema()
        self.EmbeddingStore: GPUdbTable = self.create_tables_if_not_exists()
    def __get_db(self, config: KineticaSettings) -> Any:
        """Create a `GPUdb` connection from the given settings.

        Raises:
            ImportError: if the ``gpudb`` package is not installed.
        """
        try:
            from gpudb import GPUdb
        except ImportError:
            raise ImportError(
                "Could not import Kinetica python API. "
                "Please install it with `pip install gpudb>=7.2.2.0`."
            )
        options = GPUdb.Options()
        options.username = config.username
        options.password = config.password
        # NOTE(review): SSL certificate verification is unconditionally
        # disabled here — consider making this configurable.
        options.skip_ssl_cert_verification = True
        return GPUdb(host=config.host, options=options)
    @property
    def embeddings(self) -> Embeddings:
        """The embedding function used by this vector store."""
        return self.embedding_function
    @classmethod
    def __from(
        cls,
        config: KineticaSettings,
        texts: List[str],
        embeddings: List[List[float]],
        embedding: Embeddings,
        dimensions: int,
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
        distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
        pre_delete_collection: bool = False,
        logger: Optional[logging.Logger] = None,
        *,
        schema_name: str = _LANGCHAIN_DEFAULT_SCHEMA_NAME,
        **kwargs: Any,
    ) -> Kinetica:
        """Class method to assist in constructing the `Kinetica` store instance
        using different combinations of parameters

        Args:
            config (KineticaSettings): a `KineticaSettings` instance
            texts (List[str]): The list of texts to generate embeddings for and store
            embeddings (List[List[float]]): List of embeddings
            embedding (Embeddings): the Embedding function
            dimensions (int): The number of dimensions the embeddings have
            metadatas (Optional[List[dict]], optional): List of JSON data associated
                with each text. Defaults to None.
            ids (Optional[List[str]], optional): List of unique IDs (UUID by default)
                associated with each text. Defaults to None.
            collection_name (str, optional): Kinetica table name.
                Defaults to _LANGCHAIN_DEFAULT_COLLECTION_NAME.
            schema_name (str, optional): Kinetica schema name.
                Defaults to _LANGCHAIN_DEFAULT_SCHEMA_NAME.
            distance_strategy (DistanceStrategy, optional): Not used for now.
                Defaults to DEFAULT_DISTANCE_STRATEGY.
            pre_delete_collection (bool, optional): Whether to delete the Kinetica
                schema or not. Defaults to False.
            logger (Optional[logging.Logger], optional): Logger to use for logging at
                different levels. Defaults to None.

        Returns:
            Kinetica: An instance of Kinetica class
        """
        # Generate UUIDs / empty metadata for any texts missing them.
        if ids is None:
            ids = [str(uuid.uuid4()) for _ in texts]
        if not metadatas:
            metadatas = [{} for _ in texts]
        store = cls(
            config=config,
            collection_name=collection_name,
            schema_name=schema_name,
            embedding_function=embedding,
            distance_strategy=distance_strategy,
            pre_delete_collection=pre_delete_collection,
            logger=logger,
            **kwargs,
        )
        # Table creation is deferred until the dimensionality is known.
        store.__post_init__(dimensions)
        store.add_embeddings(
            texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
        )
        return store
    def create_tables_if_not_exists(self) -> Any:
        """Create the table to store the texts and embeddings

        Returns:
            A ``GPUdbTable`` handle bound to ``self.table_name``.

        Raises:
            ImportError: if the ``gpudb`` package is not installed.
        """
        try:
            from gpudb import GPUdbTable
        except ImportError:
            raise ImportError(
                "Could not import Kinetica python API. "
                "Please install it with `pip install gpudb>=7.2.2.0`."
            )
        # Replicated so lookups do not require cross-shard joins.
        return GPUdbTable(
            _type=self.table_schema,
            name=self.table_name,
            db=self._db,
            options={"is_replicated": "true"},
        )
def drop_tables(self) -> None:
"""Delete the table"""
self._db.clear_table(
f"{self.table_name}", options={"no_error_if_not_exists": "true"}
)
    def create_schema(self) -> None:
        """Create a new Kinetica schema named ``self.schema_name``."""
        self._db.create_schema(self.schema_name)
    def delete_schema(self) -> None:
        """Delete a Kinetica schema with cascade set to `true`

        This method will delete a schema with all tables in it.
        Missing schemas are ignored (``no_error_if_not_exists``).
        """
        self.logger.debug("Trying to delete collection")
        self._db.drop_schema(
            self.schema_name, {"no_error_if_not_exists": "true", "cascade": "true"}
        )
def add_embeddings(
self,
texts: Iterable[str],
embeddings: List[List[float]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Add embeddings to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
embeddings: List of list of embedding vectors.
metadatas: List of metadatas associated with the texts.
ids: List of ids for the text embedding pairs
kwargs: vectorstore specific parameters
"""
if ids is None:
ids = [str(uuid.uuid4()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
records = []
for text, embedding, metadata, id in zip(texts, embeddings, metadatas, ids):
buf = struct.pack("%sf" % self.dimensions, *embedding)
records.append([text, buf, json.dumps(metadata), id])
self.EmbeddingStore.insert_records(records)
return ids
    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas (JSON data) associated with the texts.
            ids: List of IDs (UUID) for the texts supplied; will be generated if None
            kwargs: vectorstore specific parameters

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        embeddings = self.embedding_function.embed_documents(list(texts))
        # The embedding width of the first document fixes the table's
        # vector column size.
        self.dimensions = len(embeddings[0])
        # Lazily create the schema/table on the first insert.
        if not hasattr(self, "EmbeddingStore"):
            self.__post_init__(self.dimensions)
        return self.add_embeddings(
            texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
        )
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with Kinetica with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding_function.embed_query(text=query)
return self.similarity_search_by_vector(
embedding=embedding,
k=k,
filter=filter,
)
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding_function.embed_query(query)
docs = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return docs
    def similarity_search_with_score_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        filter: Optional[dict] = None,
    ) -> List[Tuple[Document, float]]:
        """Return (Document, distance) pairs closest to ``embedding``.

        On a failed query the error is logged and an empty list is returned
        instead of raising.
        """
        # from gpudb import GPUdbException
        resp: Dict = self.__query_collection(embedding, k, filter)
        if resp and resp["status_info"]["status"] == "OK" and "records" in resp:
            records: OrderedDict = resp["records"]
            # Columns come back as parallel lists; transpose into row tuples.
            results = list(zip(*list(records.values())))
            return self._results_to_docs_and_scores(results)
        self.logger.error(resp["status_info"]["message"])
        # raise GPUdbException(resp["status_info"]["message"])
        return []
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return [doc for doc, _ in docs_and_scores]
def _results_to_docs_and_scores(self, results: Any) -> List[Tuple[Document, float]]:
"""Return docs and scores from results."""
docs = (
[
(
Document(
page_content=result[0],
metadata=json.loads(result[1]),
),
result[2] if self.embedding_function is not None else None,
)
for result in results
]
if len(results) > 0
else []
)
return docs
    def _select_relevance_score_fn(self) -> Callable[[float], float]:
        """
        The 'correct' relevance function
        may differ depending on a few things, including:
        - the distance / similarity metric used by the VectorStore
        - the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
        - embedding dimensionality
        - etc.

        Raises:
            ValueError: if no override is set and the configured distance
                strategy has no known normalization function.
        """
        # An explicit override always wins.
        if self.override_relevance_score_fn is not None:
            return self.override_relevance_score_fn
        # Default strategy is to rely on distance strategy provided
        # in vectorstore constructor
        if self._distance_strategy == DistanceStrategy.COSINE:
            return self._cosine_relevance_score_fn
        elif self._distance_strategy == DistanceStrategy.EUCLIDEAN:
            return self._euclidean_relevance_score_fn
        elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
            return self._max_inner_product_relevance_score_fn
        else:
            raise ValueError(
                "No supported normalization function"
                f" for distance_strategy of {self._distance_strategy}."
                "Consider providing relevance_score_fn to Kinetica constructor."
            )
@property
def distance_strategy(self) -> str:
if self._distance_strategy == DistanceStrategy.EUCLIDEAN:
return "l2_distance"
elif self._distance_strategy == DistanceStrategy.COSINE:
return "cosine_distance"
elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return "dot_product"
else:
raise ValueError(
f"Got unexpected value for distance: {self._distance_strategy}. "
f"Should be one of {', '.join([ds.value for ds in DistanceStrategy])}."
)
    def __query_collection(
        self,
        embedding: List[float],
        k: int = 4,
        filter: Optional[Dict[str, str]] = None,
    ) -> Dict:
        """Query the collection.

        Runs a SQL similarity query ordered by distance and returns the raw
        decoded response dict from ``execute_sql_and_decode``.

        NOTE(review): the embedding and the JSON-serialized ``filter`` are
        interpolated directly into the SQL string. The embedding is built
        from floats, but ``filter`` values come from the caller — confirm
        they are never attacker-controlled, or move to parameterized SQL.
        """
        # if filter is not None:
        #     filter_clauses = []
        #     for key, value in filter.items():
        #         IN = "in"
        #         if isinstance(value, dict) and IN in map(str.lower, value):
        #             value_case_insensitive = {
        #                 k.lower(): v for k, v in value.items()
        #             }
        #             filter_by_metadata = self.EmbeddingStore.cmetadata[
        #                 key
        #             ].astext.in_(value_case_insensitive[IN])
        #             filter_clauses.append(filter_by_metadata)
        #         else:
        #             filter_by_metadata = self.EmbeddingStore.cmetadata[
        #                 key
        #             ].astext == str(value)
        #             filter_clauses.append(filter_by_metadata)
        json_filter = json.dumps(filter) if filter is not None else None
        where_clause = (
            f" where '{json_filter}' = JSON(metadata) "
            if json_filter is not None
            else ""
        )
        embedding_str = "[" + ",".join([str(x) for x in embedding]) + "]"
        dist_strategy = self.distance_strategy
        query_string = f"""
            SELECT text, metadata, {dist_strategy}(embedding, '{embedding_str}')
            as distance, embedding
            FROM "{self.schema_name}"."{self.collection_name}"
            {where_clause}
            ORDER BY distance asc NULLS LAST
            LIMIT {k}
        """
        self.logger.debug(query_string)
        resp = self._db.execute_sql_and_decode(query_string)
        self.logger.debug(resp)
        return resp
    def max_marginal_relevance_search_with_score_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs selected using the maximal marginal relevance with score
            to embedding vector.

        Maximal marginal relevance optimizes for similarity to query AND diversity
            among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k (int): Number of Documents to return. Defaults to 4.
            fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
                Defaults to 20.
            lambda_mult (float): Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List[Tuple[Document, float]]: List of Documents selected by maximal marginal
                relevance to the query and score for each.
        """
        # NOTE(review): unlike similarity_search_with_score_by_vector, the
        # response status is not checked here — a failed query would raise a
        # KeyError on "records" rather than return [].
        resp = self.__query_collection(embedding=embedding, k=fetch_k, filter=filter)
        records: OrderedDict = resp["records"]
        # Columns come back as parallel lists; transpose into row tuples.
        results = list(zip(*list(records.values())))
        # Unpack each stored byte blob back into a tuple of floats so MMR can
        # compare candidate embeddings against the query embedding.
        embedding_list = [
            struct.unpack("%sf" % self.dimensions, embedding)
            for embedding in records["embedding"]
        ]
        mmr_selected = maximal_marginal_relevance(
            np.array(embedding, dtype=np.float32),
            embedding_list,
            k=k,
            lambda_mult=lambda_mult,
        )
        candidates = self._results_to_docs_and_scores(results)
        # Keep only the rows MMR selected, preserving their original order.
        return [r for i, r in enumerate(candidates) if i in mmr_selected]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of Documents selected by maximal marginal relevance.
"""
embedding = self.embedding_function.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
def max_marginal_relevance_search_with_score(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance with score.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of Documents selected by maximal marginal
relevance to the query and score for each.
"""
embedding = self.embedding_function.embed_query(query)
docs = self.max_marginal_relevance_search_with_score_by_vector(
embedding=embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
return docs
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance
to embedding vector.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of Documents selected by maximal marginal relevance.
"""
docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector(
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
return _results_to_docs(docs_and_scores)
async def amax_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance."""
# This is a temporary workaround to make the similarity search
# asynchronous. The proper solution is to make the similarity search
# asynchronous in the vector store implementations.
func = partial(
self.max_marginal_relevance_search_by_vector,
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
return await asyncio.get_event_loop().run_in_executor(None, func)
@classmethod
def from_texts(
cls: Type[Kinetica],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
config: KineticaSettings = KineticaSettings(),
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
*,
schema_name: str = _LANGCHAIN_DEFAULT_SCHEMA_NAME,
**kwargs: Any,
) -> Kinetica:
"""Adds the texts passed in to the vector store and returns it
Args:
cls (Type[Kinetica]): Kinetica class
texts (List[str]): A list of texts for which the embeddings are generated
embedding (Embeddings): List of embeddings
metadatas (Optional[List[dict]], optional): List of dicts, JSON
describing the texts/documents. Defaults to None.
config (KineticaSettings): a `KineticaSettings` instance
collection_name (str, optional): Kinetica schema name.
Defaults to _LANGCHAIN_DEFAULT_COLLECTION_NAME.
schema_name (str, optional): Kinetica schema name.
Defaults to _LANGCHAIN_DEFAULT_SCHEMA_NAME.
distance_strategy (DistanceStrategy, optional): Distance strategy
e.g., l2, cosine etc.. Defaults to DEFAULT_DISTANCE_STRATEGY.
ids (Optional[List[str]], optional): A list of UUIDs for each
text/document. Defaults to None.
pre_delete_collection (bool, optional): Indicates whether the Kinetica
schema is to be deleted or not. Defaults to False.
Returns:
Kinetica: a `Kinetica` instance
"""
if len(texts) == 0:
raise ValueError("texts is empty")
try:
first_embedding = embedding.embed_documents(texts[0:1])
except NotImplementedError:
first_embedding = [embedding.embed_query(texts[0])]
dimensions = len(first_embedding[0])
embeddings = embedding.embed_documents(list(texts))
kinetica_store = cls.__from(
texts=texts,
embeddings=embeddings,
embedding=embedding,
dimensions=dimensions,
config=config,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
schema_name=schema_name,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
return kinetica_store
@classmethod
def from_embeddings(
cls: Type[Kinetica],
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
config: KineticaSettings = KineticaSettings(),
dimensions: int = Dimension.OPENAI,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
*,
schema_name: str = _LANGCHAIN_DEFAULT_SCHEMA_NAME,
**kwargs: Any,
) -> Kinetica:
"""Adds the embeddings passed in to the vector store and returns it
Args:
cls (Type[Kinetica]): Kinetica class
text_embeddings (List[Tuple[str, List[float]]]): A list of texts
and the embeddings
embedding (Embeddings): List of embeddings
metadatas (Optional[List[dict]], optional): List of dicts, JSON describing
the texts/documents. Defaults to None.
config (KineticaSettings): a `KineticaSettings` instance
dimensions (int, optional): Dimension for the vector data, if not passed a
default will be used. Defaults to Dimension.OPENAI.
collection_name (str, optional): Kinetica schema name.
Defaults to _LANGCHAIN_DEFAULT_COLLECTION_NAME.
schema_name (str, optional): Kinetica schema name.
Defaults to _LANGCHAIN_DEFAULT_SCHEMA_NAME.
distance_strategy (DistanceStrategy, optional): Distance strategy
e.g., l2, cosine etc.. Defaults to DEFAULT_DISTANCE_STRATEGY.
ids (Optional[List[str]], optional): A list of UUIDs for each text/document.
Defaults to None.
pre_delete_collection (bool, optional): Indicates whether the
Kinetica schema is to be deleted or not. Defaults to False.
Returns:
Kinetica: a `Kinetica` instance
"""
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
dimensions = len(embeddings[0])
return cls.__from(
texts=texts,
embeddings=embeddings,
embedding=embedding,
dimensions=dimensions,
config=config,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
schema_name=schema_name,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
def from_documents(
cls: Type[Kinetica],
documents: List[Document],
embedding: Embeddings,
config: KineticaSettings = KineticaSettings(),
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
*,
schema_name: str = _LANGCHAIN_DEFAULT_SCHEMA_NAME,
**kwargs: Any,
) -> Kinetica:
"""Adds the list of `Document` passed in to the vector store and returns it
Args:
cls (Type[Kinetica]): Kinetica class
texts (List[str]): A list of texts for which the embeddings are generated
embedding (Embeddings): List of embeddings
config (KineticaSettings): a `KineticaSettings` instance
metadatas (Optional[List[dict]], optional): List of dicts, JSON describing
the texts/documents. Defaults to None.
collection_name (str, optional): Kinetica schema name.
Defaults to _LANGCHAIN_DEFAULT_COLLECTION_NAME.
schema_name (str, optional): Kinetica schema name.
Defaults to _LANGCHAIN_DEFAULT_SCHEMA_NAME.
distance_strategy (DistanceStrategy, optional): Distance strategy
e.g., l2, cosine etc.. Defaults to DEFAULT_DISTANCE_STRATEGY.
ids (Optional[List[str]], optional): A list of UUIDs for each text/document.
Defaults to None.
pre_delete_collection (bool, optional): Indicates whether the Kinetica
schema is to be deleted or not. Defaults to False.
Returns:
Kinetica: a `Kinetica` instance
"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return cls.from_texts(
texts=texts,
embedding=embedding,
metadatas=metadatas,
config=config,
collection_name=collection_name,
schema_name=schema_name,
distance_strategy=distance_strategy,
ids=ids,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@vectorstores@kinetica.py@.PATH_END.py
|
{
"filename": "initializers.py",
"repo_name": "google/flax",
"repo_path": "flax_extracted/flax-main/flax/linen/initializers.py",
"type": "Python"
}
|
# Copyright 2024 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initializers for Flax."""
# pylint: disable=unused-import
# re-export initializer functions from jax.nn
from jax.nn.initializers import constant as constant
from jax.nn.initializers import delta_orthogonal as delta_orthogonal
from jax.nn.initializers import glorot_normal as glorot_normal
from jax.nn.initializers import glorot_uniform as glorot_uniform
from jax.nn.initializers import he_normal as he_normal
from jax.nn.initializers import he_uniform as he_uniform
from jax.nn.initializers import kaiming_normal as kaiming_normal
from jax.nn.initializers import kaiming_uniform as kaiming_uniform
from jax.nn.initializers import lecun_normal as lecun_normal
from jax.nn.initializers import lecun_uniform as lecun_uniform
from jax.nn.initializers import normal as normal
from jax.nn.initializers import ones as ones
from jax.nn.initializers import orthogonal as orthogonal
from jax.nn.initializers import truncated_normal as truncated_normal
from jax.nn.initializers import uniform as uniform
from jax.nn.initializers import variance_scaling as variance_scaling
from jax.nn.initializers import xavier_normal as xavier_normal
from jax.nn.initializers import xavier_uniform as xavier_uniform
from jax.nn.initializers import zeros as zeros
from flax.typing import Initializer as Initializer
# pylint: enable=unused-import
def zeros_init() -> Initializer:
  """Build an initializer returning a constant array of zeros.

  >>> import jax, jax.numpy as jnp
  >>> from flax.linen.initializers import zeros_init
  >>> zeros_init()(jax.random.key(42), (2, 3), jnp.float32)
  Array([[0., 0., 0.],
         [0., 0., 0.]], dtype=float32)
  """
  # Hand back the re-exported jax.nn.initializers.zeros function.
  initializer = zeros
  return initializer
def ones_init() -> Initializer:
  """Build an initializer returning a constant array of ones.

  >>> import jax, jax.numpy as jnp
  >>> from flax.linen.initializers import ones_init
  >>> ones_init()(jax.random.key(42), (3, 2), jnp.float32)
  Array([[1., 1.],
         [1., 1.],
         [1., 1.]], dtype=float32)
  """
  # Hand back the re-exported jax.nn.initializers.ones function.
  initializer = ones
  return initializer
|
googleREPO_NAMEflaxPATH_START.@flax_extracted@flax-main@flax@linen@initializers.py@.PATH_END.py
|
{
"filename": "mag.py",
"repo_name": "lsst-uk/lasair-lsst",
"repo_path": "lasair-lsst_extracted/lasair-lsst-main/webserver/static/files/mag.py",
"type": "Python"
}
|
"""
Computing apparent magnitude from difference magnitudes for ZTF
Adapted by Roy Williams from Eric Bellm's notebook
https://github.com/ZwickyTransientFacility/ztf-avro-alert/blob/master/notebooks/Variable_star_lightcurves.ipynb
"""
import math
def dc_mag_dict(fid, magpsf, sigmapsf, magnr, sigmagnr, magzpsci, isdiffpos):
    """Compute the apparent (DC) magnitude from a ZTF difference magnitude.

    Args:
        fid: filter id, 1 for g, 2 for r, 3 for i
        magpsf: difference-image magnitude from PSF-fit photometry
        sigmapsf: 1-sigma error on magpsf
        magnr: magnitude of nearest source in reference image PSF-catalog
            within 30 arcsec
        sigmagnr: 1-sigma error on magnr
        magzpsci: magnitude zero point for photometry estimates; 0.0 means
            missing and the fixed reference zero point is used instead
        isdiffpos:
            't' => candidate is from positive (sci minus ref) subtraction;
            'f' => candidate is from negative (ref minus sci) subtraction

    Returns:
        dict with keys 'dc_mag' and 'dc_sigmag'.

    Raises:
        ValueError: if isdiffpos is neither 't' nor 'f'.
    """
    # Zero points. Looks like they are fixed per filter.
    ref_zps = {1: 26.325, 2: 26.275, 3: 25.660}
    magzpref = ref_zps[fid]

    # Reference flux and its error; clamp the magnitude difference at 12
    # to avoid huge fluxes from implausible values.
    magdiff = min(magzpref - magnr, 12.0)
    ref_flux = 10 ** (0.4 * magdiff)
    ref_sigflux = (sigmagnr / 1.0857) * ref_flux

    # Difference flux and its error, with the same clamping; fall back to
    # the reference zero point when the science zero point is missing.
    if magzpsci == 0.0:
        magzpsci = magzpref
    magdiff = min(magzpsci - magpsf, 12.0)
    difference_flux = 10 ** (0.4 * magdiff)
    difference_sigflux = (sigmapsf / 1.0857) * difference_flux

    # Add or subtract difference flux based on the subtraction sign.
    # Raise instead of printing (the original fell through to a NameError).
    if isdiffpos == 't':
        dc_flux = ref_flux + difference_flux
    elif isdiffpos == 'f':
        dc_flux = ref_flux - difference_flux
    else:
        raise ValueError('Unknown isdiffpos=%s' % isdiffpos)

    # Assumes the two flux errors are independent. Maybe too conservative.
    dc_sigflux = math.sqrt(difference_sigflux ** 2 + ref_sigflux ** 2)

    # Apparent mag and its error from fluxes; when the combined flux is
    # non-positive, fall back to the difference-image values.
    if dc_flux > 0.0:
        dc_mag = magzpsci - 2.5 * math.log10(dc_flux)
        dc_sigmag = dc_sigflux / dc_flux * 1.0857
    else:
        dc_mag = magzpsci
        dc_sigmag = sigmapsf
    return {'dc_mag': dc_mag, 'dc_sigmag': dc_sigmag}
if __name__ == "__main__":
    # Smoke test with example alert values (g-band detection).
    fid = 1
    magpsf = 17.7439
    sigmapsf = 0.1057
    magnr = 14.7309
    sigmagnr = 0.0189
    magzpsci = 26.1389
    isdiffpos = 't'
    # Positive subtraction: difference flux is added to the reference flux.
    d = dc_mag_dict(fid, magpsf,sigmapsf, magnr,sigmagnr, magzpsci, isdiffpos)
    print('As a positive difference', d)
    # Negative subtraction: difference flux is subtracted instead.
    isdiffpos = 'f'
    d = dc_mag_dict(fid, magpsf,sigmapsf, magnr,sigmagnr, magzpsci, isdiffpos)
    print('As a negative difference', d)
|
lsst-ukREPO_NAMElasair-lsstPATH_START.@lasair-lsst_extracted@lasair-lsst-main@webserver@static@files@mag.py@.PATH_END.py
|
{
"filename": "sixdf_2011_bao.py",
"repo_name": "CobayaSampler/cobaya",
"repo_path": "cobaya_extracted/cobaya-master/cobaya/likelihoods/bao/sixdf_2011_bao.py",
"type": "Python"
}
|
from cobaya.likelihoods.base_classes import BAO
class sixdf_2011_bao(BAO):
    r"""
    Likelihood of the BAO detection of the 6dF Galaxy Survey \cite{Beutler:2012px}.
    """

    # No overrides: all behavior is inherited from the generic BAO base class.
    pass
|
CobayaSamplerREPO_NAMEcobayaPATH_START.@cobaya_extracted@cobaya-master@cobaya@likelihoods@bao@sixdf_2011_bao.py@.PATH_END.py
|
{
"filename": "_ids.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/funnelarea/_ids.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class IdsValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``ids`` data-array property of ``funnelarea`` traces."""

    def __init__(self, plotly_name="ids", parent_name="funnelarea", **kwargs):
        # Defaults may be overridden by callers; kwargs.pop ensures each
        # keyword is passed to the base class exactly once.
        super(IdsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "data"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@funnelarea@_ids.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "bradkav/NbodyIMRI",
"repo_path": "NbodyIMRI_extracted/NbodyIMRI-main/test_snapshots/README.md",
"type": "Markdown"
}
|
#### Test Snapshots
|
bradkavREPO_NAMENbodyIMRIPATH_START.@NbodyIMRI_extracted@NbodyIMRI-main@test_snapshots@README.md@.PATH_END.py
|
{
"filename": "_insidetextfont.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/funnel/_insidetextfont.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class InsidetextfontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``funnel.insidetextfont`` compound property.

    Appears auto-generated; ``data_docs`` below documents the child
    attributes of the ``Insidetextfont`` data class.
    """

    def __init__(self, plotly_name="insidetextfont", parent_name="funnel", **kwargs):
        # Defaults may be overridden by callers; kwargs.pop ensures each
        # keyword is passed to the base class exactly once.
        super(InsidetextfontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Insidetextfont"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for `color`.
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            familysrc
                Sets the source reference on Chart Studio Cloud
                for `family`.
            lineposition
                Sets the kind of decoration line(s) with text,
                such as an "under", "over" or "through" as well
                as combinations e.g. "under+over", etc.
            linepositionsrc
                Sets the source reference on Chart Studio Cloud
                for `lineposition`.
            shadow
                Sets the shape and color of the shadow behind
                text. "auto" places minimal shadow and applies
                contrast text font color. See
                https://developer.mozilla.org/en-
                US/docs/Web/CSS/text-shadow for additional
                options.
            shadowsrc
                Sets the source reference on Chart Studio Cloud
                for `shadow`.
            size
            sizesrc
                Sets the source reference on Chart Studio Cloud
                for `size`.
            style
                Sets whether a font should be styled with a
                normal or italic face from its family.
            stylesrc
                Sets the source reference on Chart Studio Cloud
                for `style`.
            textcase
                Sets capitalization of text. It can be used to
                make text appear in all-uppercase or all-
                lowercase, or with each word capitalized.
            textcasesrc
                Sets the source reference on Chart Studio Cloud
                for `textcase`.
            variant
                Sets the variant of the font.
            variantsrc
                Sets the source reference on Chart Studio Cloud
                for `variant`.
            weight
                Sets the weight (or boldness) of the font.
            weightsrc
                Sets the source reference on Chart Studio Cloud
                for `weight`.
""",
            ),
            **kwargs,
        )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@funnel@_insidetextfont.py@.PATH_END.py
|
{
"filename": "_ticklabelposition.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/volume/colorbar/_ticklabelposition.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicklabelpositionValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``ticklabelposition`` enum of ``volume.colorbar``."""

    def __init__(
        self, plotly_name="ticklabelposition", parent_name="volume.colorbar", **kwargs
    ):
        # Defaults may be overridden by callers; kwargs.pop ensures each
        # keyword is passed to the base class exactly once.
        super(TicklabelpositionValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "info"),
            # Allowed enum values for the tick-label placement.
            values=kwargs.pop(
                "values",
                [
                    "outside",
                    "inside",
                    "outside top",
                    "inside top",
                    "outside bottom",
                    "inside bottom",
                ],
            ),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@volume@colorbar@_ticklabelposition.py@.PATH_END.py
|
{
"filename": "toast_cache_test.py",
"repo_name": "hpc4cmb/toast",
"repo_path": "toast_extracted/toast-main/pipelines/toast_cache_test.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""Test the ability to free memory from a toast.Cache.
This stores the following objects per detector in a Cache:
- Detector signal as float64
- Detector flags as uint8
- Detector pointing pixel numbers as int64
- Detector pointing weights as float32
It reports the memory available before and after this allocation.
Then it frees the buffers of a given type from all detectors and
compares the resulting change to what is expected.
"""
import os
import re
import sys
import argparse
import traceback
import psutil
import numpy as np
from toast.utils import Logger
from toast.cache import Cache
def main():
    """Allocate per-detector Cache buffers, then free them and check memory.

    Builds one toast.Cache per simulated observation, holding signal, flag,
    pixel-index and pointing-weight buffers for every detector, then destroys
    the buffers and compares the observed change in available system memory
    against the expected allocation size.
    """
    log = Logger.get()
    parser = argparse.ArgumentParser(description="Allocate and free cache objects.")
    parser.add_argument(
        "--ndet", required=False, type=int, default=10, help="The number of detectors"
    )
    parser.add_argument(
        "--nobs", required=False, type=int, default=2, help="The number of observations"
    )
    parser.add_argument(
        "--obsminutes",
        required=False,
        type=int,
        default=60,
        help="The number of minutes in each observation.",
    )
    parser.add_argument(
        "--rate", required=False, type=float, default=37.0, help="The sample rate."
    )
    parser.add_argument(
        "--nloop",
        required=False,
        type=int,
        default=2,
        help="The number of allocate / free loops",
    )
    args = parser.parse_args()
    log.info("Input parameters:")
    log.info(" {} observations".format(args.nobs))
    log.info(" {} minutes per obs".format(args.obsminutes))
    log.info(" {} detectors per obs".format(args.ndet))
    log.info(" {}Hz sample rate".format(args.rate))
    nsampobs = int(args.obsminutes * 60 * args.rate)
    nsamptot = args.ndet * args.nobs * nsampobs
    log.info("{} total samples across all detectors and observations".format(nsamptot))
    # Expected byte counts per buffer type: float64 signal (8 B/sample),
    # uint8 flags (1 B), int64 pixel indices (8 B), and 3 float32 pointing
    # weights (3 * 4 B) per sample.
    bytes_sigobs = nsampobs * 8
    bytes_sigtot = nsamptot * 8
    bytes_flagobs = nsampobs * 1
    bytes_flagtot = nsamptot * 1
    bytes_pixobs = nsampobs * 8
    bytes_pixtot = nsamptot * 8
    bytes_wtobs = 3 * nsampobs * 4
    bytes_wttot = 3 * nsamptot * 4
    bytes_tot = bytes_sigtot + bytes_flagtot + bytes_pixtot + bytes_wttot
    bytes_tot_mb = bytes_tot / 2 ** 20
    log.info(
        "{} total bytes ({:0.2f}MB) of data expected".format(bytes_tot, bytes_tot_mb)
    )
    for lp in range(args.nloop):
        log.info("Allocation loop {:02d}".format(lp))
        # NOTE(review): "available" memory is system-wide, so activity from
        # other processes can perturb the residuals reported below.
        vmem = psutil.virtual_memory()._asdict()
        avstart = vmem["available"]
        avstart_mb = avstart / 2 ** 20
        log.info(" Starting with {:0.2f}MB of available memory".format(avstart_mb))
        # The list of Caches, one per "observation"
        caches = list()
        # This structure holds external references to cache objects, to ensure that we
        # can destroy objects and free memory, even if external references are held.
        refs = list()
        for ob in range(args.nobs):
            ch = Cache()
            rf = dict()
            for det in range(args.ndet):
                dname = "{:04d}".format(det)
                cname = "{}_sig".format(dname)
                rf[cname] = ch.create(cname, np.float64, (nsampobs,))
                cname = "{}_flg".format(dname)
                rf[cname] = ch.create(cname, np.uint8, (nsampobs,))
                cname = "{}_pix".format(dname)
                rf[cname] = ch.create(cname, np.int64, (nsampobs,))
                cname = "{}_wgt".format(dname)
                rf[cname] = ch.create(cname, np.float32, (nsampobs, 3))
            caches.append(ch)
            refs.append(rf)
        vmem = psutil.virtual_memory()._asdict()
        avpost = vmem["available"]
        avpost_mb = avpost / 2 ** 20
        log.info(" After allocation, {:0.2f}MB of available memory".format(avpost_mb))
        diff = avstart_mb - avpost_mb
        diffperc = 100.0 * np.absolute(diff - bytes_tot_mb) / bytes_tot_mb
        log.info(
            " Difference is {:0.2f}MB, expected {:0.2f}MB ({:0.2f}% residual)".format(
                diff, bytes_tot_mb, diffperc
            )
        )
        # Destroy one buffer type at a time across all observations and
        # detectors, even though external references are still held in refs.
        for suf in ["wgt", "pix", "flg", "sig"]:
            for ob, ch in zip(range(args.nobs), caches):
                for det in range(args.ndet):
                    dname = "{:04d}".format(det)
                    ch.destroy("{}_{}".format(dname, suf))
        vmem = psutil.virtual_memory()._asdict()
        avfinal = vmem["available"]
        avfinal_mb = avfinal / 2 ** 20
        log.info(
            " After destruction, {:0.2f}MB of available memory".format(avfinal_mb)
        )
        diff = avfinal_mb - avpost_mb
        diffperc = 100.0 * np.absolute(diff - bytes_tot_mb) / bytes_tot_mb
        log.info(
            " Difference is {:0.2f}MB, expected {:0.2f}MB ({:0.2f}% residual)".format(
                diff, bytes_tot_mb, diffperc
            )
        )
    return
if __name__ == "__main__":
    try:
        main()
    # Catch Exception rather than a bare except: a bare clause would also
    # swallow SystemExit and KeyboardInterrupt.
    except Exception:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        # Print the full traceback explicitly so it is flushed even if the
        # interpreter is torn down abruptly.
        print("".join(lines), flush=True)
|
hpc4cmbREPO_NAMEtoastPATH_START.@toast_extracted@toast-main@pipelines@toast_cache_test.py@.PATH_END.py
|
{
"filename": "imshow3d.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/galleries/examples/mplot3d/imshow3d.py",
"type": "Python"
}
|
"""
===============
2D images in 3D
===============
This example demonstrates how to plot 2D color coded images (similar to
`.Axes.imshow`) as a plane in 3D.
Matplotlib does not have a native function for this. Below we build one by relying
on `.Axes3D.plot_surface`. For simplicity, there are some differences to
`.Axes.imshow`: This function does not set the aspect of the Axes, hence pixels are
not necessarily square. Also, pixel edges are on integer values rather than pixel
centers. Furthermore, many optional parameters of `.Axes.imshow` are not implemented.
Multiple calls of ``imshow3d`` use independent norms and thus different color scales
by default. If you want to have a single common color scale, you need to construct
a suitable norm beforehand and pass it to all ``imshow3d`` calls.
A fundamental limitation of the 3D plotting engine is that intersecting objects cannot
be drawn correctly. One object will always be drawn after the other. Therefore,
multiple image planes can well be used in the background as shown in this example.
But this approach is not suitable if the planes intersect.
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import Normalize
def imshow3d(ax, array, value_direction='z', pos=0, norm=None, cmap=None):
    """
    Display a 2D array as a color-coded 2D image embedded in 3d.

    The image is drawn in a plane perpendicular to the *value_direction*
    coordinate axis.

    Parameters
    ----------
    ax : Axes3D
        The 3D Axes to plot into.
    array : 2D numpy array
        The image values.
    value_direction : {'x', 'y', 'z'}
        The axis normal to the image plane.
    pos : float
        The numeric value on the *value_direction* axis at which the image
        plane is located.
    norm : `~matplotlib.colors.Normalize`, default: Normalize
        The normalization method used to scale scalar data. See `imshow()`.
    cmap : str or `~matplotlib.colors.Colormap`, default: :rc:`image.cmap`
        The Colormap instance or registered colormap name used to map scalar
        data to colors.
    """
    norm = Normalize() if norm is None else norm
    # Map the data through the norm and colormap to per-cell RGBA values.
    rgba = plt.get_cmap(cmap)(norm(array))
    # Build the grid of cell corners (one more edge than cells along each
    # axis) and hold the remaining coordinate fixed at *pos*.
    if value_direction == 'x':
        n0, n1 = array.shape
        zz, yy = np.mgrid[0:n0 + 1, 0:n1 + 1]
        xx = np.full_like(yy, pos)
    elif value_direction == 'y':
        n0, n1 = array.shape
        xx, zz = np.mgrid[0:n0 + 1, 0:n1 + 1]
        yy = np.full_like(zz, pos)
    elif value_direction == 'z':
        n0, n1 = array.shape
        yy, xx = np.mgrid[0:n0 + 1, 0:n1 + 1]
        zz = np.full_like(xx, pos)
    else:
        raise ValueError(f"Invalid value_direction: {value_direction!r}")
    ax.plot_surface(xx, yy, zz, rstride=1, cstride=1, facecolors=rgba, shade=False)
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.set(xlabel="x", ylabel="y", zlabel="z")
# Grid sizes for the three demo images.
nx, ny, nz = 8, 10, 5
# Linear ramps plus noise so each image shows visible structure.
data_xy = np.arange(ny * nx).reshape(ny, nx) + 15 * np.random.random((ny, nx))
data_yz = np.arange(nz * ny).reshape(nz, ny) + 10 * np.random.random((nz, ny))
data_zx = np.arange(nx * nz).reshape(nx, nz) + 8 * np.random.random((nx, nz))
# Draw three mutually perpendicular image planes (see module docstring for
# the limitation on intersecting planes).
imshow3d(ax, data_xy)
imshow3d(ax, data_yz, value_direction='x', cmap='magma')
imshow3d(ax, data_zx, value_direction='y', pos=ny, cmap='plasma')
plt.show()
# %%
# .. tags::
# plot-type: 3D,
# styling: colormap,
# level: advanced
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@galleries@examples@mplot3d@imshow3d.py@.PATH_END.py
|
{
"filename": "parallel.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/jax/_src/lax/parallel.py",
"type": "Python"
}
|
# Copyright 2019 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Parallelization primitives.
"""
from __future__ import annotations
from collections.abc import Sequence
from functools import partial
import itertools
import math
from jax import tree_util
from jax._src import core
from jax._src import config
from jax._src import dispatch
from jax._src import dtypes
from jax._src.sharding_impls import (SPMDAxisContext, ShardingContext,
NamedSharding, PartitionSpec as P)
from jax._src.core import AxisName, ShapedArray
from jax._src.interpreters import ad
from jax._src.interpreters import batching
from jax._src.interpreters import mlir
from jax._src.interpreters import pxla
from jax._src.lax import lax
from jax._src.lax import slicing
from jax._src.lib.mlir import ir
from jax._src.lib.mlir.dialects import hlo
from jax._src.util import (canonicalize_axis, moveaxis, safe_map, safe_zip,
unzip2)
import numpy as np
unsafe_map, map = map, safe_map # type: ignore
unsafe_zip, zip = zip, safe_zip # type: ignore
### parallel traceables
def psum(x, axis_name, *, axis_index_groups=None):
  """Compute an all-reduce sum on ``x`` over the pmapped axis ``axis_name``.
  If ``x`` is a pytree then the result is equivalent to mapping this function to
  each leaf in the tree.
  Inputs of boolean dtype are converted to integers before the reduction.
  Args:
    x: array(s) with a mapped axis named ``axis_name``.
    axis_name: hashable Python object used to name a pmapped axis (see the
      :func:`jax.pmap` documentation for more details).
    axis_index_groups: optional list of lists containing axis indices (e.g. for
      an axis of size 4, [[0, 1], [2, 3]] would perform psums over the first
      two and last two replicas). Groups must cover all axis indices exactly
      once.
  Returns:
    Array(s) with the same shape as ``x`` representing the result of an
    all-reduce sum along the axis ``axis_name``.
  Examples:
    For example, with 4 XLA devices available:
    >>> x = np.arange(4)
    >>> y = jax.pmap(lambda x: jax.lax.psum(x, 'i'), axis_name='i')(x)
    >>> print(y)
    [6 6 6 6]
    >>> y = jax.pmap(lambda x: x / jax.lax.psum(x, 'i'), axis_name='i')(x)
    >>> print(y)
    [0.         0.16666667 0.33333334 0.5       ]
    Suppose we want to perform ``psum`` among two groups, one with ``device0`` and ``device1``, the other with ``device2`` and ``device3``,
    >>> y = jax.pmap(lambda x: jax.lax.psum(x, 'i', axis_index_groups=[[0, 1], [2, 3]]), axis_name='i')(x)
    >>> print(y)
    [1 1 5 5]
    An example using 2D-shaped x. Each row is data from one device.
    >>> x = np.arange(16).reshape(4, 4)
    >>> print(x)
    [[ 0  1  2  3]
     [ 4  5  6  7]
     [ 8  9 10 11]
     [12 13 14 15]]
    Full ``psum`` across all devices:
    >>> y = jax.pmap(lambda x: jax.lax.psum(x, 'i'), axis_name='i')(x)
    >>> print(y)
    [[24 28 32 36]
     [24 28 32 36]
     [24 28 32 36]
     [24 28 32 36]]
    Perform ``psum`` among two groups:
    >>> y = jax.pmap(lambda x: jax.lax.psum(x, 'i', axis_index_groups=[[0, 1], [2, 3]]), axis_name='i')(x)
    >>> print(y)
    [[ 4  6  8 10]
     [ 4  6  8 10]
     [20 22 24 26]
     [20 22 24 26]]
  """
  # Normalize axis_name to a sequence of axes (named axes and/or ints).
  if not isinstance(axis_name, (tuple, list)):
    axis_name = (axis_name,)
  if any(isinstance(axis, int) for axis in axis_name) and axis_index_groups is not None:
    raise ValueError("axis_index_groups only supported for sums over just named axes")
  _validate_reduce_axis_index_groups(axis_index_groups)
  leaves, treedef = tree_util.tree_flatten(x)
  # Booleans are converted to int32 so summation is well-defined.
  leaves = [lax.convert_element_type(l, np.int32)
            if dtypes.dtype(l) == np.bool_ else l for l in leaves]
  axis_index_groups = _canonicalize_axis_index_groups(axis_index_groups)
  # handle the constant case specially
  if all(not isinstance(leaf, core.Tracer) for leaf in leaves):
    # Partition into named vs positional axes; the bool index selects the
    # target list (False -> named_axes, True -> pos_axes).
    named_axes, pos_axes = axes_partition = [], []
    for axis in axis_name:
      axes_partition[isinstance(axis, int)].append(axis)
    def pos_reduce(x):
      # Ordinary (non-collective) sum over the positional axes.
      if not pos_axes:
        return x
      return lax._reduce_sum(x, [canonicalize_axis(axis, getattr(x, 'ndim', 0))
                                 for axis in pos_axes])
    if axis_index_groups is not None:
      assert not pos_axes
      size = len(axis_index_groups[0])
    else:
      size = math.prod([core.get_axis_env().axis_size(name) for name in named_axes])
    # Summing a replicated constant over a named axis of size n scales it by n.
    out_flat = tuple(lax._const(leaf, size) * pos_reduce(leaf) for leaf in leaves)
  else:
    out_flat = psum_p.bind(
        *leaves, axes=tuple(axis_name), axis_index_groups=axis_index_groups)
  return tree_util.tree_unflatten(treedef, out_flat)
def pmean(x, axis_name, *, axis_index_groups=None):
  """Compute an all-reduce mean on ``x`` over the pmapped axis ``axis_name``.
  If ``x`` is a pytree then the result is equivalent to mapping this function to
  each leaf in the tree.
  Args:
    x: array(s) with a mapped axis named ``axis_name``.
    axis_name: hashable Python object used to name a pmapped axis (see the
      :func:`jax.pmap` documentation for more details).
    axis_index_groups: optional list of lists containing axis indices (e.g. for
      an axis of size 4, [[0, 1], [2, 3]] would perform pmeans over the first
      two and last two replicas). Groups must cover all axis indices exactly
      once, and on TPUs all groups must be the same size.
  Returns:
    Array(s) with the same shape as ``x`` representing the result of an
    all-reduce mean along the axis ``axis_name``.
  For example, with 4 XLA devices available:
  >>> x = np.arange(4)
  >>> y = jax.pmap(lambda x: jax.lax.pmean(x, 'i'), axis_name='i')(x)
  >>> print(y)
  [1.5 1.5 1.5 1.5]
  >>> y = jax.pmap(lambda x: x / jax.lax.pmean(x, 'i'), axis_name='i')(x)
  >>> print(y)
  [0.        0.6666667 1.3333334 2.       ]
  """
  # mean = sum / participant count, both computed with the same grouping.
  totals = psum(x, axis_name=axis_name, axis_index_groups=axis_index_groups)
  count = psum(1, axis_name=axis_name, axis_index_groups=axis_index_groups)
  return tree_util.tree_map(lambda leaf: leaf / count, totals)
def pmax(x, axis_name, *, axis_index_groups=None):
  """Compute an all-reduce max on ``x`` over the pmapped axis ``axis_name``.
  If ``x`` is a pytree then the result is equivalent to mapping this function to
  each leaf in the tree.
  Args:
    x: array(s) with a mapped axis named ``axis_name``.
    axis_name: hashable Python object used to name a pmapped axis (see the
      :func:`jax.pmap` documentation for more details).
    axis_index_groups: optional list of lists containing axis indices (e.g. for
      an axis of size 4, [[0, 1], [2, 3]] would perform pmaxes over the first
      two and last two replicas). Groups must cover all axis indices exactly
      once, and on TPUs all groups must be the same size.
  Returns:
    Array(s) with the same shape as ``x`` representing the result of an
    all-reduce max along the axis ``axis_name``.
  """
  if not isinstance(axis_name, (tuple, list)):
    axis_name = (axis_name,)
  has_positional = any(isinstance(axis, int) for axis in axis_name)
  if has_positional and axis_index_groups is not None:
    raise ValueError("axis_index_groups only supported for sums over just named axes")
  _validate_reduce_axis_index_groups(axis_index_groups)
  flat, structure = tree_util.tree_flatten(x)
  groups = _canonicalize_axis_index_groups(axis_index_groups)
  reduced = pmax_p.bind(*flat, axes=axis_name, axis_index_groups=groups)
  return tree_util.tree_unflatten(structure, reduced)
def pmin(x, axis_name, *, axis_index_groups=None):
  """Compute an all-reduce min on ``x`` over the pmapped axis ``axis_name``.
  If ``x`` is a pytree then the result is equivalent to mapping this function to
  each leaf in the tree.
  Args:
    x: array(s) with a mapped axis named ``axis_name``.
    axis_name: hashable Python object used to name a pmapped axis (see the
      :func:`jax.pmap` documentation for more details).
    axis_index_groups: optional list of lists containing axis indices (e.g. for
      an axis of size 4, [[0, 1], [2, 3]] would perform pmins over the first
      two and last two replicas). Groups must cover all axis indices exactly
      once, and on TPUs all groups must be the same size.
  Returns:
    Array(s) with the same shape as ``x`` representing the result of an
    all-reduce min along the axis ``axis_name``.
  """
  if not isinstance(axis_name, (tuple, list)):
    axis_name = (axis_name,)
  has_positional = any(isinstance(axis, int) for axis in axis_name)
  if has_positional and axis_index_groups is not None:
    raise ValueError("axis_index_groups only supported for sums over just named axes")
  _validate_reduce_axis_index_groups(axis_index_groups)
  flat, structure = tree_util.tree_flatten(x)
  groups = _canonicalize_axis_index_groups(axis_index_groups)
  reduced = pmin_p.bind(*flat, axes=axis_name, axis_index_groups=groups)
  return tree_util.tree_unflatten(structure, reduced)
# TODO(mattjj): add a pargmin_p, or add named axis support to lax.argmin_p
def pargmin(x, axis_name):
  """Return the index along ``axis_name`` at which ``x`` attains its minimum."""
  if isinstance(axis_name, (tuple, list)):
    raise TypeError(f"pargmin only accepts a single axis, got {axis_name}")
  winner = pmin(x, axis_name)
  return _axis_index_of_val(x, winner, axis_name)
# TODO(mattjj): add a pargmax_p, or add named axis support to lax.argmax_p
def pargmax(x, axis_name):
  """Return the index along ``axis_name`` at which ``x`` attains its maximum."""
  if isinstance(axis_name, (tuple, list)):
    # Fixed: error message previously said "pargmin" (copy-paste from pargmin).
    raise TypeError(f"pargmax only accepts a single axis, got {axis_name}")
  return _axis_index_of_val(x, pmax(x, axis_name), axis_name)
def _axis_index_of_val(x, val, axis_name):
  """Return the smallest index along ``axis_name`` where ``x`` equals ``val``.
  Positions that do not match are filled with the dtype's max value so that
  the subsequent pmin selects the lowest matching index.
  """
  idx = axis_index(axis_name)
  mask = (val == x)
  validx = lax.select(mask,
                      lax.full(mask.shape, idx),
                      lax.full(mask.shape, dtypes.iinfo(dtypes.dtype(idx)).max, dtypes.dtype(idx)))
  return pmin(validx, axis_name)
def _validate_reduce_axis_index_groups(axis_index_groups):
if axis_index_groups is None:
return
axis_space = range(sum(len(group) for group in axis_index_groups))
if {i for g in axis_index_groups for i in g} != set(axis_space):
raise ValueError("axis_index_groups must cover all indices exactly once")
def _canonicalize_axis_index_groups(axis_index_groups):
if axis_index_groups is None:
return
return tuple(map(tuple, axis_index_groups))
def pbroadcast(x, axis_name, source):
  """Perform a collective broadcast and replicate from ``source``.
  This is equivalent to
  ```
  def pbroadcast(x, axis_name, source):
    masked = jnp.where(axis_index(axis_name) == source, x, zeros_like(x))
    return psum(masked, axis_name)
  ```
  but implemented in a hardware optimized way.
  If ``x`` is a pytree then the result is equivalent to mapping this function to
  each leaf in the tree.
  This function is an analog of the CollectiveBroadcast HLO.
  Args:
    x: array(s) with a mapped axis named ``axis_name``.
    axis_name: hashable Python object used to name a pmapped axis (see the
      :func:`jax.pmap` documentation for more details).
    source: int, representing which index into ``axis_name`` that should be copied.
  Returns:
    Array(s) with ``x`` being copied from the ``source`` index slice of ``axis_name``.
  """
  bind_leaf = partial(pbroadcast_p.bind, axis_name=axis_name, source=source)
  return tree_util.tree_map(bind_leaf, x)
def ppermute(x, axis_name, perm):
  """Perform a collective permutation according to the permutation ``perm``.
  If ``x`` is a pytree then the result is equivalent to mapping this function to
  each leaf in the tree.
  This function is an analog of the CollectivePermute HLO.
  Args:
    x: array(s) with a mapped axis named ``axis_name``.
    axis_name: hashable Python object used to name a pmapped axis (see the
      :func:`jax.pmap` documentation for more details).
    perm: list of pairs of ints, representing
      ``(source_index, destination_index)``
      pairs that encode how the mapped axis named ``axis_name`` should be
      shuffled. The integer values are treated as indices into the mapped axis
      ``axis_name``. Any two pairs should not have the same source index or the
      same destination index. For each index of the axis ``axis_name`` that does
      not correspond to a destination index in ``perm``, the corresponding
      values in the result are filled with zeros of the appropriate type.
  Returns:
    Array(s) with the same shape as ``x`` with slices along the axis
    ``axis_name`` gathered from ``x`` according to the permutation ``perm``.
  """
  if not isinstance(axis_name, (list, tuple)):
    axis_name = (axis_name,)
  # Canonicalize to a hashable tuple-of-pairs form for the primitive params.
  canonical_perm = tuple(tuple(pair) for pair in perm)
  bind_leaf = partial(ppermute_p.bind, axis_name=axis_name, perm=canonical_perm)
  return tree_util.tree_map(bind_leaf, x)
def pshuffle(x, axis_name, perm):
  """Convenience wrapper of jax.lax.ppermute with alternate permutation encoding
  If ``x`` is a pytree then the result is equivalent to mapping this function to
  each leaf in the tree.
  Args:
    x: array(s) with a mapped axis named ``axis_name``.
    axis_name: hashable Python object used to name a pmapped axis (see the
      :func:`jax.pmap` documentation for more details).
    perm: list of ints encoding sources for the permutation to be applied to
      the axis named ``axis_name``, so that the output at axis index i
      comes from the input at axis index perm[i]. Every integer in [0, N) should
      be included exactly once for axis size N.
  Returns:
    Array(s) with the same shape as ``x`` with slices along the axis
    ``axis_name`` gathered from ``x`` according to the permutation ``perm``.
  """
  n = len(perm)
  if set(perm) != set(range(n)):
    raise ValueError(f"`perm` does not represent a permutation: {perm}")
  # Translate "output i comes from perm[i]" into (source, destination) pairs.
  return ppermute(x, axis_name, list(zip(perm, range(n))))
def pswapaxes(x, axis_name, axis, *, axis_index_groups=None):
  """Swap the pmapped axis ``axis_name`` with the unmapped axis ``axis``.
  If ``x`` is a pytree then the result is equivalent to mapping this function to
  each leaf in the tree.
  The group size of the mapped axis size must be equal to the size of the
  unmapped axis; that is, we must have
  ``lax.psum(1, axis_name, axis_index_groups=axis_index_groups) == x.shape[axis]``.
  By default, when ``axis_index_groups=None``, this encompasses all the devices.
  This function is a special case of ``all_to_all`` where the pmapped axis of
  the input is placed at the position ``axis`` in the output. That is, it is
  equivalent to ``all_to_all(x, axis_name, axis, axis)``.
  Args:
    x: array(s) with a mapped axis named ``axis_name``.
    axis_name: hashable Python object used to name a pmapped axis (see the
      :func:`jax.pmap` documentation for more details).
    axis: int indicating the unmapped axis of ``x`` to map with the name
      ``axis_name``.
    axis_index_groups: optional list of lists containing axis indices (e.g. for
      an axis of size 4, [[0, 1], [2, 3]] would run pswapaxes over the first
      two and last two replicas). Groups must cover all axis indices exactly
      once, and all groups must be the same size.
  Returns:
    Array(s) with the same shape as ``x``.
  """
  # A swap is an all_to_all with identical split and concat positions.
  split = concat = axis
  return all_to_all(x, axis_name, split, concat, axis_index_groups=axis_index_groups)
def all_to_all(x, axis_name, split_axis, concat_axis, *, axis_index_groups=None, tiled=False):
  """Materialize the mapped axis and map a different axis.
  If ``x`` is a pytree then the result is equivalent to mapping this function to
  each leaf in the tree.
  In the output, the input mapped axis ``axis_name`` is materialized at the
  logical axis position ``concat_axis``, and the input unmapped axis at position
  ``split_axis`` is mapped with the name ``axis_name``.
  The group size of the mapped axis size must be equal to the size of the
  unmapped axis; that is, we must have
  ``lax.psum(1, axis_name, axis_index_groups=axis_index_groups) == x.shape[axis]``.
  By default, when ``axis_index_groups=None``, this encompasses all the devices.
  Args:
    x: array(s) with a mapped axis named ``axis_name``.
    axis_name: hashable Python object used to name a pmapped axis (see the
      :func:`jax.pmap` documentation for more details).
    split_axis: int indicating the unmapped axis of ``x`` to map with the name
      ``axis_name``.
    concat_axis: int indicating the position in the output to materialize the
      mapped axis of the input with the name ``axis_name``.
    axis_index_groups: optional list of lists containing axis indices (e.g. for
      an axis of size 4, [[0, 1], [2, 3]] would run all_to_all over the first
      two and last two replicas). Groups must cover all axis indices exactly
      once, and all groups must be the same size.
    tiled: when True, all_to_all will divide split_axis into chunks and concatenate
      them along concat_axis. In particular, no dimensions are added or removed.
      False by default.
  Returns:
    When tiled is False, array(s) with shape given by the expression::
      np.insert(np.delete(x.shape, split_axis), concat_axis, axis_size)
    where ``axis_size`` is the size of the mapped axis named ``axis_name`` in
    the input ``x``, i.e. ``axis_size = lax.psum(1, axis_name)``.
    Otherwise array with shape similar to the input shape, except with split_axis
    divided by axis size and concat_axis multiplied by axis size.
  """
  axis_index_groups = _canonicalize_axis_index_groups(axis_index_groups)
  def bind(x, split_axis=split_axis, concat_axis=concat_axis):
    # Number of participants in this leaf's exchange (group size).
    group_size = psum(1, axis_name, axis_index_groups=axis_index_groups)
    if tiled:
      if x.shape[split_axis] % group_size != 0:
        raise ValueError(f"The size of all_to_all split_axis ({x.shape[split_axis]}) "
                         f"has to be divisible by the size of the named axis "
                         f"{axis_name} ({group_size})")
    else:
      if group_size != x.shape[split_axis]:
        msg = ("all_to_all requires the size of the mapped axis axis_name to "
               "equal x.shape[split_axis], but they are {} and {} respectively.")
        raise ValueError(msg.format(group_size, x.shape[split_axis]))
      # Untiled form: insert a size-1 axis at concat_axis so the primitive
      # operates tiled-style; the extra axis is squeezed away below. Axis
      # indices must be adjusted depending on which side the insertion lands.
      if split_axis < concat_axis:
        concat_axis += 1  # concat_axis gives a position _after_ split_axis is removed
        x = lax.expand_dims(x, (concat_axis,))  # insert the new axis
      elif split_axis == concat_axis:
        pass
      else:  # concat_axis < split_axis
        x = lax.expand_dims(x, (concat_axis,))  # insert the new axis
        split_axis += 1   # we have a new axis before split_axis now
    result = all_to_all_p.bind(x, split_axis=split_axis, concat_axis=concat_axis,
                               axis_name=axis_name,
                               axis_index_groups=axis_index_groups,
                               tiled=tiled)
    if not tiled and split_axis != concat_axis:
      # Remove the helper size-1 axis introduced above.
      result = lax.squeeze(result, (split_axis,))
    return result
  return tree_util.tree_map(bind, x)
def ragged_all_to_all(operand, output, input_offsets, send_sizes, output_offsets, recv_sizes):
  """Ragged version of :func:`all_to_all`.
  For now, ``split_axis`` and ``concat_axis`` from `all_to_all` are equivalent
  and the outermost (ragged) dimension. ``axis_index_groups`` is default to all
  replicas (e.g. there is only one group and covers all axis indices).
  Ragged arrays are defined by a set of three arrays:
  * ``data``: the ``data`` array is "ragged" along its outermost dimension,
    along which each indexed element has variable size.
  * ``offsets``: the ``offsets`` array indexes the outermost dimension of the
    ``data`` array, and represents the starting offset of each ragged element of
    the ``data`` array.
  * ``sizes``: the ``sizes`` array represents the size of each ragged element of
    the ``data`` array, where the size is specified in units of sub-elements. A
    sub-element is defined as the suffix of the ``data`` array shape obtained by
    removing the outermost "ragged" dimension.
  The ``offsets`` and ``sizes`` arrays must have the same size.
  # Example ragged tensor
  data: [8,3] =  {{a,b,c},{d,e,f},{g,h,i},{j,k,l},{m,n,o},{p,q,r},{s,t,u},{v,w,x}}
  offsets: [3] = {0, 1, 4}
  sizes: [3] = {1, 3, 4}
  # Index 'data' at 'offsets'[0], 'sizes'[0]'
  {a,b,c}
  # Index 'data' at 'offsets'[1], 'sizes'[1]'
  {d,e,f},{g,h,i},{j,k,l}
  # Index 'data' at 'offsets'[2], 'sizes'[2]'
  {m,n,o},{p,q,r},{s,t,u},{v,w,x}
  Args:
    operand: data array with a ragged outermost dimension, holding the data to
      be sent.
    output: output data array, with a ragged outermost dimension, that receives
      the data.
    input_offsets: array of ragged input offsets.
    send_sizes: array of ragged input send sizes.
    output_offsets: array of ragged output offsets.
    recv_sizes: array of ragged output receive sizes.
  Returns:
    array with shape equal to ``output``.
  """
  # NOTE(review): the original Args descriptions were shifted by one relative
  # to the parameter list; they have been realigned above.
  return ragged_all_to_all_p.bind(operand, output, input_offsets, send_sizes,
                                  output_offsets, recv_sizes)
# Primitive for ragged_all_to_all; its rules are presumably registered
# elsewhere in this module (not visible here).
ragged_all_to_all_p = core.Primitive('ragged_all_to_all')
def axis_index(axis_name):
  """Return the index along the mapped axis ``axis_name``.
  Args:
    axis_name: hashable Python object used to name the mapped axis.
  Returns:
    An integer representing the index.
  For example, with 8 XLA devices available:
  >>> from functools import partial
  >>> @partial(jax.pmap, axis_name='i')
  ... def f(_):
  ...   return lax.axis_index('i')
  ...
  >>> f(np.zeros(4))
  Array([0, 1, 2, 3], dtype=int32)
  >>> f(np.zeros(8))
  Array([0, 1, 2, 3, 4, 5, 6, 7], dtype=int32)
  >>> @partial(jax.pmap, axis_name='i')
  ... @partial(jax.pmap, axis_name='j')
  ... def f(_):
  ...   return lax.axis_index('i'), lax.axis_index('j')
  ...
  >>> x, y = f(np.zeros((4, 2)))
  >>> print(x)
  [[0 0]
   [1 1]
   [2 2]
   [3 3]]
  >>> print(y)
  [[0 1]
   [0 1]
   [0 1]
   [0 1]]
  """
  if not isinstance(axis_name, (tuple, list)):
    return axis_index_p.bind(axis_name=axis_name)
  else:
    # Multiple names: compute a row-major flattened index over the axes,
    # with the last name in axis_name varying fastest.
    inner_size = 1
    index = 0
    for name in reversed(axis_name):
      index += axis_index(name) * inner_size
      inner_size *= psum(1, name)  # size of this named axis
    return index
def pgather(src, idx, axes: int | AxisName):
  """Uses the last positional axis of idx to index into src's axes.
  ``axes`` may be a single positional int or named axis, or a sequence of them.
  """
  if not isinstance(axes, (tuple, list)):
    axes = (axes,)
  # TODO: Canonicalize axes!
  return pgather_p.bind(src, idx, axes=tuple(axes))
### parallel primitives
def _names_in_param(pname: str, params: core.ParamDict) -> tuple[str, ...]:
  """Return the axis name(s) stored under ``pname`` in ``params`` as a tuple.
  Used as a ``skippable_batchers`` hook to list the named axes a primitive
  instance refers to.
  """
  axis_names = params[pname]
  if isinstance(axis_names, (tuple, list)):
    return tuple(axis_names)
  else:
    return (axis_names,)
def _constant_reduction(prim, axis_data, args, axes, axis_index_groups):
  """Reduce unmapped (constant) values over the vmapped axis without a collective.
  Reducing a value that is identical across ``axis_data.name`` is just a
  scaling for psum, and the identity for pmin/pmax.
  """
  assert axis_data.name in axes
  if axis_index_groups: raise NotImplementedError
  # First handle any other axes with a real bind of the primitive.
  new_axes = tuple(n for n in axes if n != axis_data.name)
  if new_axes:
    args = prim.bind(*args, axes=new_axes, axis_index_groups=axis_index_groups)
  if prim is psum_p:
    # Summing a constant over an axis of size n multiplies it by n.
    outs = [lax._const(x, axis_data.size) * x for x in args]
  elif prim in (pmin_p, pmax_p):
    # Min/max over identical values is the value itself.
    outs = args
  else:
    raise Exception(f"Unrecognized reducer: {prim}")
  # Outputs carry no batch dimension.
  return outs, [None] * len(outs)
def _reduction_with_positional_batcher(
    prim, vals_in, dims_in, axis_index_groups,
    transform_unmapped, transform_mapped):
  """Bind ``prim`` separately for vmap-mapped and unmapped operands.
  Mapped and unmapped inputs need different ``axes`` parameters, so the inputs
  are partitioned, each partition is transformed by its callback (which returns
  the adjusted axes and values), bound, and the results are merged back into
  the original argument order. All mapped inputs are first moved to dim 0.
  """
  if axis_index_groups is not None:
    raise NotImplementedError("axis_index_groups not supported in vmap collectives. "
                              "Please open a feature request!")
  vals_in = [val if d is batching.not_mapped or d == 0 else _moveaxis(d, 0, val)
             for val, d in zip(vals_in, dims_in)]
  # Bool indexing: index 0 collects mapped values, index 1 (True) unmapped.
  mapped_vals_in, unmapped_vals_in = partitioned_vals_in = [], []
  mapped_idxs, unmapped_idxs = partitioned_idxs = [], []
  for i, (val, d) in enumerate(zip(vals_in, dims_in)):
    partitioned_vals_in[d is batching.not_mapped].append(val)
    partitioned_idxs[d is batching.not_mapped].append(i)
  vals_out = [None] * len(vals_in)
  if unmapped_vals_in:
    unmapped_axes, unmapped_vals_in = transform_unmapped(0, unmapped_vals_in)
    unmapped_vals_out = prim.bind(*unmapped_vals_in, axes=unmapped_axes, axis_index_groups=None)
    for i, val in zip(unmapped_idxs, unmapped_vals_out):
      vals_out[i] = val
  if mapped_vals_in:
    mapped_axes, mapped_vals_in = transform_mapped(0, mapped_vals_in)
    mapped_vals_out = prim.bind(*mapped_vals_in, axes=mapped_axes, axis_index_groups=None)
    for i, val in zip(mapped_idxs, mapped_vals_out):
      vals_out[i] = val
  assert all(v is not None for v in vals_out)
  return vals_out
def _reduction_batcher(prim, vals_in, dims_in, *, axes, axis_index_groups):
  """Batching rule when the vmapped axis is NOT among the reduced named axes.
  Positional axes in ``axes`` must be shifted past the batch dimension for the
  mapped operands; named axes pass through unchanged.
  """
  assert prim.multiple_results
  if not any(isinstance(axis, int) for axis in axes):
    # Only named axes: the batch dim is unaffected by the reduction.
    return prim.bind(*vals_in, axes=axes, axis_index_groups=axis_index_groups), dims_in
  vals_out = _reduction_with_positional_batcher(
      prim, vals_in, dims_in, axis_index_groups,
      lambda d, d_vals_in: (axes, d_vals_in),
      # Shift positional axes at-or-after the batch dim (now at 0) by one.
      lambda d, d_vals_in: (tuple(axis + (axis >= d) if isinstance(axis, int) else axis
                                  for axis in axes),
                            d_vals_in))
  # _reduction_with_positional_batcher moves all map dims to 0
  return vals_out, [d if d is batching.not_mapped else 0 for d in dims_in]
def _batched_reduction_collective(
    prim, if_unmapped, axis_data, vals_in, dims_in, axes,
    axis_index_groups):
  """Batching rule when the vmapped axis MAY be among the reduced named axes.
  ``if_unmapped(v, size)`` gives the reduction result for a value that is
  constant over the axis (e.g. scaling for psum, identity for pmin/pmax).
  """
  assert prim.multiple_results
  if all(d is None for d in dims_in):
    # No operand is batched: either fold the constant reduction or rebind.
    if axis_data.name in axes:
      return _constant_reduction(prim, axis_data, vals_in, axes, axis_index_groups)
    else:
      return prim.bind(*vals_in, axes=axes, axis_index_groups=axis_index_groups), dims_in
  if axis_data.name not in axes:
    return _reduction_batcher(prim, vals_in, dims_in, axes=axes,
                              axis_index_groups=axis_index_groups)
  # Note that we have a choice here. We can either unfuse the reduction into one
  # that handles the batched dims and then another one that handles the rest.
  # Alternatively, we can keep the dimension reduction fused with the rest, but
  # we have to split the primitive into one for unmapped inputs and another
  # one for mapped, because they differ in their `axes` parameter.
  # We choose the second strategy here.
  vals_out = _reduction_with_positional_batcher(
      prim, vals_in, dims_in, axis_index_groups,
      # Unmapped operands: drop the vmapped name and apply if_unmapped instead.
      lambda d, d_vals_in: (tuple(axis for axis in axes if axis != axis_data.name),
                            [if_unmapped(v, axis_data.size) for v in d_vals_in]),
      # Mapped operands: replace the vmapped name by the batch dim position d,
      # shifting other positional axes past it.
      lambda d, d_vals_in: (tuple(axis + (axis >= d) if isinstance(axis, int) else
                                  axis if axis != axis_data.name else
                                  d for axis in axes),
                            d_vals_in))
  return vals_out, [batching.not_mapped] * len(vals_out)
def _replica_groups(axis_env, axis_name, axis_index_groups):
  """Compute replica-id groups for a collective over ``axis_name``, optionally
  refined into the sub-groups described by ``axis_index_groups``."""
  groups = pxla.axis_groups(axis_env, axis_name)
  if axis_index_groups is not None:
    groups = [[group[i] for i in subset]
              for group in groups
              for subset in axis_index_groups]
  return groups
def _replica_groups_hlo(replica_groups: Sequence[Sequence[int]]
                        ) -> ir.DenseElementsAttr:
  """Encode replica groups as a dense int64 attribute for HLO ops."""
  # Uneven replica groups are padded with -1.
  padded_columns = itertools.zip_longest(*replica_groups, fillvalue=-1)
  group_matrix = np.array(list(padded_columns), dtype=np.int64).T
  return ir.DenseIntElementsAttr.get(np.ascontiguousarray(group_matrix))
def _allreduce_impl(prim, pos_reducer, *args, axes, axis_index_groups):
assert axis_index_groups is None
if not all(isinstance(axis, int) for axis in axes):
return dispatch.apply_primitive(prim, *args, axes=axes,
axis_index_groups=axis_index_groups)
assert all(isinstance(axis, int) for axis in axes)
return [pos_reducer(arg, axes) for arg in args]
def _allreduce_effectful_abstract_eval(*args, axes, axis_index_groups):
  """Abstract eval shared by psum/pmax/pmin.
  Positional (int) axes shrink the output shape like an ordinary reduction;
  named axes leave the shape unchanged but contribute a NamedAxisEffect.
  """
  _check_axis_names(axes)
  named_axes = tuple(axis for axis in axes if not isinstance(axis, int))
  pos_axes = tuple(axis for axis in axes if isinstance(axis, int))
  if axis_index_groups is not None:
    if len(pos_axes) != 0:
      raise ValueError(f"axis_index_groups can only be used with reductions over "
                       f"named axes, but got: {axes}")
  if config.sharding_in_types.value:
    # Propagate sharding alongside the reduced shape when enabled.
    out_avals = [
        ShapedArray(lax._reduce_op_shape_rule(arg, axes=pos_axes), arg.dtype,
                    sharding=lax._reduce_op_sharding_rule(arg, axes=pos_axes))
        for arg in args
    ]
  else:
    out_avals = [ShapedArray(lax._reduce_op_shape_rule(arg, axes=pos_axes), arg.dtype)
                 for arg in args]
  return out_avals, {core.NamedAxisEffect(axis) for axis in named_axes}
def _check_axis_names(axes):
  """Raise NameError for any named (non-int) axis not bound in the axis env."""
  env = core.get_axis_env()
  for axis in axes:
    if isinstance(axis, int):
      continue  # positional axes need no environment lookup
    if not env.axis_exists(axis):
      raise NameError(f"unbound axis name: {axis}")
def _allreduce_lowering(prim, pos_fn, ctx, *args, axes, axis_index_groups):
  """Lower psum/pmax/pmin to HLO: positional axes become an ordinary reduce
  (via ``pos_fn``), named axes become an AllReduceOp whose reduction region
  is built from ``prim.bind``.
  """
  if axis_index_groups is not None and ("tpu" in ctx.module_context.platforms):
    len_0 = len(axis_index_groups[0])
    if any(len(g) != len_0 for g in axis_index_groups):
      raise ValueError("axis_index_groups must all be the same size for TPU lowering")
  # Bool indexing: False -> named_axes, True -> positional_axes.
  named_axes, positional_axes = axes_partition = [], []
  for axis in axes:
    axes_partition[isinstance(axis, int)].append(axis)
  if positional_axes:
    reducer = mlir.lower_fun(pos_fn, multiple_results=False)
    def _positional_reduce(aval, arg):
      # Reduce the positional axes locally before any collective.
      aval_out = aval.update(
          shape=np.delete(np.array(aval.shape, dtype=np.int64),
                          positional_axes))
      reducer_ctx = ctx.replace(primitive=None, avals_in=[aval], avals_out=[aval_out])
      out, = reducer(reducer_ctx, arg, axes=tuple(positional_axes))
      return out
    args = map(_positional_reduce, ctx.avals_in, args)
  if not named_axes:
    return args
  replica_groups = _replica_groups_hlo(
      _replica_groups(ctx.module_context.axis_env, named_axes,
                      axis_index_groups))
  axis_context = ctx.module_context.axis_context
  is_spmd = isinstance(axis_context, (SPMDAxisContext, ShardingContext))
  def all_reduce(aval, x):
    if is_spmd:
      # SPMD lowering needs an explicit channel and global device ids.
      channel = ctx.module_context.new_channel()
      other_args = dict(
          channel_handle=hlo.ChannelHandle.get(
              channel, mlir.DEVICE_TO_DEVICE_TYPE),
          use_global_device_ids=ir.BoolAttr.get(True))
    else:
      other_args = {}
    # StableHLO API v8 changed AllReduceOp to variadic operands/results.
    if hlo.get_api_version() < 8:
      op = hlo.AllReduceOp(
          x.type, x, replica_groups=replica_groups, **other_args)
    else:
      op = hlo.AllReduceOp(
          [x.type], [x], replica_groups=replica_groups, **other_args)
    if config.sharding_in_types.value:
      scalar_aval = core.ShapedArray(
          (), aval.dtype, sharding=NamedSharding(aval.sharding.mesh, P()))
    else:
      scalar_aval = core.ShapedArray((), aval.dtype)
    scalar_type = mlir.aval_to_ir_type(scalar_aval)
    # Build the scalar reduction region (e.g. add for psum) from the primitive.
    reducer_block = op.regions[0].blocks.append(scalar_type, scalar_type)
    with ir.InsertionPoint(reducer_block):
      lower_reducer = mlir.lower_fun(prim.bind, multiple_results=False)
      reducer_ctx = ctx.replace(primitive=None,
                                avals_in=[scalar_aval] * 2, avals_out=[scalar_aval])
      out_nodes = lower_reducer(reducer_ctx, *reducer_block.arguments)
      hlo.return_(mlir.flatten_ir_values(out_nodes))
    return op.result
  return [all_reduce(aval, x) for aval, x in zip(ctx.avals_in, args)]
def _psum_transpose_rule(cts, *args, axes, axis_index_groups):
  """Transpose rule for psum.
  Positional axes transpose like an ordinary sum (broadcast the cotangent);
  named axes transpose to another psum (see the comment below).
  """
  # Bool indexing: False -> named_axes, True -> pos_axes.
  named_axes, pos_axes = axes_partition = [], []
  for axis in axes:
    axes_partition[isinstance(axis, int)].append(axis)
  if pos_axes:
    def broadcast_positional(ct, arg):
      assert ad.is_undefined_primal(arg)
      if type(ct) is ad.Zero: return ad.Zero(arg.aval)
      return lax._reduce_sum_transpose_rule(ct, arg, axes=pos_axes)[0]
    cts = map(broadcast_positional, cts, args)
  # We treat psum as psum + pbroadcast, which is why the transpose reduces
  # over the named axes again (unlike for positional axes).
  nonzero_out_cts, treedef = tree_util.tree_flatten(cts)
  nonzero_in_cts = psum_p.bind(*nonzero_out_cts, axes=tuple(named_axes),
                               axis_index_groups=axis_index_groups)
  return tree_util.tree_unflatten(treedef, nonzero_in_cts)
# --- psum primitive: impl, abstract eval, lowering, linear transpose, and
# batching rules. The batcher scales unmapped values by the axis size.
psum_p = core.Primitive('psum')
psum_p.multiple_results = True
psum_p.def_impl(partial(_allreduce_impl, psum_p, lax._reduce_sum))
psum_p.def_effectful_abstract_eval(_allreduce_effectful_abstract_eval)
mlir.register_lowering(
    psum_p, partial(_allreduce_lowering, lax.add_p, lax._reduce_sum))
ad.deflinear2(psum_p, _psum_transpose_rule)
batching.fancy_primitive_batchers[psum_p] = \
    partial(_batched_reduction_collective, psum_p, lambda v, axis_size: axis_size * v)
batching.skippable_batchers[psum_p] = partial(_names_in_param, 'axes')
# --- pmax primitive: like psum, but the max of an unmapped (replicated)
# value is the value itself, hence the identity `if_unmapped`.
pmax_p = core.Primitive('pmax')
pmax_p.multiple_results = True
pmax_p.def_impl(partial(_allreduce_impl, pmax_p, lax._reduce_max))
pmax_p.def_effectful_abstract_eval(_allreduce_effectful_abstract_eval)
mlir.register_lowering(
    pmax_p, partial(_allreduce_lowering, lax.max_p, lax._reduce_max))
batching.fancy_primitive_batchers[pmax_p] = \
    partial(_batched_reduction_collective, pmax_p, lambda v, axis_size: v)
batching.skippable_batchers[pmax_p] = partial(_names_in_param, 'axes')
# --- pmin primitive: symmetric with pmax.
pmin_p = core.Primitive('pmin')
pmin_p.multiple_results = True
pmin_p.def_impl(partial(_allreduce_impl, pmin_p, lax._reduce_min))
pmin_p.def_effectful_abstract_eval(_allreduce_effectful_abstract_eval)
mlir.register_lowering(
    pmin_p, partial(_allreduce_lowering, lax.min_p, lax._reduce_min))
batching.fancy_primitive_batchers[pmin_p] = \
    partial(_batched_reduction_collective, pmin_p, lambda v, axis_size: v)
batching.skippable_batchers[pmin_p] = partial(_names_in_param, 'axes')
def _ppermute_lowering(ctx, x, *, axis_name, perm):
  """Lower ppermute to a CollectivePermuteOp over the computed replica pairs."""
  replica_groups = _replica_groups(ctx.module_context.axis_env, axis_name, None)
  group_size = len(replica_groups[0])
  srcs, dsts = unzip2((src % group_size, dst % group_size) for src, dst in perm)
  if not (len(srcs) == len(set(srcs)) and len(dsts) == len(set(dsts))):
    msg = "ppermute sources and destinations must be unique, got {}."
    raise ValueError(msg.format(perm))
  # Expand the per-group logical permutation into global (source, dest)
  # replica-id pairs, one row per (group, perm entry).
  full_perm = np.zeros((len(replica_groups), len(perm), 2), np.int64)
  for i, grp in enumerate(replica_groups):
    grp = sorted(grp)
    for j, (src, dst) in enumerate(perm):
      full_perm[i, j, 0] = grp[src]
      full_perm[i, j, 1] = grp[dst]
  full_perm = full_perm.reshape((-1, 2))
  axis_context = ctx.module_context.axis_context
  is_manual = (
      isinstance(axis_context, SPMDAxisContext)
      and axis_context.manual_axes
  )
  if is_manual:
    # Manual-sharding lowering requires an explicit channel handle.
    channel = ctx.module_context.new_channel()
    other_args = dict(
        channel_handle=hlo.ChannelHandle.get(channel, mlir.DEVICE_TO_DEVICE_TYPE))
  else:
    other_args = {}
  return hlo.CollectivePermuteOp(
      x, mlir.dense_int_elements(full_perm), **other_args).results
def _ppermute_transpose_rule(t, x, perm, axis_name):
  """The transpose of a permutation is the inverse permutation: swap the
  (source, destination) roles in every pair."""
  sources, destinations = unzip2(perm)
  flipped = list(zip(destinations, sources))
  return [ppermute(t, axis_name=axis_name, perm=flipped)]
def _ppermute_batcher(axis_data, vals_in, dims_in, axis_name, perm):
  """Batching rule for ppermute: when the vmapped axis is the permuted axis,
  apply the permutation as a positional gather along the batch dimension."""
  axis_size, frame_name = axis_data.size, axis_data.name
  (v,), (d,) = vals_in, dims_in
  if not isinstance(axis_name, (tuple, list)):
    axis_name = (axis_name,)
  if axis_data.name not in axis_name:
    # The vmapped axis is not involved; rebind unchanged.
    return ppermute_p.bind(v, perm=perm, axis_name=axis_name), d
  remaining_axes = tuple(axis for axis in axis_name if axis != frame_name)
  if remaining_axes:
    return ppermute_p.bind(v, perm=perm, axis_name=remaining_axes), d
  assert axis_name[0] == frame_name, "ppermute batcher called with a wrong axis!"
  assert len(perm) == axis_size, "Permutation doesn't match the axis size!"
  if d is batching.not_mapped:
    return v, d
  # Invert the (src, dst) pairs into a take-index per output position.
  perm_indices = np.zeros(axis_size, dtype=int)
  for src, dst in perm:
    perm_indices[dst] = src
  return v.take(perm_indices, d), d
def _raise_to_shaped_abstract_eval(x, *, axis_name, **params):
  """Shape-preserving abstract eval; only validates that axis names are bound."""
  _check_axis_names(axis_name)
  return x
# --- ppermute primitive: shape-preserving, linear (transpose is the inverse
# permutation), with a dedicated vmap batcher.
ppermute_p = core.Primitive('ppermute')
ppermute_p.def_abstract_eval(_raise_to_shaped_abstract_eval)
ad.deflinear2(ppermute_p, _ppermute_transpose_rule)
mlir.register_lowering(ppermute_p, _ppermute_lowering)
batching.fancy_primitive_batchers[ppermute_p] = _ppermute_batcher
batching.skippable_batchers[ppermute_p] = partial(_names_in_param, 'axis_name')
def _pbroadcast_transpose_rule(t, x, source, axis_name):
  """Transpose of pbroadcast: sum cotangents and deposit them on the source."""
  on_source = axis_index(axis_name) == source
  total = psum(t, axis_name)
  # Every participant except the source contributes zero.
  return [lax.select(on_source, lax.full_like(t, total), lax.full_like(t, 0))]
def _pbroadcast_batcher(axis_data, vals_in, dims_in, axis_name, source):
  """Batching rule for pbroadcast.

  When the vmapped axis is the collective's axis, the broadcast is
  materialized as a gather that repeats the ``source`` slice along the batch
  dimension; otherwise the primitive is rebound unchanged.
  """
  axis_size = axis_data.size
  (v,), (d,) = vals_in, dims_in
  if not isinstance(axis_name, (tuple, list)):
    axis_name = (axis_name,)
  if axis_data.name not in axis_name:
    # The vmapped axis does not participate in this collective.
    return pbroadcast_p.bind(v, axis_name=axis_name, source=source), d
  remaining_axes = tuple(axis for axis in axis_name if axis != axis_data.name)
  if remaining_axes:
    raise NotImplementedError("pbroadcast batcher only supports a single axis")
  assert axis_name[0] == axis_data.name, "pbroadcast batcher called with a wrong axis!"
  assert 0 <= source < axis_size, "collective broadcast doesn't fit in the axis size!"
  # NOTE: a previous `if axis_size == 1 and remaining_axes:` rebind here was
  # unreachable (remaining_axes is always empty past the check above) and has
  # been removed.
  if d is batching.not_mapped:
    return v, d
  # Repeat the source slice across the whole batch dimension.
  return v.take([source] * axis_size, d), d
def _pbroadcast_lowering(ctx, x, *, axis_name, source):
  """Lower pbroadcast to an HLO CollectiveBroadcastOp.

  Each replica group is reordered so that the broadcasting ``source`` index
  comes first, which is how CollectiveBroadcast identifies the sender.
  """
  replica_groups = _replica_groups(ctx.module_context.axis_env, axis_name, None)
  def source_to_front(group):
    # Rotate the group so the source leads; relative order of the rest kept.
    return [group[source]] + list(group[:source]) + list(group[source + 1:])
  replica_groups = [source_to_front(group) for group in replica_groups]
  channel = ctx.module_context.new_channel()  # NOTE(review): allocated but never passed to the op — possibly vestigial; confirm
  return hlo.CollectiveBroadcastOp(
      x, replica_groups=_replica_groups_hlo(replica_groups)).results
# pbroadcast primitive setup: shape-preserving abstract eval, linear
# transpose rule, HLO lowering, and batching registration.
pbroadcast_p = core.Primitive('pbroadcast')
pbroadcast_p.def_abstract_eval(_raise_to_shaped_abstract_eval)
ad.deflinear2(pbroadcast_p, _pbroadcast_transpose_rule)
mlir.register_lowering(pbroadcast_p, _pbroadcast_lowering)
batching.fancy_primitive_batchers[pbroadcast_p] = _pbroadcast_batcher
# Skip batching when the vmapped axis is absent from 'axis_name'.
batching.skippable_batchers[pbroadcast_p] = partial(_names_in_param, 'axis_name')
def _moveaxis(src, dst, x):
perm = [i for i in range(x.ndim) if i != src]
perm.insert(dst, src)
return lax.transpose(x, perm)
def _splitaxis(axis, factor, x):
new_shape = list(x.shape)
assert new_shape[axis] % factor == 0, (new_shape[axis], factor)
new_shape[axis:axis+1] = [factor, new_shape[axis] // factor]
return x.reshape(new_shape)
def _foldaxis(axis, x):
new_shape = list(x.shape)
new_shape[axis:axis+2] = [x.shape[axis] * x.shape[axis + 1]]
return x.reshape(new_shape)
def _all_to_all_lowering(
    ctx, x, *, split_axis, concat_axis, axis_name, axis_index_groups, tiled
):
  """Lower all_to_all to an HLO AllToAllOp.

  Splits ``x`` along ``split_axis`` across the participants of ``axis_name``
  and concatenates the received chunks along ``concat_axis``.
  """
  del tiled  # expand_dims and squeeze is done in `all_to_all` if `True`
  # Workaround for AllToAll not being implemented on CPU.
  replica_groups = _replica_groups(ctx.module_context.axis_env, axis_name,
                                   axis_index_groups)
  if len(replica_groups[0]) == 1:
    # Single-participant groups: the exchange is the identity.
    return [x]
  split_count = len(replica_groups[0])
  if not all(split_count == len(g) for g in replica_groups):
    raise ValueError('Replica groups must be equally sized')
  is_spmd = isinstance(
      ctx.module_context.axis_context,
      (SPMDAxisContext, ShardingContext),
  )
  if is_spmd:
    # We want to emit the all-gather with global device IDs and a unique
    # channel ID, as otherwise it interprets the devices as replicas instead
    # of partitions - and XLA is configured with only a single replica.
    channel = ctx.module_context.new_channel()
    channel_handle = hlo.ChannelHandle.get(channel, mlir.DEVICE_TO_DEVICE_TYPE)
    other_args = dict(channel_handle=channel_handle)
  else:
    other_args = {}
  # Older StableHLO API versions take the operand directly; newer versions
  # take a list of operands.
  if hlo.get_api_version() < 8:
    return hlo.AllToAllOp(
        x,
        split_dimension=mlir.i64_attr(split_axis),
        concat_dimension=mlir.i64_attr(concat_axis),
        split_count=mlir.i64_attr(split_count),
        replica_groups=_replica_groups_hlo(replica_groups),
        **other_args).results
  return hlo.AllToAllOp(
      [x],
      split_dimension=mlir.i64_attr(split_axis),
      concat_dimension=mlir.i64_attr(concat_axis),
      split_count=mlir.i64_attr(split_count),
      replica_groups=_replica_groups_hlo(replica_groups),
      **other_args).results
def _all_to_all_transpose_rule(
    cts, x, axis_name, split_axis, concat_axis, axis_index_groups, tiled
):
  """Transpose of all_to_all: the same collective with split/concat swapped."""
  transposed = all_to_all(
      cts,
      axis_name=axis_name,
      split_axis=concat_axis,
      concat_axis=split_axis,
      axis_index_groups=axis_index_groups,
      tiled=tiled)
  return (transposed,)
def _all_to_all_batcher(vals_in, dims_in, *, axis_name, split_axis, concat_axis,
                        axis_index_groups, tiled):
  """Batching rule when the vmapped dim is unrelated: shift axes past it."""
  (x,), (d,) = vals_in, dims_in
  # Positional axes at or after the batch dim move up by one.
  new_split = split_axis + (d <= split_axis)
  new_concat = concat_axis + (d <= concat_axis)
  out = all_to_all_p.bind(
      x,
      axis_name=axis_name,
      split_axis=new_split,
      concat_axis=new_concat,
      axis_index_groups=axis_index_groups,
      tiled=tiled,
  )
  return out, d
def _all_to_all_batched_collective(axis_data, vals_in, dims_in,
                                   axis_name, split_axis, concat_axis,
                                   axis_index_groups, tiled):
  """Batching rule for all_to_all when the vmapped axis IS one of the
  collective's named axes: emulate the exchange with reshapes/transposes,
  recursing over any remaining (major/minor) named axes."""
  axis_size, frame_name = axis_data.size, axis_data.name
  if axis_index_groups is not None:
    raise NotImplementedError("Please open a feature request!")
  if isinstance(axis_name, (list, tuple)):
    axes_names = axis_name
  else:
    axes_names = [axis_name]
  if axis_data.name not in axes_names:
    # The vmapped axis is unrelated: defer to the positional batcher.
    return _all_to_all_batcher(
      vals_in, dims_in, axis_name=axis_name, split_axis=split_axis,
      concat_axis=concat_axis, axis_index_groups=axis_index_groups, tiled=tiled)
  x, = vals_in
  d, = dims_in
  if d is batching.not_mapped:
    # TODO(sharadmv,apaszke): Remove this broadcast that comes from
    # all_gather_transpose and instead avoid using all_to_all in
    # all_gather_transpose.
    x = lax.broadcast(x, (axis_size, *x.shape))
    d = 0
  # Split the named axes into those before and after the vmapped frame.
  if isinstance(axis_name, (list, tuple)):
    pos = axis_name.index(frame_name)
    major_axes, minor_axes = axis_name[:pos], axis_name[pos + 1:]
  else:
    major_axes, minor_axes = (), ()
  # Optimized case when no splitting is necessary
  if not major_axes and not minor_axes:
    if split_axis == concat_axis:
      axis = split_axis + (d <= split_axis)
      d_pre_split = d
      x = _splitaxis(axis, axis_size, x)
      d += (axis <= d)
      return _foldaxis(axis, moveaxis(x, (d, axis), (axis, d))), d_pre_split
    else:
      x_concat = _foldaxis(concat_axis, _moveaxis(d, concat_axis, x))
      return _splitaxis(split_axis, axis_size, x_concat), split_axis
  # Here we have to handle either the major or the minor dimensions
  # We will be accumulating chunks into the three leading dims: [Major, Current, Minor, ...]
  x, d = lax.expand_dims(_moveaxis(d, 0, x), (0, 2)), 1
  split_axis += 3; concat_axis += 3  # Offset by extra three leading dims
  if major_axes:
    x = all_to_all_p.bind(x, axis_name=major_axes,
                          split_axis=split_axis, concat_axis=0,
                          axis_index_groups=axis_index_groups,
                          tiled=tiled)
  # Split out the local part into axis new_d (NOTE: d is already in axis 1)
  x = _splitaxis(split_axis, axis_size, x)
  new_d = split_axis
  concat_axis += (split_axis <= concat_axis)  # Offset the existing axes by the new batch axis
  split_axis += 1
  if minor_axes:
    x = all_to_all_p.bind(x, axis_name=minor_axes,
                          split_axis=split_axis, concat_axis=2,
                          axis_index_groups=axis_index_groups,
                          tiled=tiled)
  # Fold the chunk axes into a single one
  x = _foldaxis(0, _foldaxis(0, x))
  split_axis -= 2; concat_axis -= 2; new_d -= 2
  # Fold gathered axes into concat_axis
  x = _foldaxis(concat_axis - 1, _moveaxis(0, concat_axis - 1, x))
  new_d -= 1  # We've removed 0th dimension, so new_d needs to be adjusted
  return x, new_d
def _all_to_all_effectful_abstract_eval(
    input_aval, axis_name, split_axis, concat_axis, axis_index_groups, tiled
):
  """Abstract eval: shrink split_axis by the axis size and grow concat_axis."""
  del tiled  # expand_dims and squeeze is done in `all_to_all` if `True`
  names = axis_name if isinstance(axis_name, (list, tuple)) else (axis_name,)
  _check_axis_names(names)
  if axis_index_groups is None:
    axis_size = psum(1, names)
  else:
    axis_size = len(axis_index_groups[0])
  shape = list(input_aval.shape)
  assert shape[split_axis] % axis_size == 0, (shape[split_axis], axis_size)
  shape[split_axis] //= axis_size
  shape[concat_axis] *= axis_size
  out_aval = input_aval.update(shape=tuple(shape), weak_type=False)
  return out_aval, {*map(core.NamedAxisEffect, names)}
# all_to_all primitive setup: effectful abstract eval (carries named-axis
# effects), HLO lowering, linear transpose, and collective-aware batching.
all_to_all_p = core.Primitive('all_to_all')
all_to_all_p.def_effectful_abstract_eval(_all_to_all_effectful_abstract_eval)
mlir.register_lowering(all_to_all_p, _all_to_all_lowering)
ad.deflinear2(all_to_all_p, _all_to_all_transpose_rule)
batching.fancy_primitive_batchers[all_to_all_p] = _all_to_all_batched_collective
# Skip batching when the vmapped axis is absent from 'axis_name'.
batching.skippable_batchers[all_to_all_p] = partial(_names_in_param, 'axis_name')
def _ragged_all_to_all_lowering(ctx, operand, output, input_offsets, send_sizes, output_offsets, recv_sizes):
  """Lower ragged_all_to_all to a 'ragged_all_to_all' custom call.

  All devices 0..N-1 form a single replica group, where N is the leading
  dimension of ``input_offsets``.
  """
  N = input_offsets.type.shape[0]
  backend_config = ir.DictAttr.get({
      'replica_groups': ir.DenseIntElementsAttr.get(
          np.arange(0, N, 1, dtype=np.int64), shape=[1, N]
      )
  })
  return hlo.CustomCallOp(
      result=[output.type],
      # `output` is passed as an input too: the op updates it in place.
      inputs=[operand, output, input_offsets, send_sizes, output_offsets,
              recv_sizes],
      call_target_name=ir.StringAttr.get('ragged_all_to_all'),
      backend_config=backend_config,
      # NOTE(review): api_version=4 — presumably the typed-FFI custom-call
      # version; confirm against the XLA custom-call documentation.
      api_version=ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 4),
  ).results
@ragged_all_to_all_p.def_abstract_eval
def _ragged_all_to_all_abstract_eval(operand, output, input_offsets, send_sizes, output_offsets, recv_sizes):
  """Validate the operand/output/metadata avals and return the output aval.

  The four metadata arrays must all be rank-1 integer arrays with a positive
  dimension size, and operand/output must have identical shapes.
  """
  if operand.shape != output.shape:
    raise ValueError('ragged_all_to_all input and output shapes must be equal.')
  metadata = (('input_offsets', input_offsets),
              ('send_sizes', send_sizes),
              ('output_offsets', output_offsets),
              ('recv_sizes', recv_sizes))
  # Dtype checks first, then rank/size checks, matching the original order.
  for name, aval in metadata:
    if not dtypes.issubdtype(aval.dtype, np.integer):
      raise ValueError(f"ragged_all_to_all {name} must be integer type.")
  for name, aval in metadata:
    if len(aval.shape) != 1 or aval.shape[0] < 1:
      raise ValueError(
          f"ragged_all_to_all {name} must be rank 1 with positive dimension"
          f" size, but got shape {aval.shape}"
      )
  return output.update(
      shape=list(output.shape),
      dtype=output.dtype,
      weak_type=output.weak_type,
  )
# Eager evaluation dispatches through XLA; lowering emits the custom call.
ragged_all_to_all_p.def_impl(partial(dispatch.apply_primitive, ragged_all_to_all_p))
mlir.register_lowering(ragged_all_to_all_p, _ragged_all_to_all_lowering)
def all_gather(x, axis_name, *, axis_index_groups=None, axis=0, tiled=False):
  """Gather values of x across all replicas.
  If ``x`` is a pytree then the result is equivalent to mapping this function to
  each leaf in the tree.
  This is equivalent to, but faster than, all_to_all(broadcast(x)).
  Args:
    x: array(s) with a mapped axis named ``axis_name``.
    axis_name: hashable Python object used to name a pmapped axis (see the
      :func:`jax.pmap` documentation for more details).
    axis_index_groups: optional list of lists containing axis indices (e.g. for
      an axis of size 4, [[0, 1], [2, 3]] would run all gather over the first
      two and last two replicas). Groups must cover all axis indices exactly
      once, and all groups must be the same size.
    axis: a positional axis into which the chunks along ``axis_name`` will be
      concatenated.
    tiled: when ``False``, the chunks will be stacked into a fresh positional
      axis at index ``axis`` in the output. When ``True``, ``axis`` has to
      refer to an existing positional dimension and the chunks will be
      concatenated into that dimension.
  Returns:
    Array(s) representing the result of an all-gather along the axis
    ``axis_name``. Shapes are the same as ``x.shape``, but:
    - when ``tiled`` is ``False``, there is a new dimension equal to the
      size of axis ``axis_name`` in position ``axis``,
    - when ``tiled`` is ``True``, the size of dimension in position ``axis``
      is multiplied by the size of axis ``axis_name``.
  For example, with 4 XLA devices available:
  >>> x = np.arange(4)
  >>> y = jax.pmap(lambda x: jax.lax.all_gather(x, 'i'), axis_name='i')(x)
  >>> print(y)
  [[0 1 2 3]
   [0 1 2 3]
   [0 1 2 3]
   [0 1 2 3]]
  An example of using axis_index_groups, groups split by even & odd device ids:
  >>> x = np.arange(16).reshape(4, 4)
  >>> print(x)
  [[ 0  1  2  3]
   [ 4  5  6  7]
   [ 8  9 10 11]
   [12 13 14 15]]
  >>> def f(x):
  ...   return jax.lax.all_gather(
  ...       x, 'i', axis_index_groups=[[0, 2], [3, 1]])
  >>> y = jax.pmap(f, axis_name='i')(x)
  >>> print(y)
  [[[ 0  1  2  3]
    [ 8  9 10 11]]
   [[12 13 14 15]
    [ 4  5  6  7]]
   [[ 0  1  2  3]
    [ 8  9 10 11]]
   [[12 13 14 15]
    [ 4  5  6  7]]]
  """
  if not isinstance(axis_name, tuple):
    axis_name = axis_name,
  axis_index_groups = _canonicalize_axis_index_groups(axis_index_groups)
  # Axis size is computed via a psum of ones so that it respects the groups.
  axis_size = psum(1, axis_name, axis_index_groups=axis_index_groups)
  def bind(leaf):
    # Untiled output has one extra dimension, so canonicalize against ndim+1.
    return all_gather_p.bind(
        leaf,
        all_gather_dimension=canonicalize_axis(
            axis, np.ndim(leaf) if tiled else np.ndim(leaf) + 1),
        axis_name=axis_name, axis_index_groups=axis_index_groups,
        axis_size=int(axis_size), tiled=tiled)
  return tree_util.tree_map(bind, x)
def _all_gather_impl(x, *, all_gather_dimension, axis_name, axis_index_groups, axis_size, tiled):
raise AssertionError("Unexpected call to _all_gather_impl")
def _all_gather_lowering(ctx, x, *, all_gather_dimension, axis_name,
                         axis_index_groups, axis_size, tiled,
                         platform=None):
  """Lower all_gather to an HLO AllGatherOp.

  For untiled gathers a unit dimension is first inserted at
  ``all_gather_dimension`` so that AllGather's concatenation produces the
  stacked result.
  """
  x_aval, = ctx.avals_in
  out_aval, = ctx.avals_out
  axis_context = ctx.module_context.axis_context
  is_spmd = isinstance(axis_context, (SPMDAxisContext, ShardingContext))
  if not tiled:
    # Insert a length-1 dimension for AllGather to concatenate into.
    new_shape = list(x_aval.shape)
    new_shape.insert(all_gather_dimension, 1)
    broadcast_dimensions = [i for i in range(len(new_shape)) if i != all_gather_dimension]
    x = hlo.broadcast_in_dim(
        mlir.aval_to_ir_type(x_aval.update(shape=new_shape)), x,
        mlir.dense_int_array(broadcast_dimensions))
  replica_groups = _replica_groups(ctx.module_context.axis_env, axis_name,
                                   axis_index_groups)
  if is_spmd:
    # We want to emit the all-gather with global device IDs and a unique
    # channel ID, as otherwise it interprets the devices as replicas instead
    # of partitions - and XLA is configured with only a single replica.
    channel = ctx.module_context.new_channel()
    other_args = dict(
        channel_handle=hlo.ChannelHandle.get(
            channel, mlir.DEVICE_TO_DEVICE_TYPE),
        use_global_device_ids=ir.BoolAttr.get(True))
  else:
    other_args = {}
  # Older StableHLO API versions take the operand/result directly; newer
  # versions take lists.
  if hlo.get_api_version() < 8:
    return hlo.AllGatherOp(
        mlir.aval_to_ir_type(out_aval),
        x, all_gather_dim=mlir.i64_attr(all_gather_dimension),
        replica_groups=_replica_groups_hlo(replica_groups),
        **other_args).results
  return hlo.AllGatherOp(
      [mlir.aval_to_ir_type(out_aval)],
      [x], all_gather_dim=mlir.i64_attr(all_gather_dimension),
      replica_groups=_replica_groups_hlo(replica_groups),
      **other_args).results
def _all_gather_effectful_abstract_eval(
    x_aval, *, all_gather_dimension, axis_name, axis_index_groups, axis_size, tiled
):
  """Abstract eval: grow (tiled) or insert (untiled) the gather dimension."""
  names = axis_name if isinstance(axis_name, (list, tuple)) else (axis_name,)
  _check_axis_names(names)
  out_shape = list(x_aval.shape)
  if tiled:
    out_shape[all_gather_dimension] *= axis_size
  else:
    out_shape.insert(all_gather_dimension, axis_size)
  effects = {*map(core.NamedAxisEffect, names)}
  return x_aval.update(shape=out_shape), effects
def _all_gather_transpose_rule(cts, x, *, all_gather_dimension, axis_name, axis_index_groups, axis_size, tiled):
  """Transpose of all_gather is psum_scatter over the gathered dimension."""
  scattered = psum_scatter(cts,
                           axis_name=axis_name,
                           scatter_dimension=all_gather_dimension,
                           axis_index_groups=axis_index_groups,
                           tiled=tiled)
  return (scattered,)
# TODO(sharadmv,apaszke): re-enable this when we can properly detect replication.
# return (lax.dynamic_index_in_dim(cts, idx, axis=all_gather_dimension, keepdims=False) * axis_size,)
def _all_gather_batcher(vals_in, dims_in, *, all_gather_dimension, axis_name, axis_index_groups, axis_size, tiled):
  """Batching rule when the vmapped dim is unrelated to the collective."""
  (x,), (d,) = vals_in, dims_in
  gather_dim = all_gather_dimension
  if d is not batching.not_mapped:
    if d <= gather_dim:
      # The batch dim precedes the gather dim: shift the gather dim right.
      gather_dim += 1
    elif not tiled:
      # Untiled gather inserts a fresh dim before d, pushing d right;
      # tiled all-gather doesn't modify the set of dimensions.
      d += 1
  out = all_gather_p.bind(
      x,
      all_gather_dimension=gather_dim,
      axis_name=axis_name,
      axis_index_groups=axis_index_groups,
      axis_size=axis_size,
      tiled=tiled)
  return out, d
def _all_gather_batched_collective(axis_data, vals_in, dims_in,
                                   all_gather_dimension, axis_name,
                                   axis_index_groups, axis_size, tiled):
  """Batching rule for all_gather when the vmapped axis IS the collective
  axis: the gather is realized locally as a broadcast or axis move."""
  frame_size, frame_name = axis_data.size, axis_data.name
  if frame_name not in axis_name:
    # Unrelated vmapped axis: defer to the positional batcher.
    return _all_gather_batcher(
        vals_in, dims_in, all_gather_dimension=all_gather_dimension,
        axis_name=axis_name, axis_index_groups=axis_index_groups,
        axis_size=axis_size, tiled=tiled)
  if axis_index_groups is not None:
    raise NotImplementedError("axis_index_groups not supported in vmap")
  assert axis_size == frame_size, "axis size doesn't match"
  if not isinstance(axis_name, tuple):
    axis_name = (axis_name,)
  if len(axis_name) > 1:
    raise NotImplementedError("Please open a feature request!")
  assert axis_name == (frame_name,), "batcher called with wrong axis name"
  (x,), (d,) = vals_in, dims_in
  if d is batching.not_mapped:
    # Unmapped operand: every participant holds the same value, so the
    # gather is a broadcast into the gather dimension.
    out_shape = list(np.shape(x))
    out_shape.insert(all_gather_dimension, axis_size)
    broadcast_dims = [i for i in range(len(out_shape)) if i != all_gather_dimension]
    y = lax.broadcast_in_dim(x, out_shape, broadcast_dims)
  else:
    # Mapped operand: the batch dim already carries the per-participant
    # values; just move it into position.
    y = _moveaxis(d, all_gather_dimension, x)
  if tiled:
    y = _foldaxis(all_gather_dimension, y)
  return y, batching.not_mapped
# all_gather primitive setup: effectful abstract eval, impl stub, generic
# lowering plus per-platform registrations (the rule is shared; only the
# platform name differs), linear transpose, and collective-aware batching.
all_gather_p = core.Primitive('all_gather')
all_gather_p.def_effectful_abstract_eval(_all_gather_effectful_abstract_eval)
all_gather_p.def_impl(_all_gather_impl)
mlir.register_lowering(all_gather_p, _all_gather_lowering)
for p in ("cuda", "rocm", "tpu"):
  mlir.register_lowering(all_gather_p,
                         partial(_all_gather_lowering, platform=p),
                         platform=p)
ad.deflinear2(all_gather_p, _all_gather_transpose_rule)
batching.fancy_primitive_batchers[all_gather_p] = _all_gather_batched_collective
batching.skippable_batchers[all_gather_p] = partial(_names_in_param, 'axis_name')
def _reduce_scatter_lowering(
    prim, ctx, x,
    *, scatter_dimension, axis_name,
    axis_index_groups, axis_size, tiled):
  """Lower reduce_scatter to an HLO ReduceScatterOp.

  ``prim`` is the scalar reduction primitive (e.g. lax.add_p); its lowering
  populates the op's reducer region.  Untiled results additionally squeeze
  the scatter dimension with a reshape.
  """
  x_aval, = ctx.avals_in
  aval_out, = ctx.avals_out
  scalar_aval = x_aval.update(shape=())
  replica_groups = _replica_groups(ctx.module_context.axis_env, axis_name,
                                   axis_index_groups)
  scatter_out_shape = list(x_aval.shape)
  scatter_out_shape[scatter_dimension] //= axis_size
  axis_context = ctx.module_context.axis_context
  is_spmd = isinstance(
      axis_context,
      (SPMDAxisContext, ShardingContext),
  )
  if is_spmd:
    # We want to emit the all-gather with global device IDs and a unique
    # channel ID, as otherwise it interprets the devices as replicas instead
    # of partitions - and XLA is configured with only a single replica.
    channel = ctx.module_context.new_channel()
    other_args = dict(
        channel_handle=hlo.ChannelHandle.get(
            channel, mlir.DEVICE_TO_DEVICE_TYPE),
        use_global_device_ids=ir.BoolAttr.get(True))
  else:
    other_args = {}
  op = hlo.ReduceScatterOp(
      mlir.aval_to_ir_type(x_aval.update(shape=scatter_out_shape)),
      x,
      scatter_dimension=mlir.i64_attr(scatter_dimension),
      replica_groups=_replica_groups_hlo(replica_groups),
      **other_args)
  # Build the scalar reducer region from the reduction primitive's lowering.
  scalar_type = mlir.aval_to_ir_type(scalar_aval)
  reducer_block = op.regions[0].blocks.append(scalar_type, scalar_type)
  with ir.InsertionPoint(reducer_block):
    lower_reducer = mlir.lower_fun(prim.bind, multiple_results=False)
    reducer_ctx = ctx.replace(primitive=None,
                              avals_in=[scalar_aval] * 2,
                              avals_out=[scalar_aval])
    out_nodes = lower_reducer(reducer_ctx, *reducer_block.arguments)
    hlo.return_(mlir.flatten_ir_values(out_nodes))
  if tiled:
    return op.results
  else:
    # Untiled: squeeze out the now-unit scatter dimension.
    return [hlo.reshape(mlir.aval_to_ir_type(aval_out), op.result)]
def _reduce_scatter_effectful_abstract_eval(
    x_aval, *, axis_name, scatter_dimension, axis_index_groups, axis_size, tiled
):
  """Abstract eval: divide (tiled) or remove (untiled) the scatter dim."""
  names = axis_name if isinstance(axis_name, (list, tuple)) else (axis_name,)
  _check_axis_names(names)
  out_shape = list(x_aval.shape)
  dim_size = x_aval.shape[scatter_dimension]
  if tiled:
    if dim_size % axis_size != 0:
      raise ValueError(f"tiled reduce_scatter operand scatter dimension size "
                       f"{dim_size} must be divisible by "
                       f"shard_count {axis_size}")
    out_shape[scatter_dimension] = dim_size // axis_size
  else:
    if dim_size != axis_size:
      raise ValueError(f"reduce_scatter operand scatter dimension size "
                       f"{dim_size} must match shard count "
                       f"{axis_size}")
    del out_shape[scatter_dimension]
  return x_aval.update(shape=out_shape), {*map(core.NamedAxisEffect, names)}
def _reduce_scatter_transpose_rule(cts, x, *, axis_name, scatter_dimension,
                                   axis_index_groups, axis_size, tiled):
  """Transpose of reduce_scatter is all_gather along the scatter dimension."""
  gathered = all_gather(cts,
                        axis_name=axis_name,
                        axis_index_groups=axis_index_groups,
                        axis=scatter_dimension,
                        tiled=tiled)
  return (gathered,)
def _reduce_scatter_batcher(vals_in, dims_in, *, scatter_dimension, axis_name,
                            axis_index_groups, axis_size, tiled):
  """Batching rule when the vmapped dim is unrelated to the collective."""
  (x,), (d,) = vals_in, dims_in
  new_scatter = scatter_dimension
  if d <= new_scatter:
    # Batch dim precedes the scatter dim: shift the scatter dim right.
    new_scatter += 1
  elif not tiled:
    # Tiled all-scatter doesn't change the rank.
    d += 1
  out = reduce_scatter_p.bind(
      x,
      scatter_dimension=new_scatter,
      axis_name=axis_name,
      axis_index_groups=axis_index_groups,
      axis_size=axis_size,
      tiled=tiled)
  return out, d
def _reduce_scatter_collective(axis_data, vals_in, dims_in,
                               scatter_dimension, axis_name,
                               axis_index_groups, axis_size, tiled):
  """Batching rule for reduce_scatter when the vmapped axis IS the collective
  axis: the psum part is computed locally over the batch dimension."""
  frame_size, frame_name = axis_data.size, axis_data.name
  if frame_name not in axis_name:
    # Unrelated vmapped axis: defer to the positional batcher.
    return _reduce_scatter_batcher(
        vals_in, dims_in, scatter_dimension=scatter_dimension,
        axis_name=axis_name, axis_index_groups=axis_index_groups,
        axis_size=axis_size, tiled=tiled)
  if axis_index_groups is not None:
    raise NotImplementedError("axis_index_groups not supported in vmap")
  assert axis_size == frame_size, "axis size doesn't match"
  if not isinstance(axis_name, tuple):
    axis_name = (axis_name,)
  if len(axis_name) > 1:
    raise NotImplementedError("Please open a feature request!")
  assert axis_name == (frame_name,), "batcher called with wrong axis name"
  (x,), (d,) = vals_in, dims_in
  if d is batching.not_mapped:
    # Unmapped operand: all participants hold the same value, so the sum is
    # a scale by the axis size.
    y, dy = x * axis_size, scatter_dimension
  else:
    # Mapped operand: sum over the batch dimension.
    y, dy = lax.reduce(x, 0., lax.add, (d,)), scatter_dimension
  if tiled:
    y = _splitaxis(dy, axis_size, y)
  return y, dy
# reduce_scatter primitive setup: effectful abstract eval, linear transpose,
# collective-aware batching, and a lowering specialized to the add reduction.
reduce_scatter_p = core.Primitive("reduce_scatter")
reduce_scatter_p.def_effectful_abstract_eval(
    _reduce_scatter_effectful_abstract_eval
)
ad.deflinear2(reduce_scatter_p, _reduce_scatter_transpose_rule)
batching.fancy_primitive_batchers[reduce_scatter_p] = _reduce_scatter_collective
batching.skippable_batchers[reduce_scatter_p] = partial(_names_in_param, 'axis_name')
mlir.register_lowering(reduce_scatter_p,
                       partial(_reduce_scatter_lowering, lax.add_p))
def psum_scatter(x, axis_name, *, scatter_dimension=0, axis_index_groups=None,
                 tiled=False):
  """
  Like ``psum(x, axis_name)`` but each device retains only part of the result.
  For example, ``psum_scatter(x, axis_name, scatter_dimension=0, tiled=False)``
  computes the same value as ``psum(x, axis_name)[axis_index(axis_name)]``, but
  it is more efficient. Thus the ``psum`` result is left scattered along the
  mapped axis.
  One efficient algorithm for computing ``psum(x, axis_name)`` is to perform a
  ``psum_scatter`` followed by an ``all_gather``, essentially evaluating
  ``all_gather(psum_scatter(x, axis_name))``. So we can think of
  ``psum_scatter`` as "the first half" of a ``psum``.
  Args:
    x: array(s) with a mapped axis named ``axis_name``.
    axis_name: hashable Python object used to name a mapped axis (see the
      :func:`jax.pmap` documentation for more details).
    scatter_dimension: a positional axis into which the all-reduce result along
      ``axis_name`` will be scattered.
    axis_index_groups: optional list of lists of integers containing axis
      indices. For example, for an axis of size 4,
      ``axis_index_groups=[[0, 1], [2, 3]]`` would run reduce-scatter over the
      first two and the last two axis indices. Groups must cover all axis
      indices exactly once, and all groups must be the same size.
    tiled: boolean representing whether to use rank-preserving 'tiled' behavior.
      When ``False`` (the default value), the size of dimension in
      ``scatter_dimension`` must match the size of axis ``axis_name`` (or the
      group size if ``axis_index_groups`` is given). After scattering the
      all-reduce result along ``scatter_dimension``, the output is squeezed by
      removing ``scatter_dimension``, so the result has lower rank than the
      input. When ``True``, the size of dimension in ``scatter_dimension`` must
      be divisible by the size of axis ``axis_name`` (or the group size if
      ``axis_index_groups`` is given), and the ``scatter_dimension`` axis is
      preserved (so the result has the same rank as the input).
  Returns:
    Array(s) with the similar shape as ``x``, except the size of dimension in
    position ``scatter_dimension`` is divided by the size of axis ``axis_name``
    (when ``tiled=True``), or the dimension in position ``scatter_dimension`` is
    eliminated (when ``tiled=False``).
  For example, with 4 XLA devices available:
  >>> x = np.arange(16).reshape(4, 4)
  >>> print(x)
  [[ 0  1  2  3]
   [ 4  5  6  7]
   [ 8  9 10 11]
   [12 13 14 15]]
  >>> y = jax.pmap(lambda x: jax.lax.psum_scatter(x, 'i'), axis_name='i')(x)
  >>> print(y)
  [24 28 32 36]
  if using tiled:
  >>> y = jax.pmap(lambda x: jax.lax.psum_scatter(x, 'i', tiled=True), axis_name='i')(x)
  >>> print(y)
  [[24]
   [28]
   [32]
   [36]]
  An example of using axis_index_groups:
  >>> def f(x):
  ...   return jax.lax.psum_scatter(
  ...       x, 'i', axis_index_groups=[[0, 2], [3, 1]], tiled=True)
  >>> y = jax.pmap(f, axis_name='i')(x)
  >>> print(y)
  [[ 8 10]
   [20 22]
   [12 14]
   [16 18]]
  """
  if not isinstance(axis_name, tuple):
    axis_name = axis_name,
  # NOTE(review): unlike all_gather, the axis size here is computed before
  # the groups are canonicalized — confirm the two orderings are equivalent.
  axis_size = psum(1, axis_name, axis_index_groups=axis_index_groups)
  axis_index_groups = _canonicalize_axis_index_groups(axis_index_groups)
  bind = partial(
      reduce_scatter_p.bind,
      axis_name=axis_name,
      scatter_dimension=scatter_dimension,
      axis_index_groups=axis_index_groups,
      axis_size=axis_size,
      tiled=tiled)
  return tree_util.tree_map(bind, x)
def _build_axis_index_lowering_hlo(ctx, axis_name, axis_env):
  """Emit HLO computing this device's index along ``axis_name``.

  The index is recovered from the flat device id as
  ``(device_id // prod(sizes of later axes)) % size(axis)``, converted to a
  signless i32 scalar.
  """
  if isinstance(axis_name, tuple):
    assert axis_name, 'empty axis name'
    if len(axis_name) > 1:
      raise NotImplementedError(
          '`axis_index` translation rule does not support multiple axis names.')
    axis_name, = axis_name
  if axis_name not in axis_env.names:
    raise NameError(f"unbound axis name: {axis_name}")
  axis_pos = list(axis_env.names).index(axis_name)
  nreplicas = axis_env.nreps // math.prod(axis_env.sizes)
  # Divisor: number of devices that share one index value along this axis.
  div = mlir.ir_constant(
      np.array(
          nreplicas * math.prod(axis_env.sizes[axis_pos + 1 :]), dtype=np.uint32
      )
  )
  mod = mlir.ir_constant(np.array(axis_env.sizes[axis_pos], dtype=np.uint32))
  axis_context = ctx.module_context.axis_context
  is_spmd = isinstance(
      axis_context,
      (SPMDAxisContext, ShardingContext),
  )
  # Under SPMD the flat id is the partition id; otherwise the replica id.
  if is_spmd:
    device_id = hlo.partition_id()
  else:
    device_id = hlo.replica_id()
  unsigned_index = hlo.remainder(hlo.divide(device_id, div), mod)
  return hlo.convert(
      ir.RankedTensorType.get([], ir.IntegerType.get_signless(32)),
      unsigned_index)
def _axis_index_lowering(ctx, *, axis_name):
  """Lower axis_index using the module context's axis environment."""
  hlo_index = _build_axis_index_lowering_hlo(
      ctx, axis_name, ctx.module_context.axis_env)
  return [hlo_index]
def _axis_index_effectful_abstract_eval(*, axis_name):
  """Abstract eval: a scalar int32 carrying a named-axis effect."""
  _check_axis_names([axis_name])
  effects = {core.NamedAxisEffect(axis_name)}
  return ShapedArray((), np.int32), effects
def _axis_index_batcher(axis_data, vals_in, dims_in, *, axis_name):
return lax.iota(np.int32, axis_data.size), 0
# axis_index primitive setup: eager dispatch through XLA, HLO lowering,
# effectful abstract eval (scalar int32), and batching via iota.
axis_index_p = core.Primitive('axis_index')
axis_index_p.def_impl(partial(dispatch.apply_primitive, axis_index_p))
mlir.register_lowering(axis_index_p, _axis_index_lowering)
axis_index_p.def_effectful_abstract_eval(_axis_index_effectful_abstract_eval)
batching.fancy_primitive_batchers[axis_index_p] = _axis_index_batcher
# Skip batching when the vmapped axis is absent from 'axis_name'.
batching.skippable_batchers[axis_index_p] = partial(_names_in_param, 'axis_name')
def _pgather_impl(src, idx, *, axes):
  """Reference implementation of pgather for purely positional axes."""
  assert all(isinstance(axis, int) for axis in axes)
  # Bring all gathered axes to the front and flatten them into one.
  front = moveaxis(src, axes, range(len(axes)))
  rest_shape = front.shape[len(axes):]
  flat = front.reshape((-1,) + rest_shape)
  # Gather one slice per index along the flattened leading axis.
  idx = lax.expand_dims(idx, (-1,))
  offset_dims = tuple(range(idx.ndim - 1, idx.ndim + flat.ndim - 2))
  dnums = slicing.GatherDimensionNumbers(
      offset_dims=offset_dims,
      collapsed_slice_dims=(0,),
      start_index_map=(0,),
  )
  return slicing.gather(flat, idx, dimension_numbers=dnums,
                        slice_sizes=(1,) + rest_shape)
def _pgather_abstract_eval(src, idx, *, axes):
  """Abstract eval: drop gathered positional axes, prepend idx's shape."""
  # TODO: Avals with names rule: remove all axes from src, insert those from idx
  # The order is important, because it is ok to re-insert one of the deleted axes!
  _check_axis_names(axes)
  remaining = list(src.shape)
  # Delete from the back so earlier positions stay valid.
  for axis in sorted((a for a in axes if isinstance(a, int)), reverse=True):
    del remaining[axis]
  return ShapedArray(idx.shape + tuple(remaining), src.dtype)
def _pgather_parallel_lowering(ctx, src, idx, *, axes):
if any(not isinstance(axis, int) for axis in axes):
raise NotImplementedError("pgather only supported in the SPMD lowering."
"Please open a feature request!")
return mlir.lower_fun(_pgather_impl, multiple_results=False)(
ctx, src, idx, axes=axes)
def _pgather_collective_batcher(axis_size, frame_name, _, vals_in, dims_in, *, axes):
src, idx = vals_in
dsrc, didx = dims_in
if dsrc is batching.not_mapped:
raise ValueError("pgather axis {frame.name} is missing from the indexed value")
if didx is not batching.not_mapped:
# NOTE: This is allowed and the output would be mapped along this axis!
raise NotImplementedError("Please open a feature request!")
# Now source is mapped, idx is not
new_axes = tuple(dsrc if axis == frame_name else
axis + (dsrc <= axis) if isinstance(axis, int) else
axis
for axis in axes)
# The result is not mapped, because we eliminate all axes, and those include
# the batched axis.
if all(isinstance(axis, int) for axis in axes):
# We rewrite a purely positional pgather as a gather, because that one
# is more fully featured (e.g. supports AD).
return _pgather_impl(src, idx, axes=new_axes), batching.not_mapped
else:
return pgather_p.bind(src, idx, axes=new_axes), batching.not_mapped
# pgather primitive setup: eager impl, abstract eval, lowering, and batching.
pgather_p = core.Primitive('pgather')
pgather_p.def_impl(_pgather_impl)
pgather_p.def_abstract_eval(_pgather_abstract_eval)
mlir.register_lowering(pgather_p, _pgather_parallel_lowering)
# TODO: Transpose? That requires adding pscatter...
batching.fancy_primitive_batchers[pgather_p] = _pgather_collective_batcher
batching.skippable_batchers[pgather_p] = partial(_names_in_param, 'axes')
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@_src@lax@parallel.py@.PATH_END.py
|
{
"filename": "terminal.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/_pytest/terminal.py",
"type": "Python"
}
|
""" terminal reporting of the full testing process.
This is a good source for looking at the various reporting hooks.
"""
from __future__ import absolute_import, division, print_function
import itertools
import platform
import sys
import time
import pluggy
import py
import six
import pytest
from _pytest import nodes
from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \
EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED
def pytest_addoption(parser):
    """Register terminal-reporting command line options and ini values."""
    group = parser.getgroup("terminal reporting", "reporting", after="general")
    # NOTE: stray trailing commas after the next two calls (which wrapped each
    # statement in a discarded 1-tuple) have been removed.
    group._addoption('-v', '--verbose', action="count",
                     dest="verbose", default=0, help="increase verbosity.")
    group._addoption('-q', '--quiet', action="count",
                     dest="quiet", default=0, help="decrease verbosity.")
    group._addoption('-r',
                     action="store", dest="reportchars", default='',
                     metavar="chars",
                     help="show extra test summary info as specified by chars (f)ailed, "
                          "(E)error, (s)skipped, (x)failed, (X)passed, "
                          "(p)passed, (P)passed with output, (a)all except pP. "
                          "Warnings are displayed at all times except when "
                          "--disable-warnings is set")
    group._addoption('--disable-warnings', '--disable-pytest-warnings',
                     default=False, dest='disable_warnings',
                     action='store_true', help='disable warnings summary')
    group._addoption('-l', '--showlocals',
                     action="store_true", dest="showlocals", default=False,
                     help="show locals in tracebacks (disabled by default).")
    group._addoption('--tb', metavar="style",
                     action="store", dest="tbstyle", default='auto',
                     choices=['auto', 'long', 'short', 'no', 'line', 'native'],
                     help="traceback print mode (auto/long/short/line/native/no).")
    group._addoption('--fulltrace', '--full-trace',
                     action="store_true", default=False,
                     help="don't cut any tracebacks (default is to cut).")
    group._addoption('--color', metavar="color",
                     action="store", dest="color", default='auto',
                     choices=['yes', 'no', 'auto'],
                     help="color terminal output (yes/no/auto).")
    parser.addini("console_output_style",
                  help="console output: classic or with additional progress information (classic|progress).",
                  default='progress')
def pytest_configure(config):
    """Create the TerminalReporter plugin and register it.

    Also reduces the effective verbosity by the -q count and, in debug
    or traceconfig mode, installs a trace processor that echoes internal
    config trace messages through the reporter.
    """
    config.option.verbose -= config.option.quiet
    terminalreporter = TerminalReporter(config, sys.stdout)
    config.pluginmanager.register(terminalreporter, 'terminalreporter')
    if config.option.debug or config.option.traceconfig:
        def trace_writer(tags, args):
            # echo internal trace messages through the reporter
            joined = " ".join(str(arg) for arg in args)
            terminalreporter.write_line("[traceconfig] " + joined)
        config.trace.root.setprocessor("pytest:config", trace_writer)
def getreportopt(config):
    """Compute the effective -r report characters for *config*.

    A 'w' is implied unless --disable-warnings was given (in which case
    any explicit 'w' is stripped), and the special character 'a'
    expands to all characters except 'p'/'P'.
    """
    reportchars = config.option.reportchars
    if not config.option.disable_warnings and 'w' not in reportchars:
        reportchars += 'w'
    elif config.option.disable_warnings and 'w' in reportchars:
        reportchars = reportchars.replace('w', '')
    reportopts = ""
    for char in reportchars:
        if char == 'a':
            # 'a' means "all except pP": reset to the full set
            reportopts = 'fEsxXw'
        elif char not in reportopts:
            reportopts += char
    return reportopts
def pytest_report_teststatus(report):
    """Map a test report onto (category, short letter, verbose word).

    A failure outside the 'call' phase (setup/teardown) is shown as a
    lowercase 'f' to distinguish it from a real test failure.
    """
    outcome = report.outcome
    if report.passed:
        shortletter = "."
    elif report.skipped:
        shortletter = "s"
    elif report.failed:
        shortletter = "F" if report.when == "call" else "f"
    return outcome, shortletter, outcome.upper()
class WarningReport:
    """
    Simple structure to hold warnings information captured by ``pytest_logwarning``.
    """
    def __init__(self, code, message, nodeid=None, fslocation=None):
        """
        :param code: unused
        :param str message: user friendly message about the warning
        :param str|None nodeid: node id that generated the warning (see ``get_location``).
        :param tuple|py.path.local fslocation:
            file system location of the source of the warning (see ``get_location``).
        """
        self.code = code
        self.message = message
        self.nodeid = nodeid
        self.fslocation = fslocation

    def get_location(self, config):
        """
        Return user-friendly information about the location of the
        warning, or None when no location is known.

        Preference order: node id, then a ``file:line`` string derived
        from a (filename, lineno, ...) tuple, then the plain string
        form of ``fslocation``.
        """
        if self.nodeid:
            return self.nodeid
        if not self.fslocation:
            return None
        if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2:
            filename, linenum = self.fslocation[:2]
            relpath = py.path.local(filename).relto(config.invocation_dir)
            return '%s:%s' % (relpath, linenum)
        return str(self.fslocation)
class TerminalReporter:
    """Default terminal reporting plugin.

    Collects test reports in ``self.stats`` (keyed by report category,
    e.g. 'passed'/'failed'/'warnings') and writes progress characters,
    verbose result lines and the final summary sections to the terminal.
    Registered in ``pytest_configure`` under the name 'terminalreporter'.
    """
    def __init__(self, config, file=None):
        import _pytest.config
        self.config = config
        # verbosity was already reduced by the -q count in pytest_configure
        self.verbosity = self.config.option.verbose
        self.showheader = self.verbosity >= 0
        self.showfspath = self.verbosity >= 0
        self.showlongtestinfo = self.verbosity > 0
        self._numcollected = 0
        self._session = None
        # report category -> list of reports
        self.stats = {}
        self.startdir = py.path.local()
        if file is None:
            file = sys.stdout
        self._tw = _pytest.config.create_terminal_writer(config, file)
        # self.writer will be deprecated in pytest-3.4
        self.writer = self._tw
        self._screen_width = self._tw.fullwidth
        # currentfspath is a path, -2 ("line already has result text"), or None
        self.currentfspath = None
        self.reportchars = getreportopt(config)
        self.hasmarkup = self._tw.hasmarkup
        self.isatty = file.isatty()
        self._progress_items_reported = 0
        self._show_progress_info = self.config.getini('console_output_style') == 'progress'
    def hasopt(self, char):
        """Return True if report character *char* was requested via -r."""
        char = {'xfailed': 'x', 'skipped': 's'}.get(char, char)
        return char in self.reportchars
    def write_fspath_result(self, nodeid, res):
        # print the (relative) file path once, then append result letters
        fspath = self.config.rootdir.join(nodeid.split("::")[0])
        if fspath != self.currentfspath:
            if self.currentfspath is not None:
                self._write_progress_information_filling_space()
            self.currentfspath = fspath
            fspath = self.startdir.bestrelpath(fspath)
            self._tw.line()
            self._tw.write(fspath + " ")
        self._tw.write(res)
    def write_ensure_prefix(self, prefix, extra="", **kwargs):
        # write *prefix* at the start of a fresh line, once per prefix
        if self.currentfspath != prefix:
            self._tw.line()
            self.currentfspath = prefix
            self._tw.write(prefix)
        if extra:
            self._tw.write(extra, **kwargs)
            # -2 marks "current line already carries result text"
            self.currentfspath = -2
            self._write_progress_information_filling_space()
    def ensure_newline(self):
        if self.currentfspath:
            self._tw.line()
            self.currentfspath = None
    def write(self, content, **markup):
        self._tw.write(content, **markup)
    def write_line(self, line, **markup):
        # decode bytes defensively so reporting never raises on bad input
        if not isinstance(line, six.text_type):
            line = six.text_type(line, errors="replace")
        self.ensure_newline()
        self._tw.line(line, **markup)
    def rewrite(self, line, **markup):
        """
        Rewinds the terminal cursor to the beginning and writes the given line.
        :kwarg erase: if True, will also add spaces until the full terminal width to ensure
        previous lines are properly erased.
        The rest of the keyword arguments are markup instructions.
        """
        erase = markup.pop('erase', False)
        if erase:
            fill_count = self._tw.fullwidth - len(line) - 1
            fill = ' ' * fill_count
        else:
            fill = ''
        line = str(line)
        self._tw.write("\r" + line + fill, **markup)
    def write_sep(self, sep, title=None, **markup):
        self.ensure_newline()
        self._tw.sep(sep, title, **markup)
    def section(self, title, sep="=", **kw):
        self._tw.sep(sep, title, **kw)
    def line(self, msg, **kw):
        self._tw.line(msg, **kw)
    def pytest_internalerror(self, excrepr):
        for line in six.text_type(excrepr).split("\n"):
            self.write_line("INTERNALERROR> " + line)
        return 1
    def pytest_logwarning(self, code, fslocation, message, nodeid):
        # collect warnings; they are rendered later by summary_warnings()
        warnings = self.stats.setdefault("warnings", [])
        warning = WarningReport(code=code, fslocation=fslocation,
                                message=message, nodeid=nodeid)
        warnings.append(warning)
    def pytest_plugin_registered(self, plugin):
        if self.config.option.traceconfig:
            msg = "PLUGIN registered: %s" % (plugin,)
            # XXX this event may happen during setup/teardown time
            # which unfortunately captures our output here
            # which garbles our output if we use self.write_line
            self.write_line(msg)
    def pytest_deselected(self, items):
        self.stats.setdefault('deselected', []).extend(items)
    def pytest_runtest_logstart(self, nodeid, location):
        # ensure that the path is printed before the
        # 1st test of a module starts running
        if self.showlongtestinfo:
            line = self._locationline(nodeid, *location)
            self.write_ensure_prefix(line, "")
        elif self.showfspath:
            fsid = nodeid.split("::")[0]
            self.write_fspath_result(fsid, "")
    def pytest_runtest_logreport(self, report):
        """Record the report in stats and print its progress letter/line."""
        rep = report
        res = self.config.hook.pytest_report_teststatus(report=rep)
        cat, letter, word = res
        if isinstance(word, tuple):
            word, markup = word
        else:
            markup = None
        self.stats.setdefault(cat, []).append(rep)
        # NOTE(review): _tests_ran is never initialized in __init__;
        # presumably only read after at least one report — confirm.
        self._tests_ran = True
        if not letter and not word:
            # probably passed setup/teardown
            return
        running_xdist = hasattr(rep, 'node')
        self._progress_items_reported += 1
        if self.verbosity <= 0:
            if not running_xdist and self.showfspath:
                self.write_fspath_result(rep.nodeid, letter)
            else:
                self._tw.write(letter)
            self._write_progress_if_past_edge()
        else:
            if markup is None:
                if rep.passed:
                    markup = {'green': True}
                elif rep.failed:
                    markup = {'red': True}
                elif rep.skipped:
                    markup = {'yellow': True}
                else:
                    markup = {}
            line = self._locationline(rep.nodeid, *rep.location)
            if not running_xdist:
                self.write_ensure_prefix(line, word, **markup)
            else:
                self.ensure_newline()
                self._tw.write("[%s]" % rep.node.gateway.id)
                if self._show_progress_info:
                    self._tw.write(self._get_progress_information_message() + " ", cyan=True)
                else:
                    self._tw.write(' ')
                self._tw.write(word, **markup)
                self._tw.write(" " + line)
                self.currentfspath = -2
    def _write_progress_if_past_edge(self):
        # emit a ' [ 42%]' marker when the dot column reaches the screen edge
        if not self._show_progress_info:
            return
        last_item = self._progress_items_reported == self._session.testscollected
        if last_item:
            self._write_progress_information_filling_space()
            return
        past_edge = self._tw.chars_on_current_line + self._PROGRESS_LENGTH + 1 >= self._screen_width
        if past_edge:
            msg = self._get_progress_information_message()
            self._tw.write(msg + '\n', cyan=True)
    _PROGRESS_LENGTH = len(' [100%]')
    def _get_progress_information_message(self):
        collected = self._session.testscollected
        if collected:
            progress = self._progress_items_reported * 100 // collected
            return ' [{:3d}%]'.format(progress)
        return ' [100%]'
    def _write_progress_information_filling_space(self):
        # right-align the progress marker by padding with spaces
        if not self._show_progress_info:
            return
        msg = self._get_progress_information_message()
        fill = ' ' * (self._tw.fullwidth - self._tw.chars_on_current_line - len(msg) - 1)
        self.write(fill + msg, cyan=True)
    def pytest_collection(self):
        if not self.isatty and self.config.option.verbose >= 1:
            self.write("collecting ... ", bold=True)
    def pytest_collectreport(self, report):
        if report.failed:
            self.stats.setdefault("error", []).append(report)
        elif report.skipped:
            self.stats.setdefault("skipped", []).append(report)
        items = [x for x in report.result if isinstance(x, pytest.Item)]
        self._numcollected += len(items)
        if self.isatty:
            # self.write_fspath_result(report.nodeid, 'E')
            self.report_collect()
    def report_collect(self, final=False):
        """Write the 'collecting/collected N items' status line."""
        if self.config.option.verbose < 0:
            return
        errors = len(self.stats.get('error', []))
        skipped = len(self.stats.get('skipped', []))
        if final:
            line = "collected "
        else:
            line = "collecting "
        line += str(self._numcollected) + " item" + ('' if self._numcollected == 1 else 's')
        if errors:
            line += " / %d errors" % errors
        if skipped:
            line += " / %d skipped" % skipped
        if self.isatty:
            # on a tty the line is updated in place while collecting
            self.rewrite(line, bold=True, erase=True)
            if final:
                self.write('\n')
        else:
            self.write_line(line)
    def pytest_collection_modifyitems(self):
        self.report_collect(True)
    @pytest.hookimpl(trylast=True)
    def pytest_sessionstart(self, session):
        """Print the session header (platform, versions, plugins)."""
        self._session = session
        self._sessionstarttime = time.time()
        if not self.showheader:
            return
        self.write_sep("=", "test session starts", bold=True)
        verinfo = platform.python_version()
        msg = "platform %s -- Python %s" % (sys.platform, verinfo)
        if hasattr(sys, 'pypy_version_info'):
            verinfo = ".".join(map(str, sys.pypy_version_info[:3]))
            msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3])
        msg += ", pytest-%s, py-%s, pluggy-%s" % (
            pytest.__version__, py.__version__, pluggy.__version__)
        if self.verbosity > 0 or self.config.option.debug or \
                getattr(self.config.option, 'pastebin', None):
            msg += " -- " + str(sys.executable)
        self.write_line(msg)
        lines = self.config.hook.pytest_report_header(
            config=self.config, startdir=self.startdir)
        self._write_report_lines_from_hooks(lines)
    def _write_report_lines_from_hooks(self, lines):
        # hook results come in LIFO order; reverse so first-registered wins
        lines.reverse()
        for line in flatten(lines):
            self.write_line(line)
    def pytest_report_header(self, config):
        inifile = ""
        if config.inifile:
            inifile = " " + config.rootdir.bestrelpath(config.inifile)
        lines = ["rootdir: %s, inifile:%s" % (config.rootdir, inifile)]
        plugininfo = config.pluginmanager.list_plugin_distinfo()
        if plugininfo:
            lines.append(
                "plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
        return lines
    def pytest_collection_finish(self, session):
        if self.config.option.collectonly:
            self._printcollecteditems(session.items)
            if self.stats.get('failed'):
                self._tw.sep("!", "collection failures")
                for rep in self.stats.get('failed'):
                    rep.toterminal(self._tw)
                return 1
            return 0
        lines = self.config.hook.pytest_report_collectionfinish(
            config=self.config, startdir=self.startdir, items=session.items)
        self._write_report_lines_from_hooks(lines)
    def _printcollecteditems(self, items):
        # to print out items and their parent collectors
        # we take care to leave out Instances aka ()
        # because later versions are going to get rid of them anyway
        if self.config.option.verbose < 0:
            if self.config.option.verbose < -1:
                # -qq: only per-file test counts
                counts = {}
                for item in items:
                    name = item.nodeid.split('::', 1)[0]
                    counts[name] = counts.get(name, 0) + 1
                for name, count in sorted(counts.items()):
                    self._tw.line("%s: %d" % (name, count))
            else:
                # -q: flat list of node ids
                for item in items:
                    nodeid = item.nodeid
                    nodeid = nodeid.replace("::()::", "::")
                    self._tw.line(nodeid)
            return
        # default: indented tree of collectors/items
        stack = []
        indent = ""
        for item in items:
            needed_collectors = item.listchain()[1:]  # strip root node
            while stack:
                if stack == needed_collectors[:len(stack)]:
                    break
                stack.pop()
            for col in needed_collectors[len(stack):]:
                stack.append(col)
                # if col.name == "()":
                #    continue
                indent = (len(stack) - 1) * "  "
                self._tw.line("%s%s" % (indent, col))
    @pytest.hookimpl(hookwrapper=True)
    def pytest_sessionfinish(self, exitstatus):
        """After the session: print all summary sections and final stats."""
        outcome = yield
        outcome.get_result()
        self._tw.line("")
        summary_exit_codes = (
            EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, EXIT_USAGEERROR,
            EXIT_NOTESTSCOLLECTED)
        if exitstatus in summary_exit_codes:
            self.config.hook.pytest_terminal_summary(terminalreporter=self,
                                                     exitstatus=exitstatus)
            self.summary_errors()
            self.summary_failures()
            self.summary_warnings()
            self.summary_passes()
        if exitstatus == EXIT_INTERRUPTED:
            self._report_keyboardinterrupt()
            del self._keyboardinterrupt_memo
        self.summary_deselected()
        self.summary_stats()
    def pytest_keyboard_interrupt(self, excinfo):
        self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
    def pytest_unconfigure(self):
        if hasattr(self, '_keyboardinterrupt_memo'):
            self._report_keyboardinterrupt()
    def _report_keyboardinterrupt(self):
        excrepr = self._keyboardinterrupt_memo
        msg = excrepr.reprcrash.message
        self.write_sep("!", msg)
        if "KeyboardInterrupt" in msg:
            if self.config.option.fulltrace:
                excrepr.toterminal(self._tw)
            else:
                self._tw.line("to show a full traceback on KeyboardInterrupt use --fulltrace", yellow=True)
                excrepr.reprcrash.toterminal(self._tw)
    def _locationline(self, nodeid, fspath, lineno, domain):
        """Build the 'path::testname' line shown in verbose output."""
        def mkrel(nodeid):
            line = self.config.cwd_relative_nodeid(nodeid)
            if domain and line.endswith(domain):
                line = line[:-len(domain)]
                values = domain.split("[")
                values[0] = values[0].replace('.', '::')  # don't replace '.' in params
                line += "[".join(values)
            return line
        # collect_fspath comes from testid which has a "/"-normalized path
        if fspath:
            res = mkrel(nodeid).replace("::()", "")  # parens-normalization
            if nodeid.split("::")[0] != fspath.replace("\\", nodes.SEP):
                res += " <- " + self.startdir.bestrelpath(fspath)
        else:
            res = "[location]"
        return res + " "
    def _getfailureheadline(self, rep):
        if hasattr(rep, 'location'):
            fspath, lineno, domain = rep.location
            return domain
        else:
            return "test session"  # XXX?
    def _getcrashline(self, rep):
        try:
            return str(rep.longrepr.reprcrash)
        except AttributeError:
            try:
                return str(rep.longrepr)[:50]
            except AttributeError:
                return ""
    #
    # summaries for sessionfinish
    #
    def getreports(self, name):
        """Return reports in category *name*, skipping ones shown in pdb."""
        values = []
        for x in self.stats.get(name, []):
            if not hasattr(x, '_pdbshown'):
                values.append(x)
        return values
    def summary_warnings(self):
        if self.hasopt("w"):
            all_warnings = self.stats.get("warnings")
            if not all_warnings:
                return
            # group consecutive warnings by location
            grouped = itertools.groupby(all_warnings, key=lambda wr: wr.get_location(self.config))
            self.write_sep("=", "warnings summary", yellow=True, bold=False)
            for location, warning_records in grouped:
                self._tw.line(str(location) or '<undetermined location>')
                for w in warning_records:
                    lines = w.message.splitlines()
                    indented = '\n'.join('  ' + x for x in lines)
                    self._tw.line(indented)
                self._tw.line()
            self._tw.line('-- Docs: http://doc.pytest.org/en/latest/warnings.html')
    def summary_passes(self):
        if self.config.option.tbstyle != "no":
            if self.hasopt("P"):
                reports = self.getreports('passed')
                if not reports:
                    return
                self.write_sep("=", "PASSES")
                for rep in reports:
                    msg = self._getfailureheadline(rep)
                    self.write_sep("_", msg)
                    self._outrep_summary(rep)
    def print_teardown_sections(self, rep):
        for secname, content in rep.sections:
            if 'teardown' in secname:
                self._tw.sep('-', secname)
                if content[-1:] == "\n":
                    content = content[:-1]
                self._tw.line(content)
    def summary_failures(self):
        if self.config.option.tbstyle != "no":
            reports = self.getreports('failed')
            if not reports:
                return
            self.write_sep("=", "FAILURES")
            for rep in reports:
                if self.config.option.tbstyle == "line":
                    line = self._getcrashline(rep)
                    self.write_line(line)
                else:
                    msg = self._getfailureheadline(rep)
                    markup = {'red': True, 'bold': True}
                    self.write_sep("_", msg, **markup)
                    self._outrep_summary(rep)
                    # also show captured output of the matching teardown
                    for report in self.getreports(''):
                        if report.nodeid == rep.nodeid and report.when == 'teardown':
                            self.print_teardown_sections(report)
    def summary_errors(self):
        if self.config.option.tbstyle != "no":
            reports = self.getreports('error')
            if not reports:
                return
            self.write_sep("=", "ERRORS")
            for rep in self.stats['error']:
                msg = self._getfailureheadline(rep)
                if not hasattr(rep, 'when'):
                    # collect
                    msg = "ERROR collecting " + msg
                elif rep.when == "setup":
                    msg = "ERROR at setup of " + msg
                elif rep.when == "teardown":
                    msg = "ERROR at teardown of " + msg
                self.write_sep("_", msg)
                self._outrep_summary(rep)
    def _outrep_summary(self, rep):
        rep.toterminal(self._tw)
        for secname, content in rep.sections:
            self._tw.sep("-", secname)
            if content[-1:] == "\n":
                content = content[:-1]
            self._tw.line(content)
    def summary_stats(self):
        session_duration = time.time() - self._sessionstarttime
        (line, color) = build_summary_stats_line(self.stats)
        msg = "%s in %.2f seconds" % (line, session_duration)
        markup = {color: True, 'bold': True}
        if self.verbosity >= 0:
            self.write_sep("=", msg, **markup)
        if self.verbosity == -1:
            self.write_line(msg, **markup)
    def summary_deselected(self):
        if 'deselected' in self.stats:
            self.write_sep("=", "%d tests deselected" % (
                len(self.stats['deselected'])), bold=True)
def repr_pythonversion(v=None):
    """Render a ``sys.version_info``-like 5-tuple as 'X.Y.Z-level-serial'.

    Defaults to the running interpreter's version; anything that does
    not fit the 5-field format is returned as its plain string form.
    """
    version = sys.version_info if v is None else v
    try:
        return "%s.%s.%s-%s-%s" % version
    except (TypeError, ValueError):
        # not a 5-tuple: fall back to str()
        return str(version)
def flatten(values):
    """Yield items from *values*, recursively expanding lists and tuples."""
    for item in values:
        if not isinstance(item, (list, tuple)):
            yield item
        else:
            for nested in flatten(item):
                yield nested
def build_summary_stats_line(stats):
    """Build the final summary line (e.g. '2 passed, 1 failed') and color.

    Known categories are listed in a fixed order; any other non-empty
    category found in *stats* is appended and forces a yellow summary.
    """
    keys = ("failed passed skipped deselected "
            "xfailed xpassed warnings error").split()
    unknown_key_seen = False
    for key in stats.keys():
        if key and key not in keys:
            # setup/teardown reports have an empty key, which we ignore
            keys.append(key)
            unknown_key_seen = True
    parts = ["%d %s" % (len(stats[key]), key)
             for key in keys if stats.get(key)]
    line = ", ".join(parts) if parts else "no tests ran"
    if 'failed' in stats or 'error' in stats:
        color = 'red'
    elif 'warnings' in stats or unknown_key_seen:
        color = 'yellow'
    elif 'passed' in stats:
        color = 'green'
    else:
        color = 'yellow'
    return (line, color)
def _plugin_nameversions(plugininfo):
values = []
for plugin, dist in plugininfo:
# gets us name and version!
name = '{dist.project_name}-{dist.version}'.format(dist=dist)
# questionable convenience, but it keeps things short
if name.startswith("pytest-"):
name = name[7:]
# we decided to print python package names
# they can have more than one plugin
if name not in values:
values.append(name)
return values
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@_pytest@terminal.py@.PATH_END.py
|
{
"filename": "ellipse.py",
"repo_name": "astropy/photutils",
"repo_path": "photutils_extracted/photutils-main/photutils/aperture/ellipse.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines elliptical and elliptical-annulus apertures in both
pixel and sky coordinates.
"""
import math
import astropy.units as u
import numpy as np
from photutils.aperture.attributes import (PixelPositions, PositiveScalar,
PositiveScalarAngle, ScalarAngle,
ScalarAngleOrValue,
SkyCoordPositions)
from photutils.aperture.core import PixelAperture, SkyAperture
from photutils.aperture.mask import ApertureMask
from photutils.geometry import elliptical_overlap_grid
__all__ = [
'EllipticalAnnulus',
'EllipticalAperture',
'EllipticalMaskMixin',
'SkyEllipticalAnnulus',
'SkyEllipticalAperture',
]
class EllipticalMaskMixin:
    """
    Mixin class providing ``to_mask`` support for elliptical and
    elliptical-annulus aperture objects.
    """
    def to_mask(self, method='exact', subpixels=5):
        """
        Return a mask for the aperture.

        Parameters
        ----------
        method : {'exact', 'center', 'subpixel'}, optional
            The method used to determine the overlap of the aperture on
            the pixel grid. More precise methods are generally slower:

            * ``'exact'`` (default): the exact fractional overlap of
              the aperture and each pixel is calculated; weights are
              between 0 and 1.
            * ``'center'``: a pixel is counted as entirely in or out
              depending on whether its center is inside the aperture;
              weights are only 0 or 1.
            * ``'subpixel'``: each pixel is divided into
              ``subpixels**2`` subpixels, each treated as in
              ``'center'``; weights are between 0 and 1.
              ``subpixels=1`` is equivalent to ``'center'``.

        subpixels : int, optional
            For the ``'subpixel'`` method, the resampling factor per
            dimension. Ignored unless ``method='subpixel'``.

        Returns
        -------
        mask : `~photutils.aperture.ApertureMask` or list of \
                `~photutils.aperture.ApertureMask`
            A single mask for a scalar aperture, otherwise one mask per
            aperture position.
        """
        use_exact, subpixels = self._translate_mask_mode(method, subpixels)

        # pick the (outer) boundary ellipse of this aperture
        if hasattr(self, 'a'):
            semimajor = self.a
            semiminor = self.b
        elif hasattr(self, 'a_in'):  # annulus
            semimajor = self.a_out
            semiminor = self.b_out
        else:
            raise ValueError('Cannot determine the aperture shape.')

        masks = []
        for bbox, edges in zip(self._bbox, self._centered_edges, strict=True):
            ny, nx = bbox.shape
            fraction = elliptical_overlap_grid(edges[0], edges[1], edges[2],
                                               edges[3], nx, ny, semimajor,
                                               semiminor, self._theta_radians,
                                               use_exact, subpixels)

            # for an annulus, remove the hole by subtracting the inner
            # ellipse's overlap fractions
            if hasattr(self, 'a_in'):
                fraction -= elliptical_overlap_grid(edges[0], edges[1],
                                                    edges[2], edges[3], nx,
                                                    ny, self.a_in, self.b_in,
                                                    self._theta_radians,
                                                    use_exact, subpixels)

            masks.append(ApertureMask(fraction, bbox))

        return masks[0] if self.isscalar else masks

    @staticmethod
    def _calc_extents(semimajor_axis, semiminor_axis, theta):
        """
        Calculate half of the bounding box extents of an ellipse
        rotated by *theta* (radians).
        """
        cos_t = np.cos(theta)
        sin_t = np.sin(theta)
        x_extent = np.sqrt((semimajor_axis * cos_t)**2
                           + (semiminor_axis * sin_t)**2)
        y_extent = np.sqrt((semimajor_axis * sin_t)**2
                           + (semiminor_axis * cos_t)**2)
        return x_extent, y_extent
class EllipticalAperture(EllipticalMaskMixin, PixelAperture):
    """
    An elliptical aperture defined in pixel coordinates.

    The aperture has a single fixed size/shape, but it can have
    multiple positions (see the ``positions`` input).

    Parameters
    ----------
    positions : array_like
        The pixel coordinates of the aperture center(s), either as a
        single ``(x, y)`` pair or as a tuple, list, or
        `~numpy.ndarray` of ``(x, y)`` pairs.
    a : float
        The semimajor axis of the ellipse in pixels.
    b : float
        The semiminor axis of the ellipse in pixels.
    theta : float or `~astropy.units.Quantity`, optional
        The rotation angle as an angular quantity
        (`~astropy.units.Quantity` or `~astropy.coordinates.Angle`) or
        value in radians (as a float) from the positive ``x`` axis.
        The rotation angle increases counterclockwise.

    Raises
    ------
    ValueError : `ValueError`
        If either axis (``a`` or ``b``) is negative.

    Examples
    --------
    >>> from astropy.coordinates import Angle
    >>> from photutils.aperture import EllipticalAperture
    >>> theta = Angle(80, 'deg')
    >>> aper = EllipticalAperture((10.0, 20.0), 5.0, 3.0, theta=theta)
    >>> aper = EllipticalAperture([(10.0, 20.0), (30.0, 40.0)], 5.0, 3.0)
    """

    _params = ('positions', 'a', 'b', 'theta')
    positions = PixelPositions('The center pixel position(s).')
    a = PositiveScalar('The semimajor axis in pixels.')
    b = PositiveScalar('The semiminor axis in pixels.')
    theta = ScalarAngleOrValue('The counterclockwise rotation angle as an '
                               'angular Quantity or value in radians from '
                               'the positive x axis.')

    def __init__(self, positions, a, b, theta=0.0):
        self.positions = positions
        self.a = a
        self.b = b
        self._theta_radians = 0.0  # updated by the theta descriptor setter
        self.theta = theta

    @property
    def _xy_extents(self):
        # half-widths of the minimal bounding box around the ellipse
        return self._calc_extents(self.a, self.b, self._theta_radians)

    @property
    def area(self):
        """
        The exact geometric area of the aperture shape.
        """
        return math.pi * self.a * self.b

    def _to_patch(self, origin=(0, 0), **kwargs):
        """
        Return a `~matplotlib.patches.Patch` for the aperture.

        Parameters
        ----------
        origin : array_like, optional
            The ``(x, y)`` position of the origin of the displayed
            image.
        **kwargs : dict, optional
            Any keyword arguments accepted by
            `matplotlib.patches.Patch`.

        Returns
        -------
        patch : `~matplotlib.patches.Patch` or list of \
                `~matplotlib.patches.Patch`
            A single patch for a scalar aperture, otherwise one patch
            per aperture position.
        """
        import matplotlib.patches as mpatches

        xy_positions, patch_kwargs = self._define_patch_params(origin=origin,
                                                               **kwargs)
        # matplotlib expects the rotation angle in degrees
        angle = self._theta_radians * 180.0 / np.pi
        patches = []
        for xy_position in xy_positions:
            patches.append(mpatches.Ellipse(xy_position, 2.0 * self.a,
                                            2.0 * self.b, angle=angle,
                                            **patch_kwargs))
        return patches[0] if self.isscalar else patches

    def to_mask(self, method='exact', subpixels=5):
        # delegate to the shared mixin implementation
        return EllipticalMaskMixin.to_mask(self, method=method,
                                           subpixels=subpixels)

    def to_sky(self, wcs):
        """
        Convert the aperture to a `SkyEllipticalAperture` object
        defined in celestial coordinates.

        Parameters
        ----------
        wcs : WCS object
            A world coordinate system (WCS) transformation that
            supports the `astropy shared interface for WCS
            <https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
            (e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`).

        Returns
        -------
        aperture : `SkyEllipticalAperture` object
            A `SkyEllipticalAperture` object.
        """
        sky_params = self._to_sky_params(wcs)
        return SkyEllipticalAperture(**sky_params)
class EllipticalAnnulus(EllipticalMaskMixin, PixelAperture):
    r"""
    An elliptical annulus aperture defined in pixel coordinates.

    The aperture has a single fixed size/shape, but it can have
    multiple positions (see the ``positions`` input).

    Parameters
    ----------
    positions : array_like
        The pixel coordinates of the aperture center(s), either as a
        single ``(x, y)`` pair or as a tuple, list, or
        `~numpy.ndarray` of ``(x, y)`` pairs.
    a_in : float
        The inner semimajor axis of the elliptical annulus in pixels.
    a_out : float
        The outer semimajor axis of the elliptical annulus in pixels.
    b_out : float
        The outer semiminor axis of the elliptical annulus in pixels.
    b_in : `None` or float, optional
        The inner semiminor axis of the elliptical annulus in pixels.
        If `None`, then the inner semiminor axis is calculated as:

        .. math::

            b_{in} = b_{out} \left(\frac{a_{in}}{a_{out}}\right)

    theta : float or `~astropy.units.Quantity`, optional
        The rotation angle as an angular quantity
        (`~astropy.units.Quantity` or `~astropy.coordinates.Angle`) or
        value in radians (as a float) from the positive ``x`` axis.
        The rotation angle increases counterclockwise.

    Raises
    ------
    ValueError : `ValueError`
        If the inner semimajor axis (``a_in``) is greater than the
        outer semimajor axis (``a_out``).
    ValueError : `ValueError`
        If either the inner semimajor axis (``a_in``) or the outer
        semiminor axis (``b_out``) is negative.

    Examples
    --------
    >>> from astropy.coordinates import Angle
    >>> from photutils.aperture import EllipticalAnnulus
    >>> theta = Angle(80, 'deg')
    >>> aper = EllipticalAnnulus((10.0, 20.0), 3.0, 8.0, 5.0, theta=theta)
    >>> aper = EllipticalAnnulus([(10.0, 20.0), (30.0, 40.0)], 3.0, 8.0, 5.0)
    """

    _params = ('positions', 'a_in', 'a_out', 'b_in', 'b_out', 'theta')
    positions = PixelPositions('The center pixel position(s).')
    a_in = PositiveScalar('The inner semimajor axis in pixels.')
    a_out = PositiveScalar('The outer semimajor axis in pixels.')
    b_in = PositiveScalar('The inner semiminor axis in pixels.')
    b_out = PositiveScalar('The outer semiminor axis in pixels.')
    theta = ScalarAngleOrValue('The counterclockwise rotation angle as an '
                               'angular Quantity or value in radians from '
                               'the positive x axis.')

    def __init__(self, positions, a_in, a_out, b_out, b_in=None, theta=0.0):
        if not a_out > a_in:
            raise ValueError('"a_out" must be greater than "a_in".')
        self.positions = positions
        self.a_in = a_in
        self.a_out = a_out
        self.b_out = b_out
        if b_in is None:
            # default: same axis ratio as the outer ellipse
            b_in = self.b_out * self.a_in / self.a_out
        elif not b_out > b_in:
            raise ValueError('"b_out" must be greater than "b_in".')
        self.b_in = b_in
        self._theta_radians = 0.0  # updated by the theta descriptor setter
        self.theta = theta

    @property
    def _xy_extents(self):
        # the outer ellipse bounds the whole annulus
        return self._calc_extents(self.a_out, self.b_out, self._theta_radians)

    @property
    def area(self):
        """
        The exact geometric area of the aperture shape.
        """
        return math.pi * (self.a_out * self.b_out - self.a_in * self.b_in)

    def _to_patch(self, origin=(0, 0), **kwargs):
        """
        Return a `~matplotlib.patches.Patch` for the aperture.

        Parameters
        ----------
        origin : array_like, optional
            The ``(x, y)`` position of the origin of the displayed
            image.
        **kwargs : dict, optional
            Any keyword arguments accepted by
            `matplotlib.patches.Patch`.

        Returns
        -------
        patch : `~matplotlib.patches.Patch` or list of \
                `~matplotlib.patches.Patch`
            A single patch for a scalar aperture, otherwise one patch
            per aperture position.
        """
        import matplotlib.patches as mpatches

        xy_positions, patch_kwargs = self._define_patch_params(origin=origin,
                                                               **kwargs)
        # matplotlib expects the rotation angle in degrees
        angle = self._theta_radians * 180.0 / np.pi
        patches = []
        for xy_position in xy_positions:
            inner = mpatches.Ellipse(xy_position, 2.0 * self.a_in,
                                     2.0 * self.b_in, angle=angle)
            outer = mpatches.Ellipse(xy_position, 2.0 * self.a_out,
                                     2.0 * self.b_out, angle=angle)
            annulus_path = self._make_annulus_path(inner, outer)
            patches.append(mpatches.PathPatch(annulus_path, **patch_kwargs))
        return patches[0] if self.isscalar else patches

    def to_mask(self, method='exact', subpixels=5):
        # delegate to the shared mixin implementation
        return EllipticalMaskMixin.to_mask(self, method=method,
                                           subpixels=subpixels)

    def to_sky(self, wcs):
        """
        Convert the aperture to a `SkyEllipticalAnnulus` object defined
        in celestial coordinates.

        Parameters
        ----------
        wcs : WCS object
            A world coordinate system (WCS) transformation that
            supports the `astropy shared interface for WCS
            <https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
            (e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`).

        Returns
        -------
        aperture : `SkyEllipticalAnnulus` object
            A `SkyEllipticalAnnulus` object.
        """
        sky_params = self._to_sky_params(wcs)
        return SkyEllipticalAnnulus(**sky_params)
class SkyEllipticalAperture(SkyAperture):
    """
    An elliptical aperture defined in sky coordinates.

    The aperture has a single fixed size/shape, but it can have
    multiple positions (see the ``positions`` input).

    Parameters
    ----------
    positions : `~astropy.coordinates.SkyCoord`
        The celestial coordinates of the aperture center(s), either
        scalar coordinates or an array of coordinates.
    a : scalar `~astropy.units.Quantity`
        The semimajor axis of the ellipse in angular units.
    b : scalar `~astropy.units.Quantity`
        The semiminor axis of the ellipse in angular units.
    theta : scalar `~astropy.units.Quantity`, optional
        The position angle (in angular units) of the ellipse semimajor
        axis. For a right-handed world coordinate system, the position
        angle increases counterclockwise from North (PA=0).

    Examples
    --------
    >>> from astropy.coordinates import SkyCoord
    >>> import astropy.units as u
    >>> from photutils.aperture import SkyEllipticalAperture
    >>> positions = SkyCoord(ra=[10.0, 20.0], dec=[30.0, 40.0], unit='deg')
    >>> aper = SkyEllipticalAperture(positions, 1.0*u.arcsec, 0.5*u.arcsec)
    """

    _params = ('positions', 'a', 'b', 'theta')
    positions = SkyCoordPositions('The center position(s) in sky coordinates.')
    a = PositiveScalarAngle('The semimajor axis in angular units.')
    b = PositiveScalarAngle('The semiminor axis in angular units.')
    theta = ScalarAngle('The position angle in angular units of the ellipse '
                        'semimajor axis.')

    def __init__(self, positions, a, b, theta=0.0 * u.deg):
        self.positions = positions
        self.a = a
        self.b = b
        self.theta = theta

    def to_pixel(self, wcs):
        """
        Convert the aperture to an `EllipticalAperture` object defined
        in pixel coordinates.

        Parameters
        ----------
        wcs : WCS object
            A world coordinate system (WCS) transformation that
            supports the `astropy shared interface for WCS
            <https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
            (e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`).

        Returns
        -------
        aperture : `EllipticalAperture` object
            An `EllipticalAperture` object.
        """
        pixel_params = self._to_pixel_params(wcs)
        return EllipticalAperture(**pixel_params)
class SkyEllipticalAnnulus(SkyAperture):
    r"""
    An elliptical annulus aperture defined in celestial (sky)
    coordinates.

    The aperture shape is fixed, but any number of positions may be
    supplied through the ``positions`` input.

    Parameters
    ----------
    positions : `~astropy.coordinates.SkyCoord`
        The sky position(s) of the aperture center(s); scalar or array
        coordinates are both accepted.
    a_in : scalar `~astropy.units.Quantity`
        The inner semimajor axis in angular units.
    a_out : scalar `~astropy.units.Quantity`
        The outer semimajor axis in angular units.
    b_out : scalar `~astropy.units.Quantity`
        The outer semiminor axis in angular units.
    b_in : `None` or scalar `~astropy.units.Quantity`
        The inner semiminor axis in angular units. If `None`, it is
        derived from the other axes as:

        .. math:: b_{in} = b_{out} \left(\frac{a_{in}}{a_{out}}\right)

    theta : scalar `~astropy.units.Quantity`, optional
        Position angle (angular units) of the semimajor axis, measured
        counterclockwise from North (PA=0) for a right-handed world
        coordinate system.

    Examples
    --------
    >>> from astropy.coordinates import SkyCoord
    >>> import astropy.units as u
    >>> from photutils.aperture import SkyEllipticalAnnulus
    >>> positions = SkyCoord(ra=[10.0, 20.0], dec=[30.0, 40.0], unit='deg')
    >>> aper = SkyEllipticalAnnulus(positions, 0.5*u.arcsec, 2.0*u.arcsec,
    ...                             1.0*u.arcsec)
    """

    _params = ('positions', 'a_in', 'a_out', 'b_in', 'b_out', 'theta')
    positions = SkyCoordPositions('The center position(s) in sky coordinates.')
    a_in = PositiveScalarAngle('The inner semimajor axis in angular units.')
    a_out = PositiveScalarAngle('The outer semimajor axis in angular units.')
    b_in = PositiveScalarAngle('The inner semiminor axis in angular units.')
    b_out = PositiveScalarAngle('The outer semiminor axis in angular units.')
    theta = ScalarAngle('The position angle in angular units of the ellipse '
                        'semimajor axis.')

    def __init__(self, positions, a_in, a_out, b_out, b_in=None,
                 theta=0.0 * u.deg):
        if a_in >= a_out:
            raise ValueError('"a_out" must be greater than "a_in".')
        self.positions = positions
        self.a_in = a_in
        self.a_out = a_out
        self.b_out = b_out
        if b_in is None:
            # Default keeps the inner ellipse similar in shape to the
            # outer one.
            b_in = self.b_out * self.a_in / self.a_out
        else:
            if b_in >= b_out:
                raise ValueError('"b_out" must be greater than "b_in".')
        self.b_in = b_in
        self.theta = theta

    def to_pixel(self, wcs):
        """
        Return the equivalent `EllipticalAnnulus`, defined in pixel
        coordinates.

        Parameters
        ----------
        wcs : WCS object
            A world coordinate system (WCS) transformation supporting
            the `astropy shared interface for WCS
            <https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
            (e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`).

        Returns
        -------
        aperture : `EllipticalAnnulus` object
            The pixel-space aperture.
        """
        pixel_params = self._to_pixel_params(wcs)
        return EllipticalAnnulus(**pixel_params)
|
astropyREPO_NAMEphotutilsPATH_START.@photutils_extracted@photutils-main@photutils@aperture@ellipse.py@.PATH_END.py
|
{
"filename": "test_simulation_util.py",
"repo_name": "sibirrer/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/test/test_Util/test_simulation_util.py",
"type": "Python"
}
|
# import main simulation class of lenstronomy
import lenstronomy.Util.simulation_util as sim_util
from lenstronomy.Data.imaging_data import ImageData
import pytest
class TestSimulation(object):
    """Tests for lenstronomy's simulation utility helpers."""

    def setup_method(self):
        pass

    def test_data_configure_simple(self):
        # Imaging-data configuration inputs.
        background_rms = 1.0  # background noise per pixel
        exposure_time = 10  # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit)
        num_pix = 100  # cutout pixel size
        pixel_scale = 0.05  # pixel size in arcsec (area per pixel = deltaPix**2)
        kwargs_data = sim_util.data_configure_simple(
            num_pix, pixel_scale, exposure_time, background_rms
        )
        data_class = ImageData(**kwargs_data)
        assert data_class.pixel_width == pixel_scale
# Allow running this test module directly, outside of a pytest invocation.
if __name__ == "__main__":
    pytest.main()
|
sibirrerREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@test@test_Util@test_simulation_util.py@.PATH_END.py
|
{
"filename": "_sizemode.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatterpolar/marker/_sizemode.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizemodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``scatterpolar.marker.sizemode``."""

    def __init__(
        self, plotly_name="sizemode", parent_name="scatterpolar.marker", **kwargs
    ):
        # Fill in defaults only when the caller did not override them.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("role", "info")
        kwargs.setdefault("values", ["diameter", "area"])
        super(SizemodeValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatterpolar@marker@_sizemode.py@.PATH_END.py
|
{
"filename": "shelve.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/shelve.py",
"type": "Python"
}
|
"""Manage shelves of pickled objects.
A "shelf" is a persistent, dictionary-like object. The difference
with dbm databases is that the values (not the keys!) in a shelf can
be essentially arbitrary Python objects -- anything that the "pickle"
module can handle. This includes most class instances, recursive data
types, and objects containing lots of shared sub-objects. The keys
are ordinary strings.
To summarize the interface (key is a string, data is an arbitrary
object):
import shelve
d = shelve.open(filename) # open, with (g)dbm filename -- no suffix
d[key] = data # store data at key (overwrites old data if
# using an existing key)
data = d[key] # retrieve a COPY of the data at key (raise
# KeyError if no such key) -- NOTE that this
# access returns a *copy* of the entry!
del d[key] # delete data stored at key (raises KeyError
# if no such key)
flag = key in d # true if the key exists
list = d.keys() # a list of all existing keys (slow!)
d.close() # close it
Dependent on the implementation, closing a persistent dictionary may
or may not be necessary to flush changes to disk.
Normally, d[key] returns a COPY of the entry. This needs care when
mutable entries are mutated: for example, if d[key] is a list,
d[key].append(anitem)
does NOT modify the entry d[key] itself, as stored in the persistent
mapping -- it only modifies the copy, which is then immediately
discarded, so that the append has NO effect whatsoever. To append an
item to d[key] in a way that will affect the persistent mapping, use:
data = d[key]
data.append(anitem)
d[key] = data
To avoid the problem with mutable entries, you may pass the keyword
argument writeback=True in the call to shelve.open. When you use:
d = shelve.open(filename, writeback=True)
then d keeps a cache of all entries you access, and writes them all back
to the persistent mapping when you call d.close(). This ensures that
such usage as d[key].append(anitem) works as intended.
However, using keyword argument writeback=True may consume vast amount
of memory for the cache, and it may make d.close() very slow, if you
access many of d's entries after opening it in this way: d has no way to
check which of the entries you access are mutable and/or which ones you
actually mutate, so it must cache, and write back at close, all of the
entries that you access. You can call d.sync() to write back all the
entries in the cache, and empty the cache (d.sync() also synchronizes
the persistent dictionary on disk, if feasible).
"""
from pickle import DEFAULT_PROTOCOL, Pickler, Unpickler
from io import BytesIO
import collections.abc
__all__ = ["Shelf", "BsdDbShelf", "DbfilenameShelf", "open"]
class _ClosedDict(collections.abc.MutableMapping):
'Marker for a closed dict. Access attempts raise a ValueError.'
def closed(self, *args):
raise ValueError('invalid operation on closed shelf')
__iter__ = __len__ = __getitem__ = __setitem__ = __delitem__ = keys = closed
def __repr__(self):
return '<Closed Dictionary>'
class Shelf(collections.abc.MutableMapping):
    """Base class for shelf implementations.

    This is initialized with a dictionary-like object mapping encoded
    (bytes) keys to pickled values.
    See the module's __doc__ string for an overview of the interface.
    """

    def __init__(self, dict, protocol=None, writeback=False,
                 keyencoding="utf-8"):
        # Backing mapping: bytes keys -> pickled bytes values.
        self.dict = dict
        if protocol is None:
            protocol = DEFAULT_PROTOCOL
        self._protocol = protocol
        # With writeback=True, every accessed entry is kept in ``cache``
        # and flushed back by sync()/close(), so in-place mutations of
        # retrieved objects persist.
        self.writeback = writeback
        self.cache = {}
        self.keyencoding = keyencoding

    def __iter__(self):
        # Decode the backing store's bytes keys back to str.
        for k in self.dict.keys():
            yield k.decode(self.keyencoding)

    def __len__(self):
        return len(self.dict)

    def __contains__(self, key):
        return key.encode(self.keyencoding) in self.dict

    def get(self, key, default=None):
        # Deliberately checks the backing dict (not the cache) before
        # delegating to __getitem__.
        if key.encode(self.keyencoding) in self.dict:
            return self[key]
        return default

    def __getitem__(self, key):
        try:
            # Fast path: a previously accessed entry in writeback mode.
            value = self.cache[key]
        except KeyError:
            f = BytesIO(self.dict[key.encode(self.keyencoding)])
            value = Unpickler(f).load()
            if self.writeback:
                self.cache[key] = value
        return value

    def __setitem__(self, key, value):
        if self.writeback:
            self.cache[key] = value
        # Pickle into an in-memory buffer, then store the raw bytes.
        f = BytesIO()
        p = Pickler(f, self._protocol)
        p.dump(value)
        self.dict[key.encode(self.keyencoding)] = f.getvalue()

    def __delitem__(self, key):
        del self.dict[key.encode(self.keyencoding)]
        # The entry may or may not be cached; ignore a miss.
        try:
            del self.cache[key]
        except KeyError:
            pass

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def close(self):
        # self.dict is None only if a previous close() failed while
        # installing the _ClosedDict sentinel; nothing left to do then.
        if self.dict is None:
            return
        try:
            # Flush the writeback cache before closing the store.
            self.sync()
            try:
                self.dict.close()
            except AttributeError:
                pass
        finally:
            # Catch errors that may happen when close is called from __del__
            # because CPython is in interpreter shutdown.
            try:
                self.dict = _ClosedDict()
            except:
                self.dict = None

    def __del__(self):
        if not hasattr(self, 'writeback'):
            # __init__ didn't succeed, so don't bother closing
            # see http://bugs.python.org/issue1339007 for details
            return
        self.close()

    def sync(self):
        # Drain the writeback cache by re-storing each cached entry;
        # writeback is toggled off temporarily so __setitem__ does not
        # re-populate the cache while it is being emptied.
        if self.writeback and self.cache:
            self.writeback = False
            for key, entry in self.cache.items():
                self[key] = entry
            self.writeback = True
            self.cache = {}
        if hasattr(self.dict, 'sync'):
            self.dict.sync()
class BsdDbShelf(Shelf):
    """Shelf implementation using the "BSD" db interface.

    This adds methods first(), next(), previous(), last() and
    set_location() that have no counterpart in [g]dbm databases.

    The actual database must be opened using one of the "bsddb"
    modules "open" routines (i.e. bsddb.hashopen, bsddb.btopen or
    bsddb.rnopen) and passed to the constructor.

    See the module's __doc__ string for an overview of the interface.
    """

    def __init__(self, dict, protocol=None, writeback=False,
                 keyencoding="utf-8"):
        Shelf.__init__(self, dict, protocol, writeback, keyencoding)

    def _decode_pair(self, pair):
        # Shared helper: turn a raw (bytes key, pickled value) pair into
        # a (str key, unpickled object) pair.
        (key, value) = pair
        return (key.decode(self.keyencoding), Unpickler(BytesIO(value)).load())

    def set_location(self, key):
        return self._decode_pair(self.dict.set_location(key))

    def next(self):
        return self._decode_pair(next(self.dict))

    def previous(self):
        return self._decode_pair(self.dict.previous())

    def first(self):
        return self._decode_pair(self.dict.first())

    def last(self):
        return self._decode_pair(self.dict.last())
class DbfilenameShelf(Shelf):
    """Shelf backed by the generic "dbm" interface, opened by filename.

    See the module's __doc__ string for an overview of the interface.
    """

    def __init__(self, filename, flag='c', protocol=None, writeback=False):
        # Deferred import: dbm is only needed when a filename-based
        # shelf is actually created.
        import dbm
        backing = dbm.open(filename, flag)
        Shelf.__init__(self, backing, protocol, writeback)
def open(filename, flag='c', protocol=None, writeback=False):
    """Open a persistent dictionary for reading and writing.

    ``filename`` is the base name for the underlying database; the dbm
    backend may append an extension and create more than one file.
    ``flag`` has the same meaning as in dbm.open(), and ``protocol``
    selects the pickle protocol version.

    See the module's __doc__ string for an overview of the interface.
    """
    # NOTE: this module-level name intentionally shadows the builtin
    # ``open`` (matching the stdlib shelve API).
    shelf = DbfilenameShelf(filename, flag, protocol, writeback)
    return shelf
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@python3@Lib@shelve.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "mseitzer/pytorch-fid",
"repo_path": "pytorch-fid_extracted/pytorch-fid-master/setup.py",
"type": "Python"
}
|
import os
import setuptools
def read(rel_path):
    """Return the text content of *rel_path*, resolved relative to this file."""
    here = os.path.abspath(os.path.dirname(__file__))
    with open(os.path.join(here, rel_path), "r") as handle:
        return handle.read()
def get_version(rel_path):
    """Extract the ``__version__`` string literal from the file at *rel_path*.

    Raises RuntimeError if no ``__version__`` assignment is found.
    """
    for line in read(rel_path).splitlines():
        if not line.startswith("__version__"):
            continue
        # e.g. __version__ = "0.9" — value sits between the quotes.
        quote = '"' if '"' in line else "'"
        return line.split(quote)[1]
    raise RuntimeError("Unable to find version string.")
# Package metadata and build configuration; runs only when this setup.py
# is executed directly (pip/setuptools entry point).
if __name__ == "__main__":
    setuptools.setup(
        name="pytorch-fid",
        # Single-source the version from the package's __init__.py.
        version=get_version(os.path.join("src", "pytorch_fid", "__init__.py")),
        author="Max Seitzer",
        description=(
            "Package for calculating Frechet Inception Distance (FID)" " using PyTorch"
        ),
        long_description=read("README.md"),
        long_description_content_type="text/markdown",
        url="https://github.com/mseitzer/pytorch-fid",
        # src-layout: packages live under src/.
        package_dir={"": "src"},
        packages=setuptools.find_packages(where="src"),
        classifiers=[
            "Programming Language :: Python :: 3",
            "License :: OSI Approved :: Apache Software License",
        ],
        python_requires=">=3.5",
        entry_points={
            "console_scripts": [
                "pytorch-fid = pytorch_fid.fid_score:main",
            ],
        },
        install_requires=[
            "numpy",
            "pillow",
            "scipy",
            "torch>=1.0.1",
            "torchvision>=0.2.2",
        ],
        extras_require={
            "dev": ["flake8", "flake8-bugbear", "flake8-isort", "black==24.3.0", "nox"]
        },
    )
|
mseitzerREPO_NAMEpytorch-fidPATH_START.@pytorch-fid_extracted@pytorch-fid-master@setup.py@.PATH_END.py
|
{
"filename": "_type.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/carpet/baxis/_type.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TypeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``carpet.baxis.type`` property."""

    def __init__(self, plotly_name="type", parent_name="carpet.baxis", **kwargs):
        # Fill in defaults only when the caller did not override them.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("values", ["-", "linear", "date", "category"])
        super(TypeValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@carpet@baxis@_type.py@.PATH_END.py
|
{
"filename": "train_regressor_CNN.py",
"repo_name": "epfl-radio-astro/LiSA",
"repo_path": "LiSA_extracted/LiSA-main/utils/train_regressor_CNN.py",
"type": "Python"
}
|
import numpy as np
import sys, time, os
import matplotlib.pyplot as plt
import tensorflow as tf
from pathlib import Path
from modules.truth_info import TruthSource
from modules.domain_reader import BinaryDomainReader as DenoisedReader
from modules.domain_reader import AstropyDomainReader as Reader
from modules.ai.enums import AugmentMode
from modules.ai.regressor import CNN
from modules.ai.utils import get_cutouts, loss_fn, asymmetry
from modules.truth_info import transforms
def make_training_data(size, rank, training_filepath, filepath_truth, skip_existing = True):
    """Write per-source cutout pairs (original + denoised) for CNN training.

    Walks all 144 sky domains, matches truth-catalog sources per domain,
    and saves two .npy cutouts per bright source into training_filepath.
    NOTE(review): ``size``/``rank`` look intended for MPI work-sharding
    but are never used here — confirm whether domain_index should be
    strided by rank. ``prev_index`` is also unused.
    """
    original_data = "/scratch/etolley/SDC2/dev/sky_ldev_v2.fits"
    denoised_data = "/scratch/etolley/SDC2/dev_denoised3d/sky_ldev_3Ddenoised_thr3_*.npy"
    reader1 = None
    reader2 = None
    prev_index = -1
    for domain_index in range(144):
        print ("Processing sources in domain {0} of {1}" .format(domain_index, 144))
        # reader1 reads the raw cube; reader2 the matching denoised tile.
        reader1 = Reader(1, 0, original_data, border = 15)
        reader2 = DenoisedReader(144, domain_index, denoised_data, filepath_header = original_data, border = 15)
        print(" x: {0} - {1}, y: {2} - {3}".format(reader2.xmin, reader2.ymin, reader2.xmax, reader2.ymax))
        # Clamp the raw reader's window to the denoised tile's footprint.
        reader1.xmin, reader1.ymin, reader1.xmax, reader1.ymax = reader2.xmin, reader2.ymin, reader2.xmax, reader2.ymax
        reader1._read_header()
        sources = TruthSource.catalog_to_sources_in_domain(filepath_truth, reader1)
        print("Domain has {0} sources".format(len(sources)))
        for s in sources:
            # Skip faint sources below the training flux cut.
            if s.line_flux_integral() < 60: continue
            ID = int(s.ID())
            orig_name = "{0}/cutout_original_{1}".format(training_filepath, ID)
            denoised_name = "{0}/cutout_denoised_{1}".format(training_filepath, ID)
            if skip_existing and Path(orig_name).is_file():
                continue
            # Lazy-load the (large) cubes only when a cutout is needed.
            if not reader1.is_data_loaded(): reader1.read()
            if not reader2.is_data_loaded(): reader2.read()
            orig_cutout = reader1.get_cutout( (s.x(),s.y(),s.z()), 15, 100)
            denoised_cutout = reader2.get_cutout( (s.x(),s.y(),s.z()), 15, 100)
            # Guard against truncated cutouts near domain borders.
            if orig_cutout.shape != denoised_cutout.shape or orig_cutout.shape[0] == 0:
                print("************* Skipping problematic cutout ************* ")
                print("* ",orig_cutout.shape, denoised_cutout.shape)
                print("* ",s.x(),s.y(),s.z())
                print("******************************************************* ")
                continue
            # Diagnostic only: flag sources whose collapsed image looks lopsided.
            plane = np.sum( denoised_cutout, axis = 0)
            if asymmetry(plane) < -0.05:
                print("========== weird source ========== ")
                print("=",reader2)
                print("= ID:",s.ID(),"pos:",s.x(),s.y(),s.z())
                print("================================== ")
            np.save(orig_name, orig_cutout)
            np.save(denoised_name, denoised_cutout)
#==========================================
def permute_training_data(training_filepath, file_truth, permuted_training_filepath):
    """Augment the cutout training set by rotating each source's cutouts.

    For every source in the truth catalog that has a matching
    ``cutout_original_<ID>.npy`` / ``cutout_denoised_<ID>.npy`` pair under
    ``training_filepath``, write 13 rotated copies (45..315 deg) of both
    cutouts into ``permuted_training_filepath``, together with a permuted
    truth catalog whose position angles track the applied rotation.

    Parameters
    ----------
    training_filepath : str
        Directory holding the un-permuted cutout ``.npy`` files.
    file_truth : str
        Path of the truth catalog matching those cutouts.
    permuted_training_filepath : str
        Output directory; also receives the ``*_permute.txt`` catalog.
    """
    # BUG FIX: ``glob`` is used below but was never imported anywhere in
    # this module, so this function raised NameError at runtime.
    import glob
    from scipy import ndimage
    file_truth_permuted = "{0}/{1}".format(
        permuted_training_filepath,
        file_truth.split('/')[-1].replace('.txt', '_permute.txt'))
    sources = TruthSource.catalog_to_sources(file_truth)
    files_original = glob.glob(training_filepath + "*original*.npy")
    files_original.sort()
    files_denoised = glob.glob(training_filepath + "*denoised*.npy")
    files_denoised.sort()
    assert len(files_original) == len(files_denoised)
    print("Permuting {0} files".format(len(files_original) + len(files_denoised)))
    source_count = 0
    with open(file_truth_permuted, 'w') as outfile:
        for s in sources:
            ID = int(s.ID())
            # Find this source's original cutout by the ID in its filename.
            index = -999
            d1 = None
            for i, f in enumerate(files_original):
                file_ID = int(f.split('.')[0].split('_')[-1])
                if file_ID != ID: continue
                d1 = np.load(f)
                index = i
                break
            if index == -999:
                continue  # no cutout on disk for this source
            f_denoised = files_denoised[index]
            # BUG FIX: the cross-check previously re-parsed ``f`` (the
            # original file, already known to match); verify the *denoised*
            # file at the same sorted index really belongs to this source.
            file_ID = int(f_denoised.split('.')[0].split('_')[-1])
            assert file_ID == ID
            d2 = np.load(f_denoised)
            assert d1.shape == d2.shape
            print("Now permuting source {0}".format(ID))
            rotations = np.linspace(45, 315, 13)
            for r in rotations:
                s_array = np.copy(s.data)
                # Rotate the spatial (y, x) plane; 'reflect' padding keeps
                # the cutout shape unchanged.
                d1_p = ndimage.rotate(d1, r, axes=(1, 2), mode = 'reflect', reshape = False)
                d2_p = ndimage.rotate(d2, r, axes=(1, 2), mode = 'reflect', reshape = False)
                # Index 6 of the truth row holds the position angle (deg);
                # keep it consistent with the applied rotation.
                s_array[6] += r
                s_array[6] %= 360
                np.save("{0}/cutout_original_{1}.npy".format(permuted_training_filepath, source_count), d1_p)
                np.save("{0}/cutout_denoised_{1}.npy".format(permuted_training_filepath, source_count), d2_p)
                out_source = ' '.join([ str(s_array[i]) for i in range(1,9)])
                out_source = "{0} {1}\n".format(source_count, out_source)
                print(" " ,out_source[:-2])
                outfile.write(out_source)
                source_count += 1
if __name__ == "__main__":
    ############################################
    # Setup
    ############################################
    #===== MPI =====
    # Fall back to a single-task layout when mpi4py is unavailable.
    # NOTE(review): bare except also hides real MPI init errors — confirm.
    try:
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        size = comm.Get_size()
        rank = comm.Get_rank()
        print("This is task %d out of %d" % (comm.rank, comm.size))
    except:
        size = 1
        rank = 0
    #===== filepaths =====
    filepath_truth = "/scratch/etolley/SDC2/dev/sky_ldev_truthcat_v2.txt"
    remake_training_data = False
    transform_type = 'log' # 'quantile' or 'log' or 'power'
    doubled_data = False
    training_filepath = "/scratch/etolley/SDC2/dev_regressor_training_large/"
    if remake_training_data:
        make_training_data(size, rank, training_filepath, filepath_truth, skip_existing = False)
    training_filepath_permuted = "/scratch/etolley/SDC2/dev_regressor_training_permute13/"
    filepath_truth_permuted = "/scratch/etolley/SDC2/dev_regressor_training_permute13/sky_ldev_truthcat_v2_permute.txt"
    # Build the rotation-augmented set only once.
    if not Path(filepath_truth_permuted).is_file():
        permute_training_data(training_filepath, filepath_truth, training_filepath_permuted)
    ############################################
    # Run pipeline
    ############################################
    #===== initialize modules =====
    # CNN wrapper
    cutout_dim = (200, 60 if doubled_data else 30, 30, 1)
    cnn = CNN(cutout_dim) #InputMode.SPREAD THREEDEE
    # Regression targets: each entry maps an output name to a truth accessor.
    cnn.out_dict = {
        "Line Flux Integral": lambda x: x.line_flux_integral(),
        "HI size": lambda x: x.hi_size(),
        "Cos(Pos A)": lambda x: np.cos(x.pos_a()*np.pi/180),
        "Sin(Pos A)": lambda x: np.sin(x.pos_a()*np.pi/180),
        "Inc A": lambda x: x.inc_a(),
        "w20":lambda x: x.w20(),
    }
    n_epochs = 1000
    flux_threshold = 20
    augmentstr = "test-permute"
    outname = "data/CNN_regressor-inception-{0}_{1}".format(transform_type, "2x" if doubled_data else "1x")
    outname += "_th-{0}_vars-{1}_aug-{2}_epochs-{3}".format(flux_threshold, cnn.n_out_params, augmentstr, n_epochs)
    print("####################\nWriting all outputs to {0}*\n####################".format(outname))
    #===== run modules =====
    from modules.ai.generator import DataGenerator as Gen
    # Training generator draws from the permuted (augmented) cutouts.
    generator = Gen(path_original = training_filepath_permuted + "*original*.npy",
                    path_denoised = training_filepath_permuted + "*denoised*.npy" if doubled_data else None,
                    path_truth = filepath_truth_permuted, properties = cnn.out_dict, dim = cutout_dim,
                    batch_size = 32, transform_type = transform_type)
    generator.augment = AugmentMode.TRAIN # FAST TRAIN FULL
    generator.load_data(flux_threshold = flux_threshold)
    generator.gen_by_flux = True
    # Persist the fitted input transform for use at inference time.
    from pickle import dump
    dump(generator.input_sf, open('data/regressor_input_transform.pkl', 'wb'))
    # Validation generator draws from the *un*-permuted cutouts.
    validation_generator = Gen(path_original = training_filepath + "*original*.npy",
                               path_denoised = training_filepath + "*denoised*.npy" if doubled_data else None,
                               path_truth = filepath_truth, properties = cnn.out_dict, dim = cutout_dim, batch_size = 32,
                               transform_type = transform_type)
    validation_generator.load_data(flux_threshold = flux_threshold)
    validation_generator.gen_by_flux = True
    print("########################")
    # Sanity-check plot of the first few training cutouts.
    fig, axs = plt.subplots(2,10, figsize=(12, 5))
    plt.tight_layout()
    for i in range(10):
        d = generator[i][0][0]
        print(d.shape, np.min(d), np.max(d))
        axs[0,i].imshow(np.sum(d, axis = 0))
        axs[1,i].imshow(np.sum(d, axis = 2))
    plt.savefig("check_regression_training_data.png")
    print("########################")
    print("Now defining and compiling model...")
    # Multi-GPU data-parallel training.
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        cnn.build_architecture()
    #tf.keras.utils.plot_model(cnn.model, to_file=outname + 'structure.png', show_shapes=True, show_layer_names=True)
    print("Now training...")
    # simple early stopping
    checkpoint_filepath = '/tmp/cnn_reg_checkpoint'
    # NOTE(review): checkpoint_filepath2 is never used — confirm intent.
    checkpoint_filepath2 = '/tmp/cnn_reg_checkpoint2'
    es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=20)
    mcp_save = tf.keras.callbacks.ModelCheckpoint(checkpoint_filepath,
                                                  save_weights_only=True,
                                                  save_best_only=True,
                                                  monitor='val_loss', mode='min', verbose = 1)
    #with tf.device("device:GPU:0"): #/device:XLA_GPU:0
    history1= cnn.model.fit(generator, epochs=n_epochs, validation_data = validation_generator,
                            callbacks = [ mcp_save, es]) #validation_data = generator_val
    # Restore the best (lowest val_loss) weights before saving.
    cnn.model.load_weights(checkpoint_filepath)
    min_loss = np.min(history1.history['val_loss'])
    outname1 = outname + "_1st-training_"+ ("loss{0:.3f}".format(min_loss)).replace('.','p')
    cnn.model.save(outname1 + "_network")
    #try:
    #    tf.keras.utils.plot_model(cnn.model, to_file=outname + 'structure.png', show_shapes=True, show_layer_names=True)
    #except:
    #    "Unable to make network plot"
    #=================================================
    # Diagnostic plots: loss curves plus truth-vs-prediction scatter.
    fig, axs = plt.subplots(2, cnn.n_out_params + 1, figsize=(16, 6))
    plt.subplots_adjust(wspace=0.5, hspace=0.4)
    try:
        axs[0,0].plot(history1.history['loss'])
        axs[0,0].plot(history1.history['val_loss'])
        axs[0,0].set_yscale('log')
        axs[0,0].set(ylabel = "Loss (Mean Square Error)", xlabel = "Training Epoch")
        axs[1,0].plot(history1.history['mae'])
        axs[1,0].plot(history1.history['val_mae'])
        axs[1,0].set_yscale('log')
        axs[1,0].set(ylabel = "Mean Absolute Error", xlabel = "Training Epoch")
    except: pass
    val_data, val_truth = validation_generator.X, validation_generator.Y
    val_predict = cnn.model.predict( val_data)
    # Map predictions back to physical units with the inverse transform.
    if transform_type == "log":
        val_predict_orig = transforms.inv_transform(np.copy(val_predict))
    elif transform_type == "quantile":
        val_predict_orig = generator.qt.inverse_transform(np.copy(val_predict))
    elif transform_type == "power":
        val_predict_orig = generator.pt.inverse_transform(np.copy(val_predict))
    val_truth_orig = validation_generator.Y_orig
    train_data, train_truth = generator[0]
    train_predict = cnn.model.predict( train_data)
    print("N params predicted:", cnn.n_out_params)
    for i in range(cnn.n_out_params):
        # Top row: transformed space; bottom row: physical units.
        p_min, p_max = 0, 1
        axs[0,1+i].plot([p_min,p_max], [p_min,p_max], 'k-')
        axs[0,1+i].plot(train_truth[:,i], train_predict[:,i], 'c.')
        axs[0,1+i].plot(val_truth[:,i], val_predict[:,i], 'bx')
        #axs[1,1+i].plot(train_truth_orig[:,i], train_predict_orig[:,i], 'c.')
        p_min, p_max = np.min(val_truth_orig[:,i]), np.max(val_truth_orig[:,i])
        axs[1,1+i].plot([p_min,p_max], [p_min,p_max], 'k-')
        axs[1,1+i].plot(val_truth_orig[:,i], val_predict_orig[:,i], 'bx')
        axs[1,1+i].set_xlim([p_min, p_max])
        axs[1,1+i].set_ylim([p_min, p_max])
        axs[0,1+i].set(ylabel = "Prediction {0}".format(cnn.out_names[i]),
                       xlabel = "Truth {0}".format(cnn.out_names[i]))
        axs[1,1+i].set(ylabel = "Prediction {0}".format(cnn.out_names[i]),
                       xlabel = "Truth {0}".format(cnn.out_names[i]))
    plt.savefig(outname1 + "_plot")
    #plt.show()
    #plt.clf()
    ##plt.plot(history.history['mean_absolute_error'])
    #plt.plot(history.history['val_mean_absolute_error'])
    #plt.title('2 conv 2 max pooling 2 dense - 16,8 and 8,4 filters -lr =0.1 - 5sigma ')
    #plt.savefig('/home/aliqoliz/outputs/acc_flux.png')
    #plt.show()
|
epfl-radio-astroREPO_NAMELiSAPATH_START.@LiSA_extracted@LiSA-main@utils@train_regressor_CNN.py@.PATH_END.py
|
{
"filename": "gmosaicparam.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/gempy/gemini/eti/gmosaicparam.py",
"type": "Python"
}
|
import os
from pyraf import iraf
from gempy.utils import logutils
from gempy.eti_core.pyrafetiparam import PyrafETIParam, IrafStdout
log = logutils.get_logger(__name__)
class GmosaicParam(PyrafETIParam):
    """Adapter between ETI parameters and the IRAF ``gmosaic`` task.

    Each subclass translates one logical parameter; ``prepare`` pushes
    the resulting (key, value) pair into ``paramdict`` for the IRAF call.
    """
    inputs = None
    params = None
    key = None
    value = None

    def __init__(self, inputs=None, params=None, key=None, value=None):
        """
        :param key: A parameter name that is added as a dict key in prepare
        :type key: any
        :param value: A parameter value that is added as a dict value
                      in prepare
        :type value: any
        """
        log.debug("GmosaicParam __init__")
        PyrafETIParam.__init__(self, inputs, params)
        self.key = key
        self.value = value

    def nonecheck(self, param=None):
        # Normalise Python None / the string "None" to IRAF's "none".
        return "none" if param is None or param == "None" else param

    def prepare(self):
        log.debug("Gmosaic prepare()")
        self.paramdict.update({self.key: self.value})
class FlPaste(GmosaicParam):
    """Maps the ``tile`` option onto gmosaic's ``fl_paste`` flag."""
    inputs = None
    params = None
    fl_paste = None

    def __init__(self, inputs=None, params=None):
        log.debug("FlPaste __init__")
        GmosaicParam.__init__(self, inputs, params)
        tile = self.nonecheck(params["tile"])
        # "none"/False disable pasting; anything else enables it.
        self.fl_paste = iraf.no if tile in ("none", False) else iraf.yes

    def prepare(self):
        log.debug("Flpaste prepare()")
        self.paramdict.update({"fl_paste": self.fl_paste})
class FlFixpix(GmosaicParam):
    """Maps ``interpolate_gaps`` onto gmosaic's ``fl_fixpix`` flag."""
    inputs = None
    params = None
    fl_fixpix = None

    def __init__(self, inputs=None, params=None):
        log.debug("FlFixpix __init__")
        GmosaicParam.__init__(self, inputs, params)
        igaps = self.nonecheck(params["interpolate_gaps"])
        # "none"/False disable gap interpolation; anything else enables it.
        self.fl_fixpix = iraf.no if igaps in ("none", False) else iraf.yes

    def prepare(self):
        log.debug("FlFixpix prepare()")
        self.paramdict.update({"fl_fixpix": self.fl_fixpix})
class Geointer(GmosaicParam):
    """Maps ``interpolator`` onto gmosaic's ``geointer`` parameter."""
    inputs = None
    params = None
    geointer = None

    def __init__(self, inputs=None, params=None):
        log.debug("Geointer __init__")
        GmosaicParam.__init__(self, inputs, params)
        inter = self.nonecheck(params["interpolator"])
        # Fall back to linear interpolation when no interpolator is given.
        self.geointer = "linear" if inter == "none" else inter

    def prepare(self):
        log.debug("Geointer prepare()")
        self.paramdict.update({"geointer": self.geointer})
class FlVardq(GmosaicParam):
    """Enables ``fl_vardq`` when the input AD carries VAR and DQ planes."""
    inputs = None
    params = None
    fl_vardq = None
    ad = None

    def __init__(self, inputs=None, params=None, ad=None):
        log.debug("FlVardq __init__")
        GmosaicParam.__init__(self, inputs, params)
        # Propagate VAR/DQ only when both planes exist on the input.
        has_var_dq = ad.variance is not None and ad.mask is not None
        self.fl_vardq = iraf.yes if has_var_dq else iraf.no

    def prepare(self):
        log.debug("FlVardq prepare()")
        self.paramdict.update({"fl_vardq": self.fl_vardq})
class FlClean(GmosaicParam):
    """Sets gmosaic's ``fl_clean`` flag.

    Always enabled now that BPMs exist; the old Hamamatsu-N special
    case that disabled cleaning has been retired.
    """
    inputs = None
    params = None
    fl_clean = None
    ad = None

    def __init__(self, inputs=None, params=None, ad=None):
        log.debug("FlClean __init__")
        GmosaicParam.__init__(self, inputs, params)
        self.fl_clean = iraf.yes

    def prepare(self):
        log.debug("FlClean prepare()")
        self.paramdict.update({"fl_clean": self.fl_clean})
# Fixed IRAF parameters used for every gmosaic call: route the task's
# stdout/stderr through the logging wrapper instead of the terminal.
mosaic_detectors_hardcoded_params = {"Stdout" : IrafStdout(),
                                     "Stderr" : IrafStdout()}
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@gempy@gemini@eti@gmosaicparam.py@.PATH_END.py
|
{
"filename": "_labelssrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/treemap/_labelssrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LabelssrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Data-source validator for the ``treemap.labelssrc`` property."""

    def __init__(self, plotly_name="labelssrc", parent_name="treemap", **kwargs):
        # Fill in defaults only when the caller did not override them.
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("role", "info")
        super(LabelssrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@treemap@_labelssrc.py@.PATH_END.py
|
{
"filename": "quasiperiodic_cosine_kernel.md",
"repo_name": "LucaMalavolta/PyORBIT",
"repo_path": "PyORBIT_extracted/PyORBIT-main/docs/gaussian_process/quasiperiodic_cosine_kernel.md",
"type": "Markdown"
}
|
(quasiperiodic_cosine_kernel)=
# Quasi-periodic with cosine kernel
This kernel has been introduced by [Perger et al. 2021](https://ui.adsabs.harvard.edu/abs/2021A%26A...645A..58P/abstract). The kernel has been implemented in `PyORBIT` without relying on any other package.
If we define $\tau = t_i-t_j$ :
```{math}
:label: quasiperiodic_cosine
G(\tau ) = \exp{\frac{-2 \tau^2}{P_\mathrm{dec}^2}} \, \left [ H_\mathrm{amp}^2 \exp{-\frac {
\sin^2{( \pi \tau / P_\mathrm{rot} )}}{2 O_\mathrm{amp} ^2}} + C_\mathrm{amp}^2 \cos \frac{ 4\pi \tau}{P_\mathrm{rot}} \right ]
```
where $P_\mathrm{rot}$ is equivalent to the rotation period of the star, $O_\mathrm{amp}$ is the coherence scale, and $P_\mathrm{dec} $ is usually associated with the decay time scale of the active regions. Within `PyORBIT`, the amplitude of the quasi-periodic part of the kernel $h_1$ and the amplitude of the cosine part $h_2$ have been labeled as `Hamp` and `Camp` respectively.
```{important}
As for the quasi-periodic kernel, mind the possible presence of a factor 2 in the denominator of the aperiodic variation (i.e., $2 \lambda$ rather than $\lambda$)
```
## Model definition and requirements
**model name**: `tinygp_quasiperiodic_cosine`
- required common object: `activity`
- this model relies on `tinygp`
**model name**: `gp_quasiperiodic_cosine`
- required common object: `activity`
- *direct* implementation relying only on `scipy` and `numpy`
## Model parameters
The following parameters will be inherited from the common model (column *Common?: common*) or a different value will be assigned for each dataset (column *Common?: dataset*)
| Name | Parameter | Common? | Definition | Notes |
| :--- | :-------- | :------------- | :----- | :---- |
| Prot | Rotational period of the star $\theta$ | common | ``activity`` | |
| Pdec | Decay time scale of active regions $\lambda$ | common | ``activity`` | |
| Oamp | Coherence scale $w$ | common | ``activity`` | |
| Hamp | Amplitude of the kernel | dataset | ``activity`` | |
| Camp | Amplitude of the cosine part of the kernel | dataset | ``activity`` | |
## Keywords
Model-wide keywords, with the default value in boldface.
**hyperparameters_condition**
* accepted values: `True` | **`False`**
* activate the conditions $ \lambda ^ 2 > (3/4 \pi) \theta ^2 w ^ 2 $ (adapted from [Rajpaul 2017](https://ui.adsabs.harvard.edu/abs/2017PhDT.......229R/abstract) and [Rajpaul et al. 2021](https://ui.adsabs.harvard.edu/abs/2021MNRAS.507.1847R/abstract) to take into account the factor 2 in the denominator of the aperiodic variation) to ensure that the QP function has at least one non-trivial turning point.
**rotation_decay_condition**
* accepted values: `True` | **`False`**
* if activated, it ensures that the decay time scale of the activity regions $\lambda$ is at least twice the rotational period of the star $\theta$
**use_stellar_rotation_period**
* accepted values: `True` | **`False`**
* if activated, the parameter `Prot` from the `activity` *common model* will be replaced by the parameter `rotation_period` from the `star_parameters` *common model*. In this way, a unique parameter can be used by different models, e.g., stellar activity and Rossiter-McLaughlin modeling. It can also be useful if you want to use independent GP hyperparameters over several observational seasons while using a single parameter for the rotational period of the star.
|
LucaMalavoltaREPO_NAMEPyORBITPATH_START.@PyORBIT_extracted@PyORBIT-main@docs@gaussian_process@quasiperiodic_cosine_kernel.md@.PATH_END.py
|
{
"filename": "single_trapezoid_event.py",
"repo_name": "rice-solar-physics/ebtelPlusPlus",
"repo_path": "ebtelPlusPlus_extracted/ebtelPlusPlus-main/examples/single_trapezoid_event.py",
"type": "Python"
}
|
"""
Using an Asymmetric Heating Profile Under the Single-fluid Approximation
========================================================================
In this example, we force the electron and ion populations to have the
same temperature to illustrate the single fluid case.
"""
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
from astropy.visualization import quantity_support
import ebtelplusplus
from ebtelplusplus.models import DemModel, HeatingEvent, HeatingModel, PhysicsModel
quantity_support()
##################################################
# Set up a trapezoidal heating profile that rises
# for 250 s, stays constant for 750 s at a heating
# rate of 0.05 erg per cubic centimeter per second,
# and then decays linearly to the background rate
# over the course of 1000 s.
event = HeatingEvent(0*u.s,
250*u.s,
1000*u.s,
2000*u.s,
0.005*u.Unit('erg cm-3 s-1'))
##################################################
# In this heating model, we equally partition the
# injected energy between the electrons and the ions.
heating = HeatingModel(background=3.5e-5*u.Unit('erg cm-3 s-1'),
partition=0.5,
events=[event])
##################################################
# Note that we also need to enforce the single-fluid
# requirement in our physics model.
physics = PhysicsModel(force_single_fluid=True)
##################################################
# Now run the simulation for a 40 Mm loop lasting
# a total of 3 h. We'll also specify that we
# want to compute the DEM
result = ebtelplusplus.run(5e3*u.s,
40*u.Mm,
heating,
physics=physics,
dem=DemModel(calculate_dem=True))
##################################################
# Let's visualize the heating profile, temperature,
# and density as a function of time.
fig, axes = plt.subplots(3, 1, sharex=True)
axes[0].plot(result.time, result.heat)
axes[1].plot(result.time, result.electron_temperature, label='electron')
axes[1].plot(result.time, result.ion_temperature, label='ion')
axes[2].plot(result.time, result.density)
axes[1].legend()
##################################################
# Finally, let's visualize the DEM distribution.
# We'll first time-average each component over the
# duration of the simulation.
delta_t = np.gradient(result.time)
dem_avg_total = np.average(result.dem_tr+result.dem_corona,
axis=0,
weights=delta_t)
dem_avg_tr = np.average(result.dem_tr,
axis=0,
weights=delta_t)
dem_avg_corona = np.average(result.dem_corona,
axis=0,
weights=delta_t)
##################################################
# And now we can plot each component
fig = plt.figure()
ax = fig.add_subplot()
ax.plot(result.dem_temperature, dem_avg_total, label='Total')
ax.plot(result.dem_temperature, dem_avg_tr, label='TR')
ax.plot(result.dem_temperature, dem_avg_corona, label='Corona')
ax.set_xlim([10**(4.5), 10**(7.5)]*u.K)
ax.set_ylim([10**(20.0), 10**(23.5)]*u.Unit('cm-5 K-1'))
ax.set_xscale('log')
ax.set_yscale('log')
ax.legend()
plt.show()
|
rice-solar-physicsREPO_NAMEebtelPlusPlusPATH_START.@ebtelPlusPlus_extracted@ebtelPlusPlus-main@examples@single_trapezoid_event.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/layout/yaxis/title/_font.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Font(_BaseLayoutHierarchyType):
    """
    Font for this axis' title (``layout.yaxis.title.font``).

    Property access validates on assignment; the object serializes via
    the base class's ``to_plotly_json``.
    """

    # class properties
    _parent_path_str = "layout.yaxis.title"
    _path_str = "layout.yaxis.title.font"
    _valid_props = {
        "color",
        "family",
        "lineposition",
        "shadow",
        "size",
        "style",
        "textcase",
        "variant",
        "weight",
    }

    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color (any CSS color keyword, e.g. 'aliceblue',
            'tomato', 'yellowgreen')

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas One",
        "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow",
        "Raleway", "Times New Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    @property
    def lineposition(self):
        """
        Sets the kind of decoration line(s) with text, such as an
        "under", "over" or "through" as well as combinations e.g.
        "under+over", etc.

        The 'lineposition' property is a flaglist and may be specified
        as a string containing:
          - Any combination of ['under', 'over', 'through'] joined with '+' characters
            (e.g. 'under+over')
            OR exactly one of ['none'] (e.g. 'none')

        Returns
        -------
        Any
        """
        return self["lineposition"]

    @lineposition.setter
    def lineposition(self, val):
        self["lineposition"] = val

    @property
    def shadow(self):
        """
        Sets the shape and color of the shadow behind text. "auto"
        places minimal shadow and applies contrast text font color. See
        https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
        for additional options.

        The 'shadow' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["shadow"]

    @shadow.setter
    def shadow(self, val):
        self["shadow"] = val

    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    @property
    def style(self):
        """
        Sets whether a font should be styled with a normal or italic
        face from its family.

        The 'style' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'italic']

        Returns
        -------
        Any
        """
        return self["style"]

    @style.setter
    def style(self, val):
        self["style"] = val

    @property
    def textcase(self):
        """
        Sets capitalization of text. It can be used to make text appear
        in all-uppercase or all-lowercase, or with each word
        capitalized.

        The 'textcase' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'word caps', 'upper', 'lower']

        Returns
        -------
        Any
        """
        return self["textcase"]

    @textcase.setter
    def textcase(self, val):
        self["textcase"] = val

    @property
    def variant(self):
        """
        Sets the variant of the font.

        The 'variant' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'small-caps', 'all-small-caps',
                'all-petite-caps', 'petite-caps', 'unicase']

        Returns
        -------
        Any
        """
        return self["variant"]

    @variant.setter
    def variant(self, val):
        self["variant"] = val

    @property
    def weight(self):
        """
        Sets the weight (or boldness) of the font.

        The 'weight' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [1, 1000]
            OR exactly one of ['normal', 'bold'] (e.g. 'bold')

        Returns
        -------
        int
        """
        return self["weight"]

    @weight.setter
    def weight(self, val):
        self["weight"] = val

    @property
    def _prop_descriptions(self):
        return """\
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        size

        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        variant
            Sets the variant of the font.
        weight
            Sets the weight (or boldness) of the font.
        """

    def __init__(
        self,
        arg=None,
        color=None,
        family=None,
        lineposition=None,
        shadow=None,
        size=None,
        style=None,
        textcase=None,
        variant=None,
        weight=None,
        **kwargs,
    ):
        """
        Construct a new Font object

        Sets this axis' title font. Note that the title's font used to
        be customized by the now deprecated `titlefont` attribute.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.yaxis.title.Font`
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        size

        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        variant
            Sets the variant of the font.
        weight
            Sets the weight (or boldness) of the font.

        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.yaxis.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.yaxis.title.Font`"""
            )

        # Handle skip_invalid
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate the data dict: an explicit keyword argument takes
        # precedence over the same key supplied in ``arg``. The tuple
        # fixes the assignment order of the original unrolled code.
        for _prop, _kwarg_val in (
            ("color", color),
            ("family", family),
            ("lineposition", lineposition),
            ("shadow", shadow),
            ("size", size),
            ("style", style),
            ("textcase", textcase),
            ("variant", variant),
            ("weight", weight),
        ):
            _v = arg.pop(_prop, None)
            _v = _kwarg_val if _kwarg_val is not None else _v
            if _v is not None:
                self[_prop] = _v

        # Process unknown kwargs
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@layout@yaxis@title@_font.py@.PATH_END.py
|
{
"filename": "test_migration.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/tests/_internal/compatibility/test_migration.py",
"type": "Python"
}
|
import importlib
import pytest
from prefect._internal.compatibility.migration import MOVED_IN_V3, REMOVED_IN_V3
from prefect.exceptions import PrefectImportError
def import_from(dotted_path: str):
    """
    Import an object from a dotted path.

    This function dynamically imports an object (such as a class, function, or variable)
    from a specified module using a dotted path. The dotted path should be in the format
    "module.submodule:object_name", where "module.submodule" is the full module path and
    "object_name" is the name of the object to be imported from that module.

    Args:
        dotted_path (str): A string representing the module and object to import,
            separated by a colon. For example, "path.to.module:object_name".

    Returns:
        object: The imported object specified by the dotted path.

    Raises:
        ModuleNotFoundError: If the module specified in the dotted path cannot be found.
        AttributeError: If the object specified in the dotted path does not exist
            in the module.
        ValueError: If the dotted path is not in the correct format (missing the
            colon separator). The message helps developers who add new objects to
            MOVED_IN_V3 or REMOVED_IN_V3 without following the correct format.

    Example:
        To import the `MyClass` class from the `my_package.my_module` module,
        you would use:
        ```python
        MyClass = import_from("my_package.my_module:MyClass")
        ```
        Equivalent to:
        ```python
        from my_package.my_module import MyClass
        ```
    """
    # Keep the try body minimal: only the two-way unpack can raise the
    # "missing colon" ValueError. The previous implementation sniffed the
    # exception message and silently returned None when an unrelated
    # ValueError escaped the whole import; narrowing the try removes both
    # the message sniffing and the silent-None path.
    try:
        module_path, object_name = dotted_path.rsplit(":", 1)
    except ValueError as exc:
        raise ValueError(
            "Invalid dotted path format. Did you mean 'module.submodule:object_name' instead of 'module.submodule.object_name'?"
        ) from exc
    module = importlib.import_module(module_path)
    return getattr(module, object_name)
@pytest.mark.parametrize("module", MOVED_IN_V3.keys())
def test_moved_in_v3(module):
    # Each moved path must emit a DeprecationWarning whose message matches
    # the entry recorded in MOVED_IN_V3, while still importing successfully.
    expected_message = MOVED_IN_V3[module]
    with pytest.warns(DeprecationWarning, match=expected_message):
        import_from(module)
@pytest.mark.parametrize("module", REMOVED_IN_V3.keys())
def test_removed_in_v3(module):
    # Importing anything removed in v3 must raise PrefectImportError; the
    # trailing assert is unreachable unless import_from fails to raise and
    # then yields a clearer failure message than pytest's "DID NOT RAISE".
    failure_message = f"{module} should not have been importable"
    with pytest.raises(PrefectImportError):
        import_from(module)
        assert False, failure_message
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@tests@_internal@compatibility@test_migration.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "tcollett/LensPop",
"repo_path": "LensPop_extracted/LensPop-master/pylens/__init__.py",
"type": "Python"
}
|
import massmodel,pylens,MassModels
|
tcollettREPO_NAMELensPopPATH_START.@LensPop_extracted@LensPop-master@pylens@__init__.py@.PATH_END.py
|
{
"filename": "test_threshold.py",
"repo_name": "gwastro/pycbc",
"repo_path": "pycbc_extracted/pycbc-master/test/test_threshold.py",
"type": "Python"
}
|
# Copyright (C) 2012 Alex Nitz, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Unit tests for PyCBC's thresholding code.
"""
import unittest
import numpy
from pycbc.types import Array, complex64
from pycbc.events import threshold
from utils import parse_args_all_schemes, simple_exit
# Select the processing scheme/context to exercise from the command line;
# the test harness runs this module once per registered scheme.
_scheme, _context = parse_args_all_schemes("Threshold")
# The pure-numpy CPU implementation serves as the trusted reference.
from pycbc.events.threshold_cpu import threshold_numpy as trusted_threshold
class TestThreshold(unittest.TestCase):
    """Check the scheme-specific threshold() against the trusted CPU result."""

    def setUp(self, *args):
        self.context = _context
        self.scheme = _scheme
        # Seed the random data so failures are reproducible: with unseeded
        # noise, samples can land arbitrarily close to the threshold and a
        # failing run cannot be replayed.
        rng = numpy.random.RandomState(1234)
        r = rng.uniform(low=-1, high=1.0, size=2**20)
        i = rng.uniform(low=-1, high=1.0, size=2**20)
        v = r + i * 1.0j
        self.series = Array(v, dtype=complex64)
        self.threshold = 1.3
        # Reference locations/values computed by the trusted CPU code.
        self.locs, self.vals = trusted_threshold(self.series, self.threshold)
        print(f'Reference: {len(self.locs)} locs, {len(self.vals)} vals')

    def test_threshold(self):
        with self.context:
            locs, vals = threshold(self.series, self.threshold)
            print(f'Test: {len(locs)} locs, {len(vals)} vals')
            self.assertTrue((locs == self.locs).all())
            self.assertTrue((vals == self.vals).all())
# Build the suite explicitly so the module can be driven both by an external
# runner and as a script; simple_exit converts the results into an exit code
# equal to the number of failed tests.
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestThreshold))
if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
|
gwastroREPO_NAMEpycbcPATH_START.@pycbc_extracted@pycbc-master@test@test_threshold.py@.PATH_END.py
|
{
"filename": "INSTALL.md",
"repo_name": "veusz/veusz",
"repo_path": "veusz_extracted/veusz-master/INSTALL.md",
"type": "Markdown"
}
|
# Veusz Installation
## Installation methods
1. Provided binaries for x86-64 Linux, Windows and MacOS - please go
to the [download page](https://veusz.github.io/download/). See also
below for further information.
2. Packages for your Linux distribution, provided by the
distribution. These are often older than the current version.
3. [PPA](https://launchpad.net/~jeremysanders/+archive/ubuntu/ppa) for
Ubuntu distribution, which we try to keep up to date.
4. [flatpak](https://flathub.org/apps/details/io.github.veusz.Veusz) runs on many linux distributions.
5. Anaconda conda-forge [package](https://anaconda.org/conda-forge/veusz).
6. Source build, download from the [download
page](https://veusz.github.io/download/) and see below for build
instructions.
## Provided binaries
### Windows
Simply run the setup.exe binary installer. Add the location of the
embed.py file to your `PYTHONPATH` of your Python installation if you
want to use the embedding module.
### Linux
Unpack the provided tar file and run the `veusz.exe` file inside
(please note that the `.exe` extension does not mean a Windows
executable here!):
$ tar xf veusz-3.6-linux-x86_64.tar.xz [change version here]
$ cd veusz-3.6-linux-x86_64
$ ./veusz.exe
Note that this may not work on all distributions due to glibc or other
library incompatibilities.
### MacOS
Simply drag the Veusz application into your Applications
directory. Please note that unfortunately due to signing requirements,
you will need to disable quarantine for it to run. Please see
[this github issue](https://github.com/veusz/veusz/issues/630#issuecomment-1305817737).
## Installing from source
### Requirements
* [Python](https://www.python.org/) >= 3.3
* [Qt](https://www.qt.io/developers/) >= 5.5
* [SIP](https://www.riverbankcomputing.co.uk/software/sip/) >= 5
* [PyQt](https://www.riverbankcomputing.co.uk/software/pyqt/) >= 5.3
* [numpy](https://numpy.org/) >= 1.7
### Optional requirements
* [h5py](https://www.h5py.org/), for HDF5 file support
* [astropy](https://www.astropy.org/), for FITS file support
* [pyemf3](https://github.com/jeremysanders/pyemf3) >= 3.3, for EMF output
* [iminuit](https://github.com/iminuit/iminuit) >= 2, for better fitting
* [Ghostscript](https://www.ghostscript.com/), for EPS/PS output
* [dbus-python](https://dbus.freedesktop.org/doc/dbus-python/), for D-BUS support
* [Sphinx](https://www.sphinx-doc.org/en/master/), to rebuild the documentation
### Installation with setuptools
Veusz provides a standard setuptools `setup.py` file. If installing
this locally, it may be better to create a Python virtual environment
so that it is self contained and does not interfere with existing
Python dependencies and packages.
### Building and running inside a virtual environment
An example use of a virtual environment to build veusz would be
$ python3 -m venv /path/to/virtual/environment [setup environment]
$ source /path/to/virtual/environment/bin/activate [activate it]
$ pip3 install numpy PyQt5 sip astropy h5py tomli [install necessary requirements]
$ pip3 install h5py astropy iminuit [install optional requirements]
$ pip3 install https://github.com/jeremysanders/pyemf3.git [optional, for EMF output]
$ tar xf veusz-3.5.tar.gz [unpack veusz source]
$ cd veusz-3.5
$ pip3 install -v . [build and install veusz from current directory]
However, for the above to work requires a working Qt5 development
installation. This can be your Linux distribution's Qt packages,
binaries download from the Qt website, or a Qt build from source. A
quick way to install Qt binaries on different platforms can be using
the [aqtinstall](https://github.com/miurahr/aqtinstall) command line
installer.
### Installing into system Python directories
This needs write permissions into the destination directory, so `sudo`
may be required.
$ tar xf veusz-3.5.tar.gz [unpack veusz source]
$ cd veusz-3.5
$ pip3 install -v . [build and install veusz from current directory]
On Ubuntu/Debian systems the following packages are necessary:
$ apt install libqt5core5a libqt5gui5 libqt5svg5 libqt5widgets5 \
python3-all python3-all-dev python3-astropy python3-h5py \
python3-numpy python3-pyqt5 python3-pyqt5.qtsvg \
python3-sipbuild python3-tomli \
pyqt5-dev pyqt5-dev-tools qt5-qmake qtbase5-dev sip-tools
On Fedora the following are required:
$ dnf install python3-devel python3-setuptools \
python3-numpy qt5-qtbase-devel qt5-qtsvg-devel \
python3-qt5 python3-qt5-devel python3-pyqt5-sip \
python3-h5py python3-tomli
Other Unix or Linux systems will likely contain the needed packages.
### Testing
After veusz has been installed into the Python path (in the standard
location or in `PYTHONPATH`), you can run the `runselftest.py`
executable in the `tests` directory. This will compare the generated
output of example documents with the expected output. The return code
of the `runselftest.py` script is the number of tests that have failed
(0 for success).
On Unix/Linux, Qt requires the `DISPLAY` environment to be set to an
X11 server for the self test to run. Packagers can use Xvfb in a non
graphical environment to create a hidden X11 server:
$ xvfb-run -a --server-args "-screen 0 640x480x24" \
python3 tests/runselftest.py
Alternatively, the Qt platform can be switched to minimal to avoid the
use of X11:
$ QT_QPA_PLATFORM=minimal python3 tests/runselftest.py
Please note that if the environment variable `VEUSZ_INPLACE_TEST` is
set, then `PYTHONPATH` is set to include the current working
directory, making it easier to run the self tests in automated scripts
without installation.
### Building and running in-place
If you don't want to install veusz fully or are doing development, it
can currently be run from its own directory. Before this can work, the
`helpers` modules must be compiled and copied into the appropriate
location.
$ tar xzf veusz-3.6.tar.gz [change version here]
$ cd veusz-3.6
$ python3 setup.py build
$ cp build/*/veusz/helpers/*.so veusz/helpers/
### Notes for packagers
* It is recommended to run the self test above (if possible).
* Veusz needs access to several subdirectories containing resource
files, which are by default installed in the veusz module directory.
These include the current version (`VERSION`), licence (`COPYING`),
icons (`icons` subdirectory), user-interface description (`ui`
subdirectory) and examples (`examples` subdirectory). This location
may not be desired by unix packagers, for example, who want to
separate the code from the data files.
It is possible to install these files in a different location by
using the setup.py option `--veusz-resource-dir` (for example with
`/usr/share/veusz`). If you do this, then you need to tell veusz
where these resources are at runtime or when testing. This can be
done by using a symlink `resources` in the veusz module
directory which points to the location of these files and
directories. Alternatively, the environment variable
`VEUSZ_RESOURCE_DIR` can be set.
There is an additional setup.py option `--disable-install-examples`
which disables installation of the example files. This may be
helpful for packagers who want to place the example files in
`/usr/share/doc`. As veusz shows these files on the help menu, it is
suggested that an `examples` symlink is added to the resources
directory to point to the location of the example files.
- Veusz is mostly platform-independent python code and data files with
a separate `helpers` module containing platform-dependent code. It
may save space in repositories to separate out the helpers
sub-module.
- Veusz includes a man page in `Documents/man-page/veusz.1`. This is
not automatically installed by setuptools.
- A manual in HTML and PDF format can be found in `Documents/manual/`.
This and the man page can be regenerated using the Makefile in
Documents, if Sphinx is installed (`make clean; make`).
- Veusz also includes freedesktop mime, desktop and appdata files in
the `support` subdirectory which can be installed to better
integrate with desktop environments.
- Icons are also included in the icons directory with the names
`veusz_16.png`, `_32`, `_48`, `_64` and `_128`. A scalable icon can
be found in `veusz.svg`.
- Veusz will periodically (once per week) check for updates. This can
be disabled by patching `veusz/utils/version.py` to set
`disableVersionChecks=True`.
- Veusz will automatically send anonymous feedback (after
confirmation) to the developers giving version information and
counts of feature use. This can be disabled by patching
`veusz/utils/feedback.py` to set `disableFeedback=True`.
|
veuszREPO_NAMEveuszPATH_START.@veusz_extracted@veusz-master@INSTALL.md@.PATH_END.py
|
{
"filename": "splitpol_FASTpsrfits.py",
"repo_name": "qianlivan/RPPPS",
"repo_path": "RPPPS_extracted/RPPPS-master/splitpol_FASTpsrfits.py",
"type": "Python"
}
|
#!/usr/bin/env python
import numpy as np
import pyfits
import os
import datetime
import time
import sys
from array import array
import matplotlib as mpl
import matplotlib.pyplot as plt
from pylab import *
#------------------------------------------------------------------
# Written by Lei QIAN
# version 20190529
# 20190609 adapted from cut_FASTpsrfits_freq_time_splitpol.py
# 20161009 dimension of DAT_OFFS changed from chnum*2 to chnum
# format of DAT_OFFS changed from dataformat3 to dataformat2
# size(float_data)/nline/nchan/npol=nsblk
# 20161008 adapted from cut_FASTpsrfits_freq_time.py
# output 2 pol and pol averaged data
# Usage:
# python splitpol_FASTpsrfits_freq_time_splitpol.py fileroot filename
# Example:
# python splitpol_FASTpsrfits_freq_time_splitpol.py output FAST.fits
#------------------------------------------------------------------
#mpl.rcParams['image.interpolation']='none'
if (len(sys.argv)<3):
#if (len(sys.argv)<2):
print 'too few inputs!'
print 'example:'
#print 'python cut_FASTpsrfits_freq_time_splitpol.py startchan endchan startn endn FAST.fits'
print 'python fitsio_splitpol_FASTpsrfits.py root filename'
sys.exit()
else:
fileroot=sys.argv[1]
filename=sys.argv[2]
starttime=datetime.datetime.now()
#u19700101=62135683200.0
hdulist = pyfits.open(filename)
hdu0 = hdulist[0]
data0 = hdu0.data
header0 = hdu0.header
print data0
hdu1 = hdulist[1]
data1 = hdu1.data
header1 = hdu1.header
nchan=header0['OBSNCHAN']
nsblk=header1['NSBLK']
npol=header1['NPOL']
tbin=header1['TBIN']
chan_bw=header1['CHAN_BW']
nline=header1['NAXIS2']
nsblk=header1['NSBLK']
#float_indexval=np.array(data1['INDEXVAL'])
float_tsubint=np.array(data1['TSUBINT'])
float_offs_sub=np.array(data1['OFFS_SUB'])
float_lst_sub=np.array(data1['LST_SUB'])
float_ra_sub=np.array(data1['RA_SUB'])
float_dec_sub=np.array(data1['DEC_SUB'])
float_glon_sub=np.array(data1['GLON_SUB'])
float_glat_sub=np.array(data1['GLAT_SUB'])
float_fd_ang=np.array(data1['FD_ANG'])
float_pos_ang=np.array(data1['POS_ANG'])
float_par_ang=np.array(data1['PAR_ANG'])
float_tel_az=np.array(data1['TEL_AZ'])
float_tel_zen=np.array(data1['TEL_ZEN'])
#float_aux_dm=np.array(data1['AUX_DM'])
#float_aux_rm=np.array(data1['AUX_RM'])
float_data=np.array(data1['DATA'])
temp_float_dat_scl=np.array(data1['DAT_SCL'])
print size(float_data)
print size(temp_float_dat_scl)/npol/nchan
float_dat_freq=np.zeros([nline,nchan])
float_dat_wts=np.zeros([nline,nchan])
float_dat_freq=np.array(data1['DAT_FREQ'])[0:nline,0:nchan]
float_dat_wts=np.array(data1['DAT_WTS'])[0:nline,0:nchan]
float_dat_offs=np.zeros([nline,nchan])
float_dat_scl=np.zeros([nline,nchan])
float_dat_offs=np.array(data1['DAT_OFFS'])[0:nline,0:nchan]
float_dat_scl=np.array(data1['DAT_SCL'])[0:nline,0:nchan]
print size(float_dat_freq),size(np.array(data1['DAT_FREQ']))
float_data2=np.zeros([nline,nsblk*nchan])
float_data3=np.zeros([nline,nsblk*nchan])
float_data_tot=np.zeros([nline,nsblk*nchan])
dataformat=str(nsblk*nchan)+'B'
for i in range(nline):
temp_data=float_data[i,:].reshape([size(float_data[i,:])/nchan/npol,npol*nchan])
temp_data2=temp_data[:,0:nchan].reshape(size(float_data[i,:])/nchan/npol*nchan)
temp_data3=temp_data[:,nchan:2*nchan].reshape(size(float_data[i,:])/nchan/npol*nchan)
temp_data_tot=(temp_data2+temp_data3)/2
# float_data2[i, :]=temp_data2
# float_data3[i, :]=temp_data3
float_data_tot[i, :]=temp_data_tot
dataformat2=str(nchan)+'E'
print dataformat,dataformat2
#column1_data = pyfits.Column(name='INDEXVAL',format='1D',array=float_indexval)
column2_data = pyfits.Column(name='TSUBINT',format='1D',array=float_tsubint,unit='s')
column3_data = pyfits.Column(name='OFFS_SUB',format='1D',array=float_offs_sub,unit='s')
column4_data = pyfits.Column(name='LST_SUB',format='1D',array=float_lst_sub,unit='s')
column5_data = pyfits.Column(name='RA_SUB',format='1D',array=float_ra_sub,unit='deg')
column6_data = pyfits.Column(name='DEC_SUB',format='1D',array=float_dec_sub,unit='deg')
column7_data = pyfits.Column(name='GLON_SUB',format='1D',array=float_glon_sub,unit='deg')
column8_data = pyfits.Column(name='GLAT_SUB',format='1D',array=float_glat_sub,unit='deg')
column9_data = pyfits.Column(name='FD_ANG',format='1E',array=float_fd_ang,unit='deg')
column10_data = pyfits.Column(name='POS_ANG',format='1E',array=float_pos_ang,unit='deg')
column11_data = pyfits.Column(name='PAR_ANG',format='1E',array=float_par_ang,unit='deg')
column12_data = pyfits.Column(name='TEL_AZ',format='1E',array=float_tel_az,unit='deg')
column13_data = pyfits.Column(name='TEL_ZEN',format='1E',array=float_tel_zen,unit='deg')
#column14_data = pyfits.Column(name='AUX_DM',format='1E',array=float_aux_dm)
#column15_data = pyfits.Column(name='AUX_RM',format='1E',array=float_aux_rm)
#column16_data = pyfits.Column(name='DAT_FREQ',format=dataformat2,array=float_dat_freq)
column16_data = pyfits.Column(name='DAT_FREQ',format=dataformat2,array=float_dat_freq,unit='deg')
column17_data = pyfits.Column(name='DAT_WTS',format=dataformat2,array=float_dat_wts,unit='deg')
column18_data = pyfits.Column(name='DAT_OFFS',format=dataformat2,array=float_dat_offs,unit='deg')
column19_data = pyfits.Column(name='DAT_SCL',format=dataformat2,array=float_dat_scl,unit='MHz')
#column20_data = pyfits.Column(name='DATA',format=dataformat,array=float_data2,unit='Jy')
#column20_data = pyfits.Column(name='DATA',format=dataformat,array=float_data2,unit='Jy')
print size(float_data2),size(float_data)
#column20_data_2 = pyfits.Column(name='DATA',format=dataformat,array=float_data3,unit='Jy')
column20_data_tot = pyfits.Column(name='DATA',format=dataformat,array=float_data_tot,unit='Jy')
#print hdu3_1.data[0]
#hdu3_1.data=hdu3.data[0]
table_hdu3 = pyfits.new_table([column2_data,column3_data,column4_data,column5_data,column6_data,column7_data,column8_data,column9_data,column10_data,column11_data,column12_data,column13_data,column16_data,column17_data,column18_data,column19_data,column20_data_tot])
table_hdu3.header.append(('INT_TYPE','TIME','Time axis (TIME, BINPHSPERI, BINLNGASC, etc)'))
table_hdu3.header.append(('INT_UNIT','SEC','Unit of time axis (SEC, PHS (0-1),DEG)'))
table_hdu3.header.append(('SCALE','FluxDec','Intensiy units (FluxDec/RefFlux/Jansky)'))
table_hdu3.header.append(('NPOL',1,'Nr of polarisations'))
table_hdu3.header.append(('POL_TYPE','AA','Polarisation identifier (e.g., AABBCRCI, AA+BB)'))
table_hdu3.header.append(('TBIN',tbin,'[s] Time per bin or sample'))
table_hdu3.header.append(('NBIN',1,'Nr of bins (PSR/CAL mode; else 1)'))
table_hdu3.header.append(('NBIN_PRD',0,'Nr of bins/pulse period (for gated data)'))
table_hdu3.header.append(('PHS_OFFS',0.0,'Phase offset of bin 0 for gated data'))
table_hdu3.header.append(('NBITS',8,'Nr of bits/datum (SEARCH mode "X" data, else 1)'))
table_hdu3.header.append(('NSUBOFFS',0,'Subint offset (Contiguous SEARCH-mode files)'))
table_hdu3.header.append(('NCHAN',nchan,'Number of channels/sub-bands in this file'))
table_hdu3.header.append(('CHAN_BW',chan_bw,'[MHz] Channel/sub-band width'))
table_hdu3.header.append(('NCHNOFFS',0,'Channel/sub-band offset for split files'))
table_hdu3.header.append(('NSBLK',nsblk,'Samples/row (SEARCH mode, else 1)'))
table_hdu3.header.append(('EXTNAME','SUBINT ','name of this binary table extension'))# This line is the most important
hdulist4 = pyfits.HDUList([hdu0,table_hdu3])
#outname3=fileroot+'_tot_'+startfreq+'_'+endfreq+'_'+startn+'_'+endn+'.fits'
outname3=fileroot+'.fits'
rmcomm3='rm -f '+outname3
os.system(rmcomm3)
hdulist4.writeto(outname3)
print '--------------------------------------------'
print ' Finished! '
endtime=datetime.datetime.now()
print 'START:',starttime
print 'END:',endtime
duration=endtime-starttime
print 'DURATION:',duration.seconds,' sec'
|
qianlivanREPO_NAMERPPPSPATH_START.@RPPPS_extracted@RPPPS-master@splitpol_FASTpsrfits.py@.PATH_END.py
|
{
"filename": "_legendrank.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/mesh3d/_legendrank.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendrankValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``legendrank`` property of ``mesh3d`` traces."""

    def __init__(self, plotly_name="legendrank", parent_name="mesh3d", **kwargs):
        # Default edit type is "style" unless the caller supplies one.
        kwargs.setdefault("edit_type", "style")
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@mesh3d@_legendrank.py@.PATH_END.py
|
{
"filename": "_b.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/carpet/_b.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Data-array validator for the ``b`` property of ``carpet`` traces."""

    def __init__(self, plotly_name="b", parent_name="carpet", **kwargs):
        # Default edit type is "calc" unless the caller supplies one.
        kwargs.setdefault("edit_type", "calc")
        super().__init__(plotly_name=plotly_name, parent_name=parent_name, **kwargs)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@carpet@_b.py@.PATH_END.py
|
{
"filename": "_interpolative_backend.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/scipy/linalg/_interpolative_backend.py",
"type": "Python"
}
|
#******************************************************************************
# Copyright (C) 2013 Kenneth L. Ho
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# None of the names of the copyright holders may be used to endorse or
# promote products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#******************************************************************************
"""
Direct wrappers for Fortran `id_dist` backend.
"""
import scipy.linalg._interpolative as _id
import numpy as np
# Shared exception raised whenever a Fortran backend routine reports a
# nonzero return code (`ier`).
_RETCODE_ERROR = RuntimeError("nonzero return code")
#------------------------------------------------------------------------------
# id_rand.f
#------------------------------------------------------------------------------
def id_srand(n):
    """
    Generate standard uniform pseudorandom numbers via a very efficient
    lagged Fibonacci method.

    :param n:
        Number of pseudorandom numbers to generate.
    :type n: int

    :return:
        Pseudorandom numbers.
    :rtype: :class:`numpy.ndarray`
    """
    samples = _id.id_srand(n)
    return samples
def id_srandi(t):
    """
    Initialize the seed values used by :func:`id_srand` (any appropriately
    random numbers will do).

    :param t:
        Array of 55 seed values.
    :type t: :class:`numpy.ndarray`
    """
    # The backend expects Fortran-ordered storage.
    _id.id_srandi(np.asfortranarray(t))
def id_srando():
    """Reset the :func:`id_srand` seed values to their original defaults."""
    _id.id_srando()
#------------------------------------------------------------------------------
# idd_frm.f
#------------------------------------------------------------------------------
def idd_frm(n, w, x):
    """
    Transform a real vector via a composition of Rokhlin's random transform,
    random subselection, and an FFT.

    In contrast to :func:`idd_sfrm`, this routine works best when the output
    length is the power-of-two integer produced by :func:`idd_frmi` (or is
    determined a posteriori from the output).  The result is randomly
    permuted.

    :param n:
        Greatest power-of-two integer satisfying `n <= x.size` as obtained
        from :func:`idd_frmi`; also the length of the output vector.
    :type n: int
    :param w:
        Initialization array constructed by :func:`idd_frmi`.
    :type w: :class:`numpy.ndarray`
    :param x:
        Vector to be transformed.
    :type x: :class:`numpy.ndarray`

    :return:
        Transformed vector.
    :rtype: :class:`numpy.ndarray`
    """
    transformed = _id.idd_frm(n, w, x)
    return transformed
def idd_sfrm(l, n, w, x):
    """
    Transform a real vector via a composition of Rokhlin's random transform,
    random subselection, and an FFT.

    In contrast to :func:`idd_frm`, this routine works best when the length
    of the transformed vector is known a priori.

    :param l:
        Length of transformed vector, satisfying `l <= n`.
    :type l: int
    :param n:
        Greatest power-of-two integer satisfying `n <= x.size` as obtained
        from :func:`idd_sfrmi`.
    :type n: int
    :param w:
        Initialization array constructed by :func:`idd_sfrmi`.
    :type w: :class:`numpy.ndarray`
    :param x:
        Vector to be transformed.
    :type x: :class:`numpy.ndarray`

    :return:
        Transformed vector.
    :rtype: :class:`numpy.ndarray`
    """
    transformed = _id.idd_sfrm(l, n, w, x)
    return transformed
def idd_frmi(m):
    """
    Initialize data for :func:`idd_frm`.

    :param m:
        Length of vector to be transformed.
    :type m: int

    :return:
        Greatest power-of-two integer `n` satisfying `n <= m`, and the
        initialization array to be used by :func:`idd_frm`.
    :rtype: tuple(int, :class:`numpy.ndarray`)
    """
    init = _id.idd_frmi(m)
    return init
def idd_sfrmi(l, m):
    """
    Initialize data for :func:`idd_sfrm`.

    :param l:
        Length of output transformed vector.
    :type l: int
    :param m:
        Length of the vector to be transformed.
    :type m: int

    :return:
        Greatest power-of-two integer `n` satisfying `n <= m`, and the
        initialization array to be used by :func:`idd_sfrm`.
    :rtype: tuple(int, :class:`numpy.ndarray`)
    """
    init = _id.idd_sfrmi(l, m)
    return init
#------------------------------------------------------------------------------
# idd_id.f
#------------------------------------------------------------------------------
def iddp_id(eps, A):
    """
    Compute an ID of a real matrix to a specified relative precision.

    :param eps:
        Relative precision.
    :type eps: float
    :param A:
        Matrix.
    :type A: :class:`numpy.ndarray`

    :return:
        Rank of ID, column index array, and interpolation coefficients.
    :rtype: tuple(int, :class:`numpy.ndarray`, :class:`numpy.ndarray`)
    """
    A = np.asfortranarray(A)
    k, idx, rnorms = _id.iddp_id(eps, A)
    ncols = A.shape[1]
    # The coefficients are packed into A's (transposed) storage by the
    # Fortran routine; unpack the first k*(ncols-k) entries.
    proj = A.T.ravel()[:k*(ncols - k)].reshape((k, ncols - k), order='F')
    return k, idx, proj
def iddr_id(A, k):
    """
    Compute an ID of a real matrix to a specified rank.

    :param A:
        Matrix.
    :type A: :class:`numpy.ndarray`
    :param k:
        Rank of ID.
    :type k: int

    :return:
        Column index array and interpolation coefficients.
    :rtype: tuple(:class:`numpy.ndarray`, :class:`numpy.ndarray`)
    """
    A = np.asfortranarray(A)
    idx, rnorms = _id.iddr_id(A, k)
    ncols = A.shape[1]
    # Unpack the interpolation coefficients from A's transposed storage.
    proj = A.T.ravel()[:k*(ncols - k)].reshape((k, ncols - k), order='F')
    return idx, proj
def idd_reconid(B, idx, proj):
    """
    Reconstruct a matrix from its real ID.

    :param B:
        Skeleton matrix.
    :type B: :class:`numpy.ndarray`
    :param idx:
        Column index array.
    :type idx: :class:`numpy.ndarray`
    :param proj:
        Interpolation coefficients.
    :type proj: :class:`numpy.ndarray`

    :return:
        Reconstructed matrix.
    :rtype: :class:`numpy.ndarray`
    """
    skel = np.asfortranarray(B)
    if proj.size == 0:
        # Full-rank ID: simply undo the column permutation recorded in idx.
        return skel[:, np.argsort(idx)]
    return _id.idd_reconid(skel, idx, proj)
def idd_reconint(idx, proj):
    """
    Reconstruct the interpolation matrix from a real ID.

    :param idx:
        Column index array.
    :type idx: :class:`numpy.ndarray`
    :param proj:
        Interpolation coefficients.
    :type proj: :class:`numpy.ndarray`

    :return:
        Interpolation matrix.
    :rtype: :class:`numpy.ndarray`
    """
    interp = _id.idd_reconint(idx, proj)
    return interp
def idd_copycols(A, k, idx):
    """
    Reconstruct the skeleton matrix from a real ID.

    :param A:
        Original matrix.
    :type A: :class:`numpy.ndarray`
    :param k:
        Rank of ID.
    :type k: int
    :param idx:
        Column index array.
    :type idx: :class:`numpy.ndarray`

    :return:
        Skeleton matrix.
    :rtype: :class:`numpy.ndarray`
    """
    return _id.idd_copycols(np.asfortranarray(A), k, idx)
#------------------------------------------------------------------------------
# idd_id2svd.f
#------------------------------------------------------------------------------
def idd_id2svd(B, idx, proj):
    """
    Convert a real ID to an SVD.

    :param B:
        Skeleton matrix.
    :type B: :class:`numpy.ndarray`
    :param idx:
        Column index array.
    :type idx: :class:`numpy.ndarray`
    :param proj:
        Interpolation coefficients.
    :type proj: :class:`numpy.ndarray`

    :return:
        Left singular vectors, right singular vectors, and singular values.
    :rtype: tuple of :class:`numpy.ndarray`

    :raises RuntimeError: if the backend reports a nonzero return code.
    """
    U, V, S, ier = _id.idd_id2svd(np.asfortranarray(B), idx, proj)
    if ier != 0:
        raise _RETCODE_ERROR
    return U, V, S
#------------------------------------------------------------------------------
# idd_snorm.f
#------------------------------------------------------------------------------
def idd_snorm(m, n, matvect, matvec, its=20):
    """
    Estimate the spectral norm of a real matrix by the randomized power
    method.

    :param m:
        Matrix row dimension.
    :type m: int
    :param n:
        Matrix column dimension.
    :type n: int
    :param matvect:
        Function applying the matrix transpose to a vector, with call
        signature `y = matvect(x)`.
    :type matvect: function
    :param matvec:
        Function applying the matrix to a vector, with call signature
        `y = matvec(x)`.
    :type matvec: function
    :param its:
        Number of power method iterations.
    :type its: int

    :return:
        Spectral norm estimate.
    :rtype: float
    """
    # The backend also returns the final power-iteration vector; discard it.
    estimate, _vec = _id.idd_snorm(m, n, matvect, matvec, its)
    return estimate
def idd_diffsnorm(m, n, matvect, matvect2, matvec, matvec2, its=20):
    """
    Estimate the spectral norm of the difference of two real matrices by the
    randomized power method.

    :param m:
        Matrix row dimension.
    :type m: int
    :param n:
        Matrix column dimension.
    :type n: int
    :param matvect:
        Function applying the transpose of the first matrix to a vector,
        with call signature `y = matvect(x)`.
    :type matvect: function
    :param matvect2:
        Function applying the transpose of the second matrix to a vector,
        with call signature `y = matvect2(x)`.
    :type matvect2: function
    :param matvec:
        Function applying the first matrix to a vector, with call signature
        `y = matvec(x)`.
    :type matvec: function
    :param matvec2:
        Function applying the second matrix to a vector, with call signature
        `y = matvec2(x)`.
    :type matvec2: function
    :param its:
        Number of power method iterations.
    :type its: int

    :return:
        Spectral norm estimate of the matrix difference.
    :rtype: float
    """
    estimate = _id.idd_diffsnorm(m, n, matvect, matvect2, matvec, matvec2, its)
    return estimate
#------------------------------------------------------------------------------
# idd_svd.f
#------------------------------------------------------------------------------
def iddr_svd(A, k):
    """
    Compute an SVD of a real matrix to a specified rank.

    :param A:
        Matrix.
    :type A: :class:`numpy.ndarray`
    :param k:
        Rank of SVD.
    :type k: int

    :return:
        Left singular vectors, right singular vectors, and singular values.
    :rtype: tuple of :class:`numpy.ndarray`

    :raises RuntimeError: if the backend reports a nonzero return code.
    """
    U, V, S, ier = _id.iddr_svd(np.asfortranarray(A), k)
    if ier != 0:
        raise _RETCODE_ERROR
    return U, V, S
def iddp_svd(eps, A):
    """
    Compute an SVD of a real matrix to a specified relative precision.

    :param eps:
        Relative precision.
    :type eps: float
    :param A:
        Matrix.
    :type A: :class:`numpy.ndarray`

    :return:
        Left singular vectors, right singular vectors, and singular values.
    :rtype: tuple of :class:`numpy.ndarray`

    :raises RuntimeError: if the backend reports a nonzero return code.
    """
    A = np.asfortranarray(A)
    nrows, ncols = A.shape
    k, iU, iV, iS, work, ier = _id.iddp_svd(eps, A)
    if ier != 0:
        raise _RETCODE_ERROR
    # Unpack the factors from the packed workspace; iU/iV/iS are 1-based
    # Fortran offsets into `work`.
    U = work[iU-1:iU+nrows*k-1].reshape((nrows, k), order='F')
    V = work[iV-1:iV+ncols*k-1].reshape((ncols, k), order='F')
    S = work[iS-1:iS+k-1]
    return U, V, S
#------------------------------------------------------------------------------
# iddp_aid.f
#------------------------------------------------------------------------------
def iddp_aid(eps, A):
    """
    Compute an ID of a real matrix to a specified relative precision using
    random sampling.

    :param eps:
        Relative precision.
    :type eps: float
    :param A:
        Matrix.
    :type A: :class:`numpy.ndarray`

    :return:
        Rank of ID, column index array, and interpolation coefficients.
    :rtype: tuple(int, :class:`numpy.ndarray`, :class:`numpy.ndarray`)
    """
    A = np.asfortranarray(A)
    nrows, ncols = A.shape
    n2, w = idd_frmi(nrows)
    # Workspace sized per the Fortran routine's requirements.
    proj = np.empty(ncols*(2*n2 + 1) + n2 + 1, order='F')
    k, idx, proj = _id.iddp_aid(eps, A, w, proj)
    proj = proj[:k*(ncols - k)].reshape((k, ncols - k), order='F')
    return k, idx, proj
def idd_estrank(eps, A):
    """
    Estimate the rank of a real matrix to a specified relative precision
    using random sampling.

    The output rank is typically about 8 higher than the actual rank.

    :param eps:
        Relative precision.
    :type eps: float
    :param A:
        Matrix.
    :type A: :class:`numpy.ndarray`

    :return:
        Rank estimate.
    :rtype: int
    """
    A = np.asfortranarray(A)
    nrows, ncols = A.shape
    n2, w = idd_frmi(nrows)
    # Workspace sized per the Fortran routine's requirements.
    ra = np.empty(ncols*n2 + (ncols + 1)*(n2 + 1), order='F')
    k, ra = _id.idd_estrank(eps, A, w, ra)
    return k
#------------------------------------------------------------------------------
# iddp_asvd.f
#------------------------------------------------------------------------------
def iddp_asvd(eps, A):
    """
    Compute an SVD of a real matrix to a specified relative precision using
    random sampling.

    :param eps:
        Relative precision.
    :type eps: float
    :param A:
        Matrix.
    :type A: :class:`numpy.ndarray`

    :return:
        Left singular vectors, right singular vectors, and singular values.
    :rtype: tuple of :class:`numpy.ndarray`

    :raises RuntimeError: if the backend reports a nonzero return code.
    """
    A = np.asfortranarray(A)
    nrows, ncols = A.shape
    n2, rand_init = _id.idd_frmi(nrows)
    kmax = min(nrows, ncols)
    # Workspace large enough for both the sampling and SVD stages.
    work = np.empty(
        max((kmax + 1)*(3*nrows + 5*ncols + 1) + 25*kmax**2,
            (2*ncols + 1)*(n2 + 1)),
        order='F')
    k, iU, iV, iS, work, ier = _id.iddp_asvd(eps, A, rand_init, work)
    if ier != 0:
        raise _RETCODE_ERROR
    # Unpack the factors (iU/iV/iS are 1-based Fortran offsets into `work`).
    U = work[iU-1:iU+nrows*k-1].reshape((nrows, k), order='F')
    V = work[iV-1:iV+ncols*k-1].reshape((ncols, k), order='F')
    S = work[iS-1:iS+k-1]
    return U, V, S
#------------------------------------------------------------------------------
# iddp_rid.f
#------------------------------------------------------------------------------
def iddp_rid(eps, m, n, matvect):
    """
    Compute an ID of a real matrix to a specified relative precision using
    random matrix-vector multiplication.

    :param eps:
        Relative precision.
    :type eps: float
    :param m:
        Matrix row dimension.
    :type m: int
    :param n:
        Matrix column dimension.
    :type n: int
    :param matvect:
        Function applying the matrix transpose to a vector, with call
        signature `y = matvect(x)`.
    :type matvect: function

    :return:
        Rank of ID, column index array, and interpolation coefficients.
    :rtype: tuple(int, :class:`numpy.ndarray`, :class:`numpy.ndarray`)

    :raises RuntimeError: if the backend reports a nonzero return code.
    """
    # Workspace sized per the Fortran routine's requirements.
    proj = np.empty(m + 1 + 2*n*(min(m, n) + 1), order='F')
    k, idx, proj, ier = _id.iddp_rid(eps, m, n, matvect, proj)
    if ier:
        raise _RETCODE_ERROR
    proj = proj[:k*(n - k)].reshape((k, n - k), order='F')
    return k, idx, proj
def idd_findrank(eps, m, n, matvect):
    """
    Estimate the rank of a real matrix to a specified relative precision
    using random matrix-vector multiplication.

    :param eps:
        Relative precision.
    :type eps: float
    :param m:
        Matrix row dimension.
    :type m: int
    :param n:
        Matrix column dimension.
    :type n: int
    :param matvect:
        Function applying the matrix transpose to a vector, with call
        signature `y = matvect(x)`.
    :type matvect: function

    :return:
        Rank estimate.
    :rtype: int

    :raises RuntimeError: if the backend reports a nonzero return code.
    """
    k, _ra, ier = _id.idd_findrank(eps, m, n, matvect)
    if ier != 0:
        raise _RETCODE_ERROR
    return k
#------------------------------------------------------------------------------
# iddp_rsvd.f
#------------------------------------------------------------------------------
def iddp_rsvd(eps, m, n, matvect, matvec):
    """
    Compute an SVD of a real matrix to a specified relative precision using
    random matrix-vector multiplication.

    :param eps:
        Relative precision.
    :type eps: float
    :param m:
        Matrix row dimension.
    :type m: int
    :param n:
        Matrix column dimension.
    :type n: int
    :param matvect:
        Function applying the matrix transpose to a vector, with call
        signature `y = matvect(x)`.
    :type matvect: function
    :param matvec:
        Function applying the matrix to a vector, with call signature
        `y = matvec(x)`.
    :type matvec: function

    :return:
        Left singular vectors, right singular vectors, and singular values.
    :rtype: tuple of :class:`numpy.ndarray`

    :raises RuntimeError: if the backend reports a nonzero return code.
    """
    k, iU, iV, iS, work, ier = _id.iddp_rsvd(eps, m, n, matvect, matvec)
    if ier != 0:
        raise _RETCODE_ERROR
    # Unpack the factors (iU/iV/iS are 1-based Fortran offsets into `work`).
    U = work[iU-1:iU+m*k-1].reshape((m, k), order='F')
    V = work[iV-1:iV+n*k-1].reshape((n, k), order='F')
    S = work[iS-1:iS+k-1]
    return U, V, S
#------------------------------------------------------------------------------
# iddr_aid.f
#------------------------------------------------------------------------------
def iddr_aid(A, k):
    """
    Compute an ID of a real matrix to a specified rank using random
    sampling.

    :param A:
        Matrix.
    :type A: :class:`numpy.ndarray`
    :param k:
        Rank of ID.
    :type k: int

    :return:
        Column index array and interpolation coefficients.
    :rtype: tuple(:class:`numpy.ndarray`, :class:`numpy.ndarray`)
    """
    A = np.asfortranarray(A)
    nrows, ncols = A.shape
    w = iddr_aidi(nrows, ncols, k)
    idx, proj = _id.iddr_aid(A, k, w)
    # A full-rank ID has no interpolation coefficients.
    proj = (np.array([], dtype='float64', order='F') if k == ncols
            else proj.reshape((k, ncols - k), order='F'))
    return idx, proj
def iddr_aidi(m, n, k):
    """
    Initialize the workspace array for :func:`iddr_aid`.

    :param m:
        Matrix row dimension.
    :type m: int
    :param n:
        Matrix column dimension.
    :type n: int
    :param k:
        Rank of ID.
    :type k: int

    :return:
        Initialization array to be used by :func:`iddr_aid`.
    :rtype: :class:`numpy.ndarray`
    """
    init = _id.iddr_aidi(m, n, k)
    return init
#------------------------------------------------------------------------------
# iddr_asvd.f
#------------------------------------------------------------------------------
def iddr_asvd(A, k):
    """
    Compute an SVD of a real matrix to a specified rank using random
    sampling.

    :param A:
        Matrix.
    :type A: :class:`numpy.ndarray`
    :param k:
        Rank of SVD.
    :type k: int

    :return:
        Left singular vectors, right singular vectors, and singular values.
    :rtype: tuple of :class:`numpy.ndarray`

    :raises RuntimeError: if the backend reports a nonzero return code.
    """
    A = np.asfortranarray(A)
    nrows, ncols = A.shape
    # Workspace whose head holds the random-sampling initialization data.
    work = np.empty((2*k + 28)*nrows + (6*k + 21)*ncols + 25*k**2 + 100,
                    order='F')
    init = iddr_aidi(nrows, ncols, k)
    work[:init.size] = init
    U, V, S, ier = _id.iddr_asvd(A, k, work)
    if ier:
        raise _RETCODE_ERROR
    return U, V, S
#------------------------------------------------------------------------------
# iddr_rid.f
#------------------------------------------------------------------------------
def iddr_rid(m, n, matvect, k):
    """
    Compute an ID of a real matrix to a specified rank using random
    matrix-vector multiplication.

    :param m:
        Matrix row dimension.
    :type m: int
    :param n:
        Matrix column dimension.
    :type n: int
    :param matvect:
        Function applying the matrix transpose to a vector, with call
        signature `y = matvect(x)`.
    :type matvect: function
    :param k:
        Rank of ID.
    :type k: int

    :return:
        Column index array and interpolation coefficients.
    :rtype: tuple(:class:`numpy.ndarray`, :class:`numpy.ndarray`)
    """
    idx, proj = _id.iddr_rid(m, n, matvect, k)
    coeffs = proj[:k*(n - k)].reshape((k, n - k), order='F')
    return idx, coeffs
#------------------------------------------------------------------------------
# iddr_rsvd.f
#------------------------------------------------------------------------------
def iddr_rsvd(m, n, matvect, matvec, k):
    """
    Compute an SVD of a real matrix to a specified rank using random
    matrix-vector multiplication.

    :param m:
        Matrix row dimension.
    :type m: int
    :param n:
        Matrix column dimension.
    :type n: int
    :param matvect:
        Function applying the matrix transpose to a vector, with call
        signature `y = matvect(x)`.
    :type matvect: function
    :param matvec:
        Function applying the matrix to a vector, with call signature
        `y = matvec(x)`.
    :type matvec: function
    :param k:
        Rank of SVD.
    :type k: int

    :return:
        Left singular vectors, right singular vectors, and singular values.
    :rtype: tuple of :class:`numpy.ndarray`

    :raises RuntimeError: if the backend reports a nonzero return code.
    """
    U, V, S, ier = _id.iddr_rsvd(m, n, matvect, matvec, k)
    if ier:
        raise _RETCODE_ERROR
    return U, V, S
#------------------------------------------------------------------------------
# idz_frm.f
#------------------------------------------------------------------------------
def idz_frm(n, w, x):
    """
    Transform a complex vector via a composition of Rokhlin's random
    transform, random subselection, and an FFT.

    In contrast to :func:`idz_sfrm`, this routine works best when the length
    of the transformed vector is the power-of-two integer output by
    :func:`idz_frmi`, or when the length is determined a posteriori from the
    output. The returned transformed vector is randomly permuted.

    :param n: Greatest power-of-two integer satisfying `n <= x.size` as
        obtained from :func:`idz_frmi`; also the length of the output vector.
    :type n: int
    :param w: Initialization array constructed by :func:`idz_frmi`.
    :type w: :class:`numpy.ndarray`
    :param x: Vector to be transformed.
    :type x: :class:`numpy.ndarray`
    :return: Transformed vector.
    :rtype: :class:`numpy.ndarray`
    """
    transformed = _id.idz_frm(n, w, x)
    return transformed
def idz_sfrm(l, n, w, x):
    """
    Transform a complex vector via a composition of Rokhlin's random
    transform, random subselection, and an FFT.

    In contrast to :func:`idz_frm`, this routine works best when the length
    of the transformed vector is known a priori.

    :param l: Length of transformed vector, satisfying `l <= n`.
    :type l: int
    :param n: Greatest power-of-two integer satisfying `n <= x.size` as
        obtained from :func:`idz_sfrmi`.
    :type n: int
    :param w: Initialization array constructed by :func:`idz_sfrmi`.
    :type w: :class:`numpy.ndarray`
    :param x: Vector to be transformed.
    :type x: :class:`numpy.ndarray`
    :return: Transformed vector.
    :rtype: :class:`numpy.ndarray`
    """
    # Docstring fix: the initialization array comes from idz_sfrmi (complex
    # variant), not idd_sfrmi as the previous docstring claimed.
    return _id.idz_sfrm(l, n, w, x)
def idz_frmi(m):
    """
    Initialize data for :func:`idz_frm`.

    :param m: Length of vector to be transformed.
    :type m: int
    :return: Greatest power-of-two integer `n` satisfying `n <= m`.
    :rtype: int
    :return: Initialization array to be used by :func:`idz_frm`.
    :rtype: :class:`numpy.ndarray`
    """
    n, w = _id.idz_frmi(m)
    return n, w
def idz_sfrmi(l, m):
    """
    Initialize data for :func:`idz_sfrm`.

    :param l: Length of output transformed vector.
    :type l: int
    :param m: Length of the vector to be transformed.
    :type m: int
    :return: Greatest power-of-two integer `n` satisfying `n <= m`.
    :rtype: int
    :return: Initialization array to be used by :func:`idz_sfrm`.
    :rtype: :class:`numpy.ndarray`
    """
    n, w = _id.idz_sfrmi(l, m)
    return n, w
#------------------------------------------------------------------------------
# idz_id.f
#------------------------------------------------------------------------------
def idzp_id(eps, A):
    """
    Compute ID of a complex matrix to a specified relative precision.

    :param eps: Relative precision.
    :type eps: float
    :param A: Matrix.
    :type A: :class:`numpy.ndarray`
    :return: Rank of ID.
    :rtype: int
    :return: Column index array.
    :rtype: :class:`numpy.ndarray`
    :return: Interpolation coefficients.
    :rtype: :class:`numpy.ndarray`
    """
    A = np.asfortranarray(A)
    ncols = A.shape[1]
    # The Fortran routine overwrites A in place with the interpolation
    # coefficients; extract them from the (transposed) flattened storage.
    k, idx, _rnorms = _id.idzp_id(eps, A)
    proj = A.T.ravel()[:k*(ncols-k)].reshape((k, ncols-k), order='F')
    return k, idx, proj
def idzr_id(A, k):
    """
    Compute ID of a complex matrix to a specified rank.

    :param A: Matrix.
    :type A: :class:`numpy.ndarray`
    :param k: Rank of ID.
    :type k: int
    :return: Column index array.
    :rtype: :class:`numpy.ndarray`
    :return: Interpolation coefficients.
    :rtype: :class:`numpy.ndarray`
    """
    A = np.asfortranarray(A)
    ncols = A.shape[1]
    # The backend stores the interpolation coefficients in A itself; pull
    # them out of the transposed flattened buffer after the call.
    idx, _rnorms = _id.idzr_id(A, k)
    proj = A.T.ravel()[:k*(ncols-k)].reshape((k, ncols-k), order='F')
    return idx, proj
def idz_reconid(B, idx, proj):
    """
    Reconstruct matrix from complex ID.

    :param B: Skeleton matrix.
    :type B: :class:`numpy.ndarray`
    :param idx: Column index array.
    :type idx: :class:`numpy.ndarray`
    :param proj: Interpolation coefficients.
    :type proj: :class:`numpy.ndarray`
    :return: Reconstructed matrix.
    :rtype: :class:`numpy.ndarray`
    """
    B = np.asfortranarray(B)
    if proj.size == 0:
        # Full-rank ID: no interpolation needed, just undo the column
        # permutation implied by idx.
        return B[:, np.argsort(idx)]
    return _id.idz_reconid(B, idx, proj)
def idz_reconint(idx, proj):
    """
    Reconstruct interpolation matrix from complex ID.

    :param idx: Column index array.
    :type idx: :class:`numpy.ndarray`
    :param proj: Interpolation coefficients.
    :type proj: :class:`numpy.ndarray`
    :return: Interpolation matrix.
    :rtype: :class:`numpy.ndarray`
    """
    P = _id.idz_reconint(idx, proj)
    return P
def idz_copycols(A, k, idx):
    """
    Reconstruct skeleton matrix from complex ID.

    :param A: Original matrix.
    :type A: :class:`numpy.ndarray`
    :param k: Rank of ID.
    :type k: int
    :param idx: Column index array.
    :type idx: :class:`numpy.ndarray`
    :return: Skeleton matrix.
    :rtype: :class:`numpy.ndarray`
    """
    # The backend expects Fortran-ordered storage.
    return _id.idz_copycols(np.asfortranarray(A), k, idx)
#------------------------------------------------------------------------------
# idz_id2svd.f
#------------------------------------------------------------------------------
def idz_id2svd(B, idx, proj):
    """
    Convert complex ID to SVD.

    :param B: Skeleton matrix.
    :type B: :class:`numpy.ndarray`
    :param idx: Column index array.
    :type idx: :class:`numpy.ndarray`
    :param proj: Interpolation coefficients.
    :type proj: :class:`numpy.ndarray`
    :return: Left singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return: Right singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return: Singular values.
    :rtype: :class:`numpy.ndarray`
    """
    U, V, S, ier = _id.idz_id2svd(np.asfortranarray(B), idx, proj)
    if ier:
        raise _RETCODE_ERROR
    return U, V, S
#------------------------------------------------------------------------------
# idz_snorm.f
#------------------------------------------------------------------------------
def idz_snorm(m, n, matveca, matvec, its=20):
    """
    Estimate spectral norm of a complex matrix by the randomized power method.

    :param m: Matrix row dimension.
    :type m: int
    :param n: Matrix column dimension.
    :type n: int
    :param matveca: Function applying the matrix adjoint to a vector, with
        call signature `y = matveca(x)`.
    :type matveca: function
    :param matvec: Function applying the matrix to a vector, with call
        signature `y = matvec(x)`.
    :type matvec: function
    :param its: Number of power method iterations.
    :type its: int
    :return: Spectral norm estimate.
    :rtype: float
    """
    # The backend also returns the final power-method iterate; discard it.
    estimate, _v = _id.idz_snorm(m, n, matveca, matvec, its)
    return estimate
def idz_diffsnorm(m, n, matveca, matveca2, matvec, matvec2, its=20):
    """
    Estimate spectral norm of the difference of two complex matrices by the
    randomized power method.

    :param m: Matrix row dimension.
    :type m: int
    :param n: Matrix column dimension.
    :type n: int
    :param matveca: Function applying the adjoint of the first matrix to a
        vector, with call signature `y = matveca(x)`.
    :type matveca: function
    :param matveca2: Function applying the adjoint of the second matrix to a
        vector, with call signature `y = matveca2(x)`.
    :type matveca2: function
    :param matvec: Function applying the first matrix to a vector, with call
        signature `y = matvec(x)`.
    :type matvec: function
    :param matvec2: Function applying the second matrix to a vector, with
        call signature `y = matvec2(x)`.
    :type matvec2: function
    :param its: Number of power method iterations.
    :type its: int
    :return: Spectral norm estimate of matrix difference.
    :rtype: float
    """
    snorm = _id.idz_diffsnorm(m, n, matveca, matveca2, matvec, matvec2, its)
    return snorm
#------------------------------------------------------------------------------
# idz_svd.f
#------------------------------------------------------------------------------
def idzr_svd(A, k):
    """
    Compute SVD of a complex matrix to a specified rank.

    :param A: Matrix.
    :type A: :class:`numpy.ndarray`
    :param k: Rank of SVD.
    :type k: int
    :return: Left singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return: Right singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return: Singular values.
    :rtype: :class:`numpy.ndarray`
    """
    U, V, S, ier = _id.idzr_svd(np.asfortranarray(A), k)
    if ier:
        raise _RETCODE_ERROR
    return U, V, S
def idzp_svd(eps, A):
    """
    Compute SVD of a complex matrix to a specified relative precision.

    :param eps: Relative precision.
    :type eps: float
    :param A: Matrix.
    :type A: :class:`numpy.ndarray`
    :return: Left singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return: Right singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return: Singular values.
    :rtype: :class:`numpy.ndarray`
    """
    A = np.asfortranarray(A)
    m, n = A.shape
    k, iU, iV, iS, w, ier = _id.idzp_svd(eps, A)
    if ier:
        raise _RETCODE_ERROR
    # The factors live in the flat workspace w at 1-based Fortran offsets.
    U = w[iU-1:iU-1+m*k].reshape((m, k), order='F')
    V = w[iV-1:iV-1+n*k].reshape((n, k), order='F')
    S = w[iS-1:iS-1+k]
    return U, V, S
#------------------------------------------------------------------------------
# idzp_aid.f
#------------------------------------------------------------------------------
def idzp_aid(eps, A):
    """
    Compute ID of a complex matrix to a specified relative precision using
    random sampling.

    :param eps: Relative precision.
    :type eps: float
    :param A: Matrix.
    :type A: :class:`numpy.ndarray`
    :return: Rank of ID.
    :rtype: int
    :return: Column index array.
    :rtype: :class:`numpy.ndarray`
    :return: Interpolation coefficients.
    :rtype: :class:`numpy.ndarray`
    """
    A = np.asfortranarray(A)
    m, n = A.shape
    # Random-transform initialization sized from the row dimension.
    n2, w = idz_frmi(m)
    # Workspace large enough for the backend's scratch requirements.
    work = np.empty(n*(2*n2 + 1) + n2 + 1, dtype='complex128', order='F')
    k, idx, work = _id.idzp_aid(eps, A, w, work)
    proj = work[:k*(n-k)].reshape((k, n-k), order='F')
    return k, idx, proj
def idz_estrank(eps, A):
    """
    Estimate rank of a complex matrix to a specified relative precision using
    random sampling.

    The output rank is typically about 8 higher than the actual rank.

    :param eps: Relative precision.
    :type eps: float
    :param A: Matrix.
    :type A: :class:`numpy.ndarray`
    :return: Rank estimate.
    :rtype: int
    """
    A = np.asfortranarray(A)
    m, n = A.shape
    n2, w = idz_frmi(m)
    scratch = np.empty(n*n2 + (n + 1)*(n2 + 1), dtype='complex128', order='F')
    k, _ra = _id.idz_estrank(eps, A, w, scratch)
    return k
#------------------------------------------------------------------------------
# idzp_asvd.f
#------------------------------------------------------------------------------
def idzp_asvd(eps, A):
    """
    Compute SVD of a complex matrix to a specified relative precision using
    random sampling.

    :param eps: Relative precision.
    :type eps: float
    :param A: Matrix.
    :type A: :class:`numpy.ndarray`
    :return: Left singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return: Right singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return: Singular values.
    :rtype: :class:`numpy.ndarray`
    """
    A = np.asfortranarray(A)
    m, n = A.shape
    n2, winit = _id.idz_frmi(m)
    # Workspace must cover both the SVD scratch space and the random
    # transform buffers; take the larger of the two requirements.
    size = max((min(m, n) + 1)*(3*m + 5*n + 11) + 8*min(m, n)**2,
               (2*n + 1)*(n2 + 1))
    w = np.empty(size, dtype=np.complex128, order='F')
    k, iU, iV, iS, w, ier = _id.idzp_asvd(eps, A, winit, w)
    if ier:
        raise _RETCODE_ERROR
    # Factors are packed in w at 1-based Fortran offsets.
    U = w[iU-1:iU-1+m*k].reshape((m, k), order='F')
    V = w[iV-1:iV-1+n*k].reshape((n, k), order='F')
    S = w[iS-1:iS-1+k]
    return U, V, S
#------------------------------------------------------------------------------
# idzp_rid.f
#------------------------------------------------------------------------------
def idzp_rid(eps, m, n, matveca):
    """
    Compute ID of a complex matrix to a specified relative precision using
    random matrix-vector multiplication.

    :param eps: Relative precision.
    :type eps: float
    :param m: Matrix row dimension.
    :type m: int
    :param n: Matrix column dimension.
    :type n: int
    :param matveca: Function applying the matrix adjoint to a vector, with
        call signature `y = matveca(x)`.
    :type matveca: function
    :return: Rank of ID.
    :rtype: int
    :return: Column index array.
    :rtype: :class:`numpy.ndarray`
    :return: Interpolation coefficients.
    :rtype: :class:`numpy.ndarray`
    """
    work = np.empty(m + 1 + 2*n*(min(m, n) + 1),
                    dtype=np.complex128, order='F')
    k, idx, work, ier = _id.idzp_rid(eps, m, n, matveca, work)
    if ier:
        raise _RETCODE_ERROR
    # Only the first k*(n-k) workspace entries hold the coefficients.
    proj = work[:k*(n-k)].reshape((k, n-k), order='F')
    return k, idx, proj
def idz_findrank(eps, m, n, matveca):
    """
    Estimate rank of a complex matrix to a specified relative precision using
    random matrix-vector multiplication.

    :param eps: Relative precision.
    :type eps: float
    :param m: Matrix row dimension.
    :type m: int
    :param n: Matrix column dimension.
    :type n: int
    :param matveca: Function applying the matrix adjoint to a vector, with
        call signature `y = matveca(x)`.
    :type matveca: function
    :return: Rank estimate.
    :rtype: int
    """
    k, _ra, ier = _id.idz_findrank(eps, m, n, matveca)
    if ier:
        raise _RETCODE_ERROR
    return k
#------------------------------------------------------------------------------
# idzp_rsvd.f
#------------------------------------------------------------------------------
def idzp_rsvd(eps, m, n, matveca, matvec):
    """
    Compute SVD of a complex matrix to a specified relative precision using
    random matrix-vector multiplication.

    :param eps: Relative precision.
    :type eps: float
    :param m: Matrix row dimension.
    :type m: int
    :param n: Matrix column dimension.
    :type n: int
    :param matveca: Function applying the matrix adjoint to a vector, with
        call signature `y = matveca(x)`.
    :type matveca: function
    :param matvec: Function applying the matrix to a vector, with call
        signature `y = matvec(x)`.
    :type matvec: function
    :return: Left singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return: Right singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return: Singular values.
    :rtype: :class:`numpy.ndarray`
    """
    k, iU, iV, iS, w, ier = _id.idzp_rsvd(eps, m, n, matveca, matvec)
    if ier:
        raise _RETCODE_ERROR
    # Unpack the factors from the flat workspace (1-based Fortran offsets).
    U = w[iU-1:iU-1+m*k].reshape((m, k), order='F')
    V = w[iV-1:iV-1+n*k].reshape((n, k), order='F')
    S = w[iS-1:iS-1+k]
    return U, V, S
#------------------------------------------------------------------------------
# idzr_aid.f
#------------------------------------------------------------------------------
def idzr_aid(A, k):
    """
    Compute ID of a complex matrix to a specified rank using random sampling.

    :param A: Matrix.
    :type A: :class:`numpy.ndarray`
    :param k: Rank of ID.
    :type k: int
    :return: Column index array.
    :rtype: :class:`numpy.ndarray`
    :return: Interpolation coefficients.
    :rtype: :class:`numpy.ndarray`
    """
    A = np.asfortranarray(A)
    m, n = A.shape
    w = idzr_aidi(m, n, k)
    idx, proj = _id.idzr_aid(A, k, w)
    if k != n:
        proj = proj.reshape((k, n-k), order='F')
    else:
        # Full-rank ID: there are no interpolation coefficients.
        proj = np.array([], dtype='complex128', order='F')
    return idx, proj
def idzr_aidi(m, n, k):
    """
    Initialize array for :func:`idzr_aid`.

    :param m: Matrix row dimension.
    :type m: int
    :param n: Matrix column dimension.
    :type n: int
    :param k: Rank of ID.
    :type k: int
    :return: Initialization array to be used by :func:`idzr_aid`.
    :rtype: :class:`numpy.ndarray`
    """
    w = _id.idzr_aidi(m, n, k)
    return w
#------------------------------------------------------------------------------
# idzr_asvd.f
#------------------------------------------------------------------------------
def idzr_asvd(A, k):
    """
    Compute SVD of a complex matrix to a specified rank using random sampling.

    :param A: Matrix.
    :type A: :class:`numpy.ndarray`
    :param k: Rank of SVD.
    :type k: int
    :return: Left singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return: Right singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return: Singular values.
    :rtype: :class:`numpy.ndarray`
    """
    A = np.asfortranarray(A)
    m, n = A.shape
    # Allocate the backend's full workspace, then seed its head with the
    # random-sampling initialization data.
    w = np.empty((2*k + 22)*m + (6*k + 21)*n + 8*k**2 + 10*k + 90,
                 dtype='complex128', order='F')
    winit = idzr_aidi(m, n, k)
    w[:winit.size] = winit
    U, V, S, ier = _id.idzr_asvd(A, k, w)
    if ier:
        raise _RETCODE_ERROR
    return U, V, S
#------------------------------------------------------------------------------
# idzr_rid.f
#------------------------------------------------------------------------------
def idzr_rid(m, n, matveca, k):
    """
    Compute ID of a complex matrix to a specified rank using random
    matrix-vector multiplication.

    :param m: Matrix row dimension.
    :type m: int
    :param n: Matrix column dimension.
    :type n: int
    :param matveca: Function applying the matrix adjoint to a vector, with
        call signature `y = matveca(x)`.
    :type matveca: function
    :param k: Rank of ID.
    :type k: int
    :return: Column index array.
    :rtype: :class:`numpy.ndarray`
    :return: Interpolation coefficients.
    :rtype: :class:`numpy.ndarray`
    """
    idx, raw = _id.idzr_rid(m, n, matveca, k)
    # Reshape the flat coefficient buffer into a k x (n-k) Fortran matrix.
    ncoef = k * (n - k)
    proj = raw[:ncoef].reshape((k, n - k), order='F')
    return idx, proj
#------------------------------------------------------------------------------
# idzr_rsvd.f
#------------------------------------------------------------------------------
def idzr_rsvd(m, n, matveca, matvec, k):
    """
    Compute SVD of a complex matrix to a specified rank using random
    matrix-vector multiplication.

    :param m: Matrix row dimension.
    :type m: int
    :param n: Matrix column dimension.
    :type n: int
    :param matveca: Function applying the matrix adjoint to a vector, with
        call signature `y = matveca(x)`.
    :type matveca: function
    :param matvec: Function applying the matrix to a vector, with call
        signature `y = matvec(x)`.
    :type matvec: function
    :param k: Rank of SVD.
    :type k: int
    :return: Left singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return: Right singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return: Singular values.
    :rtype: :class:`numpy.ndarray`
    """
    U, V, S, ier = _id.idzr_rsvd(m, n, matveca, matvec, k)
    if ier:
        raise _RETCODE_ERROR
    return U, V, S
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@scipy@linalg@_interpolative_backend.py@.PATH_END.py
|
{
"filename": "_version.py",
"repo_name": "SNEWS2/snewpy",
"repo_path": "snewpy_extracted/snewpy-main/python/snewpy/_version.py",
"type": "Python"
}
|
__version__ = '1.6b1'
|
SNEWS2REPO_NAMEsnewpyPATH_START.@snewpy_extracted@snewpy-main@python@snewpy@_version.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/scipy/optimize/_lsq/__init__.py",
"type": "Python"
}
|
"""This module contains least-squares algorithms."""
from __future__ import division, print_function, absolute_import
from .least_squares import least_squares
from .lsq_linear import lsq_linear
__all__ = ['least_squares', 'lsq_linear']
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@scipy@optimize@_lsq@__init__.py@.PATH_END.py
|
{
"filename": "filters.py",
"repo_name": "IvS-KULeuven/IvSPythonRepository",
"repo_path": "IvSPythonRepository_extracted/IvSPythonRepository-master/sed/filters.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Functions relevant for photometric calibration
Table of contents:
1. Available response functions
2. Adding filters on the fly
- Defining a new filter
- Temporarily modifying an existing filter
3. Adding filters permanently
Section 1. Available response functions
=======================================
Short list of available systems:
>>> responses = list_response()
>>> systems = [response.split('.')[0] for response in responses]
>>> set_responses = sorted(set([response.split('.')[0] for response in systems]))
>>> for i,resp in enumerate(set_responses):
... print '%10s (%3d filters)'%(resp,systems.count(resp))
2MASS ( 3 filters)
ACSHRC ( 17 filters)
ACSSBC ( 6 filters)
ACSWFC ( 12 filters)
AKARI ( 13 filters)
ANS ( 6 filters)
APEX ( 1 filters)
ARGUE ( 3 filters)
BESSEL ( 6 filters)
BESSELL ( 6 filters)
COROT ( 2 filters)
COUSINS ( 3 filters)
DDO ( 7 filters)
DENIS ( 3 filters)
DIRBE ( 10 filters)
EEV4280 ( 1 filters)
ESOIR ( 10 filters)
GAIA ( 4 filters)
GALEX ( 2 filters)
GENEVA ( 7 filters)
HIPPARCOS ( 1 filters)
IPHAS ( 3 filters)
IRAC ( 4 filters)
IRAS ( 4 filters)
ISOCAM ( 21 filters)
JOHNSON ( 25 filters)
KEPLER ( 43 filters)
KRON ( 2 filters)
LANDOLT ( 6 filters)
MIPS ( 3 filters)
MOST ( 1 filters)
MSX ( 6 filters)
NARROW ( 1 filters)
NICMOS ( 6 filters)
OAO2 ( 12 filters)
PACS ( 3 filters)
SAAO ( 13 filters)
SCUBA ( 6 filters)
SDSS ( 10 filters)
SLOAN ( 2 filters)
SPIRE ( 3 filters)
STEBBINS ( 6 filters)
STISCCD ( 2 filters)
STISFUV ( 4 filters)
STISNUV ( 7 filters)
STROMGREN ( 6 filters)
TD1 ( 4 filters)
TYCHO ( 2 filters)
TYCHO2 ( 2 filters)
ULTRACAM ( 5 filters)
USNOB1 ( 2 filters)
UVEX ( 5 filters)
VILNIUS ( 7 filters)
VISIR ( 13 filters)
WALRAVEN ( 5 filters)
WFCAM ( 5 filters)
WFPC2 ( 21 filters)
WISE ( 4 filters)
WOOD ( 12 filters)
Plots of all passbands of all systems:
]include figure]]ivs_sed_filters_2MASS.png]
]include figure]]ivs_sed_filters_ACSHRC.png]
]include figure]]ivs_sed_filters_ACSSBC.png]
]include figure]]ivs_sed_filters_ACSWFC.png]
]include figure]]ivs_sed_filters_AKARI.png]
]include figure]]ivs_sed_filters_ANS.png]
]include figure]]ivs_sed_filters_APEX.png]
]include figure]]ivs_sed_filters_ARGUE.png]
]include figure]]ivs_sed_filters_BESSEL.png]
]include figure]]ivs_sed_filters_BESSELL.png]
]include figure]]ivs_sed_filters_COROT.png]
]include figure]]ivs_sed_filters_COUSINS.png]
]include figure]]ivs_sed_filters_DDO.png]
]include figure]]ivs_sed_filters_DENIS.png]
]include figure]]ivs_sed_filters_DIRBE.png]
]include figure]]ivs_sed_filters_ESOIR.png]
]include figure]]ivs_sed_filters_EEV4280.png]
]include figure]]ivs_sed_filters_GAIA.png]
]include figure]]ivs_sed_filters_GALEX.png]
]include figure]]ivs_sed_filters_GENEVA.png]
]include figure]]ivs_sed_filters_HIPPARCOS.png]
]include figure]]ivs_sed_filters_IPHAS.png]
]include figure]]ivs_sed_filters_IRAC.png]
]include figure]]ivs_sed_filters_IRAS.png]
]include figure]]ivs_sed_filters_ISOCAM.png]
]include figure]]ivs_sed_filters_JOHNSON.png]
]include figure]]ivs_sed_filters_KEPLER.png]
]include figure]]ivs_sed_filters_KRON.png]
]include figure]]ivs_sed_filters_LANDOLT.png]
]include figure]]ivs_sed_filters_MIPS.png]
]include figure]]ivs_sed_filters_MOST.png]
]include figure]]ivs_sed_filters_MSX.png]
]include figure]]ivs_sed_filters_NARROW.png]
]include figure]]ivs_sed_filters_NICMOS.png]
]include figure]]ivs_sed_filters_OAO2.png]
]include figure]]ivs_sed_filters_PACS.png]
]include figure]]ivs_sed_filters_SAAO.png]
]include figure]]ivs_sed_filters_SCUBA.png]
]include figure]]ivs_sed_filters_SDSS.png]
]include figure]]ivs_sed_filters_SLOAN.png]
]include figure]]ivs_sed_filters_SPIRE.png]
]include figure]]ivs_sed_filters_STEBBINS.png]
]include figure]]ivs_sed_filters_STISCCD.png]
]include figure]]ivs_sed_filters_STISFUV.png]
]include figure]]ivs_sed_filters_STISNUV.png]
]include figure]]ivs_sed_filters_STROMGREN.png]
]include figure]]ivs_sed_filters_TD1.png]
]include figure]]ivs_sed_filters_TYCHO.png]
]include figure]]ivs_sed_filters_TYCHO2.png]
]include figure]]ivs_sed_filters_ULTRACAM.png]
]include figure]]ivs_sed_filters_USNOB1.png]
]include figure]]ivs_sed_filters_UVEX.png]
]include figure]]ivs_sed_filters_VILNIUS.png]
]include figure]]ivs_sed_filters_VISIR.png]
]include figure]]ivs_sed_filters_WALRAVEN.png]
]include figure]]ivs_sed_filters_WFPC2.png]
]include figure]]ivs_sed_filters_WISE.png]
]include figure]]ivs_sed_filters_WOOD.png]
Section 2: Adding filters on the fly
====================================
Section 2.1: Defining a new filter
----------------------------------
You can add custom filters on the fly using L{add_custom_filter}. In this
example we add a weird-looking filter and check the definition of Flambda and
Fnu and its relation to the effective wavelength of a passband:
Prerequisites: some modules that come in handy:
>>> from ivs.sigproc import funclib
>>> from ivs.sed import model
>>> from ivs.units import conversions
First, we'll define a double-peaked Gaussian profile on the wavelength grid of
the WISE.W3 response curve:
>>> wave = get_response('WISE.W3')[0]
>>> trans = funclib.evaluate('gauss',wave,[1.5,76000.,10000.,0.])
>>> trans+= funclib.evaluate('gauss',wave,[1.0,160000.,25000.,0.])
This is what it looks like:
>>> p = pl.figure()
>>> p = pl.plot(wave/1e4,trans,'k-')
>>> p = pl.xlabel("Wavelength [micron]")
>>> p = pl.ylabel("Transmission [arbitrary units]")
]include figure]]ivs_sed_filters_weird_trans.png]
We can add this filter to the list of predefined filters in the following way
(for the doctests to work, we have to do a little work around and call
filters via that module, this is not needed in a normal workflow):
>>> model.filters.add_custom_filter(wave,trans,photband='LAMBDA.CCD',type='CCD')
>>> model.filters.add_custom_filter(wave,trans,photband='LAMBDA.BOL',type='BOL')
Note that we add the filter twice, once assuming that it is mounted on a
bolometer, and once on a CCD device. We'll call the filter C{LAMBDA.CCD} and
C{LAMBDA.BOL}. From now on, they are available within functions as L{get_info}
and L{get_response}. For example, what is the effective (actually pivot)
wavelength?
>>> effwave_ccd = model.filters.eff_wave('LAMBDA.CCD')
>>> effwave_bol = model.filters.eff_wave('LAMBDA.BOL')
>>> print(effwave_ccd,effwave_bol)
(119263.54911400242, 102544.27931275869)
Let's do some synthetic photometry now. Suppose we have a black body atmosphere:
>>> bb = model.blackbody(wave,5777.)
We now calculate the synthetic flux, assuming the CCD and BOL device. We
compute the synthetic flux both in Flambda and Fnu:
>>> flam_ccd,flam_bol = model.synthetic_flux(wave,bb,['LAMBDA.CCD','LAMBDA.BOL'])
>>> fnu_ccd,fnu_bol = model.synthetic_flux(wave,bb,['LAMBDA.CCD','LAMBDA.BOL'],units=['FNU','FNU'])
You can see that the fluxes can be quite different when you assume photon or
energy counting devices!
>>> flam_ccd,flam_bol
(897.68536911320564, 1495.248213834755)
>>> fnu_ccd,fnu_bol
(4.2591095543803019e-06, 5.2446332430111098e-06)
Can we now readily convert Flambda to Fnu with assuming the pivot wavelength?
>>> fnu_fromflam_ccd = conversions.convert('erg/s/cm2/AA','erg/s/cm2/Hz',flam_ccd,wave=(effwave_ccd,'A'))
>>> fnu_fromflam_bol = conversions.convert('erg/s/cm2/AA','erg/s/cm2/Hz',flam_bol,wave=(effwave_bol,'A'))
Which is equivalent with:
>>> fnu_fromflam_ccd = conversions.convert('erg/s/cm2/AA','erg/s/cm2/Hz',flam_ccd,photband='LAMBDA.CCD')
>>> fnu_fromflam_bol = conversions.convert('erg/s/cm2/AA','erg/s/cm2/Hz',flam_bol,photband='LAMBDA.BOL')
Apparently, with the definition of pivot wavelength, you can safely convert from
Fnu to Flambda:
>>> print(fnu_ccd,fnu_fromflam_ccd)
(4.2591095543803019e-06, 4.259110447428463e-06)
>>> print(fnu_bol,fnu_fromflam_bol)
(5.2446332430111098e-06, 5.2446373530017525e-06)
The slight difference you see is numerical.
Section 2.2: Temporarily modifying an existing filter
-----------------------------------------------------
Under usual conditions, you are prohibited from overwriting an existing predefined
response curve. That is, if you try to L{add_custom_filter} with a C{photband}
that already exists as a file, a C{ValueError} will be raised (this is not the
case for a custom defined filter, which you can overwrite without problems!).
If, for testing purposes, you want to use another definition of a predefined
response curve, you need to set C{force=True} in L{add_custom_filter}, and then
call
>>> set_prefer_file(False)
To reset and use the original definitions again, do
>>> set_prefer_file(True)
Section 3.: Adding filters permanently
--------------------------------------
Add a new response curve file to the ivs/sed/filters directory. The file should
contain two columns, the first column is the wavelength in angstrom, the second
column is the transmission curve. The units of the later are not important.
Then, call L{update_info}. The contents of C{zeropoints.dat} will automatically
be updated. Make sure to add any additional information on the new filters
manually in that file (e.g. is it a CCD or bolometer, what are the zeropoint
magnitudes etc).
"""
import os
import glob
import logging
import numpy as np
from ivs.aux.decorators import memoized
from ivs.aux import decorators
from ivs.aux import loggers
from ivs.inout import ascii
# Directory containing this module; response-curve files are looked up in
# the 'filters' subdirectory next to it.
basedir = os.path.dirname(__file__)
logger = logging.getLogger("SED.FILT")
logger.addHandler(loggers.NullHandler())
# Registry of filters added at runtime via add_custom_filter(). The special
# '_prefer_file' key controls whether on-disk definitions take precedence
# over custom ones when both exist (see set_prefer_file()).
custom_filters = {'_prefer_file':True}
#{ response curves
@memoized
def get_response(photband):
    """
    Retrieve the response curve of a photometric system 'SYSTEM.FILTER'.

    OPEN.BOL represents a bolometric open filter.

    Example usage:

    >>> p = pl.figure()
    >>> for band in ['J','H','KS']:
    ...     p = pl.plot(*get_response('2MASS.%s'%(band)))

    If you defined a custom filter with the same name as an existing one and
    you want to use that one in the future, set C{prefer_file=False} in the
    C{custom_filters} module dictionary.

    @param photband: photometric passband
    @type photband: str ('SYSTEM.FILTER')
    @return: (wavelength [A], response)
    @rtype: (array, array)
    """
    photband = photband.upper()
    # An open bolometric filter is flat over (nearly) all wavelengths.
    if photband == 'OPEN.BOL':
        flat = 1 / (1e10 - 1)
        return np.array([1, 1e10]), np.array([flat, flat])
    photfile = os.path.join(basedir, 'filters', photband)
    have_file = os.path.isfile(photfile)
    # Files win when preferred, or when no custom definition exists.
    if have_file and (custom_filters['_prefer_file'] or
                      photband not in custom_filters):
        wave, response = ascii.read2array(photfile).T[:2]
    elif photband in custom_filters:
        wave, response = custom_filters[photband]['response']
    else:
        raise IOError('{0} does not exist {1}'.format(photband, list(custom_filters.keys())))
    # Return the curve sorted by wavelength.
    order = np.argsort(wave)
    return wave[order], response[order]
def create_custom_filter(wave,peaks,range=(3000,4000),sigma=3.):
    """
    Create a custom filter as a sum of Gaussians.

    The peak centres are spread evenly over the interior of C{range}, and
    each Gaussian has the same width, derived from C{range} and C{sigma}.

    @param wave: wavelength to evaluate the profile on
    @type wave: ndarray
    @param peaks: heights of the peaks
    @type peaks: ndarray of length N, with N peaks
    @param range: wavelength range of the peaks
    @type range: tuple
    @param sigma: width of the peaks in units of (range/N)
    @type sigma: float
    @return: filter profile
    @rtype: ndarray
    """
    npeaks = len(peaks)
    # Centres: interior points of an evenly spaced grid over the range.
    centers = np.linspace(range[0], range[1], npeaks + 2)[1:-1]
    width = (range[1] - range[0]) / (sigma * npeaks)
    components = [height * np.exp(-(wave - mu)**2 / (2.0 * width**2))
                  for height, mu in zip(peaks, centers)]
    return np.array(components).sum(axis=0)
def add_custom_filter(wave,response,**kwargs):
    """
    Add a custom filter to the set of predefined filters.

    Extra keywords are:
        'eff_wave', 'type',
        'vegamag', 'vegamag_lit',
        'ABmag', 'ABmag_lit',
        'STmag', 'STmag_lit',
        'Flam0', 'Flam0_units', 'Flam0_lit',
        'Fnu0', 'Fnu0_units', 'Fnu0_lit',
        'source'

    default C{type} is 'CCD'.
    default C{photband} is 'CUSTOM.FILTER'

    @param wave: wavelength (angstrom)
    @type wave: ndarray
    @param response: response
    @type response: ndarray
    @param photband: photometric passband
    @type photband: str ('SYSTEM.FILTER')
    """
    kwargs.setdefault('photband','CUSTOM.FILTER')
    kwargs.setdefault('copy_from','JOHNSON.V')
    kwargs.setdefault('force',False)
    photband = kwargs['photband']
    # Refuse to shadow an on-disk response file unless explicitly forced.
    photfile = os.path.join(basedir,'filters',photband)
    if os.path.isfile(photfile) and not kwargs['force']:
        raise ValueError('bandpass {0} already exists'.format(photfile))
    elif photband in custom_filters:
        logger.debug('Overwriting previous definition of {0}'.format(photband))
    custom_filters[photband] = dict(response=(wave,response))
    # Effective wavelength depends on the detector type (CCD vs bolometer).
    kwargs.setdefault('type','CCD')
    kwargs.setdefault('eff_wave',eff_wave(photband,det_type=kwargs['type']))
    # Build the zeropoints.dat-style row: start from a template filter,
    # zero out every literature ("lit") column, then overwrite with any
    # values supplied by the caller.
    zp_row = get_info([kwargs['copy_from']])
    for colname in zp_row.dtype.names:
        if 'lit' in colname:
            zp_row[colname] = 0
        zp_row[colname] = kwargs.pop(colname, zp_row[colname])
    # Invalidate this module's memoized caches so the new filter is seen.
    del decorators.memory[__name__]
    custom_filters[photband]['zp'] = zp_row
    logger.debug('Added photband {0} to the predefined set'.format(photband))
def set_prefer_file(prefer_file=True):
    """
    Choose whether on-disk filter files or in-memory custom filters win
    when a passband is defined in both places.

    @param prefer_file: prefer files when True, custom filters when False
    @type prefer_file: bool
    """
    custom_filters['_prefer_file'] = prefer_file
    preference = 'files' if prefer_file else 'custom filters'
    logger.info("Prefering {}".format(preference))
def add_spectrophotometric_filters(R=200.,lambda0=950.,lambdan=3350.):
    """
    Define a series of adjacent box-shaped filters at constant resolving power.

    Wavelength bin edges are spaced uniformly in log10 with step
    log10(1+1/R), i.e. constant R = lambda/delta-lambda. Each bin becomes a
    boxcar response registered via L{add_custom_filter} under the name
    'BOXCAR_R<R>.<lambda_start>'. Already-existing bands are skipped.

    @param R: resolving power of the bins
    @type R: float
    @param lambda0: start wavelength (angstrom)
    @type lambda0: float
    @param lambdan: end wavelength (angstrom)
    @type lambdan: float
    @return: names of the (attempted) filters, in wavelength order
    @rtype: list of str
    """
    #-- STEP 1: define wavelength bins (log-uniform => constant R)
    Delta = np.log10(1.+1./R)
    x = np.arange(np.log10(lambda0),np.log10(lambdan)+Delta,Delta)
    x = 10**x
    photbands = []
    for i in range(len(x)-1):
        wave = np.linspace(x[i],x[i+1],100)
        resp = np.ones_like(wave)
        # pad with a zero-response sample on each side so the boxcar edges
        # are explicit in the stored curve
        dw = wave[1]-wave[0]
        wave = np.hstack([wave[0]-dw,wave,wave[-1]+dw])
        resp = np.hstack([0,resp,0])
        photband = 'BOXCAR_R{0:d}.{1:d}'.format(int(R),int(x[i]))
        try:
            add_custom_filter(wave,resp,photband=photband)
        except ValueError:
            logger.info('{0} already exists, skipping'.format(photband))
        photbands.append(photband)
    logger.info('Added spectrophotometric filters')
    return photbands
def list_response(name='*',wave_range=(-np.inf,+np.inf)):
    """
    List available response curves.

    Specify a glob string C{name} and/or a wavelength range to make a selection
    of all available curves. If nothing is supplied, all curves will be returned.

    Both on-disk filter files and in-memory custom filters are searched;
    human-eye response curves are always excluded.

    @param name: list all curves containing this string
    @type name: str
    @param wave_range: list all curves within this wavelength range (A)
    @type wave_range: (float, float)
    @return: list of curve files
    @rtype: list of str
    """
    #-- collect all curve files but remove human eye responses
    if not '*' in name:
        # wrap a bare substring so glob treats it as a contains-match
        name_ = '*' + name + '*'
    else:
        name_ = name
    curve_files = sorted(glob.glob(os.path.join(basedir,'filters',name_.upper())))
    # custom (in-memory) filters are matched by plain substring, not glob
    curve_files = sorted(curve_files+[key for key in list(custom_filters.keys()) if ((name in key) and not (key=='_prefer_file'))])
    curve_files = [cf for cf in curve_files if not ('HUMAN' in cf or 'EYE' in cf) ]
    #-- select in correct wavelength range (by effective wavelength)
    curve_files = [os.path.basename(curve_file) for curve_file in curve_files if (wave_range[0]<=eff_wave(os.path.basename(curve_file))<=wave_range[1])]
    #-- log to the screen and return
    for curve_file in curve_files: logger.info(curve_file)
    return curve_files
def is_color(photband):
    """
    Return True if the photometric passband is actually a color.

    A passband counts as a color when its filter part contains a dash
    (e.g. 'JOHNSON.B-V') or is one of the Stromgren indices 'M1'/'C1'.

    @param photband: name of the photometric passband
    @type photband: string
    @return: True or False
    @rtype: bool
    """
    band = photband.split('.')[1]
    return ('-' in band) or (band.upper() in ('M1', 'C1'))
def get_color_photband(photband):
    """
    Retrieve the photometric bands from color.

    'SYSTEM.A-B' maps to ('SYSTEM.A', 'SYSTEM.B'); the Stromgren indices
    'M1' and 'C1' map to their three constituent bands.

    @param photband: name of the photometric passband
    @type photband: string
    @return: tuple of strings
    @rtype: tuple
    @raise ValueError: if the band is not a recognized color
    """
    system, band = photband.split('.')
    band = band.strip()  # remove extra spaces
    if '-' in band:
        parts = band.split('-')
    elif band.upper() == 'M1':
        parts = ['V', 'B', 'Y']
    elif band.upper() == 'C1':
        parts = ['V', 'B', 'U']
    else:
        raise ValueError('cannot recognize color {}'.format(photband))
    return tuple('%s.%s' % (system, part) for part in parts)
def make_color(photband):
    """
    Make a color from a color name and fluxes.

    You get two things: a list of photbands that are need to construct the color,
    and a function which you need to pass fluxes to compute the color.

    >>> bands, func = make_color('JOHNSON.B-V')
    >>> print(bands)
    ('JOHNSON.B', 'JOHNSON.V')
    >>> print(func(2,3.))
    0.6666666666666666

    @param photband: name of the color passband ('SYSTEM.X-Y', 'SYSTEM.M1'
    or 'SYSTEM.C1')
    @type photband: str
    @return: photbands, function to construct color
    @rtype: tuple,callable
    @raise ValueError: if the band is not a recognized color
    """
    system,band = photband.split('.')
    band = band.strip() # remove extra spaces
    photbands = get_color_photband(photband)
    # the returned callable takes fluxes in the same order as `photbands`
    if len(band.split('-'))==2:
        func = lambda f0,f1: f0/f1
    elif band=='M1':
        func = lambda fv,fb,fy: fv*fy/fb**2
    elif band=='C1':
        func = lambda fv,fb,fu: fu*fb/fv**2
    else:
        raise ValueError('cannot recognize color {}'.format(photband))
    return photbands,func
def eff_wave(photband,model=None,det_type=None):
    """
    Return the effective wavelength of a photometric passband.

    The effective wavelength is defined as the average wavelength weighed with
    the response curve.

    >>> eff_wave('2MASS.J')
    12393.093155655277

    If you give model fluxes as an extra argument, the wavelengths will take
    these into account to calculate the `true' effective wavelength (e.g.,
    Van Der Bliek, 1996), eq 2.

    @param photband: photometric passband
    @type photband: str ('SYSTEM.FILTER') or array/list of str
    @param model: model wavelength and fluxes
    @type model: tuple of 1D arrays (wave,flux)
    @param det_type: detector type; if None, it is looked up per band from
    the zeropoint table ('CCD' when unknown)
    @type det_type: str or None
    @return: effective wavelength [A]
    @rtype: float or numpy array
    """
    #-- if photband is a string, it's the name of a photband: put it in a
    #   container but unwrap afterwards
    if isinstance(photband,str):
        single_band = True
        photband = [photband]
    #-- else, it is a container
    else:
        single_band = False
    my_eff_wave = []
    for iphotband in photband:
        try:
            wave,response = get_response(iphotband)
            #-- bolometric or ccd? Resolve into a per-band local: the
            #   original wrote the lookup result back into `det_type`, so
            #   the first band's detector type leaked into all later bands.
            itype = det_type
            if itype is None:
                info = get_info([iphotband])
                itype = info['type'][0] if len(info) else 'CCD'
            if model is None:
                if itype=='BOL':
                    this_eff_wave = np.sqrt(np.trapz(response,x=wave)/np.trapz(response/wave**2,x=wave))
                else:
                    this_eff_wave = np.sqrt(np.trapz(wave*response,x=wave)/np.trapz(response/wave,x=wave))
            else:
                #-- interpolate response curve onto higher resolution model and
                #   take weighted average
                fluxm = np.sqrt(10**np.interp(np.log10(wave),np.log10(model[0]),np.log10(model[1])))
                if itype=='CCD':
                    this_eff_wave = np.sqrt(np.trapz(wave*fluxm*response,x=wave) / np.trapz(fluxm*response/wave,x=wave))
                elif itype=='BOL':
                    this_eff_wave = np.sqrt(np.trapz(fluxm*response,x=wave) / np.trapz(fluxm*response/wave**2,x=wave))
        #-- if the photband is not defined:
        except IOError:
            this_eff_wave = np.nan
        my_eff_wave.append(this_eff_wave)
    if single_band:
        my_eff_wave = my_eff_wave[0]
    else:
        my_eff_wave = np.array(my_eff_wave,float)
    return my_eff_wave
@memoized
def get_info(photbands=None):
    """
    Return a record array containing all filter information.

    The record arrays contains following columns:
    - photband
    - eff_wave
    - type
    - vegamag, vegamag_lit
    - ABmag, ABmag_lit
    - STmag, STmag_lit
    - Flam0, Flam0_units, Flam0_lit
    - Fnu0, Fnu0_units, Fnu0_lit,
    - source

    Rows from in-memory custom filters (those carrying a 'zp' entry) are
    merged with the on-disk zeropoints.dat table. Results are memoized;
    L{add_custom_filter} invalidates the cache.

    @param photbands: list of photbands to get the information from. The input
    order is equal to the output order. If C{None}, all filters are returned.
    @type photbands: iterable container (list, tuple, 1Darray)
    @return: record array containing all information on the requested photbands.
    @rtype: record array
    """
    zp_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),'zeropoints.dat')
    zp = ascii.read2recarray(zp_file)
    for iph in custom_filters:
        if iph=='_prefer_file': continue
        if 'zp' in custom_filters[iph]:
            zp = np.hstack([zp,custom_filters[iph]['zp']])
    # sorted order is required for the searchsorted lookup below
    zp = zp[np.argsort(zp['photband'])]
    #-- list photbands in order given, and remove those that do not have
    #   zeropoints etc.
    if photbands is not None:
        order = np.searchsorted(zp['photband'],photbands)
        zp = zp[order]
        # drop entries where searchsorted found no exact match
        keep = (zp['photband']==photbands)
        zp = zp[keep]
    return zp
def update_info(zp=None):
    """
    Update information in zeropoint file, e.g. after calibration.

    Call first L{ivs.sed.model.calibrate} without arguments, and pass the output
    to this function.

    Response curve files present on disk but missing from zeropoints.dat are
    appended with placeholder values; the merged, sorted table is written back
    to 'zeropoints.dat' in the current working directory.

    @param zp: updated contents from C{zeropoints.dat}
    @type zp: recarray
    """
    zp_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),'zeropoints.dat')
    zp_,comms = ascii.read2recarray(zp_file,return_comments=True)
    existing = [str(i.strip()) for i in zp_['photband']]
    resp_files = sorted(glob.glob(os.path.join(os.path.dirname(os.path.abspath(__file__)),'filters/*')))
    resp_files = [os.path.basename(ff) for ff in resp_files if not os.path.basename(ff) in existing]
    # NOTE(review): list.remove raises ValueError if these eye-response files
    # are absent or already listed in zeropoints.dat — confirm that is intended
    resp_files.remove('HUMAN.EYE')
    resp_files.remove('HUMAN.CONES')
    resp_files.remove('CONES.EYE')
    if zp is None:
        zp = zp_
        logger.info('No new calibrations; previous information on existing response curves is copied')
    else:
        logger.info('Received new calibrations contents of zeropoints.dat will be updated')
    #-- update info on previously non existing response curves
    new_zp = np.zeros(len(resp_files),dtype=zp.dtype)
    logger.info('Found {} new response curves, adding them with default information'.format(len(resp_files)))
    for i,respfile in enumerate(resp_files):
        new_zp[i]['photband'] = respfile
        new_zp[i]['eff_wave'] = float(eff_wave(respfile))
        new_zp[i]['type'] = 'CCD'
        new_zp[i]['vegamag'] = np.nan
        new_zp[i]['ABmag'] = np.nan
        new_zp[i]['STmag'] = np.nan
        new_zp[i]['Flam0_units'] = 'erg/s/cm2/AA'
        # NOTE(review): Fnu0_units is set to an Flam-style unit string
        # ('erg/s/cm2/AA'); looks like a copy-paste — confirm against the
        # units used elsewhere for Fnu0 (typically erg/s/cm2/Hz)
        new_zp[i]['Fnu0_units'] = 'erg/s/cm2/AA'
        new_zp[i]['source'] = 'nan'
    zp = np.hstack([zp,new_zp])
    sa = np.argsort(zp['photband'])
    ascii.write_array(zp[sa],'zeropoints.dat',header=True,auto_width=True,comments=['#'+line for line in comms[:-2]],use_float='%g')
def get_plotsymbolcolorinfo():
    """
    Return the arrays needed to always plot the same photometric system with the same color.

    @return: (sorted photometric systems, plot color values), or (None, None)
    when either data file could not be loaded
    @rtype: (ndarray, ndarray) or (None, None)
    @raise IndexError: if the two data files have different lengths
    """
    photsystem_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),'list_photsystems_sorted.dat')
    plotcolorvalues_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),'plotcolorvalues.dat')
    try:
        sortedphotsystems = ascii.read2array(photsystem_file,dtype='str')
    except IOError:
        logger.info('Loading of {} file failed. No fixed symbol color for each photometric system possible.'.format(photsystem_file))
    try:
        plotcolorvalues = ascii.read2array(plotcolorvalues_file)
    except IOError:
        logger.info('Loading of {} file failed. No fixed symbol color for each photometric system possible.'.format(plotcolorvalues_file))
    try:
        if len(sortedphotsystems) == len(plotcolorvalues):
            return sortedphotsystems.ravel(),plotcolorvalues.ravel()
        # BUG FIX: the original raised a bare IndexError and left the
        # explanatory print() unreachable after the raise; carry the
        # message on the exception instead.
        raise IndexError('{} should be of equal length as {}.'.format(plotcolorvalues_file,photsystem_file))
    except NameError:
        # one of the reads above failed, leaving its variable undefined
        return None,None
if __name__=="__main__":
    # No arguments: run the doctests. Any argument: plot every response
    # curve, one figure per photometric system, and save each figure.
    import sys
    import pylab as pl
    if not sys.argv[1:]:
        import doctest
        doctest.testmod()
        pl.show()
    else:
        import itertools
        responses = list_response()
        systems = [response.split('.')[0] for response in responses]
        set_responses = sorted(set([response.split('.')[0] for response in systems]))
        this_filter = 0
        for i,resp in enumerate(responses):
            # what system is this, and how many filters are in this system?
            this_system = resp.split('.')[0]
            nr_filters = systems.count(this_system)
            # call the plot containing the filters of the same system. If this is the
            # the first time the plot is called (first filter of system), then set
            # the title and color cycle
            p = pl.figure(set_responses.index(this_system),figsize=(10,4.5))
            if not hasattr(pl.gca(),'color_cycle'):
                # NOTE(review): pl.cm.spectral was removed in matplotlib >= 2.2
                # (renamed to nipy_spectral) — confirm target matplotlib version
                color_cycle = itertools.cycle([pl.cm.spectral(j) for j in np.linspace(0, 1.0, nr_filters)])
                p = pl.gca().color_cycle = color_cycle
            color = next(pl.gca().color_cycle)
            p = pl.title(resp.split('.')[0])
            # get the response curve and plot it
            wave,trans = get_response(resp)
            p = pl.plot(wave/1e4,trans,label=resp,color=color)
            # and set labels
            p = pl.xlabel('Wavelength [micron]')
            p = pl.ylabel('Transmission')
            # if there are not more filters in this systems, save the plot to a file
            # and close it
            this_filter+=1
            if this_filter==nr_filters:
                this_filter = 0
                p = pl.legend(prop=dict(size='small'))
                # NOTE(review): hard-coded absolute output path (developer machine)
                p = pl.savefig('/home/pieterd/python/ivs/doc/images/ivs_sed_filters_%s'%(this_system));p = pl.close()
|
IvS-KULeuvenREPO_NAMEIvSPythonRepositoryPATH_START.@IvSPythonRepository_extracted@IvSPythonRepository-master@sed@filters.py@.PATH_END.py
|
{
"filename": "test_mpi_fail.py",
"repo_name": "adrn/schwimmbad",
"repo_path": "schwimmbad_extracted/schwimmbad-main/tests/test_mpi_fail.py",
"type": "Python"
}
|
# type: ignore
"""
I couldn't figure out how to get py.test and MPI to play nice together,
so this is a script that tests the MPIPool
"""
# Standard library
import random
import pytest
from schwimmbad.mpi import MPIPool
def worker_error(task):
    """Worker that ignores *task* and always raises, to exercise pool error paths."""
    failure = AttributeError("Derp")
    raise failure
# NOTE(review): pytest.mark.skip takes only `reason`; the positional True here
# looks like a leftover from skipif — confirm against the pytest version in use.
@pytest.mark.skip(True, reason="WTF")
def test_mpi_worker_error():
    """Mapping a worker that raises should propagate the failure out of the pool."""
    with MPIPool() as pool:
        tasks = [random.random() for i in range(1000)]
        pool.map(worker_error, tasks) # should fail
if __name__ == "__main__":
    # Run directly (pytest + MPI do not cooperate here); see module docstring.
    test_mpi_worker_error()
|
adrnREPO_NAMEschwimmbadPATH_START.@schwimmbad_extracted@schwimmbad-main@tests@test_mpi_fail.py@.PATH_END.py
|
{
"filename": "jsonpickle.py",
"repo_name": "sdss/cluplus",
"repo_path": "cluplus_extracted/cluplus-main/python/cluplus/parsers/jsonpickle.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: Florian Briegel (briegel@mpia.de)
# @Date: 2021-08-12
# @Filename: jsonpickle.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
'''
'''
from __future__ import annotations
import click
from clu.command import Command
from clu.parsers.click import command_parser
import jsonpickle
import numpy as np
class JsonPickleParamType(click.ParamType):
    """Click parameter type that decodes a jsonpickle-encoded string argument."""

    # shown by click in error messages and --help metavars
    name = "jsonpickle"

    def convert(self, value, param, ctx):
        """Return the decoded object, or abort option parsing via self.fail."""
        try:
            return jsonpickle.decode(value)
        except ValueError:
            # malformed input surfaces as ValueError; report through click
            self.fail(f"{value!r} is not a valid jsonpickle", param, ctx)
def pickle(*argv):
    '''
    converts single or multiple data to a quoted json string.
    '''
    # each object becomes its jsonpickle encoding wrapped in single quotes
    quoted = ["'" + jsonpickle.encode(item, make_refs=False) + "'" for item in argv]
    if len(argv) > 1:
        return quoted
    return quoted[0]
def unpickle(*argv):
    '''
    decodes single or multiple quoted json strings back to data.

    Inverse of pickle(): each argument is stripped of its surrounding
    quote characters before being passed to jsonpickle.decode.
    '''
    if(len(argv) > 1):
        return [jsonpickle.decode(a[1:-1]) for a in argv]
    else:
        return jsonpickle.decode(argv[0][1:-1])
|
sdssREPO_NAMEcluplusPATH_START.@cluplus_extracted@cluplus-main@python@cluplus@parsers@jsonpickle.py@.PATH_END.py
|
{
"filename": "hp_utils_test.py",
"repo_name": "keras-team/keras-tuner",
"repo_path": "keras-tuner_extracted/keras-tuner-master/keras_tuner/engine/hyperparameters/hp_utils_test.py",
"type": "Python"
}
|
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from keras_tuner.engine.hyperparameters import hp_utils
def test_sampling_from_proto_raise_error():
    """An unknown sampling proto value is rejected with a descriptive ValueError."""
    with pytest.raises(ValueError, match="Expected sampling"):
        hp_utils.sampling_from_proto("a")
def test_sampling_to_proto_raise_error():
    """An unknown sampling name is rejected with a descriptive ValueError."""
    with pytest.raises(ValueError, match="Expected sampling"):
        hp_utils.sampling_to_proto("a")
|
keras-teamREPO_NAMEkeras-tunerPATH_START.@keras-tuner_extracted@keras-tuner-master@keras_tuner@engine@hyperparameters@hp_utils_test.py@.PATH_END.py
|
{
"filename": "kl.py",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/pyro/distributions/kl.py",
"type": "Python"
}
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
from torch.distributions import (
Independent,
MultivariateNormal,
Normal,
kl_divergence,
register_kl,
)
from pyro.distributions.delta import Delta
from pyro.distributions.distribution import Distribution
from pyro.distributions.util import sum_rightmost
@register_kl(Delta, Distribution)
def _kl_delta(p, q):
    """KL(Delta || q): for a point mass at p.v this reduces to -log q(p.v).

    NOTE(review): the Delta's own log_density term is dropped here — assumes
    it is zero (the pyro default); confirm for non-default log_density.
    """
    return -q.log_prob(p.v)
@register_kl(Independent, Independent)
def _kl_independent_independent(p, q):
    """KL between two Independent wrappers.

    Strip the number of reinterpreted batch dims the two share, compute the
    KL of the (possibly still wrapped) base distributions, then sum the
    result over the shared event dims.
    """
    shared_ndims = min(p.reinterpreted_batch_ndims, q.reinterpreted_batch_ndims)
    p_ndims = p.reinterpreted_batch_ndims - shared_ndims
    q_ndims = q.reinterpreted_batch_ndims - shared_ndims
    # re-wrap only the dims that are NOT shared; unwrap entirely when none remain
    p = Independent(p.base_dist, p_ndims) if p_ndims else p.base_dist
    q = Independent(q.base_dist, q_ndims) if q_ndims else q.base_dist
    kl = kl_divergence(p, q)
    if shared_ndims:
        # KL is additive over independent event dims
        kl = sum_rightmost(kl, shared_ndims)
    return kl
@register_kl(Independent, MultivariateNormal)
def _kl_independent_mvn(p, q):
    """KL(Independent(base, 1) || MultivariateNormal) for Delta or Normal bases.

    Other base distributions / reinterpreted_batch_ndims raise NotImplementedError.
    """
    # point mass: KL reduces to -log q at the point
    if isinstance(p.base_dist, Delta) and p.reinterpreted_batch_ndims == 1:
        return -q.log_prob(p.base_dist.v)

    if isinstance(p.base_dist, Normal) and p.reinterpreted_batch_ndims == 1:
        dim = q.event_shape[0]
        p_cov = p.base_dist.scale**2
        # only the diagonal of q's precision is needed: p's covariance is diagonal
        q_precision = q.precision_matrix.diagonal(dim1=-2, dim2=-1)
        return (
            0.5 * (p_cov * q_precision).sum(-1)
            - 0.5 * dim * (1 + math.log(2 * math.pi))
            - q.log_prob(p.base_dist.loc)
            - p.base_dist.scale.log().sum(-1)
        )
    raise NotImplementedError
__all__ = []
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@pyro@distributions@kl.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "arjunsavel/cortecs",
"repo_path": "cortecs_extracted/cortecs-main/src/cortecs/__init__.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
|
arjunsavelREPO_NAMEcortecsPATH_START.@cortecs_extracted@cortecs-main@src@cortecs@__init__.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/sankey/link/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
# Eager imports for old Pythons and for static type checkers; on modern
# runtimes the submodules are loaded lazily via plotly's relative_import.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._colorscale import Colorscale
    from ._hoverlabel import Hoverlabel
    from ._line import Line
    from . import hoverlabel
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [".hoverlabel"],
        ["._colorscale.Colorscale", "._hoverlabel.Hoverlabel", "._line.Line"],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@sankey@link@__init__.py@.PATH_END.py
|
{
"filename": "_legendwidth.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/table/_legendwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the table trace's `legendwidth`: a number with min 0."""

    def __init__(self, plotly_name="legendwidth", parent_name="table", **kwargs):
        super(LegendwidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # defaults below may be overridden by the caller via kwargs
            edit_type=kwargs.pop("edit_type", "style"),
            min=kwargs.pop("min", 0),
            **kwargs,
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@table@_legendwidth.py@.PATH_END.py
|
{
"filename": "test_gridspec.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/lib/matplotlib/tests/test_gridspec.py",
"type": "Python"
}
|
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import pytest
def test_equal():
    """SubplotSpecs taken from the same GridSpec cells compare equal."""
    gs = gridspec.GridSpec(2, 1)
    assert gs[0, 0] == gs[0, 0]
    assert gs[:, 0] == gs[:, 0]
def test_width_ratios():
    """
    Addresses issue #5835.
    See at https://github.com/matplotlib/matplotlib/issues/5835.
    """
    # 3 width ratios for a 1-column grid must be rejected
    with pytest.raises(ValueError):
        gridspec.GridSpec(1, 1, width_ratios=[2, 1, 3])
def test_height_ratios():
    """
    Addresses issue #5835.
    See at https://github.com/matplotlib/matplotlib/issues/5835.
    """
    # 3 height ratios for a 1-row grid must be rejected
    with pytest.raises(ValueError):
        gridspec.GridSpec(1, 1, height_ratios=[2, 1, 3])
def test_repr():
    """repr() of SubplotSpec/GridSpec shows slices and any ratio kwargs."""
    ss = gridspec.GridSpec(3, 3)[2, 1:3]
    assert repr(ss) == "GridSpec(3, 3)[2:3, 1:3]"

    ss = gridspec.GridSpec(2, 2,
                           height_ratios=(3, 1),
                           width_ratios=(1, 3))
    assert repr(ss) == \
        "GridSpec(2, 2, height_ratios=(3, 1), width_ratios=(1, 3))"
def test_subplotspec_args():
    """GridSpecFromSubplotSpec accepts a SubplotSpec and rejects Axes/arrays."""
    fig, axs = plt.subplots(1, 2)
    # should work:
    gs = gridspec.GridSpecFromSubplotSpec(2, 1,
                                          subplot_spec=axs[0].get_subplotspec())
    assert gs.get_topmost_subplotspec() == axs[0].get_subplotspec()
    with pytest.raises(TypeError, match="subplot_spec must be type SubplotSpec"):
        gs = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=axs[0])
    with pytest.raises(TypeError, match="subplot_spec must be type SubplotSpec"):
        gs = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=axs)
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@lib@matplotlib@tests@test_gridspec.py@.PATH_END.py
|
{
"filename": "update_test.py",
"repo_name": "deepmind/optax",
"repo_path": "optax_extracted/optax-main/optax/_src/update_test.py",
"type": "Python"
}
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for methods in `update.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
import jax.numpy as jnp
from optax._src import update
class UpdateTest(chex.TestCase):
    """Tests for apply_updates / incremental_update / periodic_update."""

    @chex.all_variants
    def test_apply_updates(self):
        """params + grads is applied leaf-wise across the pytree."""
        params = ({'a': jnp.ones((3, 2))}, jnp.ones((1,)))
        grads = jax.tree.map(lambda t: 2 * t, params)
        exp_params = jax.tree.map(lambda t: 3 * t, params)

        new_params = self.variant(update.apply_updates)(params, grads)
        chex.assert_trees_all_close(exp_params, new_params, atol=1e-10, rtol=1e-5)

    @chex.all_variants
    def test_apply_updates_mixed_precision(self):
        """float32 updates applied to bfloat16 params keep the params' dtype."""
        params = (
            {'a': jnp.ones((3, 2), dtype=jnp.bfloat16)},
            jnp.ones((1,), dtype=jnp.bfloat16),
        )
        grads = jax.tree.map(lambda t: (2 * t).astype(jnp.float32), params)
        new_params = self.variant(update.apply_updates)(params, grads)

        for leaf in jax.tree.leaves(new_params):
            assert leaf.dtype == jnp.bfloat16

    @chex.all_variants
    def test_incremental_update(self):
        """step_size=0.5 yields the midpoint between new and old trees."""
        params_1 = ({'a': jnp.ones((3, 2))}, jnp.ones((1,)))
        params_2 = jax.tree.map(lambda t: 2 * t, params_1)
        exp_params = jax.tree.map(lambda t: 1.5 * t, params_1)

        new_params = self.variant(update.incremental_update)(
            params_2, params_1, 0.5
        )
        chex.assert_trees_all_close(exp_params, new_params, atol=1e-10, rtol=1e-5)

    @chex.all_variants
    def test_periodic_update(self):
        """New tree is adopted only on steps that are multiples of the period."""
        params_1 = ({'a': jnp.ones((3, 2))}, jnp.ones((1,)))
        params_2 = jax.tree.map(lambda t: 2 * t, params_1)

        update_period = 5
        update_fn = self.variant(update.periodic_update)

        for j in range(3):
            for i in range(1, update_period):
                # off-period steps keep the old tree
                new_params = update_fn(
                    params_2, params_1, j * update_period + i, update_period
                )
                chex.assert_trees_all_close(params_1, new_params, atol=1e-10, rtol=1e-5)

            # on-period step swaps in the new tree
            new_params = update_fn(
                params_2, params_1, (j + 1) * update_period, update_period
            )
            chex.assert_trees_all_close(params_2, new_params, atol=1e-10, rtol=1e-5)

    @parameterized.named_parameters(
        dict(testcase_name='apply_updates', operation=update.apply_updates),
        dict(
            testcase_name='incremental_update',
            operation=lambda x, y: update.incremental_update(x, y, 1),
        ),
    )
    def test_none_argument(self, operation):
        """A None updates tree must be accepted without raising."""
        x = jnp.array([1.0, 2.0, 3.0])
        operation(None, x)
if __name__ == '__main__':
    # delegate test discovery/execution to absltest
    absltest.main()
|
deepmindREPO_NAMEoptaxPATH_START.@optax_extracted@optax-main@optax@_src@update_test.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "tomasstolker/species",
"repo_path": "species_extracted/species-main/species/data/phot_data/__init__.py",
"type": "Python"
}
|
tomasstolkerREPO_NAMEspeciesPATH_START.@species_extracted@species-main@species@data@phot_data@__init__.py@.PATH_END.py
|
|
{
"filename": "testplot_planet_growth.py",
"repo_name": "miosta/drift_composition",
"repo_path": "drift_composition_extracted/drift_composition-main/drift_composition/testplot_planet_growth.py",
"type": "Python"
}
|
from drift_composition.constants import Mearth, Msun, Rau, yr
from drift_composition.grid import Grid
from drift_composition.disc import DiscModel
from drift_composition.molecule import get_molecular_properties
from drift_composition.simple_planet import Planet, PlanetEnv
from drift_composition.atoms import atoms_in_molecule, ELEMENT_MASS
from drift_composition.simple_reduction import Evolution, atom_mass, dust_to_gas
import drift_composition.simple_reduction as red
import drift_composition.simple_planet as dumb
import matplotlib.pyplot as plt
import numpy as np
def multi_plot(ms,mcs,mgs,mco_g,mco_d,rrs,Nt,dt,titles):
    """Plot migrated distance (top) and mass growth (bottom) vs time for
    several planet tracks.

    ms/mcs/mgs/rrs are 2D (one row per track); mco_g/mco_d are accepted for
    signature compatibility but not plotted here. Labels are only attached
    to the first track (rr[0] == rrs[0,0]) so the legend is not duplicated.
    Masses are converted from solar to Earth masses for display.
    """
    fig, (ax,ax2) = plt.subplots(2,1,sharex=True)
    prop_cycle = plt.rcParams['axes.prop_cycle']
    colors = prop_cycle.by_key()['color']
    ax.set_ylabel("migrated distance [AU]")
    ax.plot(0,0,'o')
    #ax2 = ax.twinx()
    for (rr,c,t) in zip(rrs,colors,titles):
        ax.plot(np.arange(Nt)*dt, (rr[0]-np.array(rr))/Rau, c=c,alpha=0.7, label='radius {}'.format(t))
    for (m,mc,mg,rr,c,t) in zip(ms,mcs,mgs,rrs,colors,titles):
        #print(rr)
        if rr[0] == rrs[0,0]:
            ax2.plot(np.arange(Nt)*dt, np.array(m)*Msun/Mearth, '-', c=c, label='mass {}'.format(t))
            ax2.plot(np.arange(Nt)*dt, np.array(mc)*Msun/Mearth, ':', c=c, label='core {}'.format(t))
            ax2.plot(np.arange(Nt)*dt, np.array(mg)*Msun/Mearth, '--', c=c,label='gas {}'.format(t))
        else:
            ax2.plot(np.arange(Nt)*dt, np.array(m)*Msun/Mearth, '-', c=c)
            ax2.plot(np.arange(Nt)*dt, np.array(mc)*Msun/Mearth, ':', c=c,)
            ax2.plot(np.arange(Nt)*dt, np.array(mg)*Msun/Mearth, '--', c=c)
    #ax2.set_yscale('log')
    #ax.set_yscale('log')
    #ax2.set_xscale('log')
    ax2.set_xlabel("time [yr]")
    ax2.set_ylabel("mass [M_Earth]")
    ax.legend()
    ax2.legend()
    #ax.set_title('M_dot =1e-9')
    #plt.savefig('mdot1e-9.png')
    plt.show()
    pass
def multi_plan(ms,mcs,mgs,rrs,Nt,dt,titles):
    """Plot planet mass (total/gas/core) against orbital distance for several
    tracks on a single log-y panel.

    As in multi_plot, legend labels are only attached to the first track.
    Nt/dt/titles are accepted for signature symmetry with multi_plot; only
    the mass and radius arrays drive the plot.
    """
    fig, (ax) = plt.subplots()
    prop_cycle = plt.rcParams['axes.prop_cycle']
    colors = prop_cycle.by_key()['color']
    #ax.set_ylabel("distance [AU]")
    #ax.plot(0,0,'o')
    for (m,mc,mg,rr,c,t) in zip(ms,mcs,mgs,rrs,colors,titles):
        print(rr.shape, m.shape)
        if rr[0] == rrs[0,0]:
            ax.plot(np.array(rr)/Rau, np.array(m)*Msun/Mearth, c=c, label='total mass'.format())
            ax.plot(np.array(rr)/Rau, np.array(mg)*Msun/Mearth, '--', c=c, label='gas'.format())
            ax.plot(np.array(rr)/Rau, np.array(mc)*Msun/Mearth, ':', c=c, label='core'.format())
        else:
            ax.plot(np.array(rr)/Rau, np.array(m)*Msun/Mearth, '-', c=c)
            ax.plot(np.array(rr)/Rau, np.array(mg)*Msun/Mearth, '--', c=c)
            ax.plot(np.array(rr)/Rau, np.array(mc)*Msun/Mearth, ':', c=c)
    ax.set_yscale('log')
    #ax.set_xscale('log')
    ax.set_xlim(1,4.5e1)
    ax.set_xlabel("Distance [au]")
    ax.set_ylabel(r"Planet Mass [$\mathrm{M}_{\oplus}$]")
    ax.legend()
    #ax2.legend(loc=1)
    #ax.set_title('M_dot =1e-9')
    #plt.savefig('mdot1e-9.png')
    plt.show()
    pass
def plot_planet_comp(e, comp='CO', title=''):
    """Plot total/core/gas mass of an Evolution `e` vs time, plus the gas and
    dust contributions of one molecular component (default 'CO')."""
    fig, ax = plt.subplots()
    ax.plot(e.time, e.masses*Msun/Mearth, 'k-', label='total')
    ax.plot(e.time, e.mcs*Msun/Mearth, 'g:', label='core')
    ax.plot(e.time, e.mgs*Msun/Mearth, 'b-.', label='gas')
    # f_comps[comp] holds (gas, dust) mass tracks for the component
    ax.plot(e.time, e.f_comps[comp][0]*Msun/Mearth, 'c:', label='{} gas'.format(comp))
    ax.plot(e.time, e.f_comps[comp][1]*Msun/Mearth, 'c--', label='{} dust'.format(comp))
    #ax.set_yscale('log')
    #ax.set_xscale('log')
    ax.set_xlabel("time [yr]")
    ax.set_ylabel("mass [M_Earth]")
    ax.legend()
    #ax.savefig('frag_masses.png')
    ax.text(0, 1, title)
    plt.show()
    pass
def plot_planet(e):
    """Show two figures for an Evolution `e`: component masses vs time, then
    the same component masses vs orbital radius (dashed = gas, dotted = dust)."""
    prop_cycle = plt.rcParams['axes.prop_cycle']
    colors = prop_cycle.by_key()['color']
    plt.plot(e.time, e.masses*Msun/Mearth, 'k-', label='mass total')
    plt.plot(e.time, e.mgs*Msun/Mearth, 'k--', label='gas total')
    plt.plot(e.time, e.mcs*Msun/Mearth, 'k:', label='dust total')
    for name, c in zip(list(e.f_comps.keys()),colors):
        plt.plot(e.time, e.f_comps[name][0]*Msun/Mearth, '--', c=c, label=name)
        plt.plot(e.time, e.f_comps[name][1]*Msun/Mearth, ':', c=c)
    plt.ylim(1e-5,1e3)
    plt.ylabel(r'mass [$M_{\oplus}$]')
    plt.xlabel('time [yr]')
    plt.legend()
    plt.yscale('log')
    plt.show()
    # second figure: same tracks against orbital radius
    plt.plot(e.rs, e.masses*Msun/Mearth, 'k-', label='mass total')
    plt.plot(e.rs, e.mgs*Msun/Mearth, 'k--', label='gas total')
    plt.plot(e.rs, e.mcs*Msun/Mearth, 'k:', label='dust total')
    for name, c in zip(list(e.f_comps.keys()),colors):
        plt.plot(e.rs, e.f_comps[name][0]*Msun/Mearth, '--', c=c, label=name)
        plt.plot(e.rs, e.f_comps[name][1]*Msun/Mearth, ':', c=c)
    plt.ylim(1e-5,1e3)
    plt.xlabel('radius [au]')
    plt.ylabel(r'mass [$M_{\oplus}$]')
    plt.legend()
    plt.yscale('log')
    plt.show()
    pass
def lecture_plot():
    """Demo plot: evolve identical low-mass planets started at 15/30/40 au
    and show mass vs distance via multi_plan.

    Fixed disc setup: Mdot_gas=1e-8, Mdot_dust=1e-9, St=0.01, alpha=1e-3,
    T = 150 K (R/au)^-1/2.
    """
    Mdot_gas = 1e-8
    Mdot_dust = 1e-9
    Stokes = lambda R: 0.01
    T = lambda R: 150*(R/Rau)**-0.5
    alpha = lambda R: 1e-3
    species, abundances = get_molecular_properties()
    f_comp = dict(zip([spec.name for spec in species],np.zeros((len(species),2))))
    f_comp['H2'] = np.zeros(2)
    f_comp['Si'] = np.zeros(2)
    grid = Grid(0.1*Rau, 300*Rau, 512)
    p_env = PlanetEnv(grid, alpha(grid.Rc), 2.35, 1.0)
    dt = 1000
    Nt = 450
    f_plansi = 1e-1
    frac_gc = 0.1      # initial gas/core mass split
    init_m = 0.50      # initial planet mass scale [M_earth]
    ms2 = np.zeros(Nt)
    mcs2 = np.zeros(Nt)
    mgs2 = np.zeros(Nt)
    rrs2 = np.zeros(Nt)
    for r, mm in zip((15, 30, 40), (0.1, 0.1, 0.1, 0.1, 0.1, 0.1)):
        DM = DiscModel(grid, Mdot_gas, alpha, T)
        DM.compute_dust_surface_density(Mdot_dust, Stokes)
        # chemistry must be attached before evolving a composition component
        DM.compute_chemistry(species, abundances)
        p_env = PlanetEnv(grid, 1e-3, 2.35, 1.0)
        planet = Planet(mm*init_m*Mearth/Msun, mm*init_m*(1-frac_gc)*Mearth/Msun,
                        mm*init_m*(frac_gc)*Mearth/Msun, f_comp, r*Rau)
        # BUG FIX: original called the undefined name `std_mig` (NameError);
        # use the same entry point as main(): dumb.std_evo with comp='CO'.
        m, mc, mg, mco_g, mco_d, rr = dumb.std_evo(
            planet, DM, p_env, T(grid.Rc), f_plansi, dt, Nt, comp='CO')
        ms2 = np.vstack((ms2, m))
        mcs2 = np.vstack((mcs2, mc))
        mgs2 = np.vstack((mgs2, mg))
        rrs2 = np.vstack((rrs2, rr))
    # drop the all-zero seed rows used to start the vstacks
    multi_plan(ms2[1:], mcs2[1:], mgs2[1:], rrs2[1:], Nt, dt, (15, 30, 40))
    pass
def plot_CtoO(e,atm1,atm2,solar = 1.):
    """2x2 panel: elemental masses (top) and the atm1/atm2 number ratio
    (bottom), each against time (left column) and distance (right column).

    The mass ratio is converted to a number ratio via ELEMENT_MASS and
    normalised by `solar`. f_atms[atom] holds (gas, dust) mass tracks.
    """
    fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2, sharex='col', sharey='row')
    atoms = (atm1, atm2)
    prop_cycle = plt.rcParams['axes.prop_cycle']
    colors = prop_cycle.by_key()['color']
    # same plots against time (ax1/ax3) and against radius (ax2/ax4)
    for (x,a1,a2) in zip((e.time,e.rs),(ax1,ax2),(ax3,ax4)):
        a1.plot(x, e.masses*Msun/Mearth, 'k-', label='mass total')
        a1.plot(x, e.mgs*Msun/Mearth, 'k--', label='gas total')
        a1.plot(x, e.mcs*Msun/Mearth, 'k:', label='dust total')
        for atom,c in zip(atoms,colors):
            a1.plot(x, e.f_atms[atom][0]*Msun/Mearth, '--', c=c, label=atom)
            a1.plot(x, e.f_atms[atom][1]*Msun/Mearth, ':', c=c)
        a2.plot(x, (e.f_atms[atm1][0]/e.f_atms[atm2][0])*(ELEMENT_MASS[atm2]/ELEMENT_MASS[atm1])/solar, label='gas only')
        a2.plot(x, ((e.f_atms[atm1][0]+e.f_atms[atm1][1])/(e.f_atms[atm2][0]+e.f_atms[atm2][1])*(ELEMENT_MASS[atm2]/ELEMENT_MASS[atm1]))/solar, label='gas+dust')
    ax3.set_xlabel('time [yr]')
    ax4.set_xlabel('distance [au]')
    ax1.set_ylabel(r'mass [$M_{\oplus}$]')
    ax3.set_ylabel(r'{}/{}'.format(atm1,atm2))
    ax1.set_ylim(1e-3,1e3)
    ax2.set_ylim(1e-3,1e3)
    ax1.set_yscale('log')
    ax2.set_yscale('log')
    ax1.legend()
    ax3.legend()
    plt.show()
    pass
def plot_CtoO_cut(e,atm1,atm2,cut_index,solar = 1.):
    """Like plot_CtoO, but after redistributing dust into gas from
    `cut_index` onward via dust_to_gas (enriched envelope scenario).

    Unmodified tracks are overplotted at reduced alpha for comparison; the
    ratio panels skip the first sample to avoid a 0/0 at t=0.
    """
    fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2, sharex='col', sharey='row')
    atoms = (atm1, atm2)
    prop_cycle = plt.rcParams['axes.prop_cycle']
    colors = prop_cycle.by_key()['color']
    # redistributed gas/core masses and elemental tracks after the cut
    mgss, mcss, comps, f_atms = dust_to_gas(e, cut_index)
    for (x,a1,a2) in zip((e.time,e.rs),(ax1,ax2),(ax3,ax4)):
        a1.plot(x, e.masses*Msun/Mearth, 'k-', label='mass total')
        a1.plot(x, mgss*Msun/Mearth, 'k--', label='gas total')
        a1.plot(x, e.mcs*Msun/Mearth, 'k:', alpha=0.5)
        a1.plot(x, mcss*Msun/Mearth, 'k:', label='dust total')
        for atom,c in zip(atoms,colors):
            a1.plot(x, f_atms[atom][0]*Msun/Mearth, '--', c=c, label=atom)
            a1.plot(x, f_atms[atom][1]*Msun/Mearth, ':', c=c)
            # faded: original (un-redistributed) tracks
            a1.plot(x, e.f_atms[atom][0]*Msun/Mearth, '--', c=c, alpha=0.5)
            a1.plot(x, e.f_atms[atom][1]*Msun/Mearth, ':', c=c, alpha=0.5)
        a2.plot(x[1:], (e.f_atms[atm1][0][1:]/e.f_atms[atm2][0][1:])*(ELEMENT_MASS[atm2]/ELEMENT_MASS[atm1])/solar, label='gas only')
        a2.plot(x[1:], (f_atms[atm1][0][1:]/f_atms[atm2][0][1:])*(ELEMENT_MASS[atm2]/ELEMENT_MASS[atm1])/solar, label='enriched_gas')
        a2.plot(x, ((f_atms[atm1][0]+f_atms[atm1][1])/(f_atms[atm2][0]+f_atms[atm2][1])*(ELEMENT_MASS[atm2]/ELEMENT_MASS[atm1]))/solar, label='gas+dust')
    ax3.set_xlabel('time [yr]')
    ax4.set_xlabel('distance [au]')
    ax1.set_ylabel(r'mass [$M_{\oplus}$]')
    ax3.set_ylabel(r'{}/{}'.format(atm1,atm2))
    ax1.set_ylim(1e-3,1e3)
    ax2.set_ylim(1e-3,1e3)
    ax1.set_yscale('log')
    ax2.set_yscale('log')
    ax1.legend()
    ax3.legend()
    plt.show()
    pass
def plot_atoms(e):
    """Plot the planet's total, gas, dust and per-element masses against
    orbital distance, in Earth masses on a logarithmic scale."""
    radius = e.rs
    elements = list(ELEMENT_MASS.keys())
    palette = plt.rcParams['axes.prop_cycle'].by_key()['color']
    # Bulk mass budgets in black.
    plt.plot(radius, e.masses*Msun/Mearth, 'k-', label='mass total')
    plt.plot(radius, e.mgs*Msun/Mearth, 'k--', label='gas total')
    plt.plot(radius, e.mcs*Msun/Mearth, 'k:', label='dust total')
    # Silicon gas (dashed) and dust (dotted) reservoirs, drawn explicitly in
    # blue before the colour-cycled per-element curves below.
    plt.plot(radius, e.f_atms['Si'][0]*Msun/Mearth, '--', c='b', label='Si')
    plt.plot(radius, e.f_atms['Si'][1]*Msun/Mearth, ':', c='b')
    for element, shade in zip(elements, palette):
        plt.plot(radius, e.f_atms[element][0]*Msun/Mearth, '--', c=shade, label=element)
        plt.plot(radius, e.f_atms[element][1]*Msun/Mearth, ':', c=shade)
    plt.xlabel('Radius [au]')
    plt.ylabel(r'mass [$M_{\oplus}$]')
    plt.ylim(1e-3,1e3)
    plt.yscale('log')
    plt.legend()
    plt.show()
def main():
    """Run a single planet formation + composition evolution and store it.

    Builds a steady-state accretion disc with a fixed-Stokes-number dust
    population, attaches molecular chemistry, evolves a 5 M_earth seed planet
    starting at 8.5 au, and hands the resulting track to
    ``red.store_data_range``.  Plotting diagnostics are available (commented
    out) at the bottom.  A legacy parameter-sweep variant of this driver was
    removed; see version history if needed.
    """
    # --- Disc model -------------------------------------------------------
    Mdot_gas = 1e-8                    # gas accretion rate -- presumably Msun/yr; confirm
    Mdot_dust = 1e-9                   # dust accretion rate
    Stokes = lambda R: 0.01            # constant Stokes number
    T = lambda R: 150*(R/Rau)**-0.5    # power-law temperature profile
    alpha = lambda R: 1e-3             # constant turbulence parameter
    grid = Grid(0.1*Rau, 300*Rau, 512)
    DM = DiscModel(grid, Mdot_gas, alpha, T)
    DM.compute_dust_surface_density(Mdot_dust, Stokes)
    # One call suffices: the original code fetched the species list twice
    # in a row with identical results.
    species, abundances = get_molecular_properties()
    DM.compute_chemistry(species, abundances)
    p_env = PlanetEnv(grid, alpha(grid.Rc), 2.35, 1.0)
    # Solar reference ratios for the commented-out abundance plots below.
    SOLAR_OH = 0.0005242
    SOLAR_CO = 326./477.
    SOLAR_Z = 0.0134
    # --- Planet setup and evolution --------------------------------------
    dt = 5000          # timestep -- presumably years; verify against std_evo_comp
    Nt = 2000          # number of timesteps
    f_plansi = 1e-2
    frac_gc = 0.01     # initial gas fraction of the seed mass
    init_m = 5.0       # initial planet mass [Mearth]
    # Start with an empty (gas, dust) composition entry per species.
    f_comp = dict(zip([spec.name for spec in species], np.zeros((len(species), 2))))
    f_comp['H2'] = np.zeros(2)
    f_comp['Si'] = np.zeros(2)
    planet_ini = Planet(init_m*Mearth/Msun, init_m*(1-frac_gc)*Mearth/Msun,
                        init_m*(frac_gc)*Mearth/Msun, f_comp, 8.5*Rau)
    planet_evo, nn = dumb.std_evo_comp(planet_ini, DM, p_env, T(grid.Rc), f_plansi, dt, Nt)
    # One Evolution instance is enough; it was previously constructed twice
    # with identical arguments.
    evolution = Evolution(planet_evo, nn, dt)
    print(evolution.rs[-1], nn, len(evolution.rs))
    red.store_data_range(planet_ini, DM, p_env, T)
    # Available diagnostics:
    #plot_CtoO_cut(evolution,'O','H', red.run_away(evolution), solar = SOLAR_OH)
    #plot_CtoO(evolution,'O','H', solar = SOLAR_OH)
    #plot_CtoO(evolution,'C','O', solar = SOLAR_CO)
    #plot_CtoO_cut(evolution, 'C','O', red.run_away(evolution), solar= SOLAR_CO)
if __name__ == '__main__':
    main()
|
miostaREPO_NAMEdrift_compositionPATH_START.@drift_composition_extracted@drift_composition-main@drift_composition@testplot_planet_growth.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipywidgets/py3/ipywidgets/tests/__init__.py",
"type": "Python"
}
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipywidgets@py3@ipywidgets@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "_tickformat.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/histogram2dcontour/colorbar/_tickformat.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickformatValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``tickformat`` property of
    ``histogram2dcontour.colorbar``."""

    def __init__(
        self,
        plotly_name="tickformat",
        parent_name="histogram2dcontour.colorbar",
        **kwargs,
    ):
        # Default edit type is "colorbars" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "colorbars")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@histogram2dcontour@colorbar@_tickformat.py@.PATH_END.py
|
{
"filename": "x_p_sbi.ipynb",
"repo_name": "xpsi-group/xpsi",
"repo_path": "xpsi_extracted/xpsi-main/docs/source/x_p_sbi.ipynb",
"type": "Jupyter Notebook"
}
|
# Posterior Inference using SBI
Simulation-Based Inference (SBI) with Neural Posterior Estimation (NPE) is a statistical framework for parameter inference in complex models. Instead of relying on an explicit likelihood function, NPE uses simulations to generate synthetic data and trains neural networks to learn an approximate posterior distribution of the parameters given observed data. This approach is typically used when the likelihood is intractable or expensive to compute, making traditional methods impractical. For pulse profile modelling, while the likelihood computation is tractable, sampling complex high-dimensional parameter spaces can get computationally expensive. SBI provides a potentially appealing alternative to this problem.
In this example notebook, we utilize the `sbi` package to perform this. Refer to the installation instructions for additional dependencies required to run this notebook.
## Initialization
```python
## IMPORTANT: Import sequence - torch, sbi, and xpsi.
import torch
import torch.nn as nn
import torch.nn.functional as F
from sbi import utils as utils
from sbi import analysis as analysis
from sbi.neural_nets import posterior_nn
from sbi.inference import SNPE, simulate_for_sbi
from sbi.utils.user_input_checks import prepare_for_sbi
import numpy as np
import math
from matplotlib import pyplot as plt
from matplotlib import rcParams
rc = {"font.family" : "serif",
"mathtext.fontset" : "stix"}
plt.rcParams.update(rc)
plt.rcParams["font.serif"] = ["Times New Roman"] + plt.rcParams["font.serif"]
plt.rcParams.update({'font.size': 18})
plt.rcParams.update({'legend.fontsize': 15})
import xpsi
import sys
## Add your path to the example modules to run this notebook
sys.path.append('<path-to-xpsi>/xpsi/examples/examples_fast/Modules/')
import xpsi.SBI_wrapper as SBI_wrapper
from xpsi.SBI_wrapper import xpsi_wrappers
import xpsi.utilities.Example_CNNs as CNNs
```
/=============================================\
| X-PSI: X-ray Pulse Simulation and Inference |
|---------------------------------------------|
| Version: 2.2.7 |
|---------------------------------------------|
| https://xpsi-group.github.io/xpsi |
\=============================================/
Imported emcee version: 3.1.4
Imported PyMultiNest.
Imported UltraNest.
Imported GetDist version: 1.4.7
Imported nestcheck version: 0.2.1
```python
# Check that cuda is available
print( 'cuda is available: ' , torch.cuda.is_available() )
```
cuda is available: True
Importing modules from `examples_fast`
```python
import main
```
Loading the data assuming the notebook was run for documentation pages
Setting channels for event data...
Channels set.
Setting channels for loaded instrument response (sub)matrix...
Channels set.
No parameters supplied... empty subspace created.
Creating parameter:
> Named "phase_shift" with fixed value 0.000e+00.
> The phase shift for the signal, a periodic parameter [cycles].
Creating parameter:
> Named "frequency" with fixed value 3.140e+02.
> Spin frequency [Hz].
Creating parameter:
> Named "mass" with bounds [1.000e+00, 1.600e+00].
> Gravitational mass [solar masses].
Creating parameter:
> Named "radius" with bounds [1.000e+01, 1.300e+01].
> Coordinate equatorial radius [km].
Creating parameter:
> Named "distance" with bounds [5.000e-01, 2.000e+00].
> Earth distance [kpc].
Creating parameter:
> Named "cos_inclination" with bounds [0.000e+00, 1.000e+00].
> Cosine of Earth inclination to rotation axis.
Creating parameter:
> Named "super_colatitude" with bounds [1.000e-03, 1.570e+00].
> The colatitude of the centre of the superseding region [radians].
Creating parameter:
> Named "super_radius" with bounds [1.000e-03, 1.570e+00].
> The angular radius of the (circular) superseding region [radians].
Creating parameter:
> Named "phase_shift" with bounds [-2.500e-01, 7.500e-01].
> The phase of the hot region, a periodic parameter [cycles].
Creating parameter:
> Named "super_temperature" with bounds [6.000e+00, 7.000e+00].
> log10(superseding region effective temperature [K]).
Creating parameter:
> Named "mode_frequency" with fixed value 3.140e+02.
> Coordinate frequency of the mode of radiative asymmetry in the
photosphere that is assumed to generate the pulsed signal [Hz].
No parameters supplied... empty subspace created.
Checking likelihood and prior evaluation before commencing sampling...
Not using ``allclose`` function from NumPy.
Using fallback implementation instead.
Checking closeness of likelihood arrays:
-3.1603740790e+04 | -3.1603740790e+04 .....
Closeness evaluated.
Log-likelihood value checks passed on root process.
Checks passed.
## Preparing for SBI
First we follow procedures for synthetic data generation that will be used by SBI to generate training data. The `SBI_wrapper` module consists of multiple classes and functions, including the usual data synthesis process but with some extended functionalities.
```python
_data = SBI_wrapper.SynthesiseData(main.Instrument.channels,
main.data.phases,
0,
len(main.Instrument.channels) - 1)
```
Setting channels for event data...
Channels set.
```python
main.CustomSignal.synthesise = SBI_wrapper.synthesise
signal = main.CustomSignal(data = _data,
instrument = main.Instrument,
background = None,
interstellar = None,
prefix='instr',
cache=True)
```
Creating parameter:
> Named "phase_shift" with fixed value 0.000e+00.
> The phase shift for the signal, a periodic parameter [cycles].
No data... can synthesise data but cannot evaluate a likelihood function.
In the code block above, the background has been set to `None`.
In its current form, it's recommended to either fix the background or use a parameterized functional background model since the `default_background_marginalisation` is only utilized during the likelihood computation process that SBI skips.
In principle, one may leave the background free and then allow the neural network to simply learn what the background is for any given dataset. However, performance in such a scenario has not been tested, and one may expect that to introduce too much degeneracy in the parameter space for SBI to work meaningfully.
In the code block below, we are using a `Custom_SBI_Likelihood` that inherits from the `xpsi.Likelihood` class and modifies the `_driver` and `synthesise` methods to return `model_flux`, the synthesised signal, which then constitutes the training dataset.
```python
likelihood = SBI_wrapper.Custom_SBI_Likelihood(star = main.star,
signals = signal,
prior = main.prior,
num_energies = 64,
threads = 1)
print(likelihood)
```
Free parameters
---------------
mass: Gravitational mass [solar masses].
radius: Coordinate equatorial radius [km].
distance: Earth distance [kpc].
cos_inclination: Cosine of Earth inclination to rotation axis.
p__phase_shift: The phase of the hot region, a periodic parameter [cycles].
p__super_colatitude: The colatitude of the centre of the superseding region [radians].
p__super_radius: The angular radius of the (circular) superseding region [radians].
p__super_temperature: log10(superseding region effective temperature [K]).
During training, SBI learns the joint data and model space $P(\theta; D)$, where the model is defined by the free parameters (the ones shown above in this example). In this process, it essentially also approximates the likelihood $P(D|\theta)$ without explicitly calculating it.
## Training SBI
With our prerequisites in place, we first instantiate our `simulator` and `prior` for SBI. To do so, we need to inform the `sbi` package about our pulse profile generator (defined by `SBI_wrapper.xpsi_wrappers.simulator`) and prior distributions (defined by `SBI_wrapper.xpsi_wrappers.sample` and `SBI_wrapper.xpsi_wrappers.log_prob`) for our free model parameters. These methods are in compliance with the requirements of the `sbi` package for training.
The `SBI_wrapper.xpsi_wrappers.simulator` calls the `synthesise` method that we bound to `main.CustomSignal` above, which in turn requires information about the `exposure_time` (or `expected_source_counts`), `nchannels` and `nphases`.
The `prepare_for_sbi` functionality of `sbi` then checks whether its internal requirements are met, reshapes and typecasts them into usable products for training.
```python
instr_kwargs = dict(exposure_time = 1.0E+06, # alternatively input
# expected_source_counts
nchannels = len(main.Instrument.channels),
nphases = len(main.data.phases))
wrapped = xpsi_wrappers(prior = main.prior,
likelihood = likelihood,
instr_kwargs = instr_kwargs,
train_using_CNNs = True)
simulator, prior = prepare_for_sbi(wrapped.simulator, wrapped)
```
/tmp/ipykernel_902895/4002819185.py:11: DeprecationWarning: This method is deprecated as of sbi version v0.23.0. and will be removed in a future release.Please use `process_prior` and `process_simulator` in the future.
simulator, prior = prepare_for_sbi(wrapped.simulator, wrapped)
/home/lmauviard/anaconda3/envs/xpsi_py3/lib/python3.11/site-packages/sbi/utils/user_input_checks_utils.py:28: UserWarning: No prior bounds were passed, consider passing lower_bound and / or upper_bound if your prior has bounded support.
self.custom_support = build_support(lower_bound, upper_bound)
/home/lmauviard/anaconda3/envs/xpsi_py3/lib/python3.11/site-packages/sbi/utils/user_input_checks_utils.py:30: UserWarning: Prior is lacking mean attribute, estimating prior mean from samples.
self._set_mean_and_variance()
Drawing samples from the joint prior...
Samples drawn.
Drawing samples from the joint prior...
Samples drawn.
Drawing samples from the joint prior...
Samples drawn.
Drawing samples from the joint prior...
Samples drawn.
Drawing samples from the joint prior...
Samples drawn.
Drawing samples from the joint prior...
Samples drawn.
Drawing samples from the joint prior...
Samples drawn.
Drawing samples from the joint prior...
Samples drawn.
Drawing samples from the joint prior...
Samples drawn.
Drawing samples from the joint prior...
Samples drawn.
/home/lmauviard/anaconda3/envs/xpsi_py3/lib/python3.11/site-packages/sbi/utils/user_input_checks_utils.py:30: UserWarning: Prior is lacking variance attribute, estimating prior variance from samples.
self._set_mean_and_variance()
Now, we generate training samples. Here we are using 1000 samples for training. This is insufficient for the complexity of the given model and is only used for tutorial purposes.
(Tip: Save the training samples for future use. Not done here.)
```python
theta, x = simulate_for_sbi(simulator,
proposal=prior,
num_simulations=1000, # Reduce for quick testing
num_workers=1)
```
Drawing samples from the joint prior...
Samples drawn.
0%| | 0/1000 [00:00<?, ?it/s]
Phase-energy-resolved pulse profiles are equivalent to 2D (grayscale) images. Therefore, Convolutional Neural Networks (CNNs) can be utilized, in principle, to better capture neighbouring information between adjacent sets of pixels. `xpsi.utilities.Example_CNNs` provides a set of CNN architectures, including pre-trained architectures from PyTorch `ResNet` and `ResNeXt`.
Below we use the relatively simple CNN architecture `C2P1_FC1` to pass onto `sbi` as an `embedding_net` for feature retrieval during training. One may also choose to not use any `embedding_net` and simply pass a flattened 1D pulse profile (or a 1D bolometric pulse profile).
```python
embedding_net = CNNs.C2P1_FC1(nchannels = len(main.Instrument.channels),
nphases = len(main.data.phases) - 1)
```
We now instantiate the neural posterior estimator `SNPE`, using our sbi-compatible `prior` and a `density_estimator` where we specify NN-architecture for this. In this notebook, we have used a Masked Autoregressive Flow (MAF) model architecture. The `sbi` package documentation details other model architectures that we can use in this regard.
```python
neural_posterior = posterior_nn(model="maf", # Masked Autoregressive Flow
embedding_net=embedding_net, # skip if not using CNN
hidden_features=10,
num_transforms=2)
device = "cuda" if torch.cuda.is_available() else "cpu" # cuda for training on Nvidia GPUs
inference = SNPE(prior=prior, density_estimator=neural_posterior, device=device)
```
Drawing samples from the joint prior...
Samples drawn.
Time to train! 🏃♂️💪
```python
density_estimator = inference.append_simulations(theta, x).train()
```
/home/lmauviard/anaconda3/envs/xpsi_py3/lib/python3.11/site-packages/sbi/inference/trainers/npe/npe_base.py:157: UserWarning: Data x has device 'cuda:0'. Moving x to the data_device 'cuda'. Training will proceed on device 'cuda'.
theta, x = validate_theta_and_x(
/home/lmauviard/anaconda3/envs/xpsi_py3/lib/python3.11/site-packages/sbi/inference/trainers/npe/npe_base.py:157: UserWarning: Parameters theta has device 'cuda:0'. Moving theta to the data_device 'cuda'. Training will proceed on device 'cuda'.
theta, x = validate_theta_and_x(
Neural network successfully converged after 949 epochs.
Hooray! 🎉
Note that the number of epochs required for training can vary quite a lot upon re-running this notebook since we are using too few training samples.
## Plotting posteriors
```python
posterior = inference.build_posterior(density_estimator)
```
Drawing samples from the joint prior...
Samples drawn.
Drawing samples from the joint prior...
Samples drawn.
Finally, we plot the posteriors corresponding to an input test observation. We draw 10k samples from the posterior to plot the distribution, using the native plotter in the `sbi` package.
```python
p = [1.4, # mass
12, # radius
1.0, # distance
math.cos(np.radians(60)), # inclination
0.5, # phase shift
np.radians(70), # super colatitude
0.75, # super radius
6.7] # super temperature
test_observation = torch.Tensor(likelihood.synthesise(p, force=True, instr=instr_kwargs))
if torch.cuda.is_available():
test_observation = test_observation.cuda()
samples = posterior.sample((10000,), x=test_observation)
_ = analysis.pairplot(samples.cpu(),
limits=[[1, 3.0], [3, 20], [0.0, 2.0], [0.0, math.pi], [0.0, 1.0], [0.0, math.pi], [0.0, math.pi/2.0], [5.0, 7.0]],
figsize=(10, 10),
points=np.array(p),
labels=[r'M$_{\odot}$', r'R [km]', r'D [pc]', r'cos $i$', r'$\phi$ [cycles]', r'$\theta$ [rad]', r'$\zeta$ [rad]', r'T [K]'])
```
Drawing 10000 posterior samples: 0%| | 0/10000 [00:00<?, ?it/s]

```python
```
|
xpsi-groupREPO_NAMExpsiPATH_START.@xpsi_extracted@xpsi-main@docs@source@x_p_sbi.ipynb@.PATH_END.py
|
{
"filename": "_bgcolorsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/contour/hoverlabel/_bgcolorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Source-reference validator for the ``bgcolorsrc`` property of
    ``contour.hoverlabel``."""

    def __init__(
        self, plotly_name="bgcolorsrc", parent_name="contour.hoverlabel", **kwargs
    ):
        # Default edit type is "none" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@contour@hoverlabel@_bgcolorsrc.py@.PATH_END.py
|
{
"filename": "matfuncs.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/linalg/matfuncs.py",
"type": "Python"
}
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.linalg` namespace for importing the functions
# included below.
from scipy._lib.deprecation import _sub_module_deprecation
# Public names re-exported for backwards compatibility.  None of them is
# defined in this module; accessing any of them goes through the module-level
# ``__getattr__`` below, which emits a deprecation warning and forwards to the
# private implementation module.
__all__ = [  # noqa: F822
    'expm', 'cosm', 'sinm', 'tanm', 'coshm', 'sinhm',
    'tanhm', 'logm', 'funm', 'signm', 'sqrtm',
    'expm_frechet', 'expm_cond', 'fractional_matrix_power',
    'khatri_rao', 'norm', 'solve', 'inv', 'svd', 'schur', 'rsf2csf'
]


def __dir__():
    # Advertise the deprecated names to dir()/tab-completion (PEP 562).
    return __all__


def __getattr__(name):
    # Module-level __getattr__ (PEP 562): warn and forward attribute access
    # to the private ``scipy.linalg._matfuncs`` implementation.
    return _sub_module_deprecation(sub_package="linalg", module="matfuncs",
                                   private_modules=["_matfuncs"], all=__all__,
                                   attribute=name)
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@linalg@matfuncs.py@.PATH_END.py
|
{
"filename": "vi_posterior.py",
"repo_name": "sbi-dev/sbi",
"repo_path": "sbi_extracted/sbi-main/sbi/inference/posteriors/vi_posterior.py",
"type": "Python"
}
|
# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed
# under the Apache License Version 2.0, see <https://www.apache.org/licenses/>
import copy
from copy import deepcopy
from typing import Callable, Dict, Iterable, Optional, Union
import numpy as np
import torch
from torch import Tensor
from torch.distributions import Distribution
from tqdm.auto import tqdm
from sbi.inference.posteriors.base_posterior import NeuralPosterior
from sbi.inference.potentials.base_potential import BasePotential
from sbi.samplers.vi import (
adapt_variational_distribution,
check_variational_distribution,
get_VI_method,
get_flow_builder,
get_quality_metric,
make_object_deepcopy_compatible,
move_all_tensor_to_device,
)
from sbi.sbi_types import (
PyroTransformedDistribution,
Shape,
TorchDistribution,
TorchTensor,
TorchTransform,
)
from sbi.utils.sbiutils import mcmc_transform
from sbi.utils.torchutils import atleast_2d_float32_tensor, ensure_theta_batched
class VIPosterior(NeuralPosterior):
r"""Provides VI (Variational Inference) to sample from the posterior.<br/><br/>
SNLE or SNRE train neural networks to approximate the likelihood(-ratios).
`VIPosterior` allows to learn a tractable variational posterior $q(\theta)$ which
approximates the true posterior $p(\theta|x_o)$. After this second training stage,
we can produce approximate posterior samples, by just sampling from q with no
additional cost. For additional information see [1] and [2].<br/><br/>
References:<br/>
[1] Variational methods for simulation-based inference, Manuel Glöckler, Michael
Deistler, Jakob Macke, 2022, https://openreview.net/forum?id=kZ0UYdhqkNY<br/>
[2] Sequential Neural Posterior and Likelihood Approximation, Samuel Wiqvist, Jes
Frellsen, Umberto Picchini, 2021, https://arxiv.org/abs/2102.06522
"""
def __init__(
    self,
    potential_fn: Union[Callable, BasePotential],
    prior: Optional[TorchDistribution] = None,
    q: Union[str, PyroTransformedDistribution, "VIPosterior", Callable] = "maf",
    theta_transform: Optional[TorchTransform] = None,
    vi_method: str = "rKL",
    device: str = "cpu",
    x_shape: Optional[torch.Size] = None,
    parameters: Iterable = [],
    modules: Iterable = [],
):
    """
    Args:
        potential_fn: The potential function from which to draw samples. Must be a
            `BasePotential` or a `Callable` which takes `theta` and `x_o` as inputs.
        prior: This is the prior distribution. Note that this is only
            used to check/construct the variational distribution or within some
            quality metrics. Please make sure that this matches with the prior
            within the potential_fn. If `None` is given, we will try to infer it
            from potential_fn or q, if this fails we raise an Error.
        q: Variational distribution, either string, `TransformedDistribution`, or a
            `VIPosterior` object. This specifies a parametric class of distribution
            over which the best possible posterior approximation is searched. For
            string input, we currently support [nsf, scf, maf, mcf, gaussian,
            gaussian_diag]. You can also specify your own variational family by
            passing a pyro `TransformedDistribution`.
            Additionally, we allow a `Callable`, which allows you the pass a
            `builder` function, which if called returns a distribution. This may be
            useful for setting the hyperparameters e.g. `num_transforms` within the
            `get_flow_builder` method specifying the number of transformations
            within a normalizing flow. If q is already a `VIPosterior`, then the
            arguments will be copied from it (relevant for multi-round training).
        theta_transform: Maps form prior support to unconstrained space. The
            inverse is used here to ensure that the posterior support is equal to
            that of the prior.
        vi_method: This specifies the variational methods which are used to fit q to
            the posterior. We currently support [rKL, fKL, IW, alpha]. Note that
            some of the divergences are `mode seeking` i.e. they underestimate
            variance and collapse on multimodal targets (`rKL`, `alpha` for alpha >
            1) and some are `mass covering` i.e. they overestimate variance but
            typically cover all modes (`fKL`, `IW`, `alpha` for alpha < 1).
        device: Training device, e.g., `cpu`, `cuda` or `cuda:0`. We will ensure
            that all other objects are also on this device.
        x_shape: Deprecated, should not be passed.
        parameters: List of parameters of the variational posterior. This is only
            required for user-defined q i.e. if q does not have a `parameters`
            attribute.
        modules: List of modules of the variational posterior. This is only
            required for user-defined q i.e. if q does not have a `modules`
            attribute.
    """
    # NOTE(review): `parameters`/`modules` use mutable default arguments;
    # they are only read here, but callers should not rely on the shared list.
    super().__init__(potential_fn, theta_transform, device, x_shape=x_shape)
    # Especially the prior may be on another device -> move it...
    self._device = device
    self.potential_fn.device = device
    move_all_tensor_to_device(self.potential_fn, device)
    # Get prior and previous builds.
    # Resolution order: explicit argument > potential_fn.prior > q._prior.
    if prior is not None:
        self._prior = prior
    elif hasattr(self.potential_fn, "prior") and isinstance(
        self.potential_fn.prior, Distribution
    ):
        self._prior = self.potential_fn.prior
    elif isinstance(q, VIPosterior) and isinstance(q._prior, Distribution):
        self._prior = q._prior
    else:
        raise ValueError(
            "We could not find a suitable prior distribution within `potential_fn` "
            "or `q` (if a VIPosterior is given). Please explicitly specify a prior."
        )
    move_all_tensor_to_device(self._prior, device)
    self._optimizer = None  # built lazily by the selected VI method
    # In contrast to MCMC we want to project into constrained space.
    if theta_transform is None:
        self.link_transform = mcmc_transform(self._prior).inv
    else:
        self.link_transform = theta_transform.inv
    # This will set the variational distribution and VI method
    self.set_q(q, parameters=parameters, modules=modules)
    self.set_vi_method(vi_method)
    self._purpose = (
        "It provides Variational inference to .sample() from the posterior and "
        "can evaluate the _normalized_ posterior density with .log_prob()."
    )
@property
def q(self) -> Distribution:
    """Returns the variational posterior.

    This is the distribution q(theta) currently approximating the posterior;
    assigning to this property re-runs `set_q` with default
    parameters/modules.
    """
    return self._q
@q.setter
def q(
    self,
    q: Union[str, Distribution, "VIPosterior", Callable],
) -> None:
    """Sets the variational distribution. If the distribution does not admit access
    through `parameters` and `modules` function, please use `set_q` if you want to
    explicitly specify the parameters and modules.

    Args:
        q: Variational distribution, either string, distribution, or a VIPosterior
            object. This specifies a parametric class of distribution over which
            the best possible posterior approximation is searched. For string input,
            we currently support [nsf, scf, maf, mcf, gaussian, gaussian_diag]. Of
            course, you can also specify your own variational family by passing a
            `parameterized` distribution object i.e. a torch.distributions
            Distribution with methods `parameters` returning an iterable of all
            parameters (you can pass them within the parameters/modules attribute).
            Additionally, we allow a `Callable`, which allows you to pass a
            `builder` function, which if called returns a distribution. This may be
            useful for setting the hyperparameters e.g. `num_transforms:int` by
            using the `get_flow_builder` method specifying the hyperparameters. If q
            is already a `VIPosterior`, then the arguments will be copied from it
            (relevant for multi-round training).
    """
    # Delegate to set_q with default (empty) parameters/modules.
    self.set_q(q)
def set_q(
    self,
    q: Union[str, PyroTransformedDistribution, "VIPosterior", Callable],
    parameters: Iterable = [],
    modules: Iterable = [],
) -> None:
    """Defines the variational family.

    You can specify over which parameters/modules we optimize. This is required for
    custom distributions which e.g. do not inherit nn.Modules or has the function
    `parameters` or `modules` to give direct access to trainable parameters.
    Further, you can pass a function, which constructs a variational distribution
    if called.

    Args:
        q: Variational distribution, either string, distribution, or a VIPosterior
            object. This specifies a parametric class of distribution over which
            the best possible posterior approximation is searched. For string input,
            we currently support [nsf, scf, maf, mcf, gaussian, gaussian_diag]. Of
            course, you can also specify your own variational family by passing a
            `parameterized` distribution object i.e. a torch.distributions
            Distribution with methods `parameters` returning an iterable of all
            parameters (you can pass them within the parameters/modules attribute).
            Additionally, we allow a `Callable`, which allows you to pass a
            `builder` function, which if called returns a distribution. This may be
            useful for setting the hyperparameters e.g. `num_transforms:int` by
            using the `get_flow_builder` method specifying the hyperparameters. If q
            is already a `VIPosterior`, then the arguments will be copied from it
            (relevant for multi-round training).
        parameters: List of parameters associated with the distribution object.
        modules: List of modules associated with the distribution object.
    """
    # Remember the raw arguments so the posterior can be rebuilt/copied later.
    self._q_arg = (q, parameters, modules)
    if isinstance(q, Distribution):
        # Case 1: a user-supplied distribution -- adapt it to the prior
        # support and wrap it so deepcopy works.
        q = adapt_variational_distribution(
            q,
            self._prior,
            self.link_transform,
            parameters=parameters,
            modules=modules,
        )
        make_object_deepcopy_compatible(q)
        self_custom_q_init_cache = deepcopy(q)
        # The build fn always returns a copy of the original user object.
        self._q_build_fn = lambda *args, **kwargs: self_custom_q_init_cache
        self._trained_on = None
    elif isinstance(q, (str, Callable)):
        # Case 2: a named flow family or a builder callable.
        if isinstance(q, str):
            self._q_build_fn = get_flow_builder(q)
        else:
            self._q_build_fn = q
        q = self._q_build_fn(
            self._prior.event_shape,
            self.link_transform,
            device=self._device,
        )
        make_object_deepcopy_compatible(q)
        self._trained_on = None
    elif isinstance(q, VIPosterior):
        # Case 3: copy state from an existing VIPosterior (multi-round).
        self._q_build_fn = q._q_build_fn
        self._trained_on = q._trained_on
        self.vi_method = q.vi_method  # type: ignore
        self._device = q._device
        self._prior = q._prior
        self._x = q._x
        self._q_arg = q._q_arg
        make_object_deepcopy_compatible(q.q)
        q = deepcopy(q.q)
    move_all_tensor_to_device(q, self._device)
    assert isinstance(
        q, Distribution
    ), """Something went wrong when initializing the variational distribution.
Please create an issue on github https://github.com/mackelab/sbi/issues"""
    check_variational_distribution(q, self._prior)
    self._q = q
@property
def vi_method(self) -> str:
    """Variational inference method e.g. one of [rKL, fKL, IW, alpha].

    Set via `set_vi_method` (or this property's setter).
    """
    return self._vi_method
@vi_method.setter
def vi_method(self, method: str) -> None:
    """See `set_vi_method`. Provided so `posterior.vi_method = "fKL"` works."""
    self.set_vi_method(method)
def set_vi_method(self, method: str) -> "VIPosterior":
    """Sets variational inference method.

    Args:
        method: One of [rKL, fKL, IW, alpha].

    Returns:
        `VIPosterior` for chainable calls.
    """
    self._vi_method = method
    # Resolve the divergence-specific optimizer builder now; it is
    # instantiated later (e.g. during training).
    self._optimizer_builder = get_VI_method(method)
    return self
def sample(
self,
sample_shape: Shape = torch.Size(),
x: Optional[Tensor] = None,
**kwargs,
) -> Tensor:
"""Samples from the variational posterior distribution.
Args:
sample_shape: Shape of samples
Returns:
Samples from posterior.
"""
x = self._x_else_default_x(x)
if self._trained_on is None or (x != self._trained_on).all():
raise AttributeError(
f"The variational posterior was not fit on the specified `default_x` "
f"{x}. Please train using `posterior.train()`."
)
samples = self.q.sample(torch.Size(sample_shape))
return samples.reshape((*sample_shape, samples.shape[-1]))
def sample_batched(
self,
sample_shape: Shape,
x: Tensor,
max_sampling_batch_size: int = 10000,
show_progress_bars: bool = True,
) -> Tensor:
raise NotImplementedError(
"Batched sampling is not implemented for VIPosterior. \
Alternatively you can use `sample` in a loop \
[posterior.sample(theta, x_o) for x_o in x]."
)
def log_prob(
self,
theta: Tensor,
x: Optional[Tensor] = None,
track_gradients: bool = False,
) -> Tensor:
r"""Returns the log-probability of theta under the variational posterior.
Args:
theta: Parameters
track_gradients: Whether the returned tensor supports tracking gradients.
This can be helpful for e.g. sensitivity analysis but increases memory
consumption.
Returns:
`len($\theta$)`-shaped log-probability.
"""
x = self._x_else_default_x(x)
if self._trained_on is None or (x != self._trained_on).all():
raise AttributeError(
f"The variational posterior was not fit using observation {x}.\
Please train."
)
with torch.set_grad_enabled(track_gradients):
theta = ensure_theta_batched(torch.as_tensor(theta))
return self.q.log_prob(theta)
    def train(
        self,
        x: Optional[TorchTensor] = None,
        n_particles: int = 256,
        learning_rate: float = 1e-3,
        gamma: float = 0.999,
        max_num_iters: int = 2000,
        min_num_iters: int = 10,
        clip_value: float = 10.0,
        warm_up_rounds: int = 100,
        retrain_from_scratch: bool = False,
        reset_optimizer: bool = False,
        show_progress_bar: bool = True,
        check_for_convergence: bool = True,
        quality_control: bool = True,
        quality_control_metric: str = "psis",
        **kwargs,
    ) -> "VIPosterior":
        """This method trains the variational posterior.

        Args:
            x: The observation.
            n_particles: Number of samples to approximate expectations within the
                variational bounds. The larger the more accurate are gradient
                estimates, but the computational cost per iteration increases.
            learning_rate: Learning rate of the optimizer.
            gamma: Learning rate decay per iteration. We use an exponential decay
                scheduler.
            max_num_iters: Maximum number of iterations.
            min_num_iters: Minimum number of iterations.
            clip_value: Gradient clipping value, decreasing may help if you see invalid
                values.
            warm_up_rounds: Initialize the posterior as the prior.
            retrain_from_scratch: Retrain the variational distributions from scratch.
            reset_optimizer: Reset the divergence optimizer
            show_progress_bar: If any progress report should be displayed.
            check_for_convergence: If True, training may stop early once converged.
            quality_control: If False quality control is skipped.
            quality_control_metric: Which metric to use for evaluating the quality.
            kwargs: Hyperparameters check corresponding `DivergenceOptimizer` for detail
                eps: Determines sensitivity of convergence check.
                retain_graph: Boolean which decides whether to retain the computation
                    graph. This may be required for some `exotic` user-specified q's.
                optimizer: A PyTorch Optimizer class e.g. Adam or SGD. See
                    `DivergenceOptimizer` for details.
                scheduler: A PyTorch learning rate scheduler. See
                    `DivergenceOptimizer` for details.
                alpha: Only used if vi_method=`alpha`. Determines the alpha divergence.
                K: Only used if vi_method=`IW`. Determines the number of importance
                    weighted particles.
                stick_the_landing: If one should use the STL estimator (only for rKL,
                    IW, alpha).
                dreg: If one should use the DREG estimator (only for rKL, IW, alpha).
                weight_transform: Callable applied to importance weights (only for fKL)

        Returns:
            VIPosterior: `VIPosterior` (can be used to chain calls).
        """
        # Update optimizer with current arguments.
        # NOTE(review): `locals()` also contains `self`, `x` and `kwargs`;
        # presumably `update` only picks the hyperparameters it knows — confirm.
        if self._optimizer is not None:
            self._optimizer.update({**locals(), **kwargs})

        # Init q and the optimizer if necessary
        if retrain_from_scratch:
            self.q = self._q_build_fn()  # type: ignore
            self._optimizer = self._optimizer_builder(
                self.potential_fn,
                self.q,
                lr=learning_rate,
                clip_value=clip_value,
                gamma=gamma,
                n_particles=n_particles,
                prior=self._prior,
                **kwargs,
            )

        # Rebuild the optimizer if it is missing, explicitly reset, or of a
        # different type than the current `vi_method` requires.
        if (
            reset_optimizer
            or self._optimizer is None
            or not isinstance(self._optimizer, self._optimizer_builder)
        ):
            self._optimizer = self._optimizer_builder(
                self.potential_fn,
                self.q,
                lr=learning_rate,
                clip_value=clip_value,
                gamma=gamma,
                n_particles=n_particles,
                prior=self._prior,
                **kwargs,
            )

        # Check context
        x = atleast_2d_float32_tensor(self._x_else_default_x(x)).to(  # type: ignore
            self._device
        )

        # Skip the warm-up below if we already trained on this very observation.
        already_trained = self._trained_on is not None and (x == self._trained_on).all()

        # Optimize
        optimizer = self._optimizer
        optimizer.to(self._device)
        optimizer.reset_loss_stats()

        if show_progress_bar:
            iters = tqdm(range(max_num_iters))
        else:
            iters = range(max_num_iters)

        # Warmup before training
        if reset_optimizer or (not optimizer.warm_up_was_done and not already_trained):
            if show_progress_bar:
                iters.set_description(  # type: ignore
                    "Warmup phase, this may take a few seconds..."
                )
            optimizer.warm_up(warm_up_rounds)

        for i in iters:
            optimizer.step(x)
            mean_loss, std_loss = optimizer.get_loss_stats()
            # Update progress bar
            if show_progress_bar:
                assert isinstance(iters, tqdm)
                iters.set_description(  # type: ignore
                    f"Loss: {np.round(float(mean_loss), 2)}"
                    f"Std: {np.round(float(std_loss), 2)}"
                )
            # Check for convergence
            if check_for_convergence and i > min_num_iters and optimizer.converged():
                if show_progress_bar:
                    print(f"\nConverged with loss: {np.round(float(mean_loss), 2)}")
                break
        # Training finished:
        self._trained_on = x

        # Evaluate quality
        if quality_control:
            try:
                self.evaluate(quality_control_metric=quality_control_metric)
            except Exception as e:
                print(
                    f"Quality control showed a low quality of the variational "
                    f"posterior. We are automatically retraining the variational "
                    f"posterior from scratch with a smaller learning rate. "
                    f"Alternatively, if you want to skip quality control, please "
                    f"retrain with `VIPosterior.train(..., quality_control=False)`. "
                    f"\nThe error that occured is: {e}"
                )
                # NOTE(review): this recursion has no explicit depth limit; it
                # relies on the shrinking learning rate to eventually pass the
                # quality check — confirm.
                self.train(
                    learning_rate=learning_rate * 0.1,
                    retrain_from_scratch=True,
                    reset_optimizer=True,
                )

        return self
def evaluate(self, quality_control_metric: str = "psis", N: int = int(5e4)) -> None:
"""This function will evaluate the quality of the variational posterior
distribution. We currently support two different metrics of type `psis`, which
checks the quality based on the tails of importance weights (there should not be
much with a large one), or `prop` which checks the proportionality between q
and potential_fn.
NOTE: In our experience `prop` is sensitive to distinguish ``good`` from ``ok``
whereas `psis` is more sensitive in distinguishing `very bad` from `ok`.
Args:
quality_control_metric: The metric of choice, we currently support [psis,
prop, prop_prior].
N: Number of samples which is used to evaluate the metric.
"""
quality_control_fn, quality_control_msg = get_quality_metric(
quality_control_metric
)
metric = round(float(quality_control_fn(self, N=N)), 3)
print(f"Quality Score: {metric} " + quality_control_msg)
def map(
self,
x: Optional[TorchTensor] = None,
num_iter: int = 1_000,
num_to_optimize: int = 100,
learning_rate: float = 0.01,
init_method: Union[str, TorchTensor] = "proposal",
num_init_samples: int = 10_000,
save_best_every: int = 10,
show_progress_bars: bool = False,
force_update: bool = False,
) -> Tensor:
r"""Returns the maximum-a-posteriori estimate (MAP).
The method can be interrupted (Ctrl-C) when the user sees that the
log-probability converges. The best estimate will be saved in `self._map` and
can be accessed with `self.map()`. The MAP is obtained by running gradient
ascent from a given number of starting positions (samples from the posterior
with the highest log-probability). After the optimization is done, we select the
parameter set that has the highest log-probability after the optimization.
Warning: The default values used by this function are not well-tested. They
might require hand-tuning for the problem at hand.
For developers: if the prior is a `BoxUniform`, we carry out the optimization
in unbounded space and transform the result back into bounded space.
Args:
x: Deprecated - use `.set_default_x()` prior to `.map()`.
num_iter: Number of optimization steps that the algorithm takes
to find the MAP.
learning_rate: Learning rate of the optimizer.
init_method: How to select the starting parameters for the optimization. If
it is a string, it can be either [`posterior`, `prior`], which samples
the respective distribution `num_init_samples` times. If it is a
tensor, the tensor will be used as init locations.
num_init_samples: Draw this number of samples from the posterior and
evaluate the log-probability of all of them.
num_to_optimize: From the drawn `num_init_samples`, use the
`num_to_optimize` with highest log-probability as the initial points
for the optimization.
save_best_every: The best log-probability is computed, saved in the
`map`-attribute, and printed every `save_best_every`-th iteration.
Computing the best log-probability creates a significant overhead
(thus, the default is `10`.)
show_progress_bars: Whether to show a progressbar during sampling from
the posterior.
force_update: Whether to re-calculate the MAP when x is unchanged and
have a cached value.
log_prob_kwargs: Will be empty for SNLE and SNRE. Will contain
{'norm_posterior': True} for SNPE.
Returns:
The MAP estimate.
"""
self.proposal = self.q
return super().map(
x=x,
num_iter=num_iter,
num_to_optimize=num_to_optimize,
learning_rate=learning_rate,
init_method=init_method,
num_init_samples=num_init_samples,
save_best_every=save_best_every,
show_progress_bars=show_progress_bars,
force_update=force_update,
)
def __deepcopy__(self, memo: Optional[Dict] = None) -> "VIPosterior":
"""This method is called when using `copy.deepcopy` on the object.
It defines how the object is copied. We need to overwrite this method, since the
default implementation does use __getstate__ and __setstate__ which we overwrite
to enable pickling (and in particular the necessary modifications are
incompatible deep copying).
Args:
memo (Optional[Dict], optional): Deep copy internal memo. Defaults to None.
Returns:
VIPosterior: Deep copy of the VIPosterior.
"""
if memo is None:
memo = {}
# Create a new instance of the class
cls = self.__class__
result = cls.__new__(cls)
# Add to memo
memo[id(self)] = result
# Copy attributes
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
return result
def __getstate__(self) -> Dict:
"""This method is called when pickling the object.
It defines what is pickled. We need to overwrite this method, since some parts
due not support pickle protocols (e.g. due to local functions, etc.).
Returns:
Dict: All attributes of the VIPosterior.
"""
self._optimizer = None
self.__deepcopy__ = None # type: ignore
self._q_build_fn = None
self._q.__deepcopy__ = None # type: ignore
return self.__dict__
def __setstate__(self, state_dict: Dict):
"""This method is called when unpickling the object.
Especially, we need to restore the removed attributes and ensure that the object
e.g. remains deep copy compatible.
Args:
state_dict: Given state dictionary, we will restore the object from it.
"""
self.__dict__ = state_dict
q = deepcopy(self._q)
# Restore removed attributes
self.set_q(*self._q_arg)
self._q = q
make_object_deepcopy_compatible(self)
make_object_deepcopy_compatible(self.q)
|
sbi-devREPO_NAMEsbiPATH_START.@sbi_extracted@sbi-main@sbi@inference@posteriors@vi_posterior.py@.PATH_END.py
|
{
"filename": "plot3_fit_Trot-Tvib-molfrac.py",
"repo_name": "radis/radis",
"repo_path": "radis_extracted/radis-master/examples/3_Fitting/plot3_fit_Trot-Tvib-molfrac.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
================================================================================
Example #3: non-equilibrium spectrum (Tvib, Trot, x_CO)
================================================================================
With the new fitting module introduced in :py:func:`~radis.tools.new_fitting.fit_spectrum` function,
and in the example of Tgas fitting using new fitting module, we can see its 1-temperature fitting
performance for equilibrium condition.
This example features how new fitting module can fit non-equilibrium spectrum, with multiple fit
parameters, such as vibrational/rotational temperatures, or mole fraction, etc.
This is a real fitting case introduced in Grimaldi's thesis: https://doi.org/10.2514/1.T6768 .
This case features a CO molecule emitting over a wide spectral range. It also includes
a user-defined trapezoidal slit function, which accounts for the distortion (dispersion) of the slit
resulting from the experimental parameters and the spectrometer dispersion parameters
during the experiment.
"""
import astropy.units as u
import numpy as np
from radis import load_spec
from radis.test.utils import getTestFile
from radis.tools.new_fitting import fit_spectrum
# The following function and parameters reproduce the slit function in Dr. Corenti Grimaldi's thesis.
def slit_dispersion(w):
    """Linear dispersion (nm/mm) of the spectrometer at wavelength ``w`` (nm).

    Reproduces the slit-dispersion model used in Dr. Corentin Grimaldi's
    thesis for a 300 gr/mm grating in first order with a 750 mm focal length.
    """
    incidence_deg = -6.33  # grating incidence angle, degrees
    focal_mm = 750  # focal length, mm
    grooves_per_mm = 300
    order = 1
    # Degrees -> radians with the sign flipped, as in the original derivation.
    angle = incidence_deg * (-2 * np.pi / 360)
    groove_spacing = 1e-3 / grooves_per_mm  # m
    root = np.sqrt((2 * groove_spacing / order / (w * 1e-9) * np.cos(angle)) ** 2 - 1)
    return w / (2 * focal_mm) * (-np.tan(angle) + root)  # nm/mm
# Slit geometry in the slit plane, projected into wavelength space below.
slit = 1500  # µm
pitch = 20  # µm
top_slit_um = slit - pitch  # µm
base_slit_um = slit + pitch  # µm

center_slit = 5090  # slit center position used to evaluate the dispersion
dispersion = slit_dispersion(center_slit)  # nm/mm

top_slit_nm = top_slit_um * 1e-3 * dispersion
# 1.33: extra widening applied to the trapezoid base — TODO confirm origin
base_slit_nm = base_slit_um * 1e-3 * dispersion * 1.33

# -------------------- Step 1. Load experimental spectrum -------------------- #

specName = (
    "Corentin_0_100cm_DownSampled_20cm_10pctCO2_1-wc-gw450-gr300-sl1500-acc5000-.spec"
)
# Apply a -0.6 nm offset to the loaded experimental spectrum.
s_experimental = load_spec(getTestFile(specName)).offset(-0.6, "nm")
# -------------------- Step 2. Fill ground-truths and data -------------------- #

# Experimental conditions which will be used for spectrum modeling. Basically, these are known ground-truths.
experimental_conditions = {
    "molecule": "CO",  # Molecule ID
    "isotope": "1,2,3",  # Isotope ID, can have multiple at once
    "wmin": 2270
    * u.nm,  # Starting wavelength/wavenumber to be cropped out from the original experimental spectrum.
    "wmax": 2400 * u.nm,  # Ending wavelength/wavenumber for the cropping range.
    "pressure": 1.01325
    * u.bar,  # Total pressure of gas, in "bar" unit by default, but you can also use Astropy units.
    "path_length": 1
    / 195
    * u.cm,  # Experimental path length, in "cm" unit by default, but you can also use Astropy units.
    "slit": {  # Experimental slit. In simple form: "[value] [unit]", i.e. "-0.2 nm". In complex form: a dict with parameters of apply_slit()
        "slit_function": (top_slit_nm, base_slit_nm),
        "unit": "nm",
        "shape": "trapezoidal",
        "center_wavespace": center_slit,
        "slit_dispersion": slit_dispersion,
        "inplace": False,
    },
    "cutoff": 0,  # (RADIS native) Discard linestrengths that are lower than this to reduce calculation time, in cm-1.
    "databank": "hitemp",  # Databank used for calculation. Must be stated.
}

# List of parameters to be fitted, each accompanied by its initial value
fit_parameters = {
    "Tvib": 6000,  # Vibrational temperature, in K.
    "Trot": 4000,  # Rotational temperature, in K.
    "mole_fraction": 0.05,  # Species mole fraction, from 0 to 1.
}

# List of bounding ranges applied for those fit parameters above.
# You can skip this step and let it use default bounding ranges, but this is not recommended.
# Bounding range must be at format [<lower bound>, <upper bound>].
bounding_ranges = {
    "Tvib": [2000, 7000],
    "Trot": [2000, 7000],
    "mole_fraction": [0, 0.1],
}

# Fitting pipeline setups.
fit_properties = {
    "method": "lbfgsb",  # Preferred fitting method. By default, "leastsq".
    "fit_var": "radiance",  # Spectral quantity to be extracted for fitting process, such as "radiance", "absorbance", etc.
    "normalize": False,  # Whether to apply normalization on both spectra or not.
    "max_loop": 300,  # Max number of loops allowed. By default, 200.
    "tol": 1e-20,  # Fitting tolerance, only applicable for "lbfgsb" method.
}
"""
For the fitting method, you can try one among 17 different fitting methods and algorithms of LMFIT,
introduced in `LMFIT method list <https://lmfit.github.io/lmfit-py/fitting.html#choosing-different-fitting-methods>`.
You can see the benchmark result of these algorithms here:
`RADIS Newfitting Algorithm Benchmark <https://github.com/radis/radis-benchmark/blob/master/manual_benchmarks/plot_newfitting_comparison_algorithm.py>`.
"""
# -------------------- Step 3. Run the fitting and retrieve results -------------------- #
# Conduct the fitting process!
s_best, result, log = fit_spectrum(
s_exp=s_experimental, # Experimental spectrum.
fit_params=fit_parameters, # Fit parameters.
bounds=bounding_ranges, # Bounding ranges for those fit parameters.
model=experimental_conditions, # Experimental ground-truths conditions.
pipeline=fit_properties, # # Fitting pipeline references.
)
# Now investigate the result logs for additional information about what's going on during the fitting process
print("\nResidual history: \n")
print(log["residual"])
print("\nFitted values history: \n")
for fit_val in log["fit_vals"]:
print(fit_val)
print("\nTotal fitting time: ")
print(log["time_fitting"], end=" s\n")
|
radisREPO_NAMEradisPATH_START.@radis_extracted@radis-master@examples@3_Fitting@plot3_fit_Trot-Tvib-molfrac.py@.PATH_END.py
|
{
"filename": "test_sky_coord_builder.py",
"repo_name": "astropy/pyvo",
"repo_path": "pyvo_extracted/pyvo-main/pyvo/mivot/tests/test_sky_coord_builder.py",
"type": "Python"
}
|
'''
The first service in operation that annotates query responses on the fly is Vizier
https://cds/viz-bin/mivotconesearch/VizierParams
Data are mapped on the mango:EpochPropagation class as it is implemented in the current code.
This test case is based on 2 VOTables.
Both tests check the generation of SkyCoord instances from the MivotInstances built
for the output of this service.
'''
import pytest
from pyvo.mivot.version_checker import check_astropy_version
from pyvo.mivot.viewer.mivot_instance import MivotInstance
from pyvo.mivot.features.sky_coord_builder import SkyCoordBuilder
from pyvo.mivot.utils.exceptions import NoMatchingDMTypeError
# annotations generated by Vizier as given to the MivotInstance
vizier_dict = {
    "dmtype": "mango:EpochPosition",
    # Position, proper motion and epoch, each carried as an ivoa:RealQuantity.
    "longitude": {"dmtype": "ivoa:RealQuantity", "value": 52.26722684, "unit": "deg", "ref": "RAICRS"},
    "latitude": {"dmtype": "ivoa:RealQuantity", "value": 59.94033461, "unit": "deg", "ref": "DEICRS"},
    "pmLongitude": {"dmtype": "ivoa:RealQuantity", "value": -0.82, "unit": "mas/yr", "ref": "pmRA"},
    "pmLatitude": {"dmtype": "ivoa:RealQuantity", "value": -1.85, "unit": "mas/yr", "ref": "pmDE"},
    "epoch": {"dmtype": "ivoa:RealQuantity", "value": 1991.25, "unit": "yr", "ref": None},
    "coordSys": {
        "dmtype": "coords:SpaceSys",
        "dmid": "SpaceFrame_ICRS",
        "dmrole": "coords:Coordinate.coordSys",
        "spaceRefFrame": {"dmtype": "coords:SpaceFrame", "value": "ICRS", "unit": None, "ref": None},
    },
}
# The same edited by hand (parallax added and FK5 + Equinox frame)
vizier_equin_dict = {
    "dmtype": "mango:EpochPosition",
    "longitude": {"dmtype": "ivoa:RealQuantity", "value": 52.26722684, "unit": "deg", "ref": "RAICRS"},
    "latitude": {"dmtype": "ivoa:RealQuantity", "value": 59.94033461, "unit": "deg", "ref": "DEICRS"},
    "pmLongitude": {"dmtype": "ivoa:RealQuantity", "value": -0.82, "unit": "mas/yr", "ref": "pmRA"},
    "pmLatitude": {"dmtype": "ivoa:RealQuantity", "value": -1.85, "unit": "mas/yr", "ref": "pmDE"},
    # Hand-added parallax so the SkyCoord gains a distance component.
    "parallax": {"dmtype": "ivoa:RealQuantity", "value": 0.6, "unit": "mas", "ref": "parallax"},
    "epoch": {"dmtype": "ivoa:RealQuantity", "value": 1991.25, "unit": "yr", "ref": None},
    "coordSys": {
        "dmtype": "coords:SpaceSys",
        "dmid": "SpaceFrame_ICRS",
        "dmrole": "coords:Coordinate.coordSys",
        # Hand-edited FK5 frame with an explicit equinox.
        "spaceRefFrame": {"dmtype": "coords:SpaceFrame.spaceRefFrame", "value": "FK5", "unit": None, "ref": None},
        "equinox": {"dmtype": "coords:SpaceFrame.equinox", "value": "2012", "unit": "yr"},
    },
}
# The same edited mapped on a dummy class
vizier_dummy_type = {
    # Deliberately NOT mango:EpochPosition, to trigger NoMatchingDMTypeError.
    "dmtype": "mango:DumyType",
    "longitude": {"dmtype": "ivoa:RealQuantity", "value": 52.26722684, "unit": "deg", "ref": "RAICRS"},
    "latitude": {"dmtype": "ivoa:RealQuantity", "value": 59.94033461, "unit": "deg", "ref": "DEICRS"},
    "pmLongitude": {"dmtype": "ivoa:RealQuantity", "value": -0.82, "unit": "mas/yr", "ref": "pmRA"},
    "pmLatitude": {"dmtype": "ivoa:RealQuantity", "value": -1.85, "unit": "mas/yr", "ref": "pmDE"},
    "parallax": {"dmtype": "ivoa:RealQuantity", "value": 0.6, "unit": "mas", "ref": "parallax"},
    "epoch": {"dmtype": "ivoa:RealQuantity", "value": 1991.25, "unit": "yr", "ref": None},
    "coordSys": {
        "dmtype": "coords:SpaceSys",
        "dmid": "SpaceFrame_ICRS",
        "dmrole": "coords:Coordinate.coordSys",
        "spaceRefFrame": {"dmtype": "coords:SpaceFrame.spaceRefFrame", "value": "FK5", "unit": None, "ref": None},
        "equinox": {"dmtype": "coords:SpaceFrame.equinox", "value": "2012", "unit": "yr"},
    },
}
def test_no_matching_mapping():
    """Check that NoMatchingDMTypeError is raised for instances that are not
    mapped on mango:EpochPosition.
    """
    with pytest.raises(NoMatchingDMTypeError):
        instance = MivotInstance(**vizier_dummy_type)
        builder = SkyCoordBuilder(instance.to_dict())
        builder.build_sky_coord()
@pytest.mark.skipif(not check_astropy_version(), reason="need astropy 6+")
def test_vizier_output():
    """Test the SkyCoord issued from the Vizier response."""

    def squashed(coord):
        # Collapse all whitespace so the repr can be compared on one line.
        return str(coord).replace("\n", "").replace(" ", "")

    icrs_repr = (
        "<SkyCoord (ICRS): (ra, dec) in deg(52.26722684, 59.94033461) "
        "(pm_ra_cosdec, pm_dec) in mas / yr(-0.82, -1.85)>"
    )
    mivot_instance = MivotInstance(**vizier_dict)
    scb = SkyCoordBuilder(mivot_instance.to_dict())
    assert squashed(scb.build_sky_coord()) == icrs_repr
    assert squashed(mivot_instance.get_SkyCoord()) == icrs_repr

    # A Galactic frame maps onto (l, b) coordinates.
    vizier_dict["coordSys"]["spaceRefFrame"]["value"] = "Galactic"
    mivot_instance = MivotInstance(**vizier_dict)
    assert squashed(mivot_instance.get_SkyCoord()) == (
        "<SkyCoord (Galactic): (l, b) in deg(52.26722684, 59.94033461) "
        "(pm_l_cosb, pm_b) in mas / yr(-0.82, -1.85)>"
    )

    # An unknown frame falls back to ICRS.
    vizier_dict["coordSys"]["spaceRefFrame"]["value"] = "QWERTY"
    mivot_instance = MivotInstance(**vizier_dict)
    assert squashed(mivot_instance.get_SkyCoord()) == icrs_repr
@pytest.mark.skipif(not check_astropy_version(), reason="need astropy 6+")
def test_vizier_output_with_equinox_and_parallax():
    """Test the SkyCoord issued from the modified Vizier response
    (parallax added and FK5 + equinox frame).
    """

    def squashed(coord):
        # Collapse all whitespace so the repr can be compared on one line.
        return str(coord).replace("\n", "").replace(" ", "")

    mivot_instance = MivotInstance(**vizier_equin_dict)
    scb = SkyCoordBuilder(mivot_instance.to_dict())
    assert squashed(scb.build_sky_coord()) == (
        "<SkyCoord (FK5: equinox=J2012.000): (ra, dec, distance) in "
        "(deg, deg, pc)(52.26722684, 59.94033461, 600.) "
        "(pm_ra_cosdec, pm_dec) in mas / yr(-0.82, -1.85)>"
    )

    # FK4 additionally carries the observation epoch as obstime.
    vizier_equin_dict["coordSys"]["spaceRefFrame"]["value"] = "FK4"
    mivot_instance = MivotInstance(**vizier_equin_dict)
    assert squashed(mivot_instance.get_SkyCoord()) == (
        "<SkyCoord (FK4: equinox=B2012.000, obstime=J1991.250): (ra, dec, distance) in "
        "(deg, deg, pc)(52.26722684, 59.94033461, 600.) "
        "(pm_ra_cosdec, pm_dec) in mas / yr(-0.82, -1.85)>"
    )
|
astropyREPO_NAMEpyvoPATH_START.@pyvo_extracted@pyvo-main@pyvo@mivot@tests@test_sky_coord_builder.py@.PATH_END.py
|
{
"filename": "julia_set.py",
"repo_name": "enthought/mayavi",
"repo_path": "mayavi_extracted/mayavi-master/docs/source/mayavi/auto/julia_set.py",
"type": "Python"
}
|
"""
An example showing the Julia set displayed as a z-warped surface.
The Julia set is a fractal (see http://en.wikipedia.org/wiki/Julia_set
). We display it here in a canyon-like view using mlab's surf function:
:func:`mayavi.mlab.surf`.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
from mayavi import mlab
import numpy as np
# Calculate the Julia set on a grid
x, y = np.ogrid[-1.5:0.5:500j, -1:1:500j]
z = x + 1j * y
julia = np.zeros(z.shape)
for i in range(50):
z = z ** 2 - 0.70176 - 0.3842j
julia += 1 / float(2 + i) * (z * np.conj(z) > 4)
# Display it
mlab.figure(size=(400, 300))
mlab.surf(julia, colormap='gist_earth', warp_scale='auto', vmax=1.5)
# A view into the "Canyon"
mlab.view(65, 27, 322, [30., -13.7, 136])
mlab.show()
|
enthoughtREPO_NAMEmayaviPATH_START.@mayavi_extracted@mayavi-master@docs@source@mayavi@auto@julia_set.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/libs/lcms2/README.md",
"type": "Markdown"
}
|

# About Little CMS
[www.littlecms.com](https://www.littlecms.com)
Little CMS intends to be an **OPEN SOURCE** small-footprint color management engine, with special focus on accuracy and performance. It uses the International Color Consortium standard (ICC), which is the modern standard for color management. The ICC specification is widely used and is referred to in many International and other de-facto standards. It was approved as an International Standard, ISO 15076-1, in 2005.
# Conformance
Little CMS is a **FULL IMPLEMENTATION** of ICC specification 4.4; it fully supports all kinds of V2 and V4 profiles, including abstract, devicelink and named color profiles. Check the tutorial for an exhaustive list of features.
# A bit of history
Since the initial release, back in 1998, Little CMS has grown to become one of the most popular open-source color management libraries, and has been used in a large number of production projects, in areas as printer firmware, monitors, digital cameras, RIPs, publishing, scientific, and many others. You can find Little CMS in most Linux distributions, and it's released under an open source license.
### Please see the complete documentation in doc folder
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@libs@lcms2@README.md@.PATH_END.py
|
{
"filename": "_shadow.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/legend/grouptitlefont/_shadow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the `layout.legend.grouptitlefont.shadow` string property."""

    def __init__(
        self, plotly_name="shadow", parent_name="layout.legend.grouptitlefont", **kwargs
    ):
        """
        Parameters
        ----------
        plotly_name : str
            Name of the validated property.
        parent_name : str
            Dotted path of the parent compound object.
        **kwargs
            Forwarded to `StringValidator`; `edit_type` defaults to "legend".
        """
        # Py3-style super() replaces the redundant super(ShadowValidator, self).
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "legend"),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@legend@grouptitlefont@_shadow.py@.PATH_END.py
|
{
"filename": "sensitivity_cube.py",
"repo_name": "HETDEX/hetdex_api",
"repo_path": "hetdex_api_extracted/hetdex_api-master/hetdex_api/flux_limits/sensitivity_cube.py",
"type": "Python"
}
|
"""
Sensitivity Cube Reader
Read in Karl's sensitivity cubes and
produce expected detection fractions
from the Fleming (Fleming et al 1995)
parameterisation
References
----------
- Fleming et al. 1995
Fleming D.~E.~B., Harris W.~E., Pritchet C.~J., Hanes D.~A., 1995,
AJ, 109, 1044. doi:10.1086/117340
http://adsabs.harvard.edu/abs/1995AJ....109.1044F
- Donghui Jeong's explanation (internal to HETDEX)
https://luna.mpe.mpg.de/wikihetdex/index.php/Flim_files_and_Fleming_curve
.. moduleauthor:: Daniel Farrow <dfarrow@mpe.mpg.de>
"""
from __future__ import (absolute_import, print_function)
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
from numpy import (rint, array, around, multiply, isnan, meshgrid, mean, isfinite,
median, sqrt, divide, linspace, ones, log10, loadtxt, polyval, inf,
repeat, newaxis, logical_not, arange, tile, nan, dstack, deg2rad,
cos, sin, zeros)
from numpy.ma import filled
from numpy.ma import array as maskedarray
from numpy.random import normal
from numpy import any as nany
from scipy.interpolate import interp1d, RegularGridInterpolator
import astropy.units as u
import astropy.io.fits as fits
from astropy.io.fits import Header
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
from hetdex_api.flux_limits.flim_models import return_flux_limit_model
from hetdex_api.config import HDRconfig
class WavelengthException(Exception):
    """Raised when a suspicious wavelength is passed."""
    pass
def fleming_function(flux, flim, alpha):
    """
    Fleming et al. (1995) completeness parameterisation.

    Parameters
    ----------
    flux : float
        flux of source
    flim : float
        flux at 50% completeness
    alpha : float
        parameter controlling
        width of fall off

    Returns
    -------
    val : float
        Fleming+ 1995 function at this
        flux
    """
    # Scaled magnitude difference between the source flux and the 50% limit.
    mag_diff = multiply(alpha, (-2.5*log10(flux) + 2.5*log10(flim)))
    return 0.5*(1.0 + divide(mag_diff, sqrt(1.0 + mag_diff*mag_diff)))
def read_cube(fn, datascale=1e-17):
    """
    Read one of Karl's sensitivity cubes and return the flux limits
    together with the FITS header (used for ra, dec conversions).

    Parameters
    ----------
    fn : str
        filename of cube to read
    datascale : float
        the scaling to apply to the inverse of the cube values (Optional,
        default 1e-17)

    Return
    ------
    f50s : array
        the datacube of the flux limits
    header : dict
        the header of the cube's
        FITS file
    """
    hdu_list = fits.open(fn)
    # The cube stores datascale/flux, so invert to recover the flux limits.
    flux_limits = datascale / hdu_list[0].data
    return flux_limits, hdu_list[0].header
def create_sensitivity_cube_from_astrom(racen, deccen, pa, nx, ny, nz, ifusize,
                                        wrange=[3470.0, 5542.0], **kwargs):
    """
    Return an (empty) sensitivity cube object, to be filled
    with data from simulations later.

    Parameters
    ----------
    racen, deccen : float
        the central coordinates of the IFU
    pa : float
        the IFU rotation
    nx, ny, nz : int
        the dimensions of the cube in ra, dec, wave
    ifusize : float
        the length of an IFU side in arcsec
    wrange : array (optional)
        the lower and upper wavelength
        limits in Angstrom
    **kwargs :
        arguments to pass to SensitivityCube
    """
    deltapix = (float(ifusize)/nx/3600.0)  # arcsec side -> deg per pixel
    # this is rotation in focal plane, maybe not the IFU
    rot = deg2rad(pa)
    coord = SkyCoord(racen*u.deg, deccen*u.deg)

    # WCS cards; dict order matches the card order of the original header.
    cards = {
        "NAXIS": 3,
        "NAXIS1": nx,
        "NAXIS2": ny,
        "NAXIS3": nz,
        "CTYPE1": "RA---TAN",
        "CTYPE2": "DEC--TAN",
        "CTYPE3": "Wave ",
        "CUNIT1": "deg ",
        "CUNIT2": "deg ",
        "CRPIX1": nx/2. + 0.5,
        "CRPIX2": ny/2. + 0.5,
        "CRPIX3": 1.0,
        "CRVAL1": racen,  # deg
        "CRVAL2": deccen,  # deg
        "CRVAL3": wrange[0],  # AA
        "CROTA2": pa,
        "CD1_1": deltapix*cos(rot),
        "CD1_2": deltapix*sin(rot),
        "CD1_3": 0.0,
        "CD2_1": -1.0*deltapix*sin(rot),
        "CD2_2": deltapix*cos(rot),
        "CD2_3": 0.0,
        "CD3_1": 0.0,
        "CD3_2": 0.0,
        "CD3_3": (wrange[1] - wrange[0])/nz,
    }
    header = Header(cards=cards)

    # Empty noise and Fleming-alpha cubes, shaped (wave, dec, ra).
    sigmas = zeros((nz, ny, nx))
    alphas = zeros((nz, ny, nx))

    return SensitivityCube(sigmas, header, None, alphas, aper_corr=1.0,
                           nsigma=1.0, **kwargs)
class SensitivityCube(object):
    """
    Deals with flux limit cubes

    Parameters
    ----------
    sigmas : array
        3D datacube of datascale/noise
        where noise is the noise on
        a point source detection
    header : dict
        a dictionary of the header values to be stored in a
        FITS file
    wavelengths, alphas : array
        arrays of the wavelength in
        Angstrom and the alpha parameter
        of the Fleming+ 1995 function
    aper_corr : float (Optional)
        Aperture correction to multiply
        the cubes with. If None, read
        from header. If not in header
        and aper_corr=None do nothing.
        Default is 1.0.
    nsigma : float
        If the cubes don't contain
        1 sigma noise (e.g. in the HDR1
        cubes it's 6 sigma) specify it here
    flim_model : str
        the name of the flux limit model
        to use. If None then use the latest
        (default)
    mask : array (optional)
        a spatial ra, dec mask with the same
        WCS and dimensions as the data (default:
        None)
    cache_sim_interp : bool (optional)
        cache the SimulationInterpolator,
        so if you use another SensitivityCube
        it will use the same model from before
        (hdr2pt1pt1 or later only, only works
        if you don't change flim_model)

    Attributes
    ----------
    sigmas : array
        an array of the noise values
    alpha_func : callable
        returns the Fleming alpha
        for an input wavelength
    wcs : astropy.wcs:WCS
        world coordinate system to convert between ra, dec, lambda
        and pixel
    f50_from_noise : callable
        function that converts the values
        in `sigmas` to flux values at
        50% completeness
    """

    def __init__(self, sigmas, header, wavelengths, alphas, aper_corr=1.0,
                 nsigma=1.0, flim_model=None, mask=None,
                 cache_sim_interp=True, verbose=False):

        # idiomatic None test (was: type(mask) != type(None));
        # masked (bad) regions get the 999 fill value on .filled()
        if mask is not None:
            mask = logical_not(mask)
            mask3d = repeat(mask[newaxis, :, :], sigmas.shape[0], axis=0)
            self.sigmas = maskedarray(sigmas/nsigma, mask=mask3d, fill_value=999.0)
        else:
            self.sigmas = maskedarray(sigmas/nsigma, fill_value=999.0)

        # collapse the data to create a continuum mask
        self.collapsed_data = filled(self.sigmas, 0).sum(axis=0)

        self.nsigma = nsigma

        # Grab the flux limit model
        self.f50_from_noise, self.sinterp, interp_sigmas \
            = return_flux_limit_model(flim_model,
                                      cache_sim_interp=cache_sim_interp,
                                      verbose=verbose)

        # optional trilinear interpolation over pixel indices, instead of
        # nearest-pixel lookups (only for models that request it)
        self.sigma_interpolate = None
        if interp_sigmas:
            indicesz = arange(self.sigmas.shape[0])
            indicesy = arange(self.sigmas.shape[1])
            indicesx = arange(self.sigmas.shape[2])
            self.sigma_interpolate = RegularGridInterpolator(
                (indicesz, indicesy, indicesx),
                self.sigmas.filled(fill_value=nan),
                fill_value=999)

        # Fix issue with header: synthesize the CD matrix spectral terms
        # when only CDELT3 is present
        if "CD3_3" not in header:
            header["CD3_3"] = header["CDELT3"]
            header["CD3_1"] = 0.0
            header["CD3_2"] = 0.0
            header["CD2_3"] = 0.0
            header["CD1_3"] = 0.0

        self.wcs = WCS(header)
        self.header = header

        # Deal with aperture corrections: explicit value wins,
        # otherwise fall back to header keywords
        if aper_corr:
            self.aper_corr = aper_corr
        elif "APCOR" in self.header:
            self.aper_corr = self.header["APCOR"]
        elif "APCOR0" in self.header:
            self.aper_corr = self.header["APCOR0"]
        else:
            self.aper_corr = 1.0

        self.sigmas = self.sigmas*self.aper_corr

        self.alphas = array(alphas)
        self.wavelengths = wavelengths

        # Depends if alphas depend on wavelength or
        # is specified per cube cell
        if len(self.alphas.shape) == 3:
            self.alpha_is_cube = True
        else:
            self.alpha_is_cube = False
        # NOTE(review): built even when alphas is a cube -- presumably
        # wavelengths/alphas are 1D in that call path; confirm with callers
        self.alpha_func = interp1d(wavelengths, alphas,
                                   fill_value="extrapolate")
def get_alpha(self, ra, dec, lambda_):
"""
Return the parameter controlling
the slope of the Fleming+ (1995) function
(only used for the old flux limit models)
"""
# If alpha is just an array versus wavelength
# return the value here
if not self.alpha_is_cube:
return self.alpha_func(lambda_)
# Alpha stored in a cube
ix, iy, iz = self.radecwltoxyz(ra, dec, lambda_)
# Check for stuff outside of cube
bad_vals = (ix >= self.alphas.shape[2]) | (ix < 0)
bad_vals = bad_vals | (iy >= self.alphas.shape[1]) | (iy < 0)
bad_vals = bad_vals | (iz >= self.alphas.shape[0]) | (iz < 0)
ix[(ix >= self.alphas.shape[2]) | (ix < 0)] = 0
iy[(iy >= self.alphas.shape[1]) | (iy < 0)] = 0
iz[(iz >= self.alphas.shape[0]) | (iz < 0)] = 0
alphas_here = self.alphas[iz, iy, ix]
# Support arrays and floats
try:
alphas_here[bad_vals] = 999.0
except TypeError:
if isnan(bad_vals):
aphas_here = 999.0
return alphas_here
@classmethod
def from_file(cls, fn_sensitivity_cube, wavelengths, alphas,
datascale=1e-17, **kwargs):
"""
Read in a sensitivity cube
from a file
Parameters
----------
fn_sensitivity_cube : str
the file name of a cube
containing the limiting
magnitude
wavelengths, alphas : array
arrays of the wavelength in
Angstrom and the alpha parameter
of the Fleming+ 1995 function
datascale : float (optional)
the values stored are
this_value/flim
**kwargs :
these are passed to the SensitivityCube init
"""
sigmas, header = read_cube(fn_sensitivity_cube, datascale=datascale)
return SensitivityCube(sigmas, header, wavelengths, alphas, **kwargs)
def apply_flux_recalibration(self, rescale, flux_calib_correction_file=None):
"""
Apply a recalibration of the fluxes to the
cube
Parameters
----------
rescale : float
value to multiply the flux limit cubes
to rescale
flux_calib_correction_file : str (optional)
filename containing a polynomial
fit (HETDEX - TRUTH)/HETDEX versus
wavelength to correct for
problems with the flux
calibration. Should be a polynomial
centered on 4600, i.e. input to
polyval(pvals, wl - 4600.0)
"""
if flux_calib_correction_file:
pvals = loadtxt(flux_calib_correction_file)
for iz in range(self.sigmas.shape[0]):
ra, dec, wl = self.wcs.wcs_pix2world(0, 0, iz, 0)
if wl < 3850.0:
wl = 3850.0
if flux_calib_correction_file:
self.sigmas[iz, :, :] = rescale*self.sigmas[iz, :, :]*(1.0 - polyval(pvals, wl - 4600.0))
else:
self.sigmas[iz, :, :] = rescale*self.sigmas[iz, :, :]
def radecwltoxyz(self, ra, dec, lambda_, round_=True):
"""
Convert ra, dec, wavelength position to
x,y, z coordinate of cube
Parameters
----------
ra, dec : arrays
right ascension &
declination of source
lambda_ : array
wavelength in Angstrom
round_ : bool
if true, round to nearest
integer (default is True)
Returns
-------
ix,iy,iz : arrays of int
indices of arrays for datacube
"""
lambda_ = array(lambda_)
ix,iy,iz = self.wcs.wcs_world2pix(ra, dec, lambda_, 0)
if round_:
return array(around(ix), dtype=int), array(around(iy), dtype=int), \
array(around(iz), dtype=int)
else:
return array(ix), array(iy), array(iz)
    def get_average_f50(self, ra, dec, lambda_, sncut, npix=1):
        """
        Get the average (root-mean-square) 50% completeness flux
        from the cube in an npix box around ra, dec, lambda

        Parameters
        ----------
        ra, dec : array
            right ascension and dec in degrees
        lambda_ : array
            wavelength in Angstroms
        sncut : float
            cut in detection significance
            that defines this catalogue
        npix : int
            the box will be 2*npix + 1 on
            a side, i.e. number of pixels
            around the position to
            consider.

        Returns
        -------
        f50s : array
            RMS flux limit over the box for each
            input position. If outside of cube
            return 999
        """
        # central pixel of the box for every input position
        ixc, iyc, izc = self.radecwltoxyz(ra, dec, lambda_)

        # box side length in pixels
        na = int(2*npix + 1)

        # [x1-1, x1, x1+1, x2-1, x2, x2+1, .....]
        offsets = arange(-1.0*npix, npix + 1, 1, dtype=int)

        # build flattened index arrays covering every (x, y, z)
        # combination of the box around each position
        ix = ixc.repeat(na) + tile(offsets, len(ixc))
        # same x for all x, y in loop
        ix = ix.repeat(na*na)

        iy = iyc.repeat(na) + tile(offsets, len(iyc))
        # same y for all z values in loop
        iy = iy.repeat(na)
        # tile full y-loop for each x-value
        iy = tile(iy.reshape(len(iyc), na*na), na)
        iy = iy.flatten()

        # [z1-1, z1, z1+1, z2-1, z2, z2+1, .....]
        iz = izc.repeat(len(offsets)) + tile(offsets, len(izc))
        # z axis fastest repeating, tile z loop for every x and y value
        iz = tile(iz.reshape(len(izc), na), na*na)
        iz = iz.flatten()

        # Check for stuff outside of cube
        bad_vals = (ix >= self.sigmas.shape[2]) | (ix < 0)
        bad_vals = bad_vals | (iy >= self.sigmas.shape[1]) | (iy < 0)
        bad_vals = bad_vals | (iz >= self.sigmas.shape[0]) | (iz < 0)

        # clamp so the lookup never raises; flagged pixels are
        # overwritten with 999 below
        ix[(ix >= self.sigmas.shape[2]) | (ix < 0)] = 0
        iy[(iy >= self.sigmas.shape[1]) | (iy < 0)] = 0
        iz[(iz >= self.sigmas.shape[0]) | (iz < 0)] = 0

        f50s = self.f50_from_noise(self.sigmas.filled()[iz, iy, ix], lambda_, sncut)

        # out-of-cube pixels contribute 999 to the average
        f50s[bad_vals] = 999.0

        # return the average flim in the area: RMS over the
        # na^3 box pixels for each input position
        f50s = f50s*f50s
        return sqrt(f50s.reshape(len(ra), na*na*na).mean(axis=1))
    def get_collapsed_value(self, ra, dec):
        """
        Look up the wavelength-collapsed noise map at ra, dec.

        Parameters
        ----------
        ra, dec : array
            right ascension and dec in degrees

        Returns
        -------
        noise : array
            value of ``collapsed_data`` (the noise cube summed
            over the wavelength axis) at each position;
            999 for positions outside of the cube
        """
        # wavelength is irrelevant for the collapsed map; 4500 A is
        # just a value inside the cube's spectral range
        ix, iy, iz = self.radecwltoxyz(ra, dec, 4500.,
                                       round_=True)

        # Check for stuff outside of cube (spatial axes only)
        bad_vals = (ix >= self.sigmas.shape[2]) | (ix < 0)
        bad_vals = bad_vals | (iy >= self.sigmas.shape[1]) | (iy < 0)

        # clamp so the lookup never raises; flagged positions are
        # overwritten with 999 below
        ix[(ix >= self.sigmas.shape[2]) | (ix < 0)] = 0
        iy[(iy >= self.sigmas.shape[1]) | (iy < 0)] = 0

        # XXX not using interpolation
        noise = self.collapsed_data[iy, ix]
        noise[bad_vals] = 999.0

        return noise
    def get_local_max_f50(self, ra, dec, lambda_, sncut, npix=1):
        """
        Get the maximum 50% completeness flux from the cube in
        an npix box around and ra, dec, lambda

        Parameters
        ----------
        ra, dec : array
            right ascension and dec in degrees
        lambda_ : array
            wavelength in Angstroms
        sncut : float
            cut in detection significance
            that defines this catalogue
        npix : int
            the box will be 2*npix + 1 on
            a side, i.e. number of pixels
            around the position to
            consider.

        Returns
        -------
        f50s : array
            max flux limits in cubes. If outside
            of cube return 999
        """
        # NOTE(review): index construction below is duplicated from
        # get_average_f50 -- candidate for a shared private helper
        ixc, iyc, izc = self.radecwltoxyz(ra, dec, lambda_)

        # box side length in pixels
        na = int(2*npix + 1)

        # [x1-1, x1, x1+1, x2-1, x2, x2+1, .....]
        offsets = arange(-1.0*npix, npix + 1, 1, dtype=int)

        ix = ixc.repeat(na) + tile(offsets, len(ixc))
        # same x for all x, y in loop
        ix = ix.repeat(na*na)

        iy = iyc.repeat(na) + tile(offsets, len(iyc))
        # same y for all z values in loop
        iy = iy.repeat(na)
        # tile full y-loop for each x-value
        iy = tile(iy.reshape(len(iyc), na*na), na)
        iy = iy.flatten()

        # [z1-1, z1, z1+1, z2-1, z2, z2+1, .....]
        iz = izc.repeat(len(offsets)) + tile(offsets, len(izc))
        # z axis fastest repeating, tile z loop for every x and y value
        iz = tile(iz.reshape(len(izc), na), na*na)
        iz = iz.flatten()

        # Check for stuff outside of cube
        bad_vals = (ix >= self.sigmas.shape[2]) | (ix < 0)
        bad_vals = bad_vals | (iy >= self.sigmas.shape[1]) | (iy < 0)
        bad_vals = bad_vals | (iz >= self.sigmas.shape[0]) | (iz < 0)

        # clamp so the lookup never raises; flagged pixels are
        # overwritten with 999 below
        ix[(ix >= self.sigmas.shape[2]) | (ix < 0)] = 0
        iy[(iy >= self.sigmas.shape[1]) | (iy < 0)] = 0
        iz[(iz >= self.sigmas.shape[0]) | (iz < 0)] = 0

        f50s = self.f50_from_noise(self.sigmas.filled()[iz, iy, ix], lambda_, sncut)

        # out-of-cube pixels get 999 (they can dominate the max)
        f50s[bad_vals] = 999.0

        # return the max value in the na^3 box for each input position
        return f50s.reshape(len(ra), na*na*na).max(axis=1)
    def get_f50(self, ra, dec, lambda_, sncut):
        """
        Get 50% completeness flux from the cube at
        ra, dec, lambda

        Parameters
        ----------
        ra, dec : array
            right ascension and dec in degrees
        lambda_ : array
            wavelength in Angstroms
        sncut : float
            cut in detection significance
            that defines this catalogue

        Returns
        -------
        f50s : array
            flux limits. If outside
            of cube return 999
        """
        # with an interpolator we can use fractional pixel positions;
        # otherwise round to the nearest pixel
        if self.sigma_interpolate:
            round_ = False
        else:
            round_ = True

        ix, iy, iz = self.radecwltoxyz(ra, dec, lambda_,
                                       round_ = round_)

        # Check for stuff outside of cube
        bad_vals = (ix >= self.sigmas.shape[2]) | (ix < 0)
        bad_vals = bad_vals | (iy >= self.sigmas.shape[1]) | (iy < 0)
        bad_vals = bad_vals | (iz >= self.sigmas.shape[0]) | (iz < 0)

        # clamp so the lookup never raises; flagged positions are
        # overwritten with 999 below
        ix[(ix >= self.sigmas.shape[2]) | (ix < 0)] = 0
        iy[(iy >= self.sigmas.shape[1]) | (iy < 0)] = 0
        iz[(iz >= self.sigmas.shape[0]) | (iz < 0)] = 0

        if self.sigma_interpolate:
            # trilinear interpolation of the noise at fractional pixels
            coords = dstack((iz, iy, ix))[0]
            noise = self.sigma_interpolate(coords)
        else:
            # nearest-pixel noise lookup (masked pixels -> fill value)
            noise = self.sigmas.filled()[iz, iy, ix]

        f50s = self.f50_from_noise(noise, lambda_, sncut)

        # Support arrays and floats
        try:
            f50s[bad_vals] = 999.0
        except TypeError:
            # scalar case: bad_vals is a plain bool
            if bad_vals:
                f50s = 999.0

        return f50s
def compute_snr(self, flux, ra, dec, lambda_):
"""
Compute the flux divided by the noise for
a given source.
Parameters
----------
flux : array
fluxes of objects
ra, dec : array
right ascension and dec in degrees
lambda_ : array
wavelength in Angstrom
Return
------
snr : array
signal divided by noise
"""
ix, iy, iz = self.radecwltoxyz(ra, dec, lambda_,
round_ = True)
# Check for stuff outside of cube
bad_vals = (ix >= self.sigmas.shape[2]) | (ix < 0)
bad_vals = bad_vals | (iy >= self.sigmas.shape[1]) | (iy < 0)
bad_vals = bad_vals | (iz >= self.sigmas.shape[0]) | (iz < 0)
ix[(ix >= self.sigmas.shape[2]) | (ix < 0)] = 0
iy[(iy >= self.sigmas.shape[1]) | (iy < 0)] = 0
iz[(iz >= self.sigmas.shape[0]) | (iz < 0)] = 0
# HERE
noise = self.sigmas.filled()[iz, iy, ix]
snr = flux/noise
# Support arrays and floats
try:
snr[bad_vals] = 0.0
except TypeError:
if isnan(snr):
snr = 0.0
return snr
    def return_completeness(self, flux, ra, dec, lambda_, sncut):
        """
        Return completeness at a 3D position as an array.
        If for whatever reason the completeness is NaN, it's
        replaced by 0.0.

        Parameters
        ----------
        flux : array
            fluxes of objects
        ra, dec : array
            right ascension and dec in degrees
        lambda_ : array
            wavelength in Angstrom
        sncut : float
            the detection significance (S/N) cut
            applied to the data

        Return
        ------
        fracdet : array
            fraction detected

        Raises
        ------
        WavelengthException :
            Annoys user if they pass
            wavelength outside of
            VIRUS range
        """
        # sanity-check the wavelength; TypeError branch handles
        # scalar (non-indexable) lambda_
        try:
            if lambda_[0] < 3000.0 or lambda_[0] > 6000.0:
                raise WavelengthException("""Odd wavelength value. Are you
                                          sure it's in Angstrom?""")
        except TypeError as e:
            if lambda_ < 3000.0 or lambda_ > 6000.0:
                raise WavelengthException("""Odd wavelength value. Are you
                                          sure it's in Angstrom?""")

        f50s = self.get_f50(ra, dec, lambda_, sncut)

        if self.sinterp:
            # interpolate over the simulation
            fracdet = self.sinterp(flux, f50s, lambda_, sncut)
        else:
            # old-style analytic Fleming+ 1995 completeness function
            alphas = self.get_alpha(ra, dec, lambda_)
            fracdet = fleming_function(flux, f50s, alphas)

        # NaNs (e.g. from bad noise values) count as undetectable
        try:
            fracdet[isnan(fracdet)] = 0.0
        except TypeError:
            # scalar case
            if isnan(fracdet):
                fracdet = 0.0

        return fracdet
    def return_wlslice_completeness(self, flux, lambda_low, lambda_high,
                                    sncut, noise_cut=1e-15, pixlo=9,
                                    pixhi=22, return_vals = False):
        """
        Return completeness of a wavelength slice. NaN completeness
        values are replaced with zeroes, noise values greater than
        noise cut or NaN noise values are simply excluded from the
        mean

        Parameters
        ----------
        flux : array
            fluxes of objects
        lambda_low, lambda_high : float
            wavelength slice in Angstrom
            (includes these slices)
        sncut : float
            the detection significance (S/N) cut
            applied to the data
        noise_cut : float
            remove areas with more noise
            than this. Default: 1e-15 erg/s/cm2
        pixlo, pixhi : int (optional)
            spatial pixel range (applied to both ra
            and dec axes) used for the average, to
            trim the cube edges
        return_vals : bool (optional)
            if true also return an array
            of the noise values

        Return
        ------
        fracdet : array
            fraction detected in this slice
        """
        if lambda_low < 3000.0 or lambda_low > 6000.0:
            raise WavelengthException("""Odd wavelength value. Are you
                                      sure it's in Angstrom?""")

        # spectral pixel range covering the requested wavelength window
        # (spatial position is irrelevant, use the WCS reference value)
        ix, iy, izlo = self.radecwltoxyz(self.wcs.wcs.crval[0], self.wcs.wcs.crval[1], lambda_low)
        ix, iy, izhigh = self.radecwltoxyz(self.wcs.wcs.crval[0], self.wcs.wcs.crval[1], lambda_high)

        # clamp to the cube's spectral range
        if izlo < 0:
            print("Warning! Lower wavelength below range")
            izlo = 0
        if izhigh > self.sigmas.shape[0] - 1:
            print("Warning! Upper wavelength above range")
            izhigh = self.sigmas.shape[0] - 1

        izlo = int(izlo)
        izhigh = int(izhigh)

        # remove pixel border and select wavelength slice
        noise = self.sigmas.filled()[izlo:(izhigh + 1), pixlo:pixhi, pixlo:pixhi]

        # create a cube of the wavelengths
        r, d, wl_1d = self.wcs.wcs_pix2world(ones(1 + izhigh - izlo), ones(1 + izhigh - izlo),
                                             range(izlo, izhigh + 1), 0)
        waves = wl_1d.repeat(noise.shape[1]*noise.shape[2])
        try:
            waves = waves.reshape(noise.shape)
        except ValueError as e:
            # NOTE(review): on failure this only prints diagnostics and
            # falls through with `waves` still 1D, so the masking below
            # will then fail -- confirm whether this should re-raise
            print(noise.shape)
            print(len(wl_1d))
            print(izlo, izhigh)

        # remove masked data and bad data
        sel = (noise < noise_cut) & isfinite(noise)
        noise = noise[sel]
        waves = waves[sel]

        # fully masked slice: nothing to average over
        if len(noise) == 0:
            if return_vals:
                return [], []
            else:
                return []

        f50s = self.f50_from_noise(noise, waves, sncut)

        if type(self.sinterp) == type(None):
            # old-style Fleming model: need the alpha parameter
            if len(self.alphas.shape) > 1:
                alphas = self.alphas[izlo:(izhigh + 1), :, :]
            else:
                # rough approximation to lambda varying across window
                alphas = self.alpha_func(0.5*(lambda_low + lambda_high))

        compls = []
        for f in flux:
            if self.sinterp:
                compl = self.sinterp(f, f50s.flatten(), waves.flatten(), sncut)
            else:
                compl = fleming_function(f, f50s, alphas)
            compl[isnan(compl)] = 0.0
            # works so long as pixels equal area
            if len(compl) > 0:
                compls.append(mean(compl))
            else:
                compls.append(0.0)

        if return_vals:
            return array(compls), noise.flatten()
        else:
            return array(compls)
    def return_wlslice_f50(self, lambda_low, lambda_high,
                           sncut, noise_cut=1e-16):
        """
        Return flux at 50% completeness of a wavelength slice.

        Parameters
        ----------
        lambda_low, lambda_high : float
            wavelength slice in Angstrom
            (includes these slices)
        sncut : float
            the detection significance (S/N) cut
            applied to the data
        noise_cut : float (optional)
            remove areas with more noise
            than this. Default: 1e-16 erg/s/cm2

        Return
        ------
        f50 : float
            the flux at 50% completeness
            for the given ``sncut`` in this
            wavelength slice
        """
        # scalar comparison works directly; an array input raises
        # ValueError (ambiguous truth value) and is handled element-wise
        try:
            if lambda_low < 3000.0 or lambda_low > 6000.0:
                raise WavelengthException("""Odd wavelength value. Are you
                                          sure it's in Angstrom?""")
        except ValueError:
            if any(lambda_low < 3000.0) or any(lambda_low > 6000.0):
                raise WavelengthException("""Odd wavelength value. Are you
                                          sure it's in Angstrom?""")

        # spectral pixel range covering the window (spatial position
        # irrelevant, use the WCS reference value)
        ix, iy, izlo = self.radecwltoxyz(self.wcs.wcs.crval[0], self.wcs.wcs.crval[1], lambda_low)
        ix, iy, izhigh = self.radecwltoxyz(self.wcs.wcs.crval[0], self.wcs.wcs.crval[1], lambda_high)

        # NOTE(review): unlike return_wlslice_completeness, izlo/izhigh
        # are not clamped to the cube range here -- confirm callers only
        # pass in-range wavelengths
        noise = self.sigmas.filled()[izlo:(izhigh + 1), :, :]
        noise = noise[(noise < noise_cut) & (noise > 0)]

        # evaluate the flux limit at the median noise, at the
        # central wavelength of the window
        wl_mid = 0.5*(lambda_low + lambda_high)
        f50 = self.f50_from_noise(median(noise), wl_mid, sncut)

        return f50
    def write(self, filename, datascale=1e-17, **kwargs):
        """
        Write the sensitivity cube to a FITS file. If any
        aperture correction was applied, this is removed
        such that the saved data file should be identical
        to the input (within numerical accuracy).

        Parameters
        ----------
        filename : str
            Filename to write to
        datascale : float
            the scaling to apply to the
            inverse of the cube values
            (Optional, default 1e-17)
        **kwargs :
            passed to the astropy.io.fits:writeto
            function
        """
        # stored on disk as datascale/noise; multiplying by aper_corr
        # undoes the division applied in __init__
        fits.writeto(filename, self.aper_corr*datascale/self.sigmas.data,
                     header=self.header, **kwargs)
def plot_completeness(args=None):
    """
    Command line tool: plot the completeness curve
    (Fleming fit) at a particular position and wavelength.

    Parameters
    ----------
    args : list (optional)
        command line arguments; if None use sys.argv
    """
    import matplotlib as mpl
    mpl.use("agg")
    import matplotlib.pyplot as plt
    import argparse

    parser = argparse.ArgumentParser(description="""Plot the Fleming fit to
                                     completeness""")
    parser.add_argument("filename", type=str)
    parser.add_argument("ra",
                        type=str,
                        help="RA of location to plot (HHhMMmSSs)")
    parser.add_argument("dec",
                        type=str,
                        help="DEC of location to plot (DDdMMmSSs)")
    parser.add_argument("lambda_", type=float, help="Wavelength to plot (A)")
    parser.add_argument("alpha", type=float, help="Alpha for Fleming")
    parser.add_argument("--sncut", type=float, default=4.5,
                        help="S/N cut used")
    parser.add_argument("--fout", type=str, help="Filename to output to",
                        default=None)
    opts = parser.parse_args(args=args)

    coord = SkyCoord(opts.ra, opts.dec)
    # a single fixed alpha for both wavelength anchor points
    scube = SensitivityCube.from_file(opts.filename, [3500.0, 5500.0], [opts.alpha, opts.alpha])
    f50 = scube.get_f50([coord.ra.deg], [coord.dec.deg], [opts.lambda_], opts.sncut)

    # sample fluxes from well below to well above the 50% limit
    fluxes = linspace(0.01*f50, 4*f50, 100)
    compl = scube.return_completeness(array(fluxes), coord.ra.deg*ones(len(fluxes)),
                                      coord.dec.deg*ones(len(fluxes)),
                                      opts.lambda_*ones(len(fluxes)), opts.sncut)

    plt.plot(fluxes/1e-17, compl, "r-")
    # raw string: '\,' is LaTeX thin space, not a Python escape
    plt.xlabel(r"Flux $10^{-17}\,$(erg/s/cm$^2$/A)", fontsize=14.0)
    plt.ylabel("Completeness", fontsize=14.0)
    if opts.fout:
        plt.savefig(opts.fout)
    else:
        plt.show()
def plot_slice_of_cube(axes, scube, ramin, ramax, decmin, decmax,
                       wl, sncut, cmap="gist_rainbow", n=100, cax=None):
    """
    Plot a slice of a sensitivity cube

    Parameters
    ----------
    axes : matplotlib.pyplot.axes
        axes to put plot on
    scube : SensitivityCube
        cube to plot
    ramin, ramax, decmin, decmax : float
        limits of the plotted region (degrees)
    wl : float
        wavelength slice to plot
    sncut : float
        the detection significance (S/N) cut
    cmap : str (optional)
        matplotlib colormap name
    n : int
        resolution (grid points per side)
    cax : matplotlib axes (optional)
        axes to draw the colorbar on
    """
    rar = linspace(ramin, ramax, n)
    decr = linspace(decmin, decmax, n)
    ras, decs = meshgrid(rar, decr)
    # one wavelength per grid point
    # (was wl*ones(len(ras)) == n values, under-filling the n*n grid)
    wls = wl*ones(ras.shape)

    flims = scube.get_f50(ras, decs, wls, sncut)
    flims = 1e17*flims

    img = flims.reshape(n, n)
    # matplotlib imshow only accepts "lower" or "upper" for origin
    # (was the invalid "lower left")
    im = axes.imshow(img, extent=[ramin, ramax, decmin, decmax], origin="lower",
                     aspect="auto", cmap=cmap, vmin=0.0, vmax=30)
    # NOTE(review): relies on plt being available at module scope --
    # confirm matplotlib.pyplot is imported at the top of this file
    plt.colorbar(im, cax=cax, label="flux limit [10$^{-17}$ erg/s/cm$^{2}$]", pad=0.0)
    axes.set_xlabel("RA (deg)")
    axes.set_ylabel("Dec (deg)")
def plot_completeness_versus_wl(args=None):
    """
    Command line tool: plot the flux at 50% completeness
    versus wavelength at a particular ra, dec.

    Parameters
    ----------
    args : list (optional)
        command line arguments; if None use sys.argv
    """
    import matplotlib as mpl
    mpl.use("agg")
    import matplotlib.pyplot as plt
    import argparse

    parser = argparse.ArgumentParser(description="""Plot the Fleming fit to
                                     completeness""")
    parser.add_argument("filename", type=str)
    parser.add_argument("ra", type=str, help="RA of location to plot (HHhMMmSSs)")
    parser.add_argument("dec", type=str, help="DEC of location to plot (DDdMMmSSs)")
    parser.add_argument("--sncut", type=float, default=4.5,
                        help="S/N cut used")
    parser.add_argument("--fout", type=str, help="Filename to output to",
                        default=None)
    opts = parser.parse_args(args=args)

    coord = SkyCoord(opts.ra, opts.dec)
    print("WARNING using fixed alpha=-3.1")
    scube = SensitivityCube.from_file(opts.filename, [3500.0, 5500.0], [-3.1, -3.1])

    wls = linspace(3500, 5490.0, 1000)
    f50 = scube.get_f50(coord.ra.deg*ones(len(wls)), coord.dec.deg*ones(len(wls)), wls, opts.sncut)

    plt.plot(wls, f50/1e-16, "k-", label="Flux at 50% completeness")
    # raw string: '\,' is LaTeX thin space, not a Python escape
    plt.ylabel(r"Flux $10^{-16}\,$(erg/s/cm$^2$/A)", fontsize=14.0)
    plt.xlabel("Wavelength (A)", fontsize=14.0)
    plt.legend(loc="upper right")
    if opts.fout:
        plt.savefig(opts.fout)
    else:
        plt.show()
|
HETDEXREPO_NAMEhetdex_apiPATH_START.@hetdex_api_extracted@hetdex_api-master@hetdex_api@flux_limits@sensitivity_cube.py@.PATH_END.py
|
{
"filename": "_textcasesrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/table/hoverlabel/font/_textcasesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextcasesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``table.hoverlabel.font.textcasesrc`` property."""

    def __init__(
        self, plotly_name="textcasesrc", parent_name="table.hoverlabel.font", **kwargs
    ):
        # default edit type is "none" unless overridden by the caller
        edit_type = kwargs.pop("edit_type", "none")
        super(TextcasesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@table@hoverlabel@font@_textcasesrc.py@.PATH_END.py
|
{
"filename": "core_correlation.py",
"repo_name": "leungcalvin/pyfx-public",
"repo_path": "pyfx-public_extracted/pyfx-public-main/src/pyfx/core_correlation.py",
"type": "Python"
}
|
"""
Fringestops station B to station A and cross correlates baseband data from station A and B.
The "core" module that should be called by the "outer layer" corr_job.py
All bugs are the responsibility of Shion Andrew
"""
import numpy as np
from astropy.time import Time, TimeDelta
from decimal import Decimal
import astropy.units as un
import time
from pyfx.fft_corr import basic_correlator #subframe_signal_to_noise_search_correlator as basic_correlator # could swap out correlator here
from pyfx import config
import collections
from pycalc11 import Calc
from baseband_analysis.core.bbdata import BBData
from typing import Optional, Tuple, Union
import logging
# dispersion delay constant, used to convert DM to time delay
K_DM = 1 / 2.41e-4  # in s MHz^2 / (pc cm^-3)
MAX_FRAC_SAMP_LENGTH = 32187  # maximum FFT length, chosen to keep delay rate drift (on Earth) within 1/10th of a frame
def autocorr_core(
    DM: float,
    bbdata_a: BBData,
    t_a: np.ndarray,
    window: np.ndarray,
    R: np.ndarray,
    max_lag: Optional[int]=None,
    n_pol: int=2,
    zp: bool=True,
) -> np.ndarray:
    """Auto-correlates data and downselects over lag.

    Parameters
    ----------
    DM : float
        The DM with which the zeroth pointing of the data is de-smeared before the final gating. for continuum sources, set dispersion measure to 0.
    bbdata_a : BBData object
        At bare minimum, needs to have "tiedbeam_baseband" data of size (nfreq, npointing*npol, ntime).
    t_a : np.ndarray of int of shape (nfreq, npointing, nscan).
        start index of the integration, relative to bbdata_a['time0']['ctime'] in units of 2.56 microsec, as a function of frequency channels, pointing index, and time in units of :window: (i.e. scan number).
    window : np.ndarray of int of shape (npointing, nscan).
        duration of the scan, in units of 2.56 microsec, as a function of pointing and time (i.e. scan number).
    R : np.ndarray of float of shape (nfreq, npointing, nscan).
        Fraction R <= 1 of the scan, that gets down-selected before integration. In other words, we integrate between t_a + window // 2 +- r * window / 2
    max_lag : int (Optional)
        maximum (absolute value) lag (in frames) for auto-correlation (useful for very long time series data). TODO: Outer layer of the code should check that this is less than 1/2 of the window size times R.
        set this to 20 for a good balance between space efficiency and good noise statistics.
    n_pol : int
        number of polarizations in data -- always 2.
    zp : bool
        if True, extract scans with getitem_zp1d -- presumably a
        zero-padded slice so windows running off the data edge are
        padded rather than truncated; TODO confirm against getitem_zp1d

    Returns
    -------
    auto_vis - array of autocorrelations with shape (nfreq, npointing, npol, npol, 2 * nlag + 1, nscan)
    """
    if max_lag is None: # set to default
        max_lag = config.CHANNELIZATION['nlags']
    n_freq = bbdata_a.nfreq
    n_scan = np.size(t_a, axis=-1)
    n_pointings = bbdata_a["tiedbeam_baseband"].shape[1] // n_pol

    # convert all nans to 0s
    bbdata_a['tiedbeam_baseband'][:] = np.nan_to_num(bbdata_a['tiedbeam_baseband'][:], nan=0, posinf=0, neginf=0)

    vis_shape = (n_freq, n_pointings, n_pol, n_pol, 2 * max_lag + 1, n_scan)
    auto_vis = np.zeros(vis_shape, dtype=bbdata_a['tiedbeam_baseband'].dtype)
    f0 = bbdata_a.index_map["freq"]["centre"]  # shape is (nfreq)

    for kkpointing in range(n_pointings):
        for jjscan in range(n_scan):
            wij = int(window[kkpointing, jjscan])
            t_a_indices = t_a[:, kkpointing, jjscan]  # array of length 1024

            ## clip telescope A data ##
            # NOTE(review): pol channels are selected with
            # kkpointing:kkpointing+n_pol here but with 2*kkpointing + j
            # in the zero-padded branch below; for kkpointing > 0 these
            # pick different channels -- confirm intended indexing
            a_shape = list(bbdata_a['tiedbeam_baseband'][:, kkpointing:kkpointing+n_pol, :].shape)
            a_shape[-1] = wij
            clipped_a = np.zeros(a_shape, dtype=bbdata_a['tiedbeam_baseband'].dtype)
            if len(np.unique(t_a_indices)) == 1 and not zp:
                # go fast: identical start index in every channel -> one slice
                clipped_a[:, ...] = bbdata_a['tiedbeam_baseband'][:, kkpointing:kkpointing+n_pol,
                    t_a_indices[0]:t_a_indices[0] + wij]
            elif len(np.unique(t_a_indices)) > 1 and not zp:
                # go slower: per-channel start indices
                for i in range(len(t_a_indices)):
                    clipped_a[i, ...] = bbdata_a['tiedbeam_baseband'][i, kkpointing:kkpointing+n_pol,
                        t_a_indices[i]:t_a_indices[i] + wij]
            elif zp:
                # zero-padded per-channel, per-pol extraction
                for i in range(len(t_a_indices)):
                    for j in range(n_pol):
                        clipped_a[i, j, :] = getitem_zp1d(bbdata_a['tiedbeam_baseband'][i, 2*kkpointing + j],
                                                          t_a_indices[i],
                                                          t_a_indices[i] + wij)

            ######### intrachannel de-dispersion ##################
            scan_a_cd = intrachannel_dedisp(clipped_a, DM, f0=f0)

            r_jjscan = R[:, kkpointing, jjscan]  # np array of size (nfreq)
            if len(np.unique(r_jjscan)) == 1:
                # common R across channels: one start/stop for all,
                # integrate the central fraction r_ij of the window
                r_ij = r_jjscan[0]
                start = int((wij - wij*r_ij) // 2)
                stop = int((wij + wij*r_ij) // 2)
                #######################################################
                ########## auto-correlate the on-signal ##############
                for pol_0 in range(n_pol):
                    for pol_1 in range(n_pol):
                        _vis = basic_correlator(
                            scan_a_cd[:, pol_0, start:stop],
                            scan_a_cd[:, pol_1, start:stop], full_output = False)
                        # keep only lags in [-max_lag, +max_lag]
                        auto_vis[:, kkpointing, pol_0, pol_1, :, jjscan] = np.concatenate(
                            (_vis[:, :max_lag+1], _vis[:, -max_lag:]), axis=-1)
            else:
                # NOTE(review): this branch only fills pol_0 == pol_1
                # products, unlike the common-R branch above which fills
                # all pol pairs -- confirm cross-pol terms are
                # intentionally skipped here
                for r_ij in r_jjscan:
                    start = int((wij - wij*r_ij) // 2)
                    stop = int((wij + wij*r_ij) // 2)
                    #######################################################
                    ########## auto-correlate the on-signal ##############
                    for pol_0 in range(n_pol):
                        for pol_1 in range(n_pol):
                            if pol_0 == pol_1:
                                _vis = basic_correlator(
                                    scan_a_cd[:, pol_0, start:stop],
                                    scan_a_cd[:, pol_1, start:stop], full_output = False)
                                auto_vis[:, kkpointing, pol_0, pol_1, :, jjscan] = np.concatenate(
                                    (_vis[:, :max_lag+1], _vis[:, -max_lag:]), axis=-1)

    return auto_vis
def crosscorr_core(
bbdata_a: BBData,
bbdata_b: BBData,
t_a: np.ndarray,
window: np.ndarray,
R: np.ndarray,
pycalc_results: Calc,
DM: float,
index_A: int,
index_B: int,
max_lag: Optional[int]=None,
sample_rate: float=2.56,
n_pol: int=2,
complex_conjugate_convention: int=-1,
intra_channel_sign: int=1,
weight: Optional[np.ndarray]=None,
fast:bool=False,
zp: bool=True,
) -> np.ndarray:
"""Fringestops, coherently dedisperses, and cross correlates data
Parameters
----------
bbdata_a : BBData object
At bare minimum, needs to have "tiedbeam_baseband" data of size (nfreq, npointing*npol, ntime).
bbdata_b :
telescope B baseband data. Data must have matching index_map['freq'] as bbdata_a. index_map['freq']['centre'] must also be in MHz.
t_a : np.ndarray of int of shape (nfreq, npointing, nscan).
start index of the integration, relative to bbdata_a['time0']['ctime'] in units of 2.56 microsec, as a function of frequency channels, pointing index, and time in units of :window: (i.e. scan number).
window : np.ndarray of int of shape (npointing, nscan).
duration of the scan, in units of 2.56 microsec, as a function of pointing and time (i.e. scan number).
R : np.ndarray of float of shape (nfreq, npointing, nscan).
Fraction R <= 1 of the scan, that gets down-selected before integration. In other words, we integrate between t_a + window // 2 +- r * window / 2
DM : float
The DM with which the zeroth pointing of the data is de-smeared before the final gating. for continuum sources, set dispersion measure to 0.
n_pol : int
number of polarizations in data -- always 2.
pycalc_results :
pycalc11 Calc object, which is used to calculate geometric delays. Calc object should be initialized outside of this function and driver should have already been run (i.e. ci.run_driver())
index_A :
where telescope A corresponds to in pycalc_results.
index_B :
where telescope B corresponds to in pycalc_results.
max_lag : int (Optional)
maximum (absolute value) lag (in frames) for auto-correlation (useful for very long time series data). TODO: Outer layer of the code should check that this is less than 1/2 of the window size times R.
set this to 20 for a good balance between space efficiency and good noise statistics.
sample_rate :
rate at which data is sampled in microseconds
n_pol :
number of polarizations
complex conjugate convention :
should be a value of -1 if the baseband data is complex conjugated with respect to the sky, 1 otherwise
intra_channel_sign :
a sign to account for a reflection of frequencies about zero (e.g. in iq/baseband data). Should be -1 if frequencies within a channel are reflected about 0, 1 otherwise.
fast :
if False, use astropy addition (high precision but slow) and subtraction to evaluate the geodelays as a function of time.
If True, use float addition/subtraction; this should always be fine (picosecond precision) so long as pycalc_results.times[0] is within ~100s of all timestamps at which delays are to be evaluated.
weight :
array of shape (nfreq, nscan, npointings,ntime) that specifies what weighting to apply to the data **relative to the start time given by t_a**.
The shape of weight[:,jjscan,kkpointing] should be window[jjscan,kkpointing]
Outputs:
-------
cross_vis :
array of cross_vis correlation visibilities with shape (nfreq, npointing, npol, npol, nlag,nscan)
"""
if max_lag is None:
max_lag = config.CHANNELIZATION['nlags']
n_freq = len(bbdata_a.freq)
n_scan = np.size(t_a, axis=-1)
# SA: basing this off of how the data is arranged now, may want to change
n_pointings = bbdata_a["tiedbeam_baseband"].shape[1] // n_pol
n_freq_B=len(bbdata_b.freq)
assert n_freq_B==n_freq, f"There appear to be {n_freq} frequency channels in telescope A and {n_freq_B} frequency channels in telescope B. Please pass in these bbdata objects with frequency channels aligned (i.e. nth index along the frequency axis should correspond to the *same* channel in telescope A and B)"
vis_shape = (n_freq, n_pointings, n_pol, n_pol, 2 * max_lag + 1,n_scan)
cross_vis = np.zeros(vis_shape, dtype=bbdata_a['tiedbeam_baseband'].dtype)
f0 = bbdata_a.index_map["freq"]["centre"] #shape is (nfreq)
f0_b = bbdata_b.index_map["freq"]["centre"] #shape is (nfreq)
# convert all nans to 0s
bbdata_a['tiedbeam_baseband'][:]=np.nan_to_num(bbdata_a['tiedbeam_baseband'][:], nan=0, posinf=0, neginf=0)
bbdata_b['tiedbeam_baseband'][:]=np.nan_to_num(bbdata_b['tiedbeam_baseband'][:], nan=0, posinf=0, neginf=0)
for kkpointing in range(n_pointings):
for jjscan in range(n_scan):
wij=window[kkpointing,jjscan]
t_a_indices = t_a[:, kkpointing, jjscan] # array of length 1024
t0_a = bbdata_a["time0"]["ctime"][:]
# using telescope A times as reference time
t0_a_offset=bbdata_a["time0"]["ctime_offset"][:] + t_a_indices * (sample_rate*1e-6) # array of length 1024
#start time of reference frequency channel
ref_start_time = Time(
t0_a[0],
val2=t0_a_offset[0],
format="unix",
precision=9,
)
delta_ctime=t0_a-t0_a[0]
delta_ctime_offset=t0_a_offset-t0_a_offset[0] #difference between reference start time and nth freqeucny start time
if fast: #for the impatient
delta_t=delta_ctime+delta_ctime_offset
dt_vals=sample_rate * 1e-6 * np.arange(wij)+delta_t[:,np.newaxis] #nfreq,nframe
dt_vals0=(ref_start_time-pycalc_results.times[0]).sec #should always be <1s.
delays_flattened=pycalc_results.delays_dt(dt_vals0+dt_vals.flatten())
geodelays_flattened=delays_flattened[:,0,index_B,:]-delays_flattened[:,0,index_A,:] #units of seconds
geodelays = geodelays_flattened.reshape(dt_vals.shape)*1e6 #microseconds #nfreq,nframe
else: #for the paranoid
t0_a_offset=bbdata_a["time0"]["ctime_offset"][:] # array of length 1024
start_times = Time(
t0_a,
val2=t0_a_offset,
format="unix",
precision=9,
) #these are the pure ctime start times from the data
geodelays=np.zeros((1024,wij),dtype=float)
for i in range(n_freq):
# the times we want to query for each frequency is an array of length wij times ranging from (ctime start times + t_a, ctime start times + t_a +w_ij)
query_times = start_times[i] + sample_rate*1e-6 * un.s * (t_a_indices[i]+np.arange(wij))
delays=pycalc_results.interpolate_delays(query_times)
geodelays[i,:]=(delays[:,0,index_B,0]-delays[:,0,index_A,0])*1e6
# Fringestopping B -> A
scan_a, scan_b_fs = get_aligned_scans(
bbdata_a, bbdata_b, t_a_indices, wij, geodelays,
complex_conjugate_convention=complex_conjugate_convention, intra_channel_sign=intra_channel_sign, sample_rate=sample_rate,
npointing=kkpointing,n_pol=n_pol,zp=zp
)
#######################################################
######### intrachannel de-dispersion ##################
scan_a_cd = intrachannel_dedisp(scan_a, DM, f0=f0)
scan_b_fs_cd = intrachannel_dedisp(scan_b_fs, DM, f0=f0)
## weight the data
if type(weight)==np.ndarray:
scan_a_cd*=weight[:,np.newaxis,jjscan,kkpointing,:]
scan_b_fs_cd*=weight[:,np.newaxis,jjscan,kkpointing,:]
#######################################################
# Now that the pulses are centered at zero, calculate
### the start and stop time indices for on-signal ######
r_jjscan=R[:,kkpointing,jjscan] #np array of size (nfreq)
if len(np.unique(r_jjscan))==1:
#we can easily vectorize over frequency
r_ij=r_jjscan[0]
start = int((wij - wij*r_ij) // 2)
stop = int((wij + wij*r_ij) // 2)
if start==stop:
logging.warning("r_ij includes less than 1 frame: visibilities will be set to 0")
else:
#######################################################
########## cross-correlate the on-signal ##############
for pol_0 in range(n_pol):
for pol_1 in range(n_pol):
assert not np.isnan(np.min(scan_a_cd[:, pol_0, start:stop].flatten())), "Scan parameters have been poorly defined for telescope A. Please ensure there are no nans in the baseband data"
assert not np.isnan(np.min(scan_b_fs_cd[:, pol_0, start:stop].flatten())), "Scan parameters have been poorly defined for telescope B. Please ensure there are no nans in the baseband data"
_vis = basic_correlator(
scan_a_cd[:, pol_0, start:stop],
scan_b_fs_cd[:, pol_1, start:stop],
full_output = False)
cross_vis[:, kkpointing, pol_0, pol_1,:,jjscan] = np.concatenate(
(_vis[:,:max_lag+1], _vis[:,-max_lag:]),axis=-1)
else:
#loop over frequency channel
for freq in range(len(r_jjscan)):
r_ij=r_jjscan[freq]
start = int((wij - wij*r_ij) // 2)
stop = int((wij + wij*r_ij) // 2)
if start==stop:
logging.warning("r_ij includes less than 1 frame: visibilities for this channel will be set to 0")
else:
#######################################################
########## cross-correlate the on-signal ##############
for pol_0 in range(n_pol):
for pol_1 in range(n_pol):
assert not np.isnan(np.min(scan_a_cd[freq, pol_0, start:stop].flatten())), "Scan parameters have been poorly defined for telescope A. Please ensure there are no nans in the baseband data"
assert not np.isnan(np.min(scan_b_fs_cd[freq, pol_0, start:stop].flatten())), "Scan parameters have been poorly defined for telescope B. Please ensure there are no nans in the baseband data"
_vis = basic_correlator(
scan_a_cd[freq, pol_0, start:stop],
scan_b_fs_cd[freq, pol_1, start:stop],
full_output = False)
cross_vis[freq, kkpointing, pol_0, pol_1, :,jjscan] = np.concatenate(
(_vis[:max_lag+1], _vis[-max_lag:]),axis=-1)
return cross_vis
def getitem_zp1d(arr, start_want, stop_want):
    """Slice ``arr`` as if it were zero-padded out to +/- infinity.

    Acts like ``arr[start_want:stop_want]`` except that indices outside
    ``[0, arr.size)`` read as zeros, so the result always has length
    ``stop_want - start_want``.  No zero-padding is actually allocated.

    Parameters
    ----------
    arr : np.ndarray (1-d)
        The data to slice.
    start_want, stop_want : int
        Desired slice bounds; either may lie outside the array.
        ``start_want`` must not exceed ``stop_want``.

    Returns
    -------
    np.ndarray
        Freshly-allocated array of length ``stop_want - start_want`` with
        ``arr``'s dtype.  Unlike a plain slice, the result never aliases
        ``arr``.  (The previous implementation returned a *view* of ``arr``
        in the fully-in-bounds case but a fresh array in the other three
        cases; mutating the result was therefore safe in only some branches.
        All branches now return an independent copy.)

    TODO: make this work over a given axis of an arbitrary np.ndarray
    """
    width = stop_want - start_want
    assert width >= 0, "Negative scan length not allowed; check your w_ij"
    out = np.zeros(dtype=arr.dtype, shape=(width,))
    start_have = 0
    stop_have = arr.size
    # Requested span entirely outside the data: all zeros.
    if stop_want < start_have or stop_have < start_want:
        return out
    # Otherwise copy the overlap of [start_want, stop_want) with
    # [start_have, stop_have) into the right place; this single overlap
    # computation covers the "half in (early)", "half in (late)",
    # "want contains have", and "all in" cases uniformly.
    lo = max(start_want, start_have)
    hi = min(stop_want, stop_have)
    out[lo - start_want:hi - start_want] = arr[lo:hi]
    return out
def get_aligned_scans(
    bbdata_a: BBData,
    bbdata_b: BBData,
    t_a_index: np.ndarray,
    wij: int,
    tau: np.ndarray,
    complex_conjugate_convention: int = -1,
    intra_channel_sign: int = 1,
    sample_rate: float = 2.56,
    npointing: int = 0,
    n_pol: int = 2,
    zp: bool = True,
) -> Tuple[np.ndarray, np.ndarray]:
    """Return time-aligned, fringestopped scans from two telescopes.

    Scans of length ``wij`` frames are extracted from ``bbdata_a`` starting at
    ``t_a_index`` (per frequency channel), and from ``bbdata_b`` starting at
    the frame closest to the geometric delay ``tau``; the residual sub-frame
    delay is then removed from B via ``frac_samp_shift``.

    Inputs:
    -------
    bbdata_a : BBData
        Baseband data from telescope A (the time reference), with arbitrary
        frequency coverage.
    bbdata_b : BBData
        Baseband data from telescope B.  We apply a sub-frame phase rotation
        with fractional sample correction to data extracted out of bbdata_b.
    t_a_index : np.ndarray of int
        Start frame at telescope A, one entry per frequency channel.
    wij : int
        Window length in frames.  Should be an integer, and brownie points
        for a good FFT length.
    tau : np.ndarray (nfreq, n_frame) of dtype np.float
        A delay in microseconds to apply to bbdata_b, corresponding to the
        geometric delay.  The first index is the delay evaluated at t_ij_a.
    complex_conjugate_convention, intra_channel_sign : int
        Sign conventions forwarded to ``frac_samp_shift``.
    sample_rate : float
        Frame length in microseconds.
    npointing : int
        Index of the pointing to extract.
    n_pol : int
        Number of polarizations per pointing.
    zp : bool
        If True, out-of-range samples read as zeros (see ``getitem_zp1d``);
        if False, truncate and renormalize when data run out.

    Outputs:
    -------
    aligned_a : np.ndarray
        A dual-pol scan of shape (nfreq, n_pol, w_ij).
    aligned_b : np.ndarray
        A dual-pol scan of shape (nfreq, n_pol, w_ij), fringestopped onto
        A's time grid.

    Technical remarks on delay compensation:
    On floor vs round: it doesn't matter AS LONG AS you do a sub-sample
    rotation (or better yet, a frac samp correction)!  Suppose your total
    delay is 10.6 frames: round to 11 and frac-samp -0.4, or floor to 10 and
    frac-samp +0.6 -- the answer is the same either way.  The phase rotation
    exp(2j*pi*channel_center*tau) is exact only at channel center (the
    narrowband approximation); to avoid de-correlation near the channel
    edges we use a fractional sample correction in this code.
    """
    # Geometric delay at the scan start, per frequency channel (us).
    time_we_want_at_b = tau[:, 0]

    # Allocate the telescope-A output: (nfreq, n_pol, wij).
    a_shape = list(bbdata_a['tiedbeam_baseband'][:, npointing:npointing + n_pol, :].shape)
    a_shape[-1] = wij
    aligned_a = np.zeros(a_shape, dtype=bbdata_a['tiedbeam_baseband'].dtype)

    # TODO vectorize
    if len(np.unique(t_a_index)) == 1 and not zp:
        # All channels share one start frame: a single slice suffices.
        aligned_a[:, ...] = bbdata_a['tiedbeam_baseband'][
            :, npointing:npointing + n_pol, t_a_index[0]:t_a_index[0] + wij]
    elif len(np.unique(t_a_index)) > 1 and not zp:
        for i in range(len(t_a_index)):
            aligned_a[i, ...] = bbdata_a['tiedbeam_baseband'][
                i, npointing:npointing + n_pol, t_a_index[i]:t_a_index[i] + wij]
    elif zp:
        for i in range(len(t_a_index)):
            for j in range(n_pol):
                # Bugfix: previously indexed beam ``j`` directly, ignoring
                # ``npointing``; select the same beams as the non-zp branches.
                # NOTE(review): if beams are stored pointing-major this should
                # arguably be ``npointing * n_pol + j`` -- confirm the
                # tiedbeam_baseband axis layout against the caller.
                aligned_a[i, j, :] = getitem_zp1d(
                    bbdata_a['tiedbeam_baseband'][i, npointing + j, :],
                    t_a_index[i], t_a_index[i] + wij)

    # initialize aligned B array
    aligned_b = np.zeros_like(aligned_a)

    # Calculate the additional offset between A and B in the event that the
    # (sample points of) A and B are misaligned in absolute time by < 1 frame,
    # i.e. to correctly fringestop we must also account for a case such as:
    ## A: |----|----|----|----|----| ##
    ## B:   |----|----|----|----|----| ##
    t_a = Time(
        bbdata_a["time0"]["ctime"][:],
        val2=bbdata_a["time0"]["ctime_offset"][:],
        format="unix",
        precision=9)
    t_b = Time(
        bbdata_b["time0"]["ctime"][:],
        val2=bbdata_b["time0"]["ctime_offset"][:],
        format="unix",
        precision=9)
    delta_A_B = (t_b - t_a).to_value('sec')  # not that time consuming, even for 1024 frequencies

    # Integer number of frames of delay to apply at B, per channel.
    int_delay = np.array([int(np.round((timeb * 1e-6 - delta) / (sample_rate * 1e-6)))
                          for timeb, delta in zip(time_we_want_at_b, delta_A_B)])
    # frame number closest to start time
    start_index_we_want_at_b = t_a_index + int_delay

    if not zp:
        # Account for the case where t_a_index + geodelay < 0 (i.e. the signal
        # arrives at telescope B before the start of data acquisition).
        start_index_we_have_at_b = np.maximum(start_index_we_want_at_b, 0)
        # If index_we_want_at_b was negative, this is the amount of cushion
        # needed at the start of the output.
        pad_index_b = start_index_we_have_at_b - start_index_we_want_at_b
        # Frames actually available at B for each channel.  (The previous code
        # computed this twice -- once vectorized, once in a loop.)
        new_wij = np.minimum(wij - pad_index_b,
                             bbdata_b.ntime - start_index_we_have_at_b)
        # Renormalize so a continuum source keeps the flux expected for a full
        # w_ij window even when we run out of data: missing half the data
        # means multiplying by 2.
        # NOTE(review): new_wij == 0 would divide by zero here -- confirm
        # callers guarantee at least one overlapping frame.
        correction_factor = wij / new_wij
        # Bugfix: was ``correction_factor.any() > 2`` (bool > 2), which is
        # always False, so this warning could never fire.
        if (correction_factor > 2).any():
            logging.warning(
                "based on specified start time and scan length, over half "
                "the data is missing from telescope B.")
        for i in range(len(pad_index_b)):
            # Consistency fix: select the same pointing's beams as on the A
            # side instead of all beams via ``...`` (which only worked for a
            # single-pointing dataset).
            aligned_b[i, :, pad_index_b[i]:pad_index_b[i] + new_wij[i]] = \
                bbdata_b['tiedbeam_baseband'][
                    i, npointing:npointing + n_pol,
                    start_index_we_have_at_b[i]:start_index_we_have_at_b[i] + new_wij[i]
                ] * correction_factor[i]
        aligned_b = aligned_b[..., :wij]
    elif zp:
        for i in range(len(t_a_index)):
            for j in range(n_pol):
                # Same beam-selection fix as for aligned_a above.
                aligned_b[i, j, :] = getitem_zp1d(
                    bbdata_b['tiedbeam_baseband'][i, npointing + j, :],
                    start_index_we_want_at_b[i],
                    start_index_we_want_at_b[i] + wij)

    # Residual (sub-frame) delay still to be removed from B.
    time_we_have_at_b = delta_A_B + int_delay * sample_rate * 1e-6  # s
    # Sub-frame delay over the scan, in microseconds; vectorized form of the
    # previous per-channel list comprehension.
    sub_frame_tau = tau[:, :wij] - time_we_have_at_b[:, np.newaxis] * 1e6
    aligned_b = frac_samp_shift(
        aligned_b,
        f0=bbdata_b.index_map["freq"]["centre"][:],
        sub_frame_tau=sub_frame_tau,
        complex_conjugate_convention=complex_conjugate_convention,
        intra_channel_sign=intra_channel_sign,
        sample_rate=sample_rate)
    return aligned_a, aligned_b
#### faster option is with gpus
def frac_samp_shift(
    data: np.ndarray,
    f0: np.ndarray,
    sub_frame_tau: np.ndarray,
    complex_conjugate_convention: int = -1,
    intra_channel_sign: int = 1,
    sample_rate: float = 2.56,
    max_frames: int = MAX_FRAC_SAMP_LENGTH,
) -> np.ndarray:
    """Coherently shift each channel by a sub-frame delay in the Fourier domain.

    Applies a fractional phase shift of the form exp(2j*pi*f*sub_frame_tau)
    to every channel: a slope across the channel's baseband spectrum (using
    the median delay of the scan) plus a time-dependent rotation at the
    channel centre frequency.

    Inputs:
    -------
    data : np.ndarray of shape (nfreq, npol*npointing, ntime)
    f0 : np.ndarray of shape (nfreq)
        Frequency channel centres.
    sub_frame_tau : np.ndarray of shape (nfreq, ntime)
        Sub-frame delay in microseconds.
    complex_conjugate_convention : int
        Sign accounting for the fact that the data may be complex conjugated.
    intra_channel_sign : int
        Sign accounting for a reflection of frequencies about zero
        (e.g. in iq/baseband data).
    sample_rate : float
        Sampling interval of the data in microseconds.
    max_frames : int
        Longest scan transformed in one FFT; longer scans are halved
        recursively to bound memory use.

    Outputs:
    -------
    np.ndarray of shape (nfreq, npol*npointing, ntime)
    """
    ntime = data.shape[-1]
    if ntime > max_frames:
        # Scan too long for a single FFT: shift each half independently and
        # stitch the results back together along the time axis.
        half = ntime // 2
        first = frac_samp_shift(
            data=data[:, :, :half],
            f0=f0,
            sub_frame_tau=sub_frame_tau[:, :half],
            complex_conjugate_convention=complex_conjugate_convention,
            intra_channel_sign=intra_channel_sign,
            sample_rate=sample_rate,
            max_frames=max_frames,
        )
        second = frac_samp_shift(
            data=data[:, :, half:],
            f0=f0,
            sub_frame_tau=sub_frame_tau[:, half:],
            complex_conjugate_convention=complex_conjugate_convention,
            intra_channel_sign=intra_channel_sign,
            sample_rate=sample_rate,
            max_frames=max_frames,
        )
        return np.append(first, second, axis=-1)
    # Base case: glorified element-wise multiplication in the FFT domain.
    f = np.fft.fftfreq(ntime, sample_rate)
    # dphi/dfreq across the channel, one (median) delay per channel; shape (nfreq, ntime).
    slope = np.exp(
        intra_channel_sign * 2j * np.pi
        * f[np.newaxis, :] * np.median(sub_frame_tau, axis=-1)[:, np.newaxis]
    )
    # Time-dependent rotation at the channel centre (phi'); shape (nfreq, ntime).
    centre = np.exp(
        complex_conjugate_convention * 2j * np.pi
        * f0[:, np.newaxis] * sub_frame_tau
    )
    rotated = np.fft.ifft(np.fft.fft(data, axis=-1) * slope[:, np.newaxis], axis=-1)
    return rotated * centre[:, np.newaxis, :]
def intrachannel_dedisp(
    data: np.ndarray,
    DM: float,
    f0: np.ndarray,
    sample_rate: float = 2.56
) -> np.ndarray:
    """Intrachannel dedispersion: brings data to the centre of each channel.

    Applies only the last (quadratic-in-f) term of Eq. 5.17 of Lorimer and
    Kramer 2004; the terms independent of and linear in f are not applied
    here.

    Inputs:
    -------
    data : np.ndarray of shape (nfreq, npol*npointing, ntime)
    DM : float
        Dispersion measure.
    f0 : np.ndarray of shape (nfreq)
        Channel centre frequencies.
    sample_rate : float
        Sampling interval of the data in microseconds.

    Outputs:
    -------
    np.ndarray of shape (nfreq, npol*npointing, ntime).  When DM == 0 the
    input array is returned unchanged (same object).
    """
    if DM == 0:
        # Nothing to correct; skip the FFT round-trip entirely.
        return data
    nframes = data.shape[-1]
    freqs = np.fft.fftfreq(nframes, d=sample_rate)
    # Quadratic dispersion phase within the channel.
    phase = np.exp(
        2j * np.pi * K_DM * DM * 1e6
        * freqs[np.newaxis, :] ** 2
        / f0[:, np.newaxis] ** 2
        / (freqs[np.newaxis, :] + f0[:, np.newaxis])
    )
    spectrum = np.fft.fft(data, axis=-1)
    return np.fft.ifft(spectrum * phase[:, np.newaxis, :], axis=-1)
|
leungcalvinREPO_NAMEpyfx-publicPATH_START.@pyfx-public_extracted@pyfx-public-main@src@pyfx@core_correlation.py@.PATH_END.py
|
{
"filename": "test_my_cmd.py",
"repo_name": "lsst-uk/lasair-lsst",
"repo_path": "lasair-lsst_extracted/lasair-lsst-main/tests/unit/services/test_my_cmd.py",
"type": "Python"
}
|
import context
import my_cmd
import unittest
class MyCmdTest(unittest.TestCase):
    """Placeholder suite for the my_cmd service; test methods still to be written."""
if __name__ == '__main__':
    # xmlrunner emits JUnit-style XML reports (here into ./test-reports) that
    # CI systems can parse; imported inside the guard so the dependency is
    # only required when this file is run as a script.
    import xmlrunner
    runner = xmlrunner.XMLTestRunner(output='test-reports')
    unittest.main(testRunner=runner)
|
lsst-ukREPO_NAMElasair-lsstPATH_START.@lasair-lsst_extracted@lasair-lsst-main@tests@unit@services@test_my_cmd.py@.PATH_END.py
|
{
"filename": "testFortran.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/tools/swig/test/testFortran.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# System imports
import sys
import unittest
# Import NumPy
import numpy as np
# Ancient NumPy (0.x) raised TypeError for bad sequence arguments; every
# later release raises ValueError instead.
major, minor = (int(piece) for piece in np.__version__.split(".")[:2])
BadListError = TypeError if major == 0 else ValueError
import Fortran
######################################################################
class FortranTestCase(unittest.TestCase):
def __init__(self, methodName="runTests"):
unittest.TestCase.__init__(self, methodName)
self.typeStr = "double"
self.typeCode = "d"
# Test (type* IN_FARRAY2, int DIM1, int DIM2) typemap
def testSecondElementFortran(self):
"Test Fortran matrix initialized from reshaped NumPy fortranarray"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
second = Fortran.__dict__[self.typeStr + "SecondElement"]
matrix = np.asfortranarray(np.arange(9).reshape(3, 3),
self.typeCode)
self.assertEqual(second(matrix), 3)
def testSecondElementObject(self):
"Test Fortran matrix initialized from nested list fortranarray"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
second = Fortran.__dict__[self.typeStr + "SecondElement"]
matrix = np.asfortranarray([[0, 1, 2], [3, 4, 5], [6, 7, 8]], self.typeCode)
self.assertEqual(second(matrix), 3)
######################################################################
class scharTestCase(FortranTestCase):
    """Signed-char ("schar"/"b") variant of FortranTestCase."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeCode = "b"
        self.typeStr = "schar"
######################################################################
class ucharTestCase(FortranTestCase):
    """Unsigned-char ("uchar"/"B") variant of FortranTestCase."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeCode = "B"
        self.typeStr = "uchar"
######################################################################
class shortTestCase(FortranTestCase):
    """Short ("short"/"h") variant of FortranTestCase."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeCode = "h"
        self.typeStr = "short"
######################################################################
class ushortTestCase(FortranTestCase):
    """Unsigned-short ("ushort"/"H") variant of FortranTestCase."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeCode = "H"
        self.typeStr = "ushort"
######################################################################
class intTestCase(FortranTestCase):
    """Int ("int"/"i") variant of FortranTestCase."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeCode = "i"
        self.typeStr = "int"
######################################################################
class uintTestCase(FortranTestCase):
    """Unsigned-int ("uint"/"I") variant of FortranTestCase."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeCode = "I"
        self.typeStr = "uint"
######################################################################
class longTestCase(FortranTestCase):
    """Long ("long"/"l") variant of FortranTestCase."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeCode = "l"
        self.typeStr = "long"
######################################################################
class ulongTestCase(FortranTestCase):
    """Unsigned-long ("ulong"/"L") variant of FortranTestCase."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeCode = "L"
        self.typeStr = "ulong"
######################################################################
class longLongTestCase(FortranTestCase):
    """Long-long ("longLong"/"q") variant of FortranTestCase."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeCode = "q"
        self.typeStr = "longLong"
######################################################################
class ulongLongTestCase(FortranTestCase):
    """Unsigned-long-long ("ulongLong"/"Q") variant of FortranTestCase."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeCode = "Q"
        self.typeStr = "ulongLong"
######################################################################
class floatTestCase(FortranTestCase):
    """Single-precision ("float"/"f") variant of FortranTestCase."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeCode = "f"
        self.typeStr = "float"
######################################################################
class doubleTestCase(FortranTestCase):
    """Double-precision ("double"/"d") variant of FortranTestCase."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeCode = "d"
        self.typeStr = "double"
######################################################################
if __name__ == "__main__":

    # Build the test suite.  ``unittest.makeSuite`` was deprecated in
    # Python 3.11 and removed in 3.13; use the TestLoader API instead.
    loader = unittest.defaultTestLoader
    suite = unittest.TestSuite()
    for case in (scharTestCase, ucharTestCase, shortTestCase,
                 ushortTestCase, intTestCase, uintTestCase,
                 longTestCase, ulongTestCase, longLongTestCase,
                 ulongLongTestCase, floatTestCase, doubleTestCase):
        suite.addTest(loader.loadTestsFromTestCase(case))

    # Execute the test suite
    print("Testing 2D Functions of Module Matrix")
    print("NumPy version", np.__version__)
    print()
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    # Exit nonzero when any test errored or failed.
    sys.exit(bool(result.errors + result.failures))
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@tools@swig@test@testFortran.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "ACS-Community/ACS",
"repo_path": "ACS_extracted/ACS-master/LGPL/CommonSoftware/acspy/src/Acspy/Clients/__init__.py",
"type": "Python"
}
|
'''
The Clients subpackage exists for one purpose - providing implementations
of the MACI Client IDL interface for Python. Most developers will only care
about looking at the SimpleClient module as it contains the documentation for
PySimpleClient, but the BaseClient module can be quite interesting also. The
final thing to note is developers should also look over the documentation for
the ContainerServices module in the Servants subpackage as PySimpleClient is
derived from an extremely useful class there.
'''
# CVS "$Id$" keyword expansion: records the last committed revision of this file.
__revision__ = "$Id: __init__.py,v 1.4 2005/02/23 00:04:55 dfugate Exp $"
|
ACS-CommunityREPO_NAMEACSPATH_START.@ACS_extracted@ACS-master@LGPL@CommonSoftware@acspy@src@Acspy@Clients@__init__.py@.PATH_END.py
|
{
"filename": "interface.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/scipy/sparse/linalg/interface.py",
"type": "Python"
}
|
"""Abstract linear algebra library.
This module defines a class hierarchy that implements a kind of "lazy"
matrix representation, called the ``LinearOperator``. It can be used to do
linear algebra with extremely large sparse or structured matrices, without
representing those explicitly in memory. Such matrices can be added,
multiplied, transposed, etc.
As a motivating example, suppose you have a matrix where almost all of
the elements have the value one. The standard sparse matrix representation
skips the storage of zeros, but not ones. By contrast, a LinearOperator is
able to represent such matrices efficiently. First, we need a compact way to
represent an all-ones matrix::
>>> import numpy as np
>>> class Ones(LinearOperator):
... def __init__(self, shape):
... super(Ones, self).__init__(dtype=None, shape=shape)
... def _matvec(self, x):
... return np.repeat(x.sum(), self.shape[0])
Instances of this class emulate ``np.ones(shape)``, but using a constant
amount of storage, independent of ``shape``. The ``_matvec`` method specifies
how this linear operator multiplies with (operates on) a vector. We can now
add this operator to a sparse matrix that stores only offsets from one::
>>> from scipy.sparse import csr_matrix
>>> offsets = csr_matrix([[1, 0, 2], [0, -1, 0], [0, 0, 3]])
>>> A = aslinearoperator(offsets) + Ones(offsets.shape)
>>> A.dot([1, 2, 3])
array([13, 4, 15])
The result is the same as that given by its dense, explicitly-stored
counterpart::
>>> (np.ones(A.shape, A.dtype) + offsets.toarray()).dot([1, 2, 3])
array([13, 4, 15])
Several algorithms in the ``scipy.sparse`` library are able to operate on
``LinearOperator`` instances.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.sparse import isspmatrix
from scipy.sparse.sputils import isshape, isintlike
__all__ = ['LinearOperator', 'aslinearoperator']
class LinearOperator(object):
"""Common interface for performing matrix vector products
Many iterative methods (e.g. cg, gmres) do not need to know the
individual entries of a matrix to solve a linear system A*x=b.
Such solvers only require the computation of matrix vector
products, A*v where v is a dense vector. This class serves as
an abstract interface between iterative solvers and matrix-like
objects.
To construct a concrete LinearOperator, either pass appropriate
callables to the constructor of this class, or subclass it.
A subclass must implement either one of the methods ``_matvec``
and ``_matmat``, and the attributes/properties ``shape`` (pair of
integers) and ``dtype`` (may be None). It may call the ``__init__``
on this class to have these attributes validated. Implementing
``_matvec`` automatically implements ``_matmat`` (using a naive
algorithm) and vice-versa.
Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint``
to implement the Hermitian adjoint (conjugate transpose). As with
``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or
``_adjoint`` implements the other automatically. Implementing
``_adjoint`` is preferable; ``_rmatvec`` is mostly there for
backwards compatibility.
Parameters
----------
shape : tuple
Matrix dimensions (M,N).
matvec : callable f(v)
Returns returns A * v.
rmatvec : callable f(v)
Returns A^H * v, where A^H is the conjugate transpose of A.
matmat : callable f(V)
Returns A * V, where V is a dense matrix with dimensions (N,K).
dtype : dtype
Data type of the matrix.
Attributes
----------
args : tuple
For linear operators describing products etc. of other linear
operators, the operands of the binary operation.
See Also
--------
aslinearoperator : Construct LinearOperators
Notes
-----
The user-defined matvec() function must properly handle the case
where v has shape (N,) as well as the (N,1) case. The shape of
the return type is handled internally by LinearOperator.
LinearOperator instances can also be multiplied, added with each
other and exponentiated, all lazily: the result of these operations
is always a new, composite LinearOperator, that defers linear
operations to the original operators and combines the results.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse.linalg import LinearOperator
>>> def mv(v):
... return np.array([2*v[0], 3*v[1]])
...
>>> A = LinearOperator((2,2), matvec=mv)
>>> A
<2x2 _CustomLinearOperator with dtype=float64>
>>> A.matvec(np.ones(2))
array([ 2., 3.])
>>> A * np.ones(2)
array([ 2., 3.])
"""
def __new__(cls, *args, **kwargs):
if cls is LinearOperator:
# Operate as _CustomLinearOperator factory.
return _CustomLinearOperator(*args, **kwargs)
else:
obj = super(LinearOperator, cls).__new__(cls)
if (type(obj)._matvec == LinearOperator._matvec
and type(obj)._matmat == LinearOperator._matmat):
raise TypeError("LinearOperator subclass should implement"
" at least one of _matvec and _matmat.")
return obj
def __init__(self, dtype, shape):
"""Initialize this LinearOperator.
To be called by subclasses. ``dtype`` may be None; ``shape`` should
be convertible to a length-2 tuple.
"""
if dtype is not None:
dtype = np.dtype(dtype)
shape = tuple(shape)
if not isshape(shape):
raise ValueError("invalid shape %r (must be 2-d)" % (shape,))
self.dtype = dtype
self.shape = shape
def _init_dtype(self):
"""Called from subclasses at the end of the __init__ routine.
"""
if self.dtype is None:
v = np.zeros(self.shape[-1])
self.dtype = np.asarray(self.matvec(v)).dtype
def _matmat(self, X):
"""Default matrix-matrix multiplication handler.
Falls back on the user-defined _matvec method, so defining that will
define matrix multiplication (though in a very suboptimal way).
"""
return np.hstack([self.matvec(col.reshape(-1,1)) for col in X.T])
def _matvec(self, x):
"""Default matrix-vector multiplication handler.
If self is a linear operator of shape (M, N), then this method will
be called on a shape (N,) or (N, 1) ndarray, and should return a
shape (M,) or (M, 1) ndarray.
This default implementation falls back on _matmat, so defining that
will define matrix-vector multiplication as well.
"""
return self.matmat(x.reshape(-1, 1))
def matvec(self, x):
"""Matrix-vector multiplication.
Performs the operation y=A*x where A is an MxN linear
operator and x is a column vector or 1-d array.
Parameters
----------
x : {matrix, ndarray}
An array with shape (N,) or (N,1).
Returns
-------
y : {matrix, ndarray}
A matrix or ndarray with shape (M,) or (M,1) depending
on the type and shape of the x argument.
Notes
-----
This matvec wraps the user-specified matvec routine or overridden
_matvec method to ensure that y has the correct shape and type.
"""
x = np.asanyarray(x)
M,N = self.shape
if x.shape != (N,) and x.shape != (N,1):
raise ValueError('dimension mismatch')
y = self._matvec(x)
if isinstance(x, np.matrix):
y = np.asmatrix(y)
else:
y = np.asarray(y)
if x.ndim == 1:
y = y.reshape(M)
elif x.ndim == 2:
y = y.reshape(M,1)
else:
raise ValueError('invalid shape returned by user-defined matvec()')
return y
def rmatvec(self, x):
"""Adjoint matrix-vector multiplication.
Performs the operation y = A^H * x where A is an MxN linear
operator and x is a column vector or 1-d array.
Parameters
----------
x : {matrix, ndarray}
An array with shape (M,) or (M,1).
Returns
-------
y : {matrix, ndarray}
A matrix or ndarray with shape (N,) or (N,1) depending
on the type and shape of the x argument.
Notes
-----
This rmatvec wraps the user-specified rmatvec routine or overridden
_rmatvec method to ensure that y has the correct shape and type.
"""
x = np.asanyarray(x)
M,N = self.shape
if x.shape != (M,) and x.shape != (M,1):
raise ValueError('dimension mismatch')
y = self._rmatvec(x)
if isinstance(x, np.matrix):
y = np.asmatrix(y)
else:
y = np.asarray(y)
if x.ndim == 1:
y = y.reshape(N)
elif x.ndim == 2:
y = y.reshape(N,1)
else:
raise ValueError('invalid shape returned by user-defined rmatvec()')
return y
def _rmatvec(self, x):
"""Default implementation of _rmatvec; defers to adjoint."""
if type(self)._adjoint == LinearOperator._adjoint:
# _adjoint not overridden, prevent infinite recursion
raise NotImplementedError
else:
return self.H.matvec(x)
def matmat(self, X):
"""Matrix-matrix multiplication.
Performs the operation y=A*X where A is an MxN linear
operator and X dense N*K matrix or ndarray.
Parameters
----------
X : {matrix, ndarray}
An array with shape (N,K).
Returns
-------
Y : {matrix, ndarray}
A matrix or ndarray with shape (M,K) depending on
the type of the X argument.
Notes
-----
This matmat wraps any user-specified matmat routine or overridden
_matmat method to ensure that y has the correct type.
"""
X = np.asanyarray(X)
if X.ndim != 2:
raise ValueError('expected 2-d ndarray or matrix, not %d-d'
% X.ndim)
M,N = self.shape
if X.shape[0] != N:
raise ValueError('dimension mismatch: %r, %r'
% (self.shape, X.shape))
Y = self._matmat(X)
if isinstance(Y, np.matrix):
Y = np.asmatrix(Y)
return Y
def __call__(self, x):
return self*x
def __mul__(self, x):
return self.dot(x)
def dot(self, x):
"""Matrix-matrix or matrix-vector multiplication.
Parameters
----------
x : array_like
1-d or 2-d array, representing a vector or matrix.
Returns
-------
Ax : array
1-d or 2-d array (depending on the shape of x) that represents
the result of applying this linear operator on x.
"""
if isinstance(x, LinearOperator):
return _ProductLinearOperator(self, x)
elif np.isscalar(x):
return _ScaledLinearOperator(self, x)
else:
x = np.asarray(x)
if x.ndim == 1 or x.ndim == 2 and x.shape[1] == 1:
return self.matvec(x)
elif x.ndim == 2:
return self.matmat(x)
else:
raise ValueError('expected 1-d or 2-d array or matrix, got %r'
% x)
    def __matmul__(self, other):
        # Support the @ operator (PEP 465); scalar @ is disallowed by
        # the protocol, so direct users to '*' instead.
        if np.isscalar(other):
            raise ValueError("Scalar operands are not allowed, "
                             "use '*' instead")
        return self.__mul__(other)
    def __rmatmul__(self, other):
        # Right-hand @ support, mirroring __matmul__.
        if np.isscalar(other):
            raise ValueError("Scalar operands are not allowed, "
                             "use '*' instead")
        return self.__rmul__(other)
    def __rmul__(self, x):
        # Only scalar * operator is supported from the left; anything
        # else falls through to the other operand's protocol.
        if np.isscalar(x):
            return _ScaledLinearOperator(self, x)
        else:
            return NotImplemented
    def __pow__(self, p):
        # A ** p lazily applies the (square) operator p times.
        if np.isscalar(p):
            return _PowerLinearOperator(self, p)
        else:
            return NotImplemented
    def __add__(self, x):
        # Lazy operator sum; only operator + operator is defined.
        if isinstance(x, LinearOperator):
            return _SumLinearOperator(self, x)
        else:
            return NotImplemented
    def __neg__(self):
        # Negation is scaling by -1.
        return _ScaledLinearOperator(self, -1)
    def __sub__(self, x):
        # Subtraction is addition of the negation.
        return self.__add__(-x)
def __repr__(self):
M,N = self.shape
if self.dtype is None:
dt = 'unspecified dtype'
else:
dt = 'dtype=' + str(self.dtype)
return '<%dx%d %s with %s>' % (M, N, self.__class__.__name__, dt)
    def adjoint(self):
        """Hermitian adjoint.
        Returns the Hermitian adjoint of self, aka the Hermitian
        conjugate or Hermitian transpose. For a complex matrix, the
        Hermitian adjoint is equal to the conjugate transpose.
        Can be abbreviated self.H instead of self.adjoint().
        Returns
        -------
        A_H : LinearOperator
            Hermitian adjoint of self.
        """
        return self._adjoint()
    # Short alias: A.H is equivalent to A.adjoint().
    H = property(adjoint)
    def transpose(self):
        """Transpose this linear operator.
        Returns a LinearOperator that represents the transpose of this one.
        Can be abbreviated self.T instead of self.transpose().
        """
        return self._transpose()
    # Short alias: A.T is equivalent to A.transpose().
    T = property(transpose)
    def _adjoint(self):
        """Default implementation of _adjoint; defers to rmatvec.

        Builds the adjoint by swapping the shape and the roles of
        matvec/rmatvec in a _CustomLinearOperator wrapper.
        """
        shape = (self.shape[1], self.shape[0])
        return _CustomLinearOperator(shape, matvec=self.rmatvec,
                                     rmatvec=self.matvec,
                                     dtype=self.dtype)
class _CustomLinearOperator(LinearOperator):
    """Linear operator defined in terms of user-specified operations.

    Parameters mirror the public constructor: a ``shape`` plus callables
    for the forward product (``matvec``), the adjoint product
    (``rmatvec``), and the matrix-matrix product (``matmat``).
    """

    def __init__(self, shape, matvec, rmatvec=None, matmat=None, dtype=None):
        super(_CustomLinearOperator, self).__init__(dtype, shape)

        self.args = ()

        self.__matvec_impl = matvec
        self.__rmatvec_impl = rmatvec
        self.__matmat_impl = matmat

        # Probe the matvec callable to infer dtype when not given.
        self._init_dtype()

    def _matmat(self, X):
        # Use the user-supplied matmat when available; otherwise fall
        # back to the base class's column-by-column implementation.
        if self.__matmat_impl is not None:
            return self.__matmat_impl(X)
        else:
            return super(_CustomLinearOperator, self)._matmat(X)

    def _matvec(self, x):
        return self.__matvec_impl(x)

    def _rmatvec(self, x):
        func = self.__rmatvec_impl
        if func is None:
            raise NotImplementedError("rmatvec is not defined")
        # Cleanup: call the callable captured above instead of reading
        # the attribute a second time (same object, redundant lookup).
        return func(x)

    def _adjoint(self):
        # The adjoint swaps the shape and the matvec/rmatvec roles.
        return _CustomLinearOperator(shape=(self.shape[1], self.shape[0]),
                                     matvec=self.__rmatvec_impl,
                                     rmatvec=self.__matvec_impl,
                                     dtype=self.dtype)
def _get_dtype(operators, dtypes=None):
if dtypes is None:
dtypes = []
for obj in operators:
if obj is not None and hasattr(obj, 'dtype'):
dtypes.append(obj.dtype)
return np.find_common_type(dtypes, [])
class _SumLinearOperator(LinearOperator):
    """Lazy sum of two linear operators with identical shapes."""

    def __init__(self, A, B):
        both_operators = (isinstance(A, LinearOperator)
                          and isinstance(B, LinearOperator))
        if not both_operators:
            raise ValueError('both operands have to be a LinearOperator')
        if A.shape != B.shape:
            raise ValueError('cannot add %r and %r: shape mismatch'
                             % (A, B))
        self.args = (A, B)
        super(_SumLinearOperator, self).__init__(_get_dtype([A, B]), A.shape)

    def _matvec(self, x):
        left, right = self.args
        return left.matvec(x) + right.matvec(x)

    def _rmatvec(self, x):
        left, right = self.args
        return left.rmatvec(x) + right.rmatvec(x)

    def _matmat(self, x):
        left, right = self.args
        return left.matmat(x) + right.matmat(x)

    def _adjoint(self):
        # (A + B)^H == A^H + B^H
        left, right = self.args
        return left.H + right.H
class _ProductLinearOperator(LinearOperator):
    """Lazy composition A*B of two conforming linear operators."""

    def __init__(self, A, B):
        if not (isinstance(A, LinearOperator)
                and isinstance(B, LinearOperator)):
            raise ValueError('both operands have to be a LinearOperator')
        if A.shape[1] != B.shape[0]:
            raise ValueError('cannot multiply %r and %r: shape mismatch'
                             % (A, B))
        super(_ProductLinearOperator, self).__init__(_get_dtype([A, B]),
                                                     (A.shape[0], B.shape[1]))
        self.args = (A, B)

    def _matvec(self, x):
        # (A B) x = A (B x)
        outer, inner = self.args
        return outer.matvec(inner.matvec(x))

    def _rmatvec(self, x):
        # (A B)^H x = B^H (A^H x)
        outer, inner = self.args
        return inner.rmatvec(outer.rmatvec(x))

    def _matmat(self, x):
        outer, inner = self.args
        return outer.matmat(inner.matmat(x))

    def _adjoint(self):
        A, B = self.args
        return B.H * A.H
class _ScaledLinearOperator(LinearOperator):
    """Lazy scalar multiple ``alpha * A`` of a linear operator."""

    def __init__(self, A, alpha):
        if not isinstance(A, LinearOperator):
            raise ValueError('LinearOperator expected as A')
        if not np.isscalar(alpha):
            raise ValueError('scalar expected as alpha')
        dtype = _get_dtype([A], [type(alpha)])
        super(_ScaledLinearOperator, self).__init__(dtype, A.shape)
        self.args = (A, alpha)

    def _matvec(self, x):
        operator, scale = self.args
        return scale * operator.matvec(x)

    def _rmatvec(self, x):
        # (alpha A)^H x = conj(alpha) A^H x
        operator, scale = self.args
        return np.conj(scale) * operator.rmatvec(x)

    def _matmat(self, x):
        operator, scale = self.args
        return scale * operator.matmat(x)

    def _adjoint(self):
        A, alpha = self.args
        return A.H * alpha
class _PowerLinearOperator(LinearOperator):
    """Lazy non-negative integer power ``A**p`` of a square operator."""

    def __init__(self, A, p):
        if not isinstance(A, LinearOperator):
            raise ValueError('LinearOperator expected as A')
        if A.shape[0] != A.shape[1]:
            raise ValueError('square LinearOperator expected, got %r' % A)
        if not isintlike(p) or p < 0:
            raise ValueError('non-negative integer expected as p')
        super(_PowerLinearOperator, self).__init__(_get_dtype([A]), A.shape)
        self.args = (A, p)

    def _power(self, fun, x):
        # Apply `fun` p times, starting from a fresh copy of x so the
        # caller's array is never modified in place.
        result = np.array(x, copy=True)
        for _ in range(self.args[1]):
            result = fun(result)
        return result

    def _matvec(self, x):
        return self._power(self.args[0].matvec, x)

    def _rmatvec(self, x):
        return self._power(self.args[0].rmatvec, x)

    def _matmat(self, x):
        return self._power(self.args[0].matmat, x)

    def _adjoint(self):
        base, exponent = self.args
        return base.H ** exponent
class MatrixLinearOperator(LinearOperator):
    """LinearOperator wrapper around a concrete 2-d array or matrix.

    Works with any object exposing ``.dtype``, ``.shape`` and ``.dot``
    (dense arrays, np.matrix, sparse matrices).
    """
    def __init__(self, A):
        super(MatrixLinearOperator, self).__init__(A.dtype, A.shape)
        self.A = A
        # Lazily-built, cached adjoint operator (see _adjoint).
        self.__adj = None
        self.args = (A,)
    def _matmat(self, X):
        # Delegate directly to the wrapped matrix's dot product.
        return self.A.dot(X)
    def _adjoint(self):
        if self.__adj is None:
            self.__adj = _AdjointMatrixOperator(self)
        return self.__adj
class _AdjointMatrixOperator(MatrixLinearOperator):
    """Adjoint of a MatrixLinearOperator, holding its conjugate transpose."""
    def __init__(self, adjoint):
        # NOTE: deliberately does NOT call super().__init__; the
        # conjugate transpose is stored directly and shape is set by
        # hand, while dtype is exposed via the property below.
        self.A = adjoint.A.T.conj()
        self.__adjoint = adjoint
        self.args = (adjoint,)
        self.shape = adjoint.shape[1], adjoint.shape[0]
    @property
    def dtype(self):
        # Mirror the parent operator's dtype rather than storing a copy.
        return self.__adjoint.dtype
    def _adjoint(self):
        # The adjoint of the adjoint is the original operator.
        return self.__adjoint
class IdentityOperator(LinearOperator):
    """Operator that returns its input unchanged for every product."""
    def __init__(self, shape, dtype=None):
        super(IdentityOperator, self).__init__(dtype, shape)
    def _matvec(self, x):
        return x
    def _rmatvec(self, x):
        return x
    def _matmat(self, x):
        return x
    def _adjoint(self):
        # The identity is self-adjoint.
        return self
def aslinearoperator(A):
    """Return A as a LinearOperator.
    'A' may be any of the following types:
     - ndarray
     - matrix
     - sparse matrix (e.g. csr_matrix, lil_matrix, etc.)
     - LinearOperator
     - An object with .shape and .matvec attributes
    See the LinearOperator documentation for additional information.
    Examples
    --------
    >>> from scipy.sparse.linalg import aslinearoperator
    >>> M = np.array([[1,2,3],[4,5,6]], dtype=np.int32)
    >>> aslinearoperator(M)
    <2x3 MatrixLinearOperator with dtype=int32>
    """
    # Already an operator: pass through unchanged.
    if isinstance(A, LinearOperator):
        return A

    # Dense arrays/matrices: wrap as a MatrixLinearOperator.
    if isinstance(A, (np.ndarray, np.matrix)):
        if A.ndim > 2:
            raise ValueError('array must have ndim <= 2')
        return MatrixLinearOperator(np.atleast_2d(np.asarray(A)))

    if isspmatrix(A):
        return MatrixLinearOperator(A)

    # Duck-typed objects: anything exposing .shape and .matvec, with
    # optional .rmatvec and .dtype.
    if hasattr(A, 'shape') and hasattr(A, 'matvec'):
        rmatvec = getattr(A, 'rmatvec', None)
        dtype = getattr(A, 'dtype', None)
        return LinearOperator(A.shape, A.matvec,
                              rmatvec=rmatvec, dtype=dtype)

    raise TypeError('type not understood')
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@scipy@sparse@linalg@interface.py@.PATH_END.py
|
{
"filename": "_colorscale.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/funnel/marker/_colorscale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator):
    """Validator for the ``funnel.marker.colorscale`` attribute."""

    def __init__(self, plotly_name="colorscale", parent_name="funnel.marker", **kwargs):
        # Pull overridable defaults out of kwargs before forwarding the
        # remainder to the generic colorscale validator.
        edit_type = kwargs.pop("edit_type", "calc")
        implied_edits = kwargs.pop("implied_edits", {"autocolorscale": False})
        role = kwargs.pop("role", "style")
        super(ColorscaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@funnel@marker@_colorscale.py@.PATH_END.py
|
{
"filename": "definitions.py",
"repo_name": "hgrecco/pint",
"repo_path": "pint_extracted/pint-master/pint/definitions.py",
"type": "Python"
}
|
"""
pint.definitions
~~~~~~~~~~~~~~~~
Kept for backwards compatibility
:copyright: 2022 by Pint Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import annotations
import flexparser as fp
from . import errors
from .delegates import ParserConfig, txt_defparser
class Definition:
    """This is kept for backwards compatibility"""

    @classmethod
    def from_string(cls, input_string: str, non_int_type: type = float) -> Definition:
        """Parse a string into a definition object.

        Parameters
        ----------
        input_string
            Single line string.
        non_int_type
            Numerical type used for non integer values.

        Returns
        -------
        Definition or None
            The first parsed definition, or ``None`` when the string
            yields nothing but stream/file marker nodes.

        Raises
        ------
        DefinitionSyntaxError
            If a syntax error was found.
        """
        cfg = ParserConfig(non_int_type)
        parser = txt_defparser.DefParser(cfg, None)
        pp = parser.parse_string(input_string)
        for definition in parser.iter_parsed_project(pp):
            if isinstance(definition, Exception):
                raise errors.DefinitionSyntaxError(str(definition))
            # Skip parser bookkeeping nodes. Fix: the original tuple
            # listed fp.BOS twice; the duplicate is removed.
            if not isinstance(definition, (fp.BOS, fp.BOF)):
                return definition
        # Implicitly returns None when only marker nodes were produced;
        # kept for backwards compatibility with the historical behavior.
|
hgreccoREPO_NAMEpintPATH_START.@pint_extracted@pint-master@pint@definitions.py@.PATH_END.py
|
{
"filename": "_fillcolor.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattermap/_fillcolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FillcolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``scattermap.fillcolor`` attribute."""

    def __init__(self, plotly_name="fillcolor", parent_name="scattermap", **kwargs):
        # Resolve the overridable default before forwarding kwargs.
        edit_type = kwargs.pop("edit_type", "calc")
        super(FillcolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattermap@_fillcolor.py@.PATH_END.py
|
{
"filename": "2396-rnn.md",
"repo_name": "google/flax",
"repo_path": "flax_extracted/flax-main/docs_nnx/flip/2396-rnn.md",
"type": "Markdown"
}
|
# RNN Flip
- Start Date: 2022-08-18
- FLIP PR: [#2604](https://github.com/google/flax/pull/2604)
- FLIP Issue: [#2396](https://github.com/google/flax/issues/2396)
- Authors: Jasmijn Bastings (@bastings) and Cristian Garcia (@cgarciae)
## Summary
This FLIP adds support for higher-level recurrent layers (RNN, GRU, LSTM) that can help users process input sequences using the recurrent cells already available in Flax.
## Motivation
Implementing well known recurrent architectures is tricky and prone to user errors, even a simple LSTM layers involves the manual creation and handling of the carry/memory and correctly setting up `nn.scan`:
```python
@nn.compact
def __call__(self, x):
LSTM = nn.scan(
nn.LSTMCell, variable_broadcast="params", split_rngs={"params": False}
)
carry = LSTM.initialize_carry(
jax.random.key(0), batch_dims=x.shape[:1], size=self.hidden_size
)
carry, x = LSTM()(carry, x)
return x
```
Slightly more complicated cases involving padding, like in the [seq2seq](https://github.com/google/flax/blob/main/examples/seq2seq/models.py) example, require even more work but could potentially be simplified to a couple of lines with the right abstractions. We propose providing users with clean, correct, and efficient abstractions to use recurrent cells.
## Requirements
* **Masking**: We need to support a batch of sequences that contain padding at the end of each sequence.
* We do not intend to support non-contiguous padding, i.e. padding that is not at the end of a sequence, for performance reasons, except in the case of packing (see below).
* **Bidirectionality**: The ability to process a sequence in both the forward and reverse directions, respecting padding (i.e., the reverse direction should start with the actual inputs, not with padding values).
* **Performance**: The proposed classes should be benchmarked to provide the best performance in terms of step time and/or memory use.
* **Recurrent Dropout**: Support for recurrent dropout in cells (e.g. dropout on the state of the cell).
## Implementation
### High-level structure
We propose to have these 3 levels of abstraction:
* **Cells (unchanged)**: all RNNCellBase subclasses such as LSTMCell and GRUCell, these implement the stepwise logic. These already exist in Flax today.
* **Layers (new)**: a class (RNN) that takes a cell and scans over a sequence respecting possible padding values and optionally also allows packed sequences.
* **Bidirectional (new)**: a single class that takes a forward and a backward RNN instance and correctly processes the input sequence in both directions and merges the results.
### Example of proposed API
We start with a code example of what you could do with the proposed API, and then we discuss the API in detail below.
```python
cell = nn.LSTMCell()
# Encodes a batch of input sequences.
carry, outputs = nn.RNN(cell, cell_size)(inputs, seq_lengths)
```
A Bidirectional layer with a LSTM RNNs for the forward and backward directions respectively would look like this:
```python
forward_rnn = nn.RNN(nn.LSTMCell(), cell_size=32)
backward_rnn = nn.RNN(nn.LSTMCell(), cell_size=32)
# Bidirectional combinator.
bi_rnn = nn.Bidirectional(forward_rnn, backward_rnn)
# Encodes a batch of input sequences in both directions.
carry, outputs = bi_rnn(inputs, seq_lengths)
```
Next we will discuss `RNN`, `Bidirectional`, and proposed changes to `RNNCellBase`.
### RNNBase
The `RNNBase` class serves as a base class for the `RNN` class, it specifies
the API that all RNN layers should implement to be compatible with the `Bidirectional`.
`RNNBase` contains the `__call__` and `flip_sequences` methods:
```python
class RNNBase(Protocol):
def __call__(
self,
inputs: jax.Array,
*,
initial_carry: Optional[Carry] = None,
init_key: Optional[random.KeyArray] = None,
seq_lengths: Optional[Array] = None,
return_carry: Optional[bool] = None,
time_major: Optional[bool] = None,
reverse: Optional[bool] = None,
keep_order: Optional[bool] = None,
) -> Union[Output, Tuple[Carry, Output]]:
...
```
Where:
* `inputs`: the input sequence.
* `initial_carry`: the initial carry, if not provided it will be initialized
using the cell's :meth:`RNNCellBase.initialize_carry` method.
* `init_key`: a PRNG key used to initialize the carry, if not provided
``jax.random.key(0)`` will be used. Most cells will ignore this
argument.
* `seq_lengths`: an optional integer array of shape ``(*batch)`` indicating
the length of each sequence, elements whose index in the time dimension
is greater than the corresponding length will be considered padding and
will be ignored.
* `return_carry`: if ``return_carry=False`` (default) only the output sequence is returned,
else it will return a tuple of the final carry and the output sequence.
* `time_major`: if ``time_major=False`` (default) it will expect inputs with shape
``(*batch, time, *features)``, else it will expect inputs with shape ``(time, *batch, *features)``.
* `reverse`: if ``reverse=False`` (default) the sequence is
processed from left to right and returned in the original order, else it will be processed
from right to left, and returned in reverse order. If ``seq_lengths`` is passed,
padding will always remain at the end of the sequence.
* `keep_order`: if ``keep_order=True``, when ``reverse=True``
the output will be reversed back to the original order after processing, this is
useful to align sequences in bidirectional RNNs. If ``keep_order=False`` (default),
the output will remain in the order specified by ``reverse``.
* `Returns`: if ``return_carry=False`` (default) only the output sequence is returned,
else it will return a tuple of the final carry and the output sequence.
### RNN
The `RNN` module inherits from `RNNBase`; its main function is to apply an `RNNCellBase` instance over a batch of input sequences. It can be used with any type of cell (e.g., `GRUCell`, `LSTMCell`, etc). It accepts the following parameters:
```python
class RNN(RNNBase):
cell: RNNCellBase,
cell_size: int | Tuple[int, ...]
time_axis: int = -2,
variable_axes = FrozenDict(),
variable_broadcast: CollectionFilter = 'params'
variable_carry: CollectionFilter = False
split_rngs = FrozenDict({'params': False})
# implement RNNBase
...
```
Attributes like `variable_axes`, `variable_broadcast`, `variable_carry`, and `split_rngs` are directly passed to `nn.scan`, their default values are set such that common cells like `LSTMCell` and `GRUCell` work out of the box.
### Masking
`seq_lengths` is defined as an integer array of shape `(*batch,)` indicating the length of each sequence.
<details><summary>Discussion</summary>
There are various masking formats found in other frameworks, here are some of the most popular ones:
* **Binary masking**: specifies per-sample and timestep whether that data point should be included or not in the computation; it can be non-contiguous (e.g., [1, 1, 0, 1]). This is used by Keras.
* **Sequence length masking**: specifies per-sample the number of non-padding examples contained in the sequence, any padding contained in the sequence should be stacked at the end. This is used by FlaxFormer.
* **Segmentation Mask**: specifies row and timestep to which sample the data point belongs to, this format allows more than one sample per row which potentially reduces the total amount of padding needed (e.g. [1, 1, 1, 2, 2, 0, 0]). Pytorch uses this representation (see [pack_padded_sequence](https://pytorch.org/docs/stable/generated/torch.nn.utils.rnn.pack_padded_sequence.html)).
While sequence packing (see [LM1B example](https://github.com/google/flax/blob/main/examples/lm1b/input_pipeline.py#L90-L92)) is more powerful, its implementation is more complex and it is not clear whether it is worth the effort. The simplest format is sequence length masking, which is the one we propose to use.
</details>
### Bidirectional
Bidirectional processing can be achieved via a Module that accepts a `forward_rnn` Module and a `backward_rnn` Module, both of which should be `RNN` instances, in order to process the input sequence in both directions. Here we present some pseudo code of the implementation:
```python
def __call__(self, inputs, seq_lengths):
# Encode in the forward direction.
carry_forward, outputs_forward = self.forward_rnn(
inputs, seq_lengths=seq_lengths,
return_carry=True, reverse=False,
)
# Encode in the reverse order.
carry_backward, outputs_backward = self.backward_rnn(
inputs, seq_lengths=seq_lengths,
return_carry=True, reverse=True, # process in reverse order
keep_order=True, # but return the sequence in the original order
)
# Merge both sequences.
outputs = jax.tree.map(self.merge_fn, outputs_forward, outputs_backward)
return (carry_forward, carry_backward), outputs
```
Here `merge_fn` a function that takes both outputs and fuses them (`concat` by default). As showcased in the beginning of this document, usage would look like this:
```python
forward_rnn = nn.RNN(nn.LSTMCell(), cell_size=32)
backward_rnn = nn.RNN(nn.GRUCell(), cell_size=32)
# Bidirectional combinator.
bi_rnn = nn.Bidirectional(forward_rnn, backward_rnn)
# Encodes a batch of input sequences in both directions.
carry, outputs = bi_rnn(inputs, seq_lengths)
```
### Recurrent Dropout
There are two main uses of dropout in RNNs:
1. Input dropout: regular dropout applied to the inputs, different for every step.
2. Recurrent dropout: applies dropout to a recurrent input/output, same for every step.
Flax's `nn.scan` can easily express both types of dropout via `split_rngs`: input dropout would split rngs while recurrent dropout would not. [#2540](https://github.com/google/flax/pull/2540) was introduced so that the `rng_name` in `nn.Dropout` can now be defined by the user; this way cells can define both types of dropout, e.g.:
```python
self.dropout = nn.Dropout(...) # input dropout
self.recurrent_dropout = nn.Dropout(..., rng_collection='recurrent_dropout')
```
Based on this, `nn.scan` / `nn.RNN` can now specify `split_rngs` accordingly e.g:
```
nn.scan(scan_fn, ..., split_rngs={'dropout': True, 'recurrent_dropout': False})
```
# Future ideas
<details><summary>show</summary>
### Sequence Packing
Allow packing multiple sequences to make efficient use of space/memory. This might result in a trade-off where step time is higher (because at each step we need to check whether we are starting a new sequence and reset the carry/initial state), but where less padding is used increasing efficiency overall.
### RNNCell redesign
#### Make initialize_state an instance method
The first alternative is to make `initialize_carry` an instance method. With this change hyperparameters can be passed directly to the cell; its signature would look like this:
```python
def initialize_carry(self, sample_input) -> Carry:
...
```
Usage would look like this:
```python
LSTM = nn.scan(
nn.LSTMCell, variable_broadcast='params',
split_rngs={'dropout': True})
lstm = LSTM(features=32)
carry = lstm.initialize_carry(x[:, 0])
carry, y = lstm(carry, x)
```
#### Remove initialize_carry
An alternative is to remove `initialize_carry` entirely and have the carry state be handled as a carry collection. This would simplify usage quite a bit:
```python
LSTM = nn.scan(
nn.LSTMCell, variable_broadcast='params',
split_rngs={'dropout': True})
y = LSTM(features=32)(carry, x)
```
However, this would require `nn.scan` to support initialization of carry collections which is currently not possible. Also, users would have to specify that a collection is mutable e.g. `mutable=['carry']`, even if they are not interested in the output carry state.
</details>
|
googleREPO_NAMEflaxPATH_START.@flax_extracted@flax-main@docs_nnx@flip@2396-rnn.md@.PATH_END.py
|
{
"filename": "plot_diag_fold.py",
"repo_name": "petigura/terra",
"repo_path": "terra_extracted/terra-master/scripts/plot_diag_fold.py",
"type": "Python"
}
|
#!/usr/bin/env python
#
# Create a diagnostic plot based on the Kepler team KOI ephemeris
#
# NOTE: this is Python 2 code (print statements, pandas .ix indexing).
import matplotlib
from argparse import ArgumentParser

# Command-line interface: positional KOI identifier plus optional png output.
prsr = ArgumentParser()
prsr.add_argument('koi' ,type=str,)
prsr.add_argument('-o',type=str,default=None,help='png')
args = prsr.parse_args()
koi = args.koi
# Select a non-interactive backend when writing to file; this must be
# done before matplotlib.pylab is imported below.
if args.o:
    matplotlib.use('Agg')
import h5py
import kplot
import os
import h5plus
import analysis
import keptoy
import numpy as np
from matplotlib.pylab import plt

# Load the Q12 KOI catalog and derive per-candidate transit parameters.
q12 = analysis.loadKOI('Q12')
q12 = q12.dropna()
q12.index =q12.koi
q12['Pcad'] = q12['P'] / keptoy.lc  # period converted to cadences
# transit duration converted to whole cadences (assumes tdur is in
# hours and keptoy.lc is the cadence length in days — TODO confirm)
q12['twd'] = np.round(q12['tdur'] / 24 / keptoy.lc ).astype(int)
q12['mean'] = q12['df'] * 1e-6  # depth (df presumably in ppm — verify)
q12['s2n'] = 0
q12['noise'] = 0
tpar = dict(q12.ix[koi]) # transit parameters for a particular koi

# Work on a temporary copy of the per-star grid file so plotting never
# modifies the original.
opath = 'grid/%(kic)09d.h5' % tpar
npath = opath.split('/')[-1].split('.')[0]+'_temp.h5'
cmd = 'cp %s %s' % (opath,npath)
os.system(cmd)
with h5plus.File(npath) as h5:
    kplot.plot_diag(h5,tpar=tpar)

# Save to file when -o was given; otherwise display interactively.
if args.o is not None:
    plt.gcf().savefig(args.o)
    print "created figure %s" % args.o
else:
    plt.show()
|
{
"filename": "test_util.py",
"repo_name": "tnakazato/priism",
"repo_path": "priism_extracted/priism-main/tests/test_util.py",
"type": "Python"
}
|
# Copyright (C) 2019-2022
# Inter-University Research Institute Corporation, National Institutes of Natural Sciences
# 2-21-1, Osawa, Mitaka, Tokyo, 181-8588, Japan.
#
# This file is part of PRIISM.
#
# PRIISM is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# PRIISM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with PRIISM. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import
import unittest
import numpy as np
import priism.alma.util as util
class RandomIndexGeneratorTest(unittest.TestCase):
    """
    Test RandomIndexGenerator

    test_too_few_ws           test num_ws < num_fold (raise AssertionError)
    test_negative_subset_id   test negative subset ID (raise AssertionError)
    test_too_large_subset_id  test subset ID >= num_fold (raise AssertionError)
    test_random_index         test random index for subset
    test_random_index_with_mod  test random index for subset (num_ws % num_fold > 0)
    test_default_fold         test default num_fold (should be 10)
    """
    def _init_generator(self, num_ws, num_fold=None):
        # Only forward num_fold when given so the class default is exercised.
        if num_fold is None:
            return util.RandomIndexGenerator(num_ws)
        else:
            return util.RandomIndexGenerator(num_ws, num_fold)

    def test_too_few_ws(self):
        """test_too_few_ws: test num_ws < num_fold (raise AssertionError)"""
        with self.assertRaises(AssertionError):
            self._init_generator(3, 10)

    def test_negative_subset_id(self):
        """test_negative_subset_id: test negative subset ID (raise AssertionError)"""
        generator = self._init_generator(20, 5)
        with self.assertRaises(AssertionError):
            generator.get_subset_index(-1)

    def test_too_large_subset_id(self):
        """test_too_large_subset_id: test subset ID >= num_fold (raise AssertionError)"""
        num_fold = 5
        generator = self._init_generator(20, num_fold)
        with self.assertRaises(AssertionError):
            generator.get_subset_index(num_fold)
        with self.assertRaises(AssertionError):
            generator.get_subset_index(num_fold + 1)

    def _run_successful_test(self, num_ws, num_fold=None):
        generator = self._init_generator(num_ws, num_fold)

        # global consistency check
        random_index = generator.random_index
        self.assertEqual(num_ws, len(random_index))

        index_flag = np.zeros(num_ws, dtype=bool)
        self.assertTrue(np.all(index_flag == False))
        delta_list = []

        # per subset check: the first (num_ws % num_fold) subsets get
        # one extra element
        nmod = num_ws % num_fold
        ndiv = num_ws // num_fold
        for subset_id in range(num_fold):
            subset_index = generator.get_subset_index(subset_id)
            print('subset {0}: index {1}'.format(subset_id, subset_index))

            # check if size of subset index is correct
            num_index = ndiv + 1 if subset_id < nmod else ndiv
            self.assertEqual(num_index, len(subset_index))

            if num_index > 1:
                delta = subset_index[1:] - subset_index[:-1]
                # check if subset index is sorted in ascending order
                self.assertTrue(np.all(delta > 0))
                # NOTE: index can be regularly spaced by chance even if
                # index assignment is globally random. So, per subset
                # check is skipped and deltas are kept for one global
                # check after the loop.
                delta_list.append(delta)

            # activate flag
            index_flag[subset_index] = True

        # check if all index are included in any of subset
        self.assertTrue(np.all(index_flag == True))

        # check if index spacing is not unique.
        # Bug fix: the original compared a Python list to a scalar
        # ("flattened == flattened[0]"), which evaluates to the single
        # value False, so np.all(...) was always False and the assertion
        # passed vacuously. Concatenate into an ndarray so elementwise
        # equality is actually tested.
        print(delta_list)
        flattened = np.concatenate(delta_list)
        self.assertFalse(np.all(flattened == flattened[0]))

    def test_random_index(self):
        """test_random_index: test random index for subset"""
        self._run_successful_test(20, 5)

    def test_random_index_with_mod(self):
        """test_random_index: test random index for subset (num_ws % num_fold > 0)"""
        self._run_successful_test(23, 5)

    def test_default_fold(self):
        """test_default_fold: test default num_fold (should be 10)"""
        generator = self._init_generator(100)
        self.assertEqual(10, generator.num_fold)
|
tnakazatoREPO_NAMEpriismPATH_START.@priism_extracted@priism-main@tests@test_util.py@.PATH_END.py
|
{
"filename": "callables.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/src/prefect/utilities/callables.py",
"type": "Python"
}
|
"""
Utilities for working with Python callables.
"""
import ast
import importlib.util
import inspect
import warnings
from collections import OrderedDict
from collections.abc import Iterable
from functools import partial
from logging import Logger
from pathlib import Path
from typing import Any, Callable, Optional, Union, cast
import cloudpickle # type: ignore # no stubs available
import pydantic
from griffe import Docstring, DocstringSectionKind, Parser, parse
from typing_extensions import Literal, TypeVar
from prefect._internal.pydantic.v1_schema import has_v1_type_as_param
from prefect._internal.pydantic.v2_schema import (
create_v2_schema,
process_v2_params,
)
from prefect.exceptions import (
MappingLengthMismatch,
MappingMissingIterable,
ParameterBindError,
ReservedArgumentError,
SignatureMismatchError,
)
from prefect.logging.loggers import disable_logger, get_logger
from prefect.utilities.annotations import allow_failure, quote, unmapped
from prefect.utilities.collections import isiterable
from prefect.utilities.importtools import safe_load_namespace
logger: Logger = get_logger(__name__)
R = TypeVar("R", infer_variance=True)
def get_call_parameters(
    fn: Callable[..., Any],
    call_args: tuple[Any, ...],
    call_kwargs: dict[str, Any],
    apply_defaults: bool = True,
) -> dict[str, Any]:
    """
    Bind a call to a function to get parameter/value mapping. Default values on
    the signature will be included if not overridden.

    If the function carries a `__prefect_self__` attribute (set when Prefect
    decorates a bound method), that instance is prepended to the positional
    arguments so the implicit self argument stays visible to Prefect's
    parameter machinery (such as cache key functions) while callers keep
    the normal bound-method calling convention.

    Raises a ParameterBindError if the arguments/kwargs are not valid for the
    function.
    """
    if hasattr(fn, "__prefect_self__"):
        call_args = (getattr(fn, "__prefect_self__"), *call_args)

    try:
        bound = inspect.signature(fn).bind(*call_args, **call_kwargs)
    except TypeError as exc:
        raise ParameterBindError.from_bind_failure(fn, exc, call_args, call_kwargs)

    if apply_defaults:
        bound.apply_defaults()

    # Return a plain dict (not OrderedDict) so Dask converts futures held in
    # the mapping during execution; this matches dict semantics since 3.7.
    return dict(bound.arguments)
def get_parameter_defaults(
    fn: Callable[..., Any],
) -> dict[str, Any]:
    """
    Get default parameter values for a callable.

    Only parameters that declare a default in the signature appear in the
    returned mapping.
    """
    sig = inspect.signature(fn)
    return {
        name: param.default
        for name, param in sig.parameters.items()
        if param.default is not sig.empty
    }
def explode_variadic_parameter(
    fn: Callable[..., Any], parameters: dict[str, Any]
) -> dict[str, Any]:
    """
    Given a parameter dictionary, move any parameters stored in a variadic keyword
    argument parameter (i.e. **kwargs) into the top level.

    Example:

    ```python
    def foo(a, b, **kwargs):
        pass

    parameters = {"a": 1, "b": 2, "kwargs": {"c": 3, "d": 4}}
    explode_variadic_parameter(foo, parameters)
    # {"a": 1, "b": 2, "c": 3, "d": 4}
    ```
    """
    # Locate the **kwargs-style parameter name, if the signature has one.
    variadic_key = next(
        (
            name
            for name, param in inspect.signature(fn).parameters.items()
            if param.kind == param.VAR_KEYWORD
        ),
        None,
    )
    if not variadic_key:
        return parameters

    exploded = parameters.copy()
    # Lift every entry of the variadic mapping to the top level.
    exploded.update(exploded.pop(variadic_key, {}))
    return exploded
def collapse_variadic_parameters(
    fn: Callable[..., Any], parameters: dict[str, Any]
) -> dict[str, Any]:
    """
    Gather any parameters not present in the signature into the variadic
    keyword argument (i.e. **kwargs) of the parameter dictionary.

    Example:
    ```python
    def foo(a, b, **kwargs):
        pass

    parameters = {"a": 1, "b": 2, "c": 3, "d": 4}
    collapse_variadic_parameters(foo, parameters)
    # {"a": 1, "b": 2, "kwargs": {"c": 3, "d": 4}}
    ```

    Raises:
        ValueError: if extra parameters are present but the signature declares
            no **kwargs to collect them into.
    """
    signature_parameters = inspect.signature(fn).parameters
    extra_keys = [key for key in parameters if key not in signature_parameters]
    if not extra_keys:
        # Every name is declared in the signature; hand back unchanged.
        return parameters

    variadic_key = next(
        (
            name
            for name, param in signature_parameters.items()
            if param.kind == param.VAR_KEYWORD
        ),
        None,
    )
    if variadic_key is None:
        raise ValueError(
            f"Signature for {fn} does not include any variadic keyword argument "
            "but parameters were given that are not present in the signature."
        )

    collapsed = parameters.copy()
    collapsed[variadic_key] = {key: collapsed.pop(key) for key in extra_keys}
    return collapsed
def parameters_to_args_kwargs(
    fn: Callable[..., Any],
    parameters: dict[str, Any],
) -> tuple[tuple[Any, ...], dict[str, Any]]:
    """
    Convert a `parameters` dictionary into positional and keyword arguments for
    `fn`.

    The function _must_ have an identical signature to the original function or
    this will return an empty tuple and dict.

    Raises:
        SignatureMismatchError: if `parameters` contains names that are not in
            the signature of `fn`.
    """
    signature = inspect.signature(fn)
    known_names = signature.parameters.keys()

    # Reject any parameter names the function does not accept.
    if any(name not in known_names for name in parameters):
        raise SignatureMismatchError.from_bad_params(
            list(known_names), list(parameters)
        )

    bound = signature.bind_partial()
    bound.arguments = OrderedDict(parameters)
    return bound.args, bound.kwargs
def call_with_parameters(fn: Callable[..., R], parameters: dict[str, Any]) -> R:
    """
    Invoke `fn` using parameters previously extracted with `get_call_parameters`.

    The signature of `fn` must match the one the parameters were extracted from;
    to call a function with a different signature, build the args/kwargs
    yourself with `parameters_to_positional_and_keyword` instead.
    """
    positional, keyword = parameters_to_args_kwargs(fn, parameters)
    return fn(*positional, **keyword)
def cloudpickle_wrapped_call(
    __fn: Callable[..., Any], *args: Any, **kwargs: Any
) -> Callable[[], bytes]:
    """
    Serializes a function call using cloudpickle then returns a callable which will
    execute that call and return a cloudpickle serialized return value

    This is particularly useful for sending calls to libraries that only use the Python
    built-in pickler (e.g. `anyio.to_process` and `multiprocessing`) but may require
    a wider range of pickling support.
    """
    payload = cloudpickle.dumps((__fn, args, kwargs))  # type: ignore # no stubs available
    # A `partial` of a top-level function is picklable by the built-in pickler
    # (a closure would not be); the cloudpickle payload rides along as bytes.
    return partial(_run_serialized_call, payload)
def _run_serialized_call(payload: bytes) -> bytes:
    """
    Execute a cloudpickle-serialized call and return its cloudpickle-serialized
    result.

    Defined at the top-level so it can be pickled by the Python pickler.

    Used by `cloudpickle_wrapped_call`.
    """
    call_fn, call_args, call_kwargs = cloudpickle.loads(payload)
    result = call_fn(*call_args, **call_kwargs)
    return cloudpickle.dumps(result)  # type: ignore # no stubs available
class ParameterSchema(pydantic.BaseModel):
    """Simple data model corresponding to an OpenAPI `Schema`."""

    # Fixed discriminator fields: a parameter schema always renders as an
    # object titled "Parameters" in the generated document.
    title: Literal["Parameters"] = "Parameters"
    type: Literal["object"] = "object"
    # Per-parameter JSON schemas, keyed by parameter name.
    properties: dict[str, Any] = pydantic.Field(default_factory=dict)
    # Names of required parameters (OpenAPI `required` list).
    required: list[str] = pydantic.Field(default_factory=list)
    # Shared `$ref` definitions for nested models.
    definitions: dict[str, Any] = pydantic.Field(default_factory=dict)

    def model_dump_for_openapi(self) -> dict[str, Any]:
        """Dump the schema for an OpenAPI document, dropping an empty
        `required` list (it must be absent rather than empty)."""
        result = self.model_dump(mode="python", exclude_none=True)
        if "required" in result and not result["required"]:
            del result["required"]
        return result
def parameter_docstrings(docstring: Optional[str]) -> dict[str, str]:
    """
    Parse the parameter section of a Google-format docstring and return a
    dictionary mapping parameter names to their descriptions.

    Args:
        docstring: The function's docstring.

    Returns:
        Mapping from parameter names to docstrings.
    """
    if not docstring:
        # No docstring (or empty) -> nothing to parse.
        return {}

    with disable_logger("griffe"):
        sections = parse(Docstring(docstring), Parser.google)

    docs: dict[str, str] = {}
    for section in sections:
        if section.kind == DocstringSectionKind.parameters:
            # A later parameters section replaces an earlier one entirely.
            docs = {param.name: param.description for param in section.value}
    return docs
def process_v1_params(
    param: inspect.Parameter,
    *,
    position: int,
    docstrings: dict[str, str],
    aliases: dict[str, str],
) -> tuple[str, Any, Any]:
    """Build a ``(name, type, pydantic v1 field)`` triple for one parameter.

    Mutates `aliases` to map a renamed field back to the original parameter
    name when the name collides with a `pydantic.BaseModel` attribute.
    """
    # Pydantic model creation will fail if names collide with the BaseModel type
    if hasattr(pydantic.BaseModel, param.name):
        name = param.name + "__"
        aliases[name] = param.name
    else:
        name = param.name
    # Unannotated parameters are modeled as `Any`.
    type_ = Any if param.annotation is inspect.Parameter.empty else param.annotation
    with warnings.catch_warnings():
        # Using the v1 `Field` signature under pydantic v2 emits a deprecation
        # warning; suppress it deliberately.
        warnings.filterwarnings(
            "ignore", category=pydantic.warnings.PydanticDeprecatedSince20
        )
        field: Any = pydantic.Field(  # type: ignore # this uses the v1 signature, not v2
            # `...` (Ellipsis) marks the field as required when no default exists.
            default=... if param.default is param.empty else param.default,
            title=param.name,
            description=docstrings.get(param.name, None),
            alias=aliases.get(name),
            position=position,
        )
    return name, type_, field
def create_v1_schema(
    name_: str, model_cfg: type[Any], model_fields: Optional[dict[str, Any]] = None
) -> dict[str, Any]:
    """Create a pydantic v1-style model named `name_` from `model_fields`
    (a mapping of field name to ``(type, field)``) and return its JSON schema,
    keyed by alias."""
    with warnings.catch_warnings():
        # Creating a v1-style model under pydantic v2 emits a deprecation
        # warning; suppress it deliberately.
        warnings.filterwarnings(
            "ignore", category=pydantic.warnings.PydanticDeprecatedSince20
        )
        model_fields = model_fields or {}
        model: type[pydantic.BaseModel] = pydantic.create_model( # type: ignore # this uses the v1 signature, not v2
            name_,
            __config__=model_cfg, # type: ignore # this uses the v1 signature, not v2
            **model_fields,
        )
        return model.schema(by_alias=True) # type: ignore # this uses the v1 signature, not v2
def parameter_schema(fn: Callable[..., Any]) -> ParameterSchema:
    """Given a function, generates an OpenAPI-compatible description
    of the function's arguments, including:

    - name
    - typing information
    - whether it is required
    - a default value
    - additional constraints (like possible enum values)

    Args:
        fn (Callable): The function whose arguments will be serialized

    Returns:
        ParameterSchema: the argument schema
    """
    try:
        # `eval_str` resolves string annotations; it requires Python >= 3.10
        # and can raise `NameError` for unresolvable forward references.
        signature = inspect.signature(fn, eval_str=True)  # novm
    except (NameError, TypeError):
        # `eval_str` is not available in Python < 3.10
        signature = inspect.signature(fn)

    return generate_parameter_schema(
        signature, parameter_docstrings(inspect.getdoc(fn))
    )
def parameter_schema_from_entrypoint(entrypoint: str) -> ParameterSchema:
    """
    Generate a parameter schema from an entrypoint string.

    Will load the source code of the function and extract the signature and docstring
    to generate the schema.

    Useful for generating a schema for a function when instantiating the function may
    not be possible due to missing imports or other issues.

    Args:
        entrypoint: A string representing the entrypoint to a function. The string
            should be in the format of `module.path.to.function:do_stuff`.

    Returns:
        ParameterSchema: The parameter schema for the function.
    """
    filepath = None
    if ":" in entrypoint:
        # split by the last colon once to handle Windows paths with drive letters i.e C:\path\to\file.py:do_stuff
        filepath, func_name = entrypoint.rsplit(":", maxsplit=1)
        source_code = Path(filepath).read_text()
    else:
        # Dotted form: resolve the module's source file via the import system.
        module_name, func_name = entrypoint.rsplit(".", maxsplit=1)
        spec = importlib.util.find_spec(module_name)
        if not spec or not spec.origin:
            raise ValueError(f"Could not find module {module_name!r}")
        source_code = Path(spec.origin).read_text()

    signature = _generate_signature_from_source(source_code, func_name, filepath)
    docstring = _get_docstring_from_source(source_code, func_name)
    return generate_parameter_schema(signature, parameter_docstrings(docstring))
def generate_parameter_schema(
    signature: inspect.Signature, docstrings: dict[str, str]
) -> ParameterSchema:
    """
    Generate a parameter schema from a function signature and docstrings.

    To get a signature from a function, use `inspect.signature(fn)` or
    `_generate_signature_from_source(source_code, func_name)`.

    Args:
        signature: The function signature.
        docstrings: A dictionary mapping parameter names to docstrings.

    Returns:
        ParameterSchema: The parameter schema.
    """
    model_fields: dict[str, Any] = {}
    aliases: dict[str, str] = {}
    # Choose the pydantic v2 or v1 code path depending on whether any
    # parameter uses a v1-style type (presumably detected by
    # `has_v1_type_as_param` -- defined elsewhere in this module).
    if not has_v1_type_as_param(signature):
        config = pydantic.ConfigDict(arbitrary_types_allowed=True)
        create_schema = partial(create_v2_schema, model_cfg=config)
        process_params = process_v2_params
    else:
        # v1 models take a config *class* rather than a ConfigDict.
        class ModelConfig:
            arbitrary_types_allowed = True
        create_schema = partial(create_v1_schema, model_cfg=ModelConfig)
        process_params = process_v1_params
    for position, param in enumerate(signature.parameters.values()):
        name, type_, field = process_params(
            param, position=position, docstrings=docstrings, aliases=aliases
        )
        # Generate a Pydantic model at each step so we can check if this parameter
        # type supports schema generation
        try:
            create_schema("CheckParameter", model_fields={name: (type_, field)})
        except (ValueError, TypeError):
            # This field's type is not valid for schema creation, update it to `Any`
            type_ = Any
        model_fields[name] = (type_, field)
    # Generate the final model and schema
    schema = create_schema("Parameters", model_fields=model_fields)
    return ParameterSchema(**schema)
def raise_for_reserved_arguments(
    fn: Callable[..., Any], reserved_arguments: Iterable[str]
) -> None:
    """Ensure `fn` declares no parameter whose name appears in
    `reserved_arguments`; raise a ReservedArgumentError on the first conflict."""
    parameter_names = set(inspect.signature(fn).parameters)
    for reserved in reserved_arguments:
        if reserved in parameter_names:
            raise ReservedArgumentError(
                f"{reserved!r} is a reserved argument name and cannot be used."
            )
def _generate_signature_from_source(
    source_code: str, func_name: str, filepath: Optional[str] = None
) -> inspect.Signature:
    """
    Extract the signature of a function from its source code.

    Will ignore missing imports and exceptions while loading local class definitions.

    Args:
        source_code: The source code where the function named `func_name` is declared.
        func_name: The name of the function.
        filepath: Optional path of the file the source came from; forwarded to
            `safe_load_namespace`.

    Returns:
        The signature of the function.

    Raises:
        ValueError: If no function named `func_name` exists in the source.
    """
    # Load the namespace from the source code. Missing imports and exceptions while
    # loading local class definitions are ignored.
    namespace = safe_load_namespace(source_code, filepath=filepath)
    # Parse the source code into an AST
    parsed_code = ast.parse(source_code)
    func_def = next(
        (
            node
            for node in ast.walk(parsed_code)
            if isinstance(
                node,
                (
                    ast.FunctionDef,
                    ast.AsyncFunctionDef,
                ),
            )
            and node.name == func_name
        ),
        None,
    )
    if func_def is None:
        raise ValueError(f"Function {func_name} not found in source code")
    parameters: list[inspect.Parameter] = []
    # Handle annotations for positional only args e.g. def func(a, /, b, c)
    for arg in func_def.args.posonlyargs:
        name = arg.arg
        annotation = arg.annotation
        if annotation is not None:
            try:
                ann_code = compile(ast.Expression(annotation), "<string>", "eval")
                annotation = eval(ann_code, namespace)
            except Exception as e:
                logger.debug("Failed to evaluate annotation for %s: %s", name, e)
                annotation = inspect.Parameter.empty
        else:
            annotation = inspect.Parameter.empty
        param = inspect.Parameter(
            name, inspect.Parameter.POSITIONAL_ONLY, annotation=annotation
        )
        parameters.append(param)
    # Determine the annotations for args e.g. def func(a: int, b: str, c: float)
    for arg in func_def.args.args:
        name = arg.arg
        annotation = arg.annotation
        if annotation is not None:
            try:
                # Compile and evaluate the annotation
                ann_code = compile(ast.Expression(annotation), "<string>", "eval")
                annotation = eval(ann_code, namespace)
            except Exception as e:
                # Don't raise an error if the annotation evaluation fails. Set the
                # annotation to `inspect.Parameter.empty` instead which is equivalent to
                # not having an annotation.
                logger.debug("Failed to evaluate annotation for %s: %s", name, e)
                annotation = inspect.Parameter.empty
        else:
            annotation = inspect.Parameter.empty
        param = inspect.Parameter(
            name, inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=annotation
        )
        parameters.append(param)
    # Handle default values for args e.g. def func(a=1, b="hello", c=3.14)
    # BUG FIX: `ast` stores `defaults` for the combined (posonlyargs + args)
    # list, so pad relative to *all* positional parameters collected so far
    # rather than `args.args` alone; otherwise defaults mis-align when
    # positional-only parameters exist (e.g. `def f(a, /, b=2)` would have
    # assigned the default 2 to `a`).
    defaults = [None] * (
        len(parameters) - len(func_def.args.defaults)
    ) + func_def.args.defaults
    for param, default in zip(parameters, defaults):
        if default is not None:
            try:
                def_code = compile(ast.Expression(default), "<string>", "eval")
                default = eval(def_code, namespace)
            except Exception as e:
                logger.debug(
                    "Failed to evaluate default value for %s: %s", param.name, e
                )
                default = None  # Set to None if evaluation fails
            parameters[parameters.index(param)] = param.replace(default=default)
    # Handle annotations for keyword only args e.g. def func(*, a: int, b: str)
    for kwarg in func_def.args.kwonlyargs:
        name = kwarg.arg
        annotation = kwarg.annotation
        if annotation is not None:
            try:
                ann_code = compile(ast.Expression(annotation), "<string>", "eval")
                annotation = eval(ann_code, namespace)
            except Exception as e:
                logger.debug("Failed to evaluate annotation for %s: %s", name, e)
                annotation = inspect.Parameter.empty
        else:
            annotation = inspect.Parameter.empty
        param = inspect.Parameter(
            name, inspect.Parameter.KEYWORD_ONLY, annotation=annotation
        )
        parameters.append(param)
    # Handle default values for keyword only args e.g. def func(*, a=1, b="hello")
    # NOTE: `kw_defaults` always has one entry (possibly None) per keyword-only
    # arg, so the padding term below is normally zero.
    defaults = [None] * (
        len(func_def.args.kwonlyargs) - len(func_def.args.kw_defaults)
    ) + func_def.args.kw_defaults
    for param, default in zip(parameters[-len(func_def.args.kwonlyargs) :], defaults):
        if default is not None:
            try:
                def_code = compile(ast.Expression(default), "<string>", "eval")
                default = eval(def_code, namespace)
            except Exception as e:
                logger.debug(
                    "Failed to evaluate default value for %s: %s", param.name, e
                )
                default = None
            parameters[parameters.index(param)] = param.replace(default=default)
    # Handle annotations for varargs and kwargs e.g. def func(*args: int, **kwargs: str)
    if func_def.args.vararg:
        parameters.append(
            inspect.Parameter(
                func_def.args.vararg.arg, inspect.Parameter.VAR_POSITIONAL
            )
        )
    if func_def.args.kwarg:
        parameters.append(
            inspect.Parameter(func_def.args.kwarg.arg, inspect.Parameter.VAR_KEYWORD)
        )
    # Handle return annotation e.g. def func() -> int
    return_annotation = func_def.returns
    if return_annotation is not None:
        try:
            ret_ann_code = compile(
                ast.Expression(return_annotation), "<string>", "eval"
            )
            return_annotation = eval(ret_ann_code, namespace)
        except Exception as e:
            logger.debug("Failed to evaluate return annotation: %s", e)
            return_annotation = inspect.Signature.empty
    return inspect.Signature(parameters, return_annotation=return_annotation)
def _get_docstring_from_source(source_code: str, func_name: str) -> Optional[str]:
"""
Extract the docstring of a function from its source code.
Args:
source_code (str): The source code of the function.
func_name (str): The name of the function.
Returns:
The docstring of the function. If the function has no docstring, returns None.
"""
parsed_code = ast.parse(source_code)
func_def = next(
(
node
for node in ast.walk(parsed_code)
if isinstance(
node,
(
ast.FunctionDef,
ast.AsyncFunctionDef,
),
)
and node.name == func_name
),
None,
)
if func_def is None:
raise ValueError(f"Function {func_name} not found in source code")
if (
func_def.body
and isinstance(func_def.body[0], ast.Expr)
and isinstance(func_def.body[0].value, ast.Constant)
):
return func_def.body[0].value.value
return None
def expand_mapping_parameters(
    func: Callable[..., Any], parameters: dict[str, Any]
) -> list[dict[str, Any]]:
    """
    Generates a list of call parameters to be used for individual calls in a mapping
    operation.

    Args:
        func: The function to be called
        parameters: A dictionary of parameters with iterables to be mapped over

    Returns:
        list: A list of dictionaries to be used as parameters for each
        call in the mapping operation

    Raises:
        MappingMissingIterable: If no iterable parameter is present.
        MappingLengthMismatch: If iterable parameters have differing lengths.
    """
    # Ensure that any parameters in kwargs are expanded before this check
    parameters = explode_variadic_parameter(func, parameters)
    iterable_parameters: dict[str, list[Any]] = {}
    static_parameters: dict[str, Any] = {}
    annotated_parameters: dict[str, Union[allow_failure[Any], quote[Any]]] = {}
    # Partition parameters into mapped-over iterables vs. repeated statics,
    # remembering annotation wrappers so they can be re-applied per call.
    for key, val in parameters.items():
        if isinstance(val, (allow_failure, quote)):
            # Unwrap annotated parameters to determine if they are iterable
            annotated_parameters[key] = val
            val: Any = val.unwrap()
        if isinstance(val, unmapped):
            # `unmapped` values are treated as static even when iterable.
            static_parameters[key] = cast(unmapped[Any], val).value
        elif isiterable(val):
            iterable_parameters[key] = list(val)
        else:
            static_parameters[key] = val
    if not iterable_parameters:
        raise MappingMissingIterable(
            "No iterable parameters were received. Parameters for map must "
            f"include at least one iterable. Parameters: {parameters}"
        )
    iterable_parameter_lengths = {
        key: len(val) for key, val in iterable_parameters.items()
    }
    lengths = set(iterable_parameter_lengths.values())
    if len(lengths) > 1:
        raise MappingLengthMismatch(
            "Received iterable parameters with different lengths. Parameters for map"
            f" must all be the same length. Got lengths: {iterable_parameter_lengths}"
        )
    map_length = list(lengths)[0]
    call_parameters_list: list[dict[str, Any]] = []
    for i in range(map_length):
        # Take the i-th element of every iterable plus every static value.
        call_parameters = {key: value[i] for key, value in iterable_parameters.items()}
        call_parameters.update({key: value for key, value in static_parameters.items()})
        # Add default values for parameters; these are skipped earlier since they should
        # not be mapped over
        for key, value in get_parameter_defaults(func).items():
            call_parameters.setdefault(key, value)
        # Re-apply annotations to each key again
        for key, annotation in annotated_parameters.items():
            call_parameters[key] = annotation.rewrap(call_parameters[key])
        # Collapse any previously exploded kwargs
        call_parameters_list.append(collapse_variadic_parameters(func, call_parameters))
    return call_parameters_list
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@src@prefect@utilities@callables.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "zachetienne/nrpytutorial",
"repo_path": "nrpytutorial_extracted/nrpytutorial-master/SpinWeight_minus2_SphHarmonics/__init__.py",
"type": "Python"
}
|
zachetienneREPO_NAMEnrpytutorialPATH_START.@nrpytutorial_extracted@nrpytutorial-master@SpinWeight_minus2_SphHarmonics@__init__.py@.PATH_END.py
|
|
{
"filename": "openggcm_gm_4Dcdf.py",
"repo_name": "nasa/Kamodo",
"repo_path": "Kamodo_extracted/Kamodo-master/kamodo_ccmc/readers/openggcm_gm_4Dcdf.py",
"type": "Python"
}
|
'''
Original version: Lutz Raestatter, Oct 1(?), 2021
Modified to work with the flythrough: Oct 5, 2021 (Rebecca Ringuette)
'''
from datetime import datetime, timezone
# standard model dictionary for reference
# Maps the variable name used in the model output files to a metadata list:
#   [kamodo variable name, description, integer variable index,
#    coordinate system ('GSE'), coordinate type ('car' -- presumably
#    cartesian; TODO confirm), list of coordinate dependencies, units]
model_varnames = {'bx': ['B_x', 'x component of magnetic field',
                         0, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'nT'],
                  'by': ['B_y', 'y component of magnetic field',
                         1, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'nT'],
                  'bz': ['B_z', 'z component of magnetic field',
                         2, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'nT'],
                  'ex': ['E_x', 'x component of electric field',
                         6, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'mV/m'],
                  'ey': ['E_y', 'y component of electric field',
                         7, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'mV/m'],
                  'ez': ['E_z', 'z component of electric field',
                         8, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'mV/m'],
                  'vx': ['v_plasmax', 'x component of plasma velocity',
                         9, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'km/s'],
                  'vy': ['v_plasmay', 'y component of plasma velocity',
                         10, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'km/s'],
                  'vz': ['v_plasmaz', 'z component of plasma velocity',
                         11, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'km/s'],
                  'rr': ['N_plasma', 'number density of plasma ' +
                         '(hydrogen equivalent)',
                         12, 'GSE', 'car', ['time', 'x', 'y', 'z'], '1/cm**3'],
                  'resis': ['eta', 'resistivity',
                            13, 'GSE', 'car', ['time', 'x', 'y', 'z'],
                            'm**2/s'],
                  'pp': ['P_plasma', 'plasma pressure',
                         14, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'pPa'],
                  'xjx': ['j_x', 'current density, x component',
                          15, 'GSE', 'car', ['time', 'x', 'y', 'z'],
                          'muA/m**2'],
                  'xjy': ['j_y', 'current density, y component',
                          16, 'GSE', 'car', ['time', 'x', 'y', 'z'],
                          'muA/m**2'],
                  'xjz': ['j_z', 'current density, z component',
                          17, 'GSE', 'car', ['time', 'x', 'y', 'z'],
                          'muA/m**2']}
def MODEL():
    """Build and return the OpenGGCM_GM reader class.

    Imports are deferred to call time so the module can be inspected without
    the heavy dependencies installed.
    """
    from numpy import array, unique, squeeze
    from time import perf_counter
    from os.path import basename
    from kamodo import Kamodo
    import kamodo_ccmc.readers.reader_utilities as RU

    class MODEL(Kamodo):
        '''OpenGGCM_GM magnetosphere reader.
        Inputs:
            file_dir: a string representing the file directory of the
                model output data.
                Note: This reader 'walks' the entire dataset in the directory.
            variables_requested = a list of variable name strings chosen from
                the model_varnames dictionary in this script, specifically the
                first item in the list associated with a given key.
                - If empty, the reader functionalizes all possible variables
                    (default)
                - If 'all', the reader returns the model_varnames dictionary
                    above for only the variables present in the given files.
            filetime = boolean (default = False)
                - If False, the script fully executes.
                - If True, the script only executes far enough to determine the
                    time values associated with the chosen data.
            printfiles = boolean (default = False)
                - If False, the filenames associated with the data retrieved
                    ARE NOT printed.
                - If True, the filenames associated with the data retrieved ARE
                    printed.
            gridded_int = boolean (default = True)
                - If True, the variables chosen are functionalized in both the
                    standard method and a gridded method.
                - If False, the variables chosen are functionalized in only the
                    standard method.
            verbose = boolean (False)
                - If False, script execution and the underlying Kamodo
                    execution is quiet except for specified messages.
                - If True, be prepared for a plethora of messages.
        All inputs are described in further detail in
            KamodoOnboardingInstructions.pdf.
        Returns: a kamodo object (see Kamodo core documentation) containing all
            requested variables in functionalized form.
        Notes and special features:
            - The file converter for the OpenGGCM global magnetosphere outputs
              (compressed binary files) currently only runs on CCMC machines.
              Please contact CCMC for the desired run to be converted to
              netCDF4 (Lutz Rastaetter).
            - This model reader has two special properties called
              kamodo_object.near_Earth_boundary_radius and
              kamodo_object.near_Earth_boundary_radius_unit that give the
              inner boundaries of the radial domain for the given run. The
              inner boundary will also be readily apparent when viewing any
              plot including the coordinate origin (X, Y, Z) = (0, 0, 0). The
              unit of the inner boundary is typically earth radii (R_E).
            - The model outputs are produced with one time step per file, so
              interpolation method 1 is chosen. The standard SciPy interpolator
              is used.
        '''
        def __init__(self, file_dir, variables_requested=None,
                     filetime=False, verbose=False, gridded_int=True,
                     printfiles=False, **kwargs):
            super(MODEL, self).__init__()
            self.modelname = 'OpenGGCM_GM'
            t0 = perf_counter()  # profiling time stamp
            # BUG FIX: avoid a shared mutable default argument; this list is
            # mutated below via variables_requested.remove(item).
            if variables_requested is None:
                variables_requested = []
            # first, check for file list, create if DNE
            list_file = file_dir + self.modelname + '_list.txt'
            time_file = file_dir + self.modelname + '_times.txt'
            self.times, self.pattern_files = {}, {}
            if not RU._isfile(list_file) or not RU._isfile(time_file):
                # collect filenames
                files = sorted(RU.glob(file_dir+'*.nc'))
                if len(files) == 0:
                    try:
                        from kamodo_ccmc.readers.openggcm_to_cdf import \
                            openggcm_combine_magnetosphere_files as gmconv
                        self.conversion_test = gmconv(file_dir)
                    # BUG FIX: was a bare `except:`, which would also swallow
                    # KeyboardInterrupt/SystemExit.
                    except Exception:
                        print('The file converter for the OpenGGCM global ' +
                              'magnetosphere outputs currently only runs on ' +
                              'CCMC machines. Please contact CCMC for the ' +
                              'files for the desired run converted to ' +
                              'netCDF4 from Lutz Rastaetter.')
                        return
                # filenames end in 'YYYY-MM-DD_HH_MM_SS.nc' (19 chars + ext)
                patterns = unique([basename(file[:-19]) for file in files])
                self.filename = ''.join([f+',' for f in files])[:-1]
                self.filedate = datetime.strptime(
                    files[0][-19:-9]+' 00:00:00', '%Y-%m-%d %H:%M:%S'
                    ).replace(tzinfo=timezone.utc)
                # establish time attributes
                for p in patterns:
                    # get list of files to loop through later
                    pattern_files = sorted(RU.glob(file_dir+p+'*.nc'))
                    self.pattern_files[p] = pattern_files
                    self.times[p] = {'start': [], 'end': [], 'all': []}
                    # loop through to get times
                    self.times[p]['start'] = array([RU.str_to_hrs(
                        f[-19:-3]+'_00', self.filedate,
                        format_string='%Y-%m-%d_%H_%M_%S') for f in
                        pattern_files])
                    # one time step per file, so start == end == all
                    self.times[p]['end'] = self.times[p]['start'].copy()
                    self.times[p]['all'] = self.times[p]['start'].copy()
                # create time list file if DNE
                RU.create_timelist(list_file, time_file, self.modelname,
                                   self.times, self.pattern_files,
                                   self.filedate)
            else:  # read in data and time grids from file list
                self.times, self.pattern_files, self.filedate, self.filename =\
                    RU.read_timelist(time_file, list_file)
            if filetime:
                return  # return times only
            # perform initial check on variables_requested list
            if len(variables_requested) > 0 and variables_requested != 'all':
                test_list = [value[0] for key, value in model_varnames.items()]
                err_list = [item for item in variables_requested if item not in
                            test_list]
                if len(err_list) > 0:
                    print('Variable name(s) not recognized:', err_list)
                for item in err_list:
                    variables_requested.remove(item)
                if len(variables_requested) == 0:
                    return
            # there is only one pattern for OpenGGCM, so just save the one grid
            p = list(self.pattern_files.keys())[0]
            pattern_files = self.pattern_files[p]
            cdf_data = RU.Dataset(pattern_files[0], 'r')
            # check var_list for variables not possible in this file set
            self.err_list = []
            if len(variables_requested) > 0 and\
                    variables_requested != 'all':
                gvar_list = [key for key in model_varnames.keys()
                             if key in cdf_data.keys() and
                             model_varnames[key][0] in variables_requested]
                if len(gvar_list) != len(variables_requested):
                    err_list = [value[0] for key, value in
                                model_varnames.items()
                                if key not in cdf_data.keys() and
                                value[0] in variables_requested]
                    self.err_list.extend(err_list)  # add to master list
            else:
                gvar_list = [key for key in model_varnames.keys()
                             if key in cdf_data.keys()]
            # store which file these variables came from
            self.varfiles = [model_varnames[key][0] for
                             key in gvar_list]
            self.gvarfiles = gvar_list
            # initialize storage structure
            self.variables = {model_varnames[gvar][0]: {
                'units': model_varnames[gvar][-1], 'data': p} for gvar in
                self.gvarfiles}
            # get coordinate grids
            self.near_Earth_boundary_radius = \
                cdf_data.near_Earth_boundary_radius
            self.near_Earth_boundary_radius_unit = \
                cdf_data.near_Earth_boundary_radius_units
            for grid in ['_x', '_y', '_z']:
                setattr(self, grid, array(cdf_data[grid]))
            cdf_data.close()
            # print message if variables not found
            if len(self.err_list) > 0:
                print('Some requested variables are not available: ',
                      self.err_list)
            # collect all possible variables in set of files and return
            if variables_requested == 'all':
                self.var_dict = {value[0]: value[1:] for key, value in
                                 model_varnames.items() if value[0] in
                                 self.varfiles}
                return
            # option to print files
            if printfiles:
                print(f'{len(self.filename)} Files:')
                files = self.filename.split(',')
                for f in files:
                    print(f)
            # register interpolators for each variable
            t_reg = perf_counter()
            # store original list b/c gridded interpolators change keys list
            varname_list = [key for key in self.variables.keys()]
            for varname in varname_list:
                self.register_variable(varname, gridded_int)
            if verbose:
                print(f'Took {perf_counter()-t_reg:.5f}s to register ' +
                      f'{len(varname_list)} variables.')
            if verbose:
                print(f'Took a total of {perf_counter()-t0:.5f}s to kamodofy' +
                      f' {len(varname_list)} variables.')

        # define and register a 4D variable (all are 4D)
        def register_variable(self, varname, gridded_int):
            '''Register and functionalize the variable data.'''
            # determine which file the variable came from, retrieve the coords
            key = self.variables[varname]['data']
            # variable name in file (renamed comp vars to avoid shadowing
            # `key` above)
            gvar = [k for k, v in model_varnames.items() if
                    v[0] == varname][0]
            coord_dict = {'time': {'units': 'hr',
                                   'data': self.times[key]['all']}}
            coord_dict['X'] = {'data': self._x, 'units': 'R_E'}
            coord_dict['Y'] = {'data': self._y, 'units': 'R_E'}
            coord_dict['Z'] = {'data': self._z, 'units': 'R_E'}
            coord_str = [value[3]+value[4] for key, value in
                         model_varnames.items() if value[0] == varname][0]

            def func(i):
                '''i is the file/time number. OpenGGCM-mag is one file per
                timestep.'''
                # get data from file
                file = self.pattern_files[key][i]
                cdf_data = RU.Dataset(file)
                data = array(cdf_data[gvar])
                cdf_data.close()
                return squeeze(data)
            # define and register the interpolators
            self = RU.Functionalize_Dataset(self, coord_dict, varname,
                                            self.variables[varname],
                                            gridded_int, coord_str,
                                            interp_flag=1, func=func)
            return
    return MODEL
|
nasaREPO_NAMEKamodoPATH_START.@Kamodo_extracted@Kamodo-master@kamodo_ccmc@readers@openggcm_gm_4Dcdf.py@.PATH_END.py
|
{
"filename": "adam_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/compiler/tests/adam_test.py",
"type": "Python"
}
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adam."""
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
def adam_update_numpy(param,
                      g_t,
                      t,
                      m,
                      v,
                      alpha=0.001,
                      beta1=0.9,
                      beta2=0.999,
                      epsilon=1e-8):
  """NumPy reference for one Adam step; returns (param_t, m_t, v_t).

  `t` is the 1-indexed step number used for bias correction.
  """
  # Bias-corrected learning rate for step `t`.
  lr_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
  # Exponential moving averages of the gradient and its square.
  m_t = beta1 * m + (1 - beta1) * g_t
  v_t = beta2 * v + (1 - beta2) * g_t * g_t
  # Parameter step uses the updated moment estimates.
  return param - lr_t * m_t / (np.sqrt(v_t) + epsilon), m_t, v_t
class AdamOptimizerTest(xla_test.XLATestCase):
  def testBasic(self):
    """Runs 3 Adam steps on two resource variables under XLA and checks the
    results against the NumPy reference implementation."""
    for dtype in self.float_types | self.complex_types:
      # TODO: test fails for float16 due to excessive precision requirements.
      if dtype in [np.float16, dtypes.bfloat16.as_numpy_dtype]:
        continue
      with self.session(), self.test_scope():
        variable_scope.get_variable_scope().set_use_resource(True)
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype)
        var0 = resource_variable_ops.ResourceVariable(var0_np)
        var1 = resource_variable_ops.ResourceVariable(var1_np)
        grads0 = array_ops.placeholder(dtype)
        grads1 = array_ops.placeholder(dtype)
        # Default hyperparameters: lr=0.001, beta1=0.9, beta2=0.999.
        opt = adam.AdamOptimizer()
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        self.evaluate(variables.global_variables_initializer())
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        beta1_power, beta2_power = opt._get_beta_accumulators()
        # Run 3 steps of Adam
        for t in range(1, 4):
          # The power accumulators should hold beta**t before the t-th update.
          self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
          self.assertAllCloseAccordingToType(0.999**t,
                                             self.evaluate(beta2_power))
          update.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
          self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testTensorLearningRate(self):
for dtype in self.float_types | self.complex_types:
# TODO: test fails for float16 due to excessive precision requirements.
if dtype in [np.float16, dtypes.bfloat16.as_numpy_dtype]:
continue
with self.session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = array_ops.placeholder(dtype)
grads1 = array_ops.placeholder(dtype)
opt = adam.AdamOptimizer(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
update.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSharing(self):
for dtype in self.float_types | self.complex_types:
# TODO: test fails for float16 due to excessive precision requirements.
if dtype in [np.float16, dtypes.bfloat16.as_numpy_dtype]:
continue
with self.session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = array_ops.placeholder(dtype)
grads1 = array_ops.placeholder(dtype)
opt = adam.AdamOptimizer()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of intertwined Adam1 and Adam2.
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
if t % 2 == 0:
update1.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
else:
update2.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
# Run the XLA Adam optimizer tests when executed directly.
if __name__ == "__main__":
  test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@compiler@tests@adam_test.py@.PATH_END.py
|
{
"filename": "self_query_hotel_search.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/cookbook/self_query_hotel_search.ipynb",
"type": "Jupyter Notebook"
}
|
# Building hotel room search with self-querying retrieval
In this example we'll walk through how to build and iterate on a hotel room search service that leverages an LLM to generate structured filter queries that can then be passed to a vector store.
For an introduction to self-querying retrieval [check out the docs](https://python.langchain.com/docs/modules/data_connection/retrievers/self_query).
## Imports and data prep
In this example we use `ChatOpenAI` for the model and `ElasticsearchStore` for the vector store, but these can be swapped out with an LLM/ChatModel and [any VectorStore that supports self-querying](https://python.langchain.com/docs/integrations/retrievers/self_query/).
Download data from: https://www.kaggle.com/datasets/keshavramaiah/hotel-recommendation
```python
!pip install langchain langchain-elasticsearch lark openai elasticsearch pandas
```
```python
import pandas as pd
```
```python
details = (
pd.read_csv("~/Downloads/archive/Hotel_details.csv")
.drop_duplicates(subset="hotelid")
.set_index("hotelid")
)
attributes = pd.read_csv(
"~/Downloads/archive/Hotel_Room_attributes.csv", index_col="id"
)
price = pd.read_csv("~/Downloads/archive/hotels_RoomPrice.csv", index_col="id")
```
```python
latest_price = price.drop_duplicates(subset="refid", keep="last")[
[
"hotelcode",
"roomtype",
"onsiterate",
"roomamenities",
"maxoccupancy",
"mealinclusiontype",
]
]
latest_price["ratedescription"] = attributes.loc[latest_price.index]["ratedescription"]
latest_price = latest_price.join(
details[["hotelname", "city", "country", "starrating"]], on="hotelcode"
)
latest_price = latest_price.rename({"ratedescription": "roomdescription"}, axis=1)
latest_price["mealsincluded"] = ~latest_price["mealinclusiontype"].isnull()
latest_price.pop("hotelcode")
latest_price.pop("mealinclusiontype")
latest_price = latest_price.reset_index(drop=True)
latest_price.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>roomtype</th>
<th>onsiterate</th>
<th>roomamenities</th>
<th>maxoccupancy</th>
<th>roomdescription</th>
<th>hotelname</th>
<th>city</th>
<th>country</th>
<th>starrating</th>
<th>mealsincluded</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>Vacation Home</td>
<td>636.09</td>
<td>Air conditioning: ;Closet: ;Fireplace: ;Free W...</td>
<td>4</td>
<td>Shower, Kitchenette, 2 bedrooms, 1 double bed ...</td>
<td>Pantlleni</td>
<td>Beddgelert</td>
<td>United Kingdom</td>
<td>3</td>
<td>False</td>
</tr>
<tr>
<th>1</th>
<td>Vacation Home</td>
<td>591.74</td>
<td>Air conditioning: ;Closet: ;Dishwasher: ;Firep...</td>
<td>4</td>
<td>Shower, Kitchenette, 2 bedrooms, 1 double bed ...</td>
<td>Willow Cottage</td>
<td>Beverley</td>
<td>United Kingdom</td>
<td>3</td>
<td>False</td>
</tr>
<tr>
<th>2</th>
<td>Guest room, Queen or Twin/Single Bed(s)</td>
<td>0.00</td>
<td>NaN</td>
<td>2</td>
<td>NaN</td>
<td>AC Hotel Manchester Salford Quays</td>
<td>Manchester</td>
<td>United Kingdom</td>
<td>4</td>
<td>False</td>
</tr>
<tr>
<th>3</th>
<td>Bargemaster King Accessible Room</td>
<td>379.08</td>
<td>Air conditioning: ;Free Wi-Fi in all rooms!: ;...</td>
<td>2</td>
<td>Shower</td>
<td>Lincoln Plaza London, Curio Collection by Hilton</td>
<td>London</td>
<td>United Kingdom</td>
<td>4</td>
<td>True</td>
</tr>
<tr>
<th>4</th>
<td>Twin Room</td>
<td>156.17</td>
<td>Additional toilet: ;Air conditioning: ;Blackou...</td>
<td>2</td>
<td>Room size: 15 m²/161 ft², Non-smoking, Shower,...</td>
<td>Ibis London Canning Town</td>
<td>London</td>
<td>United Kingdom</td>
<td>3</td>
<td>True</td>
</tr>
</tbody>
</table>
</div>
## Describe data attributes
We'll use a self-query retriever, which requires us to describe the metadata we can filter on.
Or if we're feeling lazy we can have a model write a draft of the descriptions for us :)
```python
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-4")
res = model.predict(
"Below is a table with information about hotel rooms. "
"Return a JSON list with an entry for each column. Each entry should have "
'{"name": "column name", "description": "column description", "type": "column data type"}'
f"\n\n{latest_price.head()}\n\nJSON:\n"
)
```
```python
import json
attribute_info = json.loads(res)
attribute_info
```
[{'name': 'roomtype', 'description': 'The type of the room', 'type': 'string'},
{'name': 'onsiterate',
'description': 'The rate of the room',
'type': 'float'},
{'name': 'roomamenities',
'description': 'Amenities available in the room',
'type': 'string'},
{'name': 'maxoccupancy',
'description': 'Maximum number of people that can occupy the room',
'type': 'integer'},
{'name': 'roomdescription',
'description': 'Description of the room',
'type': 'string'},
{'name': 'hotelname', 'description': 'Name of the hotel', 'type': 'string'},
{'name': 'city',
'description': 'City where the hotel is located',
'type': 'string'},
{'name': 'country',
'description': 'Country where the hotel is located',
'type': 'string'},
{'name': 'starrating',
'description': 'Star rating of the hotel',
'type': 'integer'},
{'name': 'mealsincluded',
'description': 'Whether meals are included or not',
'type': 'boolean'}]
For low cardinality features, let's include the valid values in the description
```python
latest_price.nunique()[latest_price.nunique() < 40]
```
maxoccupancy 19
country 29
starrating 3
mealsincluded 2
dtype: int64
```python
attribute_info[-2]["description"] += (
f". Valid values are {sorted(latest_price['starrating'].value_counts().index.tolist())}"
)
attribute_info[3]["description"] += (
f". Valid values are {sorted(latest_price['maxoccupancy'].value_counts().index.tolist())}"
)
attribute_info[-3]["description"] += (
f". Valid values are {sorted(latest_price['country'].value_counts().index.tolist())}"
)
```
```python
attribute_info
```
[{'name': 'roomtype', 'description': 'The type of the room', 'type': 'string'},
{'name': 'onsiterate',
'description': 'The rate of the room',
'type': 'float'},
{'name': 'roomamenities',
'description': 'Amenities available in the room',
'type': 'string'},
{'name': 'maxoccupancy',
'description': 'Maximum number of people that can occupy the room. Valid values are [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 20, 24]',
'type': 'integer'},
{'name': 'roomdescription',
'description': 'Description of the room',
'type': 'string'},
{'name': 'hotelname', 'description': 'Name of the hotel', 'type': 'string'},
{'name': 'city',
'description': 'City where the hotel is located',
'type': 'string'},
{'name': 'country',
'description': "Country where the hotel is located. Valid values are ['Austria', 'Belgium', 'Bulgaria', 'Croatia', 'Cyprus', 'Czech Republic', 'Denmark', 'Estonia', 'Finland', 'France', 'Germany', 'Greece', 'Hungary', 'Ireland', 'Italy', 'Latvia', 'Lithuania', 'Luxembourg', 'Malta', 'Netherlands', 'Poland', 'Portugal', 'Romania', 'Slovakia', 'Slovenia', 'Spain', 'Sweden', 'Switzerland', 'United Kingdom']",
'type': 'string'},
{'name': 'starrating',
'description': 'Star rating of the hotel. Valid values are [2, 3, 4]',
'type': 'integer'},
{'name': 'mealsincluded',
'description': 'Whether meals are included or not',
'type': 'boolean'}]
## Creating a query constructor chain
Let's take a look at the chain that will convert natural language requests into structured queries.
To start we can just load the prompt and see what it looks like
```python
from langchain.chains.query_constructor.base import (
get_query_constructor_prompt,
load_query_constructor_runnable,
)
```
```python
doc_contents = "Detailed description of a hotel room"
prompt = get_query_constructor_prompt(doc_contents, attribute_info)
print(prompt.format(query="{query}"))
```
Your goal is to structure the user's query to match the request schema provided below.
<< Structured Request Schema >>
When responding use a markdown code snippet with a JSON object formatted in the following schema:
```json
{
"query": string \ text string to compare to document contents
"filter": string \ logical condition statement for filtering documents
}
```
The query string should contain only text that is expected to match the contents of documents. Any conditions in the filter should not be mentioned in the query as well.
A logical condition statement is composed of one or more comparison and logical operation statements.
A comparison statement takes the form: `comp(attr, val)`:
- `comp` (eq | ne | gt | gte | lt | lte | contain | like | in | nin): comparator
- `attr` (string): name of attribute to apply the comparison to
- `val` (string): is the comparison value
A logical operation statement takes the form `op(statement1, statement2, ...)`:
- `op` (and | or | not): logical operator
- `statement1`, `statement2`, ... (comparison statements or logical operation statements): one or more statements to apply the operation to
Make sure that you only use the comparators and logical operators listed above and no others.
Make sure that filters only refer to attributes that exist in the data source.
Make sure that filters only use the attributed names with its function names if there are functions applied on them.
Make sure that filters only use format `YYYY-MM-DD` when handling timestamp data typed values.
Make sure that filters take into account the descriptions of attributes and only make comparisons that are feasible given the type of data being stored.
Make sure that filters are only used as needed. If there are no filters that should be applied return "NO_FILTER" for the filter value.
<< Example 1. >>
Data Source:
```json
{
"content": "Lyrics of a song",
"attributes": {
"artist": {
"type": "string",
"description": "Name of the song artist"
},
"length": {
"type": "integer",
"description": "Length of the song in seconds"
},
"genre": {
"type": "string",
"description": "The song genre, one of "pop", "rock" or "rap""
}
}
}
```
User Query:
What are songs by Taylor Swift or Katy Perry about teenage romance under 3 minutes long in the dance pop genre
Structured Request:
```json
{
"query": "teenager love",
"filter": "and(or(eq(\"artist\", \"Taylor Swift\"), eq(\"artist\", \"Katy Perry\")), lt(\"length\", 180), eq(\"genre\", \"pop\"))"
}
```
<< Example 2. >>
Data Source:
```json
{
"content": "Lyrics of a song",
"attributes": {
"artist": {
"type": "string",
"description": "Name of the song artist"
},
"length": {
"type": "integer",
"description": "Length of the song in seconds"
},
"genre": {
"type": "string",
"description": "The song genre, one of "pop", "rock" or "rap""
}
}
}
```
User Query:
What are songs that were not published on Spotify
Structured Request:
```json
{
"query": "",
"filter": "NO_FILTER"
}
```
<< Example 3. >>
Data Source:
```json
{
"content": "Detailed description of a hotel room",
"attributes": {
"roomtype": {
"description": "The type of the room",
"type": "string"
},
"onsiterate": {
"description": "The rate of the room",
"type": "float"
},
"roomamenities": {
"description": "Amenities available in the room",
"type": "string"
},
"maxoccupancy": {
"description": "Maximum number of people that can occupy the room. Valid values are [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 20, 24]",
"type": "integer"
},
"roomdescription": {
"description": "Description of the room",
"type": "string"
},
"hotelname": {
"description": "Name of the hotel",
"type": "string"
},
"city": {
"description": "City where the hotel is located",
"type": "string"
},
"country": {
"description": "Country where the hotel is located. Valid values are ['Austria', 'Belgium', 'Bulgaria', 'Croatia', 'Cyprus', 'Czech Republic', 'Denmark', 'Estonia', 'Finland', 'France', 'Germany', 'Greece', 'Hungary', 'Ireland', 'Italy', 'Latvia', 'Lithuania', 'Luxembourg', 'Malta', 'Netherlands', 'Poland', 'Portugal', 'Romania', 'Slovakia', 'Slovenia', 'Spain', 'Sweden', 'Switzerland', 'United Kingdom']",
"type": "string"
},
"starrating": {
"description": "Star rating of the hotel. Valid values are [2, 3, 4]",
"type": "integer"
},
"mealsincluded": {
"description": "Whether meals are included or not",
"type": "boolean"
}
}
}
```
User Query:
{query}
Structured Request:
```python
chain = load_query_constructor_runnable(
ChatOpenAI(model="gpt-3.5-turbo", temperature=0), doc_contents, attribute_info
)
```
```python
chain.invoke({"query": "I want a hotel in Southern Europe and my budget is 200 bucks."})
```
StructuredQuery(query='hotel', filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='country', value='Italy'), Comparison(comparator=<Comparator.LTE: 'lte'>, attribute='onsiterate', value=200)]), limit=None)
```python
chain.invoke(
{
"query": "Find a 2-person room in Vienna or London, preferably with meals included and AC"
}
)
```
StructuredQuery(query='2-person room', filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Operation(operator=<Operator.OR: 'or'>, arguments=[Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='city', value='Vienna'), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='city', value='London')]), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='maxoccupancy', value=2), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='mealsincluded', value=True), Comparison(comparator=<Comparator.CONTAIN: 'contain'>, attribute='roomamenities', value='AC')]), limit=None)
## Refining attribute descriptions
We can see at least two issues above. First is that when we ask for a Southern European destination we're only getting a filter for Italy, and second when we ask for AC we get a literal string lookup for AC (which isn't so bad but will miss things like 'Air conditioning').
As a first step, let's try to update our description of the 'country' attribute to emphasize that equality should only be used when a specific country is mentioned.
```python
attribute_info[-3]["description"] += (
". NOTE: Only use the 'eq' operator if a specific country is mentioned. If a region is mentioned, include all relevant countries in filter."
)
chain = load_query_constructor_runnable(
ChatOpenAI(model="gpt-3.5-turbo", temperature=0),
doc_contents,
attribute_info,
)
```
```python
chain.invoke({"query": "I want a hotel in Southern Europe and my budget is 200 bucks."})
```
StructuredQuery(query='hotel', filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='mealsincluded', value=False), Comparison(comparator=<Comparator.LTE: 'lte'>, attribute='onsiterate', value=200), Operation(operator=<Operator.OR: 'or'>, arguments=[Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='country', value='Italy'), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='country', value='Spain'), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='country', value='Greece'), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='country', value='Portugal'), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='country', value='Croatia'), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='country', value='Cyprus'), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='country', value='Malta'), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='country', value='Bulgaria'), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='country', value='Romania'), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='country', value='Slovenia'), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='country', value='Czech Republic'), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='country', value='Slovakia'), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='country', value='Hungary'), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='country', value='Poland'), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='country', value='Estonia'), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='country', value='Latvia'), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='country', value='Lithuania')])]), limit=None)
## Refining which attributes to filter on
This seems to have helped! Now let's try to narrow the attributes we're filtering on. More freeform attributes we can leave to the main query, which is better for capturing semantic meaning than searching for specific substrings.
```python
content_attr = ["roomtype", "roomamenities", "roomdescription", "hotelname"]
doc_contents = "A detailed description of a hotel room, including information about the room type and room amenities."
filter_attribute_info = tuple(
ai for ai in attribute_info if ai["name"] not in content_attr
)
chain = load_query_constructor_runnable(
ChatOpenAI(model="gpt-3.5-turbo", temperature=0),
doc_contents,
filter_attribute_info,
)
```
```python
chain.invoke(
{
"query": "Find a 2-person room in Vienna or London, preferably with meals included and AC"
}
)
```
StructuredQuery(query='2-person room', filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Operation(operator=<Operator.OR: 'or'>, arguments=[Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='city', value='Vienna'), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='city', value='London')]), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='maxoccupancy', value=2), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='mealsincluded', value=True)]), limit=None)
## Adding examples specific to our use case
We've removed the strict filter for 'AC' but it's still not being included in the query string. Our chain prompt is a few-shot prompt with some default examples. Let's see if adding use case-specific examples will help:
```python
examples = [
(
"I want a hotel in the Balkans with a king sized bed and a hot tub. Budget is $300 a night",
{
"query": "king-sized bed, hot tub",
"filter": 'and(in("country", ["Bulgaria", "Greece", "Croatia", "Serbia"]), lte("onsiterate", 300))',
},
),
(
"A room with breakfast included for 3 people, at a Hilton",
{
"query": "Hilton",
"filter": 'and(eq("mealsincluded", true), gte("maxoccupancy", 3))',
},
),
]
prompt = get_query_constructor_prompt(
doc_contents, filter_attribute_info, examples=examples
)
print(prompt.format(query="{query}"))
```
Your goal is to structure the user's query to match the request schema provided below.
<< Structured Request Schema >>
When responding use a markdown code snippet with a JSON object formatted in the following schema:
```json
{
"query": string \ text string to compare to document contents
"filter": string \ logical condition statement for filtering documents
}
```
The query string should contain only text that is expected to match the contents of documents. Any conditions in the filter should not be mentioned in the query as well.
A logical condition statement is composed of one or more comparison and logical operation statements.
A comparison statement takes the form: `comp(attr, val)`:
- `comp` (eq | ne | gt | gte | lt | lte | contain | like | in | nin): comparator
- `attr` (string): name of attribute to apply the comparison to
- `val` (string): is the comparison value
A logical operation statement takes the form `op(statement1, statement2, ...)`:
- `op` (and | or | not): logical operator
- `statement1`, `statement2`, ... (comparison statements or logical operation statements): one or more statements to apply the operation to
Make sure that you only use the comparators and logical operators listed above and no others.
Make sure that filters only refer to attributes that exist in the data source.
Make sure that filters only use the attributed names with its function names if there are functions applied on them.
Make sure that filters only use format `YYYY-MM-DD` when handling timestamp data typed values.
Make sure that filters take into account the descriptions of attributes and only make comparisons that are feasible given the type of data being stored.
Make sure that filters are only used as needed. If there are no filters that should be applied return "NO_FILTER" for the filter value.
<< Data Source >>
```json
{
"content": "A detailed description of a hotel room, including information about the room type and room amenities.",
"attributes": {
"onsiterate": {
"description": "The rate of the room",
"type": "float"
},
"maxoccupancy": {
"description": "Maximum number of people that can occupy the room. Valid values are [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 20, 24]",
"type": "integer"
},
"city": {
"description": "City where the hotel is located",
"type": "string"
},
"country": {
"description": "Country where the hotel is located. Valid values are ['Austria', 'Belgium', 'Bulgaria', 'Croatia', 'Cyprus', 'Czech Republic', 'Denmark', 'Estonia', 'Finland', 'France', 'Germany', 'Greece', 'Hungary', 'Ireland', 'Italy', 'Latvia', 'Lithuania', 'Luxembourg', 'Malta', 'Netherlands', 'Poland', 'Portugal', 'Romania', 'Slovakia', 'Slovenia', 'Spain', 'Sweden', 'Switzerland', 'United Kingdom']. NOTE: Only use the 'eq' operator if a specific country is mentioned. If a region is mentioned, include all relevant countries in filter.",
"type": "string"
},
"starrating": {
"description": "Star rating of the hotel. Valid values are [2, 3, 4]",
"type": "integer"
},
"mealsincluded": {
"description": "Whether meals are included or not",
"type": "boolean"
}
}
}
```
<< Example 1. >>
User Query:
I want a hotel in the Balkans with a king sized bed and a hot tub. Budget is $300 a night
Structured Request:
```json
{
"query": "king-sized bed, hot tub",
"filter": "and(in(\"country\", [\"Bulgaria\", \"Greece\", \"Croatia\", \"Serbia\"]), lte(\"onsiterate\", 300))"
}
```
<< Example 2. >>
User Query:
A room with breakfast included for 3 people, at a Hilton
Structured Request:
```json
{
"query": "Hilton",
"filter": "and(eq(\"mealsincluded\", true), gte(\"maxoccupancy\", 3))"
}
```
<< Example 3. >>
User Query:
{query}
Structured Request:
```python
chain = load_query_constructor_runnable(
ChatOpenAI(model="gpt-3.5-turbo", temperature=0),
doc_contents,
filter_attribute_info,
examples=examples,
)
```
```python
chain.invoke(
{
"query": "Find a 2-person room in Vienna or London, preferably with meals included and AC"
}
)
```
StructuredQuery(query='2-person room, meals included, AC', filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Operation(operator=<Operator.OR: 'or'>, arguments=[Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='city', value='Vienna'), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='city', value='London')]), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='mealsincluded', value=True)]), limit=None)
This seems to have helped! Let's try another complex query:
```python
chain.invoke(
{
"query": "I want to stay somewhere highly rated along the coast. I want a room with a patio and a fireplace."
}
)
```
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
File ~/langchain/libs/langchain/langchain/chains/query_constructor/base.py:53, in StructuredQueryOutputParser.parse(self, text)
52 else:
---> 53 parsed["filter"] = self.ast_parse(parsed["filter"])
54 if not parsed.get("limit"):
File ~/langchain/.venv/lib/python3.9/site-packages/lark/lark.py:652, in Lark.parse(self, text, start, on_error)
635 """Parse the given text, according to the options provided.
636
637 Parameters:
(...)
650
651 """
--> 652 return self.parser.parse(text, start=start, on_error=on_error)
File ~/langchain/.venv/lib/python3.9/site-packages/lark/parser_frontends.py:101, in ParsingFrontend.parse(self, text, start, on_error)
100 stream = self._make_lexer_thread(text)
--> 101 return self.parser.parse(stream, chosen_start, **kw)
File ~/langchain/.venv/lib/python3.9/site-packages/lark/parsers/lalr_parser.py:41, in LALR_Parser.parse(self, lexer, start, on_error)
40 try:
---> 41 return self.parser.parse(lexer, start)
42 except UnexpectedInput as e:
File ~/langchain/.venv/lib/python3.9/site-packages/lark/parsers/lalr_parser.py:171, in _Parser.parse(self, lexer, start, value_stack, state_stack, start_interactive)
170 return InteractiveParser(self, parser_state, parser_state.lexer)
--> 171 return self.parse_from_state(parser_state)
File ~/langchain/.venv/lib/python3.9/site-packages/lark/parsers/lalr_parser.py:184, in _Parser.parse_from_state(self, state, last_token)
183 for token in state.lexer.lex(state):
--> 184 state.feed_token(token)
186 end_token = Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1)
File ~/langchain/.venv/lib/python3.9/site-packages/lark/parsers/lalr_parser.py:150, in ParserState.feed_token(self, token, is_end)
148 s = []
--> 150 value = callbacks[rule](s)
152 _action, new_state = states[state_stack[-1]][rule.origin.name]
File ~/langchain/.venv/lib/python3.9/site-packages/lark/parse_tree_builder.py:153, in ChildFilterLALR_NoPlaceholders.__call__(self, children)
152 filtered.append(children[i])
--> 153 return self.node_builder(filtered)
File ~/langchain/.venv/lib/python3.9/site-packages/lark/parse_tree_builder.py:325, in apply_visit_wrapper.<locals>.f(children)
323 @wraps(func)
324 def f(children):
--> 325 return wrapper(func, name, children, None)
File ~/langchain/.venv/lib/python3.9/site-packages/lark/visitors.py:501, in _vargs_inline(f, _data, children, _meta)
500 def _vargs_inline(f, _data, children, _meta):
--> 501 return f(*children)
File ~/langchain/.venv/lib/python3.9/site-packages/lark/visitors.py:479, in _VArgsWrapper.__call__(self, *args, **kwargs)
478 def __call__(self, *args, **kwargs):
--> 479 return self.base_func(*args, **kwargs)
File ~/langchain/libs/langchain/langchain/chains/query_constructor/parser.py:79, in QueryTransformer.func_call(self, func_name, args)
78 if self.allowed_attributes and args[0] not in self.allowed_attributes:
---> 79 raise ValueError(
80 f"Received invalid attributes {args[0]}. Allowed attributes are "
81 f"{self.allowed_attributes}"
82 )
83 return Comparison(comparator=func, attribute=args[0], value=args[1])
ValueError: Received invalid attributes description. Allowed attributes are ['onsiterate', 'maxoccupancy', 'city', 'country', 'starrating', 'mealsincluded']
During handling of the above exception, another exception occurred:
OutputParserException Traceback (most recent call last)
Cell In[21], line 1
----> 1 chain.invoke({"query": "I want to stay somewhere highly rated along the coast. I want a room with a patio and a fireplace."})
File ~/langchain/libs/langchain/langchain/schema/runnable/base.py:1113, in RunnableSequence.invoke(self, input, config)
1111 try:
1112 for i, step in enumerate(self.steps):
-> 1113 input = step.invoke(
1114 input,
1115 # mark each step as a child run
1116 patch_config(
1117 config, callbacks=run_manager.get_child(f"seq:step:{i+1}")
1118 ),
1119 )
1120 # finish the root run
1121 except BaseException as e:
File ~/langchain/libs/langchain/langchain/schema/output_parser.py:173, in BaseOutputParser.invoke(self, input, config)
169 def invoke(
170 self, input: Union[str, BaseMessage], config: Optional[RunnableConfig] = None
171 ) -> T:
172 if isinstance(input, BaseMessage):
--> 173 return self._call_with_config(
174 lambda inner_input: self.parse_result(
175 [ChatGeneration(message=inner_input)]
176 ),
177 input,
178 config,
179 run_type="parser",
180 )
181 else:
182 return self._call_with_config(
183 lambda inner_input: self.parse_result([Generation(text=inner_input)]),
184 input,
185 config,
186 run_type="parser",
187 )
File ~/langchain/libs/langchain/langchain/schema/runnable/base.py:633, in Runnable._call_with_config(self, func, input, config, run_type, **kwargs)
626 run_manager = callback_manager.on_chain_start(
627 dumpd(self),
628 input,
629 run_type=run_type,
630 name=config.get("run_name"),
631 )
632 try:
--> 633 output = call_func_with_variable_args(
634 func, input, run_manager, config, **kwargs
635 )
636 except BaseException as e:
637 run_manager.on_chain_error(e)
File ~/langchain/libs/langchain/langchain/schema/runnable/config.py:173, in call_func_with_variable_args(func, input, run_manager, config, **kwargs)
171 if accepts_run_manager(func):
172 kwargs["run_manager"] = run_manager
--> 173 return func(input, **kwargs)
File ~/langchain/libs/langchain/langchain/schema/output_parser.py:174, in BaseOutputParser.invoke.<locals>.<lambda>(inner_input)
169 def invoke(
170 self, input: Union[str, BaseMessage], config: Optional[RunnableConfig] = None
171 ) -> T:
172 if isinstance(input, BaseMessage):
173 return self._call_with_config(
--> 174 lambda inner_input: self.parse_result(
175 [ChatGeneration(message=inner_input)]
176 ),
177 input,
178 config,
179 run_type="parser",
180 )
181 else:
182 return self._call_with_config(
183 lambda inner_input: self.parse_result([Generation(text=inner_input)]),
184 input,
185 config,
186 run_type="parser",
187 )
File ~/langchain/libs/langchain/langchain/schema/output_parser.py:225, in BaseOutputParser.parse_result(self, result, partial)
212 def parse_result(self, result: List[Generation], *, partial: bool = False) -> T:
213 """Parse a list of candidate model Generations into a specific format.
214
215 The return value is parsed from only the first Generation in the result, which
(...)
223 Structured output.
224 """
--> 225 return self.parse(result[0].text)
File ~/langchain/libs/langchain/langchain/chains/query_constructor/base.py:60, in StructuredQueryOutputParser.parse(self, text)
56 return StructuredQuery(
57 **{k: v for k, v in parsed.items() if k in allowed_keys}
58 )
59 except Exception as e:
---> 60 raise OutputParserException(
61 f"Parsing text\n{text}\n raised following error:\n{e}"
62 )
OutputParserException: Parsing text
```json
{
"query": "highly rated, coast, patio, fireplace",
"filter": "and(eq(\"starrating\", 4), contain(\"description\", \"coast\"), contain(\"description\", \"patio\"), contain(\"description\", \"fireplace\"))"
}
```
raised following error:
Received invalid attributes description. Allowed attributes are ['onsiterate', 'maxoccupancy', 'city', 'country', 'starrating', 'mealsincluded']
## Automatically ignoring invalid queries
It seems our model gets tripped up on this more complex query and tries to search over an attribute ('description') that doesn't exist. By setting `fix_invalid=True` in our query constructor chain, we can automatically remove any parts of the filter that are invalid (meaning they use disallowed operations, comparisons or attributes).
```python
chain = load_query_constructor_runnable(
ChatOpenAI(model="gpt-3.5-turbo", temperature=0),
doc_contents,
filter_attribute_info,
examples=examples,
fix_invalid=True,
)
```
```python
chain.invoke(
{
"query": "I want to stay somewhere highly rated along the coast. I want a room with a patio and a fireplace."
}
)
```
StructuredQuery(query='highly rated, coast, patio, fireplace', filter=Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='starrating', value=4), limit=None)
## Using with a self-querying retriever
Now that our query construction chain is in a decent place, let's try using it with an actual retriever. For this example we'll use the [ElasticsearchStore](https://python.langchain.com/docs/integrations/vectorstores/elasticsearch).
```python
from langchain_elasticsearch import ElasticsearchStore
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
```
## Populating vectorstore
The first time you run this, uncomment the below cell to first index the data.
```python
# docs = []
# for _, room in latest_price.fillna("").iterrows():
# doc = Document(
# page_content=json.dumps(room.to_dict(), indent=2),
# metadata=room.to_dict()
# )
# docs.append(doc)
# vecstore = ElasticsearchStore.from_documents(
# docs,
# embeddings,
# es_url="http://localhost:9200",
# index_name="hotel_rooms",
# # strategy=ElasticsearchStore.ApproxRetrievalStrategy(
# # hybrid=True,
# # )
# )
```
```python
vecstore = ElasticsearchStore(
"hotel_rooms",
embedding=embeddings,
es_url="http://localhost:9200",
# strategy=ElasticsearchStore.ApproxRetrievalStrategy(hybrid=True) # seems to not be available in community version
)
```
```python
from langchain.retrievers import SelfQueryRetriever
retriever = SelfQueryRetriever(
query_constructor=chain, vectorstore=vecstore, verbose=True
)
```
```python
results = retriever.invoke(
"I want to stay somewhere highly rated along the coast. I want a room with a patio and a fireplace."
)
for res in results:
print(res.page_content)
print("\n" + "-" * 20 + "\n")
```
{
"roomtype": "Three-Bedroom House With Sea View",
"onsiterate": 341.75,
"roomamenities": "Additional bathroom: ;Additional toilet: ;Air conditioning: ;Closet: ;Clothes dryer: ;Coffee/tea maker: ;Dishwasher: ;DVD/CD player: ;Fireplace: ;Free Wi-Fi in all rooms!: ;Full kitchen: ;Hair dryer: ;Heating: ;High chair: ;In-room safe box: ;Ironing facilities: ;Kitchenware: ;Linens: ;Microwave: ;Private entrance: ;Refrigerator: ;Seating area: ;Separate dining area: ;Smoke detector: ;Sofa: ;Towels: ;TV [flat screen]: ;Washing machine: ;",
"maxoccupancy": 6,
"roomdescription": "Room size: 125 m\u00b2/1345 ft\u00b2, 2 bathrooms, Shower and bathtub, Shared bathroom, Kitchenette, 3 bedrooms, 1 double bed or 2 single beds or 1 double bed",
"hotelname": "Downings Coastguard Cottages - Type B-E",
"city": "Downings",
"country": "Ireland",
"starrating": 4,
"mealsincluded": false
}
--------------------
{
"roomtype": "Three-Bedroom House With Sea View",
"onsiterate": 774.05,
"roomamenities": "Additional bathroom: ;Additional toilet: ;Air conditioning: ;Closet: ;Clothes dryer: ;Coffee/tea maker: ;Dishwasher: ;DVD/CD player: ;Fireplace: ;Free Wi-Fi in all rooms!: ;Full kitchen: ;Hair dryer: ;Heating: ;High chair: ;In-room safe box: ;Ironing facilities: ;Kitchenware: ;Linens: ;Microwave: ;Private entrance: ;Refrigerator: ;Seating area: ;Separate dining area: ;Smoke detector: ;Sofa: ;Towels: ;TV [flat screen]: ;Washing machine: ;",
"maxoccupancy": 6,
"roomdescription": "Room size: 125 m\u00b2/1345 ft\u00b2, 2 bathrooms, Shower and bathtub, Shared bathroom, Kitchenette, 3 bedrooms, 1 double bed or 2 single beds or 1 double bed",
"hotelname": "Downings Coastguard Cottages - Type B-E",
"city": "Downings",
"country": "Ireland",
"starrating": 4,
"mealsincluded": false
}
--------------------
{
"roomtype": "Four-Bedroom Apartment with Sea View",
"onsiterate": 501.24,
"roomamenities": "Additional toilet: ;Air conditioning: ;Carpeting: ;Cleaning products: ;Closet: ;Clothes dryer: ;Clothes rack: ;Coffee/tea maker: ;Dishwasher: ;DVD/CD player: ;Fireplace: ;Free Wi-Fi in all rooms!: ;Full kitchen: ;Hair dryer: ;Heating: ;High chair: ;In-room safe box: ;Ironing facilities: ;Kitchenware: ;Linens: ;Microwave: ;Private entrance: ;Refrigerator: ;Seating area: ;Separate dining area: ;Smoke detector: ;Sofa: ;Toiletries: ;Towels: ;TV [flat screen]: ;Wake-up service: ;Washing machine: ;",
"maxoccupancy": 9,
"roomdescription": "Room size: 110 m\u00b2/1184 ft\u00b2, Balcony/terrace, Shower and bathtub, Kitchenette, 4 bedrooms, 1 single bed or 1 queen bed or 1 double bed or 2 single beds",
"hotelname": "1 Elliot Terrace",
"city": "Plymouth",
"country": "United Kingdom",
"starrating": 4,
"mealsincluded": false
}
--------------------
{
"roomtype": "Three-Bedroom Holiday Home with Terrace and Sea View",
"onsiterate": 295.83,
"roomamenities": "Air conditioning: ;Dishwasher: ;Free Wi-Fi in all rooms!: ;Full kitchen: ;Heating: ;In-room safe box: ;Kitchenware: ;Private entrance: ;Refrigerator: ;Satellite/cable channels: ;Seating area: ;Separate dining area: ;Sofa: ;Washing machine: ;",
"maxoccupancy": 1,
"roomdescription": "Room size: 157 m\u00b2/1690 ft\u00b2, Balcony/terrace, 3 bathrooms, Shower, Kitchenette, 3 bedrooms, 1 queen bed or 1 queen bed or 1 queen bed or 1 sofa bed",
"hotelname": "Seaside holiday house Artatore (Losinj) - 17102",
"city": "Mali Losinj",
"country": "Croatia",
"starrating": 4,
"mealsincluded": false
}
--------------------
```python
```
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@cookbook@self_query_hotel_search.ipynb@.PATH_END.py
|
{
"filename": "program-options.md",
"repo_name": "STEllAR-GROUP/octotiger",
"repo_path": "octotiger_extracted/octotiger-master/doc/content/program-options.md",
"type": "Markdown"
}
|
# Program Options
|
STEllAR-GROUPREPO_NAMEoctotigerPATH_START.@octotiger_extracted@octotiger-master@doc@content@program-options.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "grburgess/popsynth",
"repo_path": "popsynth_extracted/popsynth-master/popsynth/distributions/__init__.py",
"type": "Python"
}
|
from .bpl_distribution import BPLDistribution
from .cosmological_distribution import (
CosmologicalDistribution,
SFRDistribution,
ZPowerCosmoDistribution,
)
from .delta_distribution import DeltaDistribution
from .flatland_distribution import FlatlandDistribution
from .log10_normal_distribution import Log10NormalDistribution
from .log_normal_distribution import LogNormalDistribution
from .pareto_distribution import ParetoDistribution
from .schechter_distribution import SchechterDistribution
from .spherical_distribution import (
ConstantSphericalDistribution,
SphericalDistribution,
ZPowerSphericalDistribution,
)
from .spiral_galaxy_distribution import SpiralGalaxyDistribution
from .uniform_distribution import (
LogUniLuminiosityDistribution,
UniformCosmoDistribution,
)
# Public API of the distributions subpackage; every name listed here is
# re-exported from the module imports above.
# Fix: "SphericalDistribution" appeared twice in this list (harmless for
# `import *` but misleading); the duplicate entry is removed.
__all__ = [
    "SphericalDistribution",
    "CosmologicalDistribution",
    "SFRDistribution",
    "ZPowerCosmoDistribution",
    "ParetoDistribution",
    "Log10NormalDistribution",
    "LogNormalDistribution",
    "SchechterDistribution",
    "BPLDistribution",
    "ConstantSphericalDistribution",
    "ZPowerSphericalDistribution",
    "DeltaDistribution",
    "FlatlandDistribution",
    "SpiralGalaxyDistribution",
    "LogUniLuminiosityDistribution",
    "UniformCosmoDistribution",
]
|
grburgessREPO_NAMEpopsynthPATH_START.@popsynth_extracted@popsynth-master@popsynth@distributions@__init__.py@.PATH_END.py
|
{
"filename": "acssimBaseRepresentation.py",
"repo_name": "ACS-Community/ACS",
"repo_path": "ACS_extracted/ACS-master/LGPL/CommonSoftware/acssim/test/acssimBaseRepresentation.py",
"type": "Python"
}
|
#!/usr/bin/env python
#*******************************************************************************
# ALMA - Atacama Large Millimiter Array
# (c) Associated Universities Inc., 2002
# (c) European Southern Observatory, 2002
# Copyright by ESO (in the framework of the ALMA collaboration)
# and Cosylab 2002, All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
# @(#) $Id$
#------------------------------------------------------------------------------
__revision__ = "@(#) $Id$"
'''
Tests BaseRepresentation.
'''
from Acssim.Servants.Representations.BaseRepresentation import BaseRepresentation
class Concrete(BaseRepresentation):
    # Minimal concrete subclass used to exercise BaseRepresentation in the
    # test below.
    def __init__(self):
        # Register this representation under a fixed, CDB-style name.
        BaseRepresentation.__init__(self, "HELLOWORLD1")
        # Seed one method entry so getMethod has something to return.
        self.setMethod("displayMessage", { "nonempty" : "constructor"})
# Exercise getting/setting simulated method metadata (Python 2 test script).
if __name__=="__main__":
    concrete = Concrete()
    # Value seeded by the constructor.
    print "Concrete.getMethod('displayMessage'):", concrete.getMethod('displayMessage')
    # Overwrite the entry and read it back.
    concrete.setMethod('displayMessage', { "nonempty" : "main"})
    print "Concrete.getMethod('displayMessage'):", concrete.getMethod('displayMessage')
    print
    # Lookup of a method that was never set.
    print "Concrete.getMethod('nomethod'):", concrete.getMethod('nomethod')
|
ACS-CommunityREPO_NAMEACSPATH_START.@ACS_extracted@ACS-master@LGPL@CommonSoftware@acssim@test@acssimBaseRepresentation.py@.PATH_END.py
|
{
"filename": "astropy_helpers.py",
"repo_name": "cta-observatory/ctapipe",
"repo_path": "ctapipe_extracted/ctapipe-main/src/ctapipe/io/astropy_helpers.py",
"type": "Python"
}
|
#!/usr/bin/env python3
"""
Functions to help adapt internal ctapipe data to astropy formats and conventions
"""
import os
from contextlib import ExitStack
from uuid import uuid4
import numpy as np
import tables
from astropy.table import Table, join
from astropy.time import Time
from .hdf5tableio import (
DEFAULT_FILTERS,
get_column_attrs,
get_column_transforms,
get_node_meta,
)
from .tableio import (
EnumColumnTransform,
QuantityColumnTransform,
StringTransform,
TimeColumnTransform,
)
__all__ = ["read_table", "write_table", "join_allow_empty"]
def read_table(
    h5file, path, start=None, stop=None, step=None, condition=None, table_cls=Table
) -> Table:
    """Read a table from an HDF5 file
    This reads a table written in the ctapipe format table as an `astropy.table.Table`
    object, inversing the column transformations units.
    This uses the same conventions as the `~ctapipe.io.HDF5TableWriter`,
    with the exception of Enums, that will remain as integers.
    (start, stop, step) behave like python slices.
    Parameters
    ----------
    h5file: Union[str, Path, tables.file.File]
        input filename or PyTables file handle
    path: str
        path to table in the file
    start: int or None
        if given, this is the first row to be loaded
    stop: int or None
        if given, this is the last row to be loaded (not inclusive)
    step: int or None
        step between rows.
    condition: str
        A numexpr expression to only load rows fulfilling this condition.
        For example, use "hillas_length > 0" to only load rows where the
        hillas length is larger than 0 (so not nan and not 0).
        Ignored when reading tables that were written using astropy.
    table_cls: type
        Table class to instantiate, by default `astropy.table.Table`.
    Returns
    -------
    astropy.table.Table:
        table in Astropy Format
    """
    with ExitStack() as stack:
        if not isinstance(h5file, tables.File):
            # We opened the file ourselves here, so the ExitStack closes it;
            # a file handle passed in by the caller is left open.
            h5file = stack.enter_context(tables.open_file(h5file))
        # check if the table was written using astropy table io, if yes
        # just use astropy
        is_astropy = f"{path}.__table_column_meta__" in h5file.root
        if is_astropy:
            sl = slice(start, stop, step)
            return table_cls.read(h5file.filename, path)[sl]
        # support leaving out the leading '/' for consistency with other
        # methods
        path = os.path.join("/", path)
        table = h5file.get_node(path)
        if not isinstance(table, tables.Table):
            raise OSError(
                f"Node {path} is a {table.__class__.__name__}, must be a Table"
            )
        transforms, descriptions, meta = _parse_hdf5_attrs(table)
        if condition is None:
            array = table.read(start=start, stop=stop, step=step)
        else:
            array = table.read_where(
                condition=condition, start=start, stop=stop, step=step
            )
        astropy_table = table_cls(array, meta=meta, copy=False)
        # Invert the writer-side column transforms (units, times, strings...).
        for column, tr in transforms.items():
            if column not in astropy_table.colnames:
                continue
            # keep enums as integers, much easier to deal with in tables
            if isinstance(tr, EnumColumnTransform):
                continue
            astropy_table[column] = tr.inverse(astropy_table[column])
        # Re-attach per-column descriptions stored in the HDF5 attributes.
        for column, desc in descriptions.items():
            if column not in astropy_table.colnames:
                continue
            astropy_table[column].description = desc
        return astropy_table
def write_table(
    table,
    h5file,
    path,
    append=False,
    overwrite=False,
    mode="a",
    filters=DEFAULT_FILTERS,
):
    """Write a table to an HDF5 file
    This writes a table in the ctapipe format into ``h5file``.
    Parameters
    ----------
    table: astropy.table.Table
        The table to be written.
    h5file: Union[str, Path, tables.file.File]
        input filename or PyTables file handle. If a PyTables file handle,
        must be opened writable.
    path: str
        dataset path inside the ``h5file``
    append: bool
        Whether to try to append to or replace an existing table
    overwrite: bool
        If table is already in file and overwrite and append are false,
        raise an error.
    mode: str
        If given a path for ``h5file``, it will be opened in this mode.
        See the docs of ``tables.open_file``.
    filters: tables.Filters
        Compression/filter settings used when creating a new table node.
    """
    # ``table`` is copied lazily, at most once, the first time a column has to
    # be converted before writing; ``copied`` tracks whether that happened.
    copied = False
    parent, table_name = os.path.split(path)
    if append and overwrite:
        raise ValueError("overwrite and append are mutually exclusive")
    with ExitStack() as stack:
        if not isinstance(h5file, tables.File):
            # We opened the file ourselves here, so the ExitStack closes it.
            h5file = stack.enter_context(tables.open_file(h5file, mode=mode))
        already_exists = path in h5file.root
        if already_exists:
            if overwrite and not append:
                h5file.remove_node(parent, table_name)
                already_exists = False
            elif not overwrite and not append:
                raise OSError(
                    f"Table {path} already exists in output file, use append or overwrite"
                )
        # Per-column transform metadata, stored as HDF5 attributes so that
        # ``read_table`` can invert the conversions applied below.
        attrs = {}
        for pos, (colname, column) in enumerate(table.columns.items()):
            if hasattr(column, "description") and column.description is not None:
                attrs[f"CTAFIELD_{pos}_DESC"] = column.description
            if isinstance(column, Time):
                transform = TimeColumnTransform(scale="tai", format="mjd")
                attrs.update(transform.get_meta(pos))
                if copied is False:
                    table = table.copy()
                    copied = True
                table[colname] = transform(column)
            # TODO: use variable length strings as soon as tables supports them.
            # See PyTables/PyTables#48
            elif column.dtype.kind == "U":
                if copied is False:
                    table = table.copy()
                    copied = True
                table[colname] = np.array([s.encode("utf-8") for s in column])
                transform = StringTransform(table[colname].dtype.itemsize)
                attrs.update(transform.get_meta(pos))
            elif column.unit is not None:
                transform = QuantityColumnTransform(column.unit)
                attrs.update(transform.get_meta(pos))
        if not already_exists:
            h5_table = h5file.create_table(
                parent,
                table_name,
                filters=filters,
                expectedrows=len(table),
                createparents=True,
                obj=table.as_array(),
            )
        else:
            h5_table = h5file.get_node(path)
            h5_table.append(table.as_array())
        # Store table-level metadata and the column transform attributes.
        for key, val in table.meta.items():
            h5_table.attrs[key] = val
        for key, val in attrs.items():
            h5_table.attrs[key] = val
def _parse_hdf5_attrs(table):
    """Extract column transforms, descriptions and node metadata from a pytables table."""
    column_attrs = get_column_attrs(table)
    descriptions = {}
    for name, attrs in column_attrs.items():
        descriptions[name] = attrs.get("DESC", "")
    transforms = get_column_transforms(column_attrs)
    return transforms, descriptions, get_node_meta(table)
def join_allow_empty(left, right, keys, join_type="left", keep_order=False, **kwargs):
    """
    Join two astropy tables, allowing both sides to be empty tables.
    See https://github.com/astropy/astropy/issues/12012 for why
    this is necessary.
    This behaves as `~astropy.table.join`, with the only difference of
    allowing empty tables to be joined.
    """
    left_empty = len(left) == 0
    right_empty = len(right) == 0
    # Short-circuit on empty inputs, since astropy's join cannot handle them.
    if join_type == "inner":
        if left_empty:
            return left.copy()
        if right_empty:
            return right.copy()
    elif join_type == "left":
        if left_empty or right_empty:
            return left.copy()
    elif join_type == "right":
        if left_empty or right_empty:
            return right.copy()
    elif join_type == "outer":
        if left_empty:
            return right.copy()
        if right_empty:
            return left.copy()
    sort_key = None
    if keep_order:
        # Tag the "kept" side with its original row index so the input order
        # can be restored after the join (join sorts by the join keys).
        sort_key = str(uuid4())
        if join_type == "left":
            left[sort_key] = np.arange(len(left))
        elif join_type == "right":
            # Bug fix: this previously used np.arange(len(left)); the order
            # column is added to `right`, so it must have len(right) entries.
            right[sort_key] = np.arange(len(right))
        else:
            raise ValueError("keep_order is only supported for left and right joins")
    joined = join(left, right, keys, join_type=join_type, **kwargs)
    if sort_key is not None:
        # Restore input order, then drop the temporary index column.
        joined.sort(sort_key)
        del joined[sort_key]
    return joined
|
cta-observatoryREPO_NAMEctapipePATH_START.@ctapipe_extracted@ctapipe-main@src@ctapipe@io@astropy_helpers.py@.PATH_END.py
|
{
"filename": "input_processor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/prompt-toolkit/py2/prompt_toolkit/key_binding/input_processor.py",
"type": "Python"
}
|
# *** encoding: utf-8 ***
"""
An :class:`~.InputProcessor` receives callbacks for the keystrokes parsed from
the input in the :class:`~prompt_toolkit.inputstream.InputStream` instance.
The `InputProcessor` will according to the implemented keybindings call the
correct callbacks when new key presses are feed through `feed`.
"""
from __future__ import unicode_literals
from prompt_toolkit.buffer import EditReadOnlyBuffer
from prompt_toolkit.filters.cli import ViNavigationMode
from prompt_toolkit.keys import Keys, Key
from prompt_toolkit.utils import Event
from .registry import BaseRegistry
from collections import deque
from six.moves import range
import weakref
import six
__all__ = (
'InputProcessor',
'KeyPress',
)
class KeyPress(object):
    """
    A single key press as parsed from the input stream.
    :param key: A `Keys` instance or text (one character).
    :param data: The received string on stdin. (Often vt100 escape codes.)
    """
    def __init__(self, key, data=None):
        assert isinstance(key, (six.text_type, Key))
        assert data is None or isinstance(data, six.text_type)
        if data is None:
            # Default to the key's symbolic name for special keys, or the
            # character itself otherwise.
            data = key.name if isinstance(key, Key) else key
        self.key = key
        self.data = data
    def __repr__(self):
        return '%s(key=%r, data=%r)' % (
            self.__class__.__name__, self.key, self.data)
    def __eq__(self, other):
        # Bug fix: comparing against a non-KeyPress used to raise
        # AttributeError. Return NotImplemented so Python falls back to the
        # other operand / identity comparison.
        if not isinstance(other, KeyPress):
            return NotImplemented
        return self.key == other.key and self.data == other.data
    def __ne__(self, other):
        # Bug fix: Python 2 does not derive `!=` from `__eq__`; without this,
        # `!=` compared object identity, contradicting `==`.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
class InputProcessor(object):
    """
    Statemachine that receives :class:`KeyPress` instances and according to the
    key bindings in the given :class:`Registry`, calls the matching handlers.
    ::
        p = InputProcessor(registry)
        # Send keys into the processor.
        p.feed(KeyPress(Keys.ControlX, '\x18'))
        p.feed(KeyPress(Keys.ControlC, '\x03'))
        # Process all the keys in the queue.
        p.process_keys()
        # Now the ControlX-ControlC callback will be called if this sequence is
        # registered in the registry.
    :param registry: `BaseRegistry` instance.
    :param cli_ref: weakref to `CommandLineInterface`.
    """
    def __init__(self, registry, cli_ref):
        assert isinstance(registry, BaseRegistry)
        self._registry = registry
        self._cli_ref = cli_ref
        # Events fired around every key press (except CPR responses);
        # see `process_keys`.
        self.beforeKeyPress = Event(self)
        self.afterKeyPress = Event(self)
        # The queue of keys not yet sent to our _process generator/state machine.
        self.input_queue = deque()
        # The key buffer that is matched in the generator state machine.
        # (This is at most the amount of keys that make up for one key binding.)
        self.key_buffer = []
        # Simple macro recording. (Like readline does.)
        self.record_macro = False
        self.macro = []
        self.reset()
    def reset(self):
        # Forget previous-match state and restart the coroutine state machine.
        self._previous_key_sequence = []
        self._previous_handler = None
        self._process_coroutine = self._process()
        self._process_coroutine.send(None)
        #: Readline argument (for repetition of commands.)
        #: https://www.gnu.org/software/bash/manual/html_node/Readline-Arguments.html
        self.arg = None
    def start_macro(self):
        " Start recording macro. "
        self.record_macro = True
        self.macro = []
    def end_macro(self):
        " End recording macro. "
        self.record_macro = False
    def call_macro(self):
        " Replay the recorded macro by feeding its keys back into the queue. "
        for k in self.macro:
            self.feed(k)
    def _get_matches(self, key_presses):
        """
        For a list of :class:`KeyPress` instances. Give the matching handlers
        that would handle this.
        """
        keys = tuple(k.key for k in key_presses)
        cli = self._cli_ref()
        # Try match, with mode flag
        return [b for b in self._registry.get_bindings_for_keys(keys) if b.filter(cli)]
    def _is_prefix_of_longer_match(self, key_presses):
        """
        For a list of :class:`KeyPress` instances. Return True if there is any
        handler that is bound to a suffix of this keys.
        """
        keys = tuple(k.key for k in key_presses)
        cli = self._cli_ref()
        # Get the filters for all the key bindings that have a longer match.
        # Note that we transform it into a `set`, because we don't care about
        # the actual bindings and executing it more than once doesn't make
        # sense. (Many key bindings share the same filter.)
        filters = set(b.filter for b in self._registry.get_bindings_starting_with_keys(keys))
        # When any key binding is active, return True.
        return any(f(cli) for f in filters)
    def _process(self):
        """
        Coroutine implementing the key match algorithm. Key strokes are sent
        into this generator, and it calls the appropriate handlers.
        """
        buffer = self.key_buffer
        retry = False
        while True:
            # `retry` means: re-run matching on the current buffer without
            # waiting for a new key (used after a partial match failed).
            if retry:
                retry = False
            else:
                buffer.append((yield))
            # If we have some key presses, check for matches.
            if buffer:
                is_prefix_of_longer_match = self._is_prefix_of_longer_match(buffer)
                matches = self._get_matches(buffer)
                # When eager matches were found, give priority to them and also
                # ignore all the longer matches.
                eager_matches = [m for m in matches if m.eager(self._cli_ref())]
                if eager_matches:
                    matches = eager_matches
                    is_prefix_of_longer_match = False
                # Exact matches found, call handler.
                if not is_prefix_of_longer_match and matches:
                    self._call_handler(matches[-1], key_sequence=buffer[:])
                    del buffer[:]  # Keep reference.
                # No match found.
                elif not is_prefix_of_longer_match and not matches:
                    retry = True
                    found = False
                    # Loop over the input, try longest match first and shift.
                    for i in range(len(buffer), 0, -1):
                        matches = self._get_matches(buffer[:i])
                        if matches:
                            self._call_handler(matches[-1], key_sequence=buffer[:i])
                            del buffer[:i]
                            found = True
                            break
                    if not found:
                        # Drop the first key and retry with the remainder.
                        del buffer[:1]
    def feed(self, key_press):
        """
        Add a new :class:`KeyPress` to the input queue.
        (Don't forget to call `process_keys` in order to process the queue.)
        """
        assert isinstance(key_press, KeyPress)
        self.input_queue.append(key_press)
    def process_keys(self):
        """
        Process all the keys in the `input_queue`.
        (To be called after `feed`.)
        Note: because of the `feed`/`process_keys` separation, it is
        possible to call `feed` from inside a key binding.
        This function keeps looping until the queue is empty.
        """
        while self.input_queue:
            key_press = self.input_queue.popleft()
            # CPR (cursor position report) responses are terminal chatter,
            # not user input, so they don't fire the before/after events.
            if key_press.key != Keys.CPRResponse:
                self.beforeKeyPress.fire()
            self._process_coroutine.send(key_press)
            if key_press.key != Keys.CPRResponse:
                self.afterKeyPress.fire()
        # Invalidate user interface.
        cli = self._cli_ref()
        if cli:
            cli.invalidate()
    def _call_handler(self, handler, key_sequence=None):
        """
        Execute a matched key binding: build the `KeyPressEvent`, save undo
        state, call the handler and update repeat/macro bookkeeping.
        """
        was_recording = self.record_macro
        arg = self.arg
        self.arg = None
        event = KeyPressEvent(
            weakref.ref(self), arg=arg, key_sequence=key_sequence,
            previous_key_sequence=self._previous_key_sequence,
            is_repeat=(handler == self._previous_handler))
        # Save the state of the current buffer.
        cli = event.cli # Can be `None` (In unit-tests only.)
        if handler.save_before(event) and cli:
            cli.current_buffer.save_to_undo_stack()
        # Call handler.
        try:
            handler.call(event)
            self._fix_vi_cursor_position(event)
        except EditReadOnlyBuffer:
            # When a key binding does an attempt to change a buffer which is
            # read-only, we can just silently ignore that.
            pass
        self._previous_key_sequence = key_sequence
        self._previous_handler = handler
        # Record the key sequence in our macro. (Only if we're in macro mode
        # before and after executing the key.)
        if self.record_macro and was_recording:
            self.macro.extend(key_sequence)
    def _fix_vi_cursor_position(self, event):
        """
        After every command, make sure that if we are in Vi navigation mode, we
        never put the cursor after the last character of a line. (Unless it's
        an empty line.)
        """
        cli = self._cli_ref()
        if cli:
            buff = cli.current_buffer
            preferred_column = buff.preferred_column
            if (ViNavigationMode()(event.cli) and
                buff.document.is_cursor_at_the_end_of_line and
                len(buff.document.current_line) > 0):
                buff.cursor_position -= 1
                # Set the preferred_column for arrow up/down again.
                # (This was cleared after changing the cursor position.)
                buff.preferred_column = preferred_column
class KeyPressEvent(object):
    """
    Key press event, delivered to key bindings.
    :param input_processor_ref: Weak reference to the `InputProcessor`.
    :param arg: Repetition argument.
    :param key_sequence: List of `KeyPress` instances.
    :param previous_key_sequence: Previous list of `KeyPress` instances.
    :param is_repeat: True when the previous event was delivered to the same handler.
    """
    def __init__(self, input_processor_ref, arg=None, key_sequence=None,
                 previous_key_sequence=None, is_repeat=False):
        self._input_processor_ref = input_processor_ref
        self._arg = arg
        self.key_sequence = key_sequence
        self.previous_key_sequence = previous_key_sequence
        #: True when the previous key sequence was handled by the same handler.
        self.is_repeat = is_repeat
    def __repr__(self):
        return 'KeyPressEvent(arg=%r, key_sequence=%r, is_repeat=%r)' % (
            self.arg, self.key_sequence, self.is_repeat)
    @property
    def data(self):
        " Data of the most recent key press in the sequence. "
        return self.key_sequence[-1].data
    @property
    def input_processor(self):
        " The originating `InputProcessor` (dereferenced weakref). "
        return self._input_processor_ref()
    @property
    def cli(self):
        """
        Command line interface.
        """
        return self.input_processor._cli_ref()
    @property
    def current_buffer(self):
        """
        The current buffer.
        """
        return self.cli.current_buffer
    @property
    def arg(self):
        """
        Repetition argument.
        """
        if self._arg == '-':
            return -1
        count = int(self._arg or 1)
        # Don't exceed a million.
        return 1 if count >= 1000000 else count
    @property
    def arg_present(self):
        """
        True if repetition argument was explicitly provided.
        """
        return self._arg is not None
    def append_to_arg_count(self, data):
        """
        Add digit to the input argument.
        :param data: the typed digit as string
        """
        assert data in '-0123456789'
        previous = self._arg
        if data == '-':
            assert previous is None or previous == '-'
            new_arg = data
        else:
            new_arg = data if previous is None else "%s%s" % (previous, data)
        self.input_processor.arg = new_arg
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@prompt-toolkit@py2@prompt_toolkit@key_binding@input_processor.py@.PATH_END.py
|
{
"filename": "nonlin.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/scipy/optimize/nonlin.py",
"type": "Python"
}
|
r"""
Nonlinear solvers
-----------------
.. currentmodule:: scipy.optimize
This is a collection of general-purpose nonlinear multidimensional
solvers. These solvers find *x* for which *F(x) = 0*. Both *x*
and *F* can be multidimensional.
Routines
~~~~~~~~
Large-scale nonlinear solvers:
.. autosummary::
newton_krylov
anderson
General nonlinear solvers:
.. autosummary::
broyden1
broyden2
Simple iterations:
.. autosummary::
excitingmixing
linearmixing
diagbroyden
Examples
~~~~~~~~
**Small problem**
>>> def F(x):
... return np.cos(x) + x[::-1] - [1, 2, 3, 4]
>>> import scipy.optimize
>>> x = scipy.optimize.broyden1(F, [1,1,1,1], f_tol=1e-14)
>>> x
array([ 4.04674914, 3.91158389, 2.71791677, 1.61756251])
>>> np.cos(x) + x[::-1]
array([ 1., 2., 3., 4.])
**Large problem**
Suppose that we needed to solve the following integrodifferential
equation on the square :math:`[0,1]\times[0,1]`:
.. math::
\nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2
with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of
the square.
The solution can be found using the `newton_krylov` solver:
.. plot::
import numpy as np
from scipy.optimize import newton_krylov
from numpy import cosh, zeros_like, mgrid, zeros
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def residual(P):
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y - 10*cosh(P).mean()**2
# solve
guess = zeros((nx, ny), float)
sol = newton_krylov(residual, guess, method='lgmres', verbose=1)
print('Residual: %g' % abs(residual(sol)).max())
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.pcolor(x, y, sol)
plt.colorbar()
plt.show()
"""
# Copyright (C) 2009, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as Scipy.
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from scipy._lib.six import callable, exec_, xrange
from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError
from numpy import asarray, dot, vdot
import scipy.sparse.linalg
import scipy.sparse
from scipy.linalg import get_blas_funcs
import inspect
from scipy._lib._util import getargspec_no_self as _getargspec
from .linesearch import scalar_search_wolfe1, scalar_search_armijo
__all__ = [
'broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'newton_krylov']
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
class NoConvergence(Exception):
    """Raised when the nonlinear solver fails to converge within `maxiter`."""
def maxnorm(x):
    """Return the maximum absolute value (infinity norm) of `x`."""
    return np.max(np.absolute(x))
def _as_inexact(x):
"""Return `x` as an array, of either floats or complex floats"""
x = asarray(x)
if not np.issubdtype(x.dtype, np.inexact):
return asarray(x, dtype=np.float_)
return x
def _array_like(x, x0):
"""Return ndarray `x` as same array subclass and shape as `x0`"""
x = np.reshape(x, np.shape(x0))
wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
return wrap(x)
def _safe_norm(v):
if not np.isfinite(v).all():
return np.array(np.inf)
return norm(v)
#------------------------------------------------------------------------------
# Generic nonlinear solver machinery
#------------------------------------------------------------------------------
_doc_parts = dict(
params_basic="""
F : function(x) -> f
Function whose root to find; should take and return an array-like
object.
xin : array_like
Initial guess for the solution
""".strip(),
params_extra="""
iter : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
verbose : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
f_tol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
f_rtol : float, optional
Relative tolerance for the residual. If omitted, not used.
x_tol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
x_rtol : float, optional
Relative minimum step size. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
callback : function, optional
Optional callback function. It is called on every iteration as
``callback(x, f)`` where `x` is the current solution and `f`
the corresponding residual.
Returns
-------
sol : ndarray
An array (of similar array type as `x0`) containing the final solution.
Raises
------
NoConvergence
When a solution was not found.
""".strip()
)
def _set_doc(obj):
if obj.__doc__:
obj.__doc__ = obj.__doc__ % _doc_parts
def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
                 maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
                 tol_norm=None, line_search='armijo', callback=None,
                 full_output=False, raise_exception=True):
    """
    Find a root of a function, in a way suitable for large-scale problems.

    Parameters
    ----------
    %(params_basic)s
    jacobian : Jacobian
        A Jacobian approximation: `Jacobian` object or something that
        `asjacobian` can transform to one. Alternatively, a string specifying
        which of the builtin Jacobian approximations to use:

            krylov, broyden1, broyden2, anderson
            diagbroyden, linearmixing, excitingmixing

    %(params_extra)s
    full_output : bool
        If true, returns a dictionary `info` containing convergence
        information.
    raise_exception : bool
        If True, a `NoConvergence` exception is raise if no solution is found.

    See Also
    --------
    asjacobian, Jacobian

    Notes
    -----
    This algorithm implements the inexact Newton method, with
    backtracking or full line searches. Several Jacobian
    approximations are available, including Krylov and Quasi-Newton
    methods.

    References
    ----------
    .. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
       Equations\". Society for Industrial and Applied Mathematics. (1995)
       http://www.siam.org/books/kelley/fr16/index.php
    """
    condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
                                     x_tol=x_tol, x_rtol=x_rtol,
                                     iter=iter, norm=tol_norm)

    # Work on a flattened inexact (float/complex) copy of x0; `func` maps a
    # flat vector back to the caller's array shape before evaluating F.
    x0 = _as_inexact(x0)
    func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten()
    x = x0.flatten()

    dx = np.inf
    Fx = func(x)
    Fx_norm = norm(Fx)

    jacobian = asjacobian(jacobian)
    jacobian.setup(x.copy(), Fx, func)

    if maxiter is None:
        if iter is not None:
            # `iter` counts iterations exactly; allow one extra loop pass so
            # the termination check can run after the last update.
            maxiter = iter + 1
        else:
            maxiter = 100*(x.size+1)

    # Normalize line_search to one of the accepted string/None values.
    if line_search is True:
        line_search = 'armijo'
    elif line_search is False:
        line_search = None

    if line_search not in (None, 'armijo', 'wolfe'):
        raise ValueError("Invalid line search")

    # Solver tolerance selection (forcing-term parameters for the inexact
    # Newton inner solves; see Kelley [KIM]).
    gamma = 0.9
    eta_max = 0.9999
    eta_treshold = 0.1
    eta = 1e-3

    for n in xrange(maxiter):
        status = condition.check(Fx, x, dx)
        if status:
            break

        # The tolerance, as computed for scipy.sparse.linalg.* routines
        tol = min(eta, eta*Fx_norm)
        dx = -jacobian.solve(Fx, tol=tol)

        if norm(dx) == 0:
            raise ValueError("Jacobian inversion yielded zero vector. "
                             "This indicates a bug in the Jacobian "
                             "approximation.")

        # Line search, or Newton step
        if line_search:
            s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
                                                        line_search)
        else:
            s = 1.0
            x = x + dx
            Fx = func(x)
            Fx_norm_new = norm(Fx)

        jacobian.update(x.copy(), Fx)

        if callback:
            callback(x, Fx)

        # Adjust forcing parameters for inexact methods
        eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
        if gamma * eta**2 < eta_treshold:
            eta = min(eta_max, eta_A)
        else:
            eta = min(eta_max, max(eta_A, gamma*eta**2))

        Fx_norm = Fx_norm_new

        # Print status
        if verbose:
            sys.stdout.write("%d: |F(x)| = %g; step %g; tol %g\n" % (
                n, norm(Fx), s, eta))
            sys.stdout.flush()
    else:
        # for/else: the loop exhausted maxiter without `condition.check`
        # reporting success.
        if raise_exception:
            raise NoConvergence(_array_like(x, x0))
        else:
            status = 2

    if full_output:
        info = {'nit': condition.iteration,
                'fun': Fx,
                'status': status,
                'success': status == 1,
                'message': {1: 'A solution was found at the specified '
                               'tolerance.',
                            2: 'The maximum number of iterations allowed '
                               'has been reached.'
                            }[status]
                }
        return _array_like(x, x0), info
    else:
        return _array_like(x, x0)


_set_doc(nonlin_solve)
def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
                        smin=1e-2):
    """Find a step length `s` along direction `dx` reducing ``|func|^2``.

    Returns ``(s, x_new, Fx_new, |Fx_new|)``.  `rdiff` is the relative step
    for the finite-difference derivative used by the Wolfe search; `smin` is
    the smallest step length considered.
    """
    # One-element caches so phi(s) at the accepted step is not recomputed.
    tmp_s = [0]
    tmp_Fx = [Fx]
    tmp_phi = [norm(Fx)**2]
    s_norm = norm(x) / norm(dx)

    def phi(s, store=True):
        # Merit function |F(x + s*dx)|^2 (with non-finite values mapped to
        # inf via _safe_norm).
        if s == tmp_s[0]:
            return tmp_phi[0]
        xt = x + s*dx
        v = func(xt)
        p = _safe_norm(v)**2
        if store:
            tmp_s[0] = s
            tmp_phi[0] = p
            tmp_Fx[0] = v
        return p

    def derphi(s):
        # Forward-difference derivative of phi, with a step scaled to the
        # magnitudes of s and x.
        ds = (abs(s) + s_norm + 1) * rdiff
        return (phi(s+ds, store=False) - phi(s)) / ds

    if search_type == 'wolfe':
        s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
                                             xtol=1e-2, amin=smin)
    elif search_type == 'armijo':
        s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
                                       amin=smin)

    if s is None:
        # XXX: No suitable step length found. Take the full Newton step,
        # and hope for the best.
        s = 1.0

    x = x + s*dx
    if s == tmp_s[0]:
        # Reuse the cached residual for the accepted step.
        Fx = tmp_Fx[0]
    else:
        Fx = func(x)
    Fx_norm = norm(Fx)

    return s, x, Fx, Fx_norm
class TerminationCondition(object):
    """
    Termination condition for an iteration. It is terminated if

    - |F| < f_rtol*|F_0|, AND
    - |F| < f_tol

    AND

    - |dx| < x_rtol*|x|, AND
    - |dx| < x_tol
    """

    def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
                 iter=None, norm=maxnorm):
        # Unspecified tolerances default to "not used" (inf), except f_tol
        # which defaults to eps**(1/3) ~ 6e-6.
        # NOTE(review): np.float_ was removed in NumPy 2.0; fine for the
        # legacy NumPy this py2-era module targets — confirm before porting.
        if f_tol is None:
            f_tol = np.finfo(np.float_).eps ** (1./3)
        if f_rtol is None:
            f_rtol = np.inf
        if x_tol is None:
            x_tol = np.inf
        if x_rtol is None:
            x_rtol = np.inf

        self.x_tol = x_tol
        self.x_rtol = x_rtol
        self.f_tol = f_tol
        self.f_rtol = f_rtol

        if norm is None:
            self.norm = maxnorm
        else:
            self.norm = norm

        self.iter = iter

        # |F_0| is captured on the first `check` call.
        self.f0_norm = None
        self.iteration = 0

    def check(self, f, x, dx):
        """Return a nonzero status when the iteration should stop.

        1 means converged; 2 means the fixed iteration count was reached.
        """
        self.iteration += 1
        f_norm = self.norm(f)
        x_norm = self.norm(x)
        dx_norm = self.norm(dx)

        if self.f0_norm is None:
            self.f0_norm = f_norm

        if f_norm == 0:
            return 1

        if self.iter is not None:
            # backwards compatibility with Scipy 0.6.0
            return 2 * (self.iteration > self.iter)

        # NB: condition must succeed for rtol=inf even if norm == 0
        return int((f_norm <= self.f_tol
                    and f_norm/self.f_rtol <= self.f0_norm)
                   and (dx_norm <= self.x_tol
                        and dx_norm/self.x_rtol <= x_norm))
#------------------------------------------------------------------------------
# Generic Jacobian approximation
#------------------------------------------------------------------------------
class Jacobian(object):
    """
    Common interface for Jacobians or Jacobian approximations.

    The optional methods come useful when implementing trust region
    etc. algorithms that often require evaluating transposes of the
    Jacobian.

    Methods
    -------
    solve
        Returns J^-1 * v
    update
        Updates Jacobian to point `x` (where the function has residual `Fx`)

    matvec : optional
        Returns J * v
    rmatvec : optional
        Returns A^H * v
    rsolve : optional
        Returns A^-H * v
    matmat : optional
        Returns A * V, where V is a dense matrix with dimensions (N,K).
    todense : optional
        Form the dense Jacobian matrix. Necessary for dense trust region
        algorithms, and useful for testing.

    Attributes
    ----------
    shape
        Matrix dimensions (M, N)
    dtype
        Data type of the matrix.
    func : callable, optional
        Function the Jacobian corresponds to
    """

    def __init__(self, **kw):
        # Accept any of the interface methods/attributes as keyword
        # overrides; reject unknown names early.
        names = ["solve", "update", "matvec", "rmatvec", "rsolve",
                 "matmat", "todense", "shape", "dtype"]
        for name, value in kw.items():
            if name not in names:
                raise ValueError("Unknown keyword argument %s" % name)
            if value is not None:
                setattr(self, name, kw[name])

        if hasattr(self, 'todense'):
            # Allow np.array(jac) when a dense form is available.
            self.__array__ = lambda: self.todense()

    def aspreconditioner(self):
        # J^-1 viewed as a linear operator, usable as a Krylov preconditioner.
        return InverseJacobian(self)

    def solve(self, v, tol=0):
        raise NotImplementedError

    def update(self, x, F):
        # Default: a static Jacobian needs no per-iteration update.
        pass

    def setup(self, x, F, func):
        """Record the problem geometry; called once before iteration starts."""
        self.func = func
        self.shape = (F.size, x.size)
        self.dtype = F.dtype
        if self.__class__.setup is Jacobian.setup:
            # Call on the first point unless overridden
            self.update(x, F)
class InverseJacobian(object):
    """Present a `Jacobian` as its inverse: matvec == jacobian.solve.

    Useful for passing an (adaptive) inverse-Jacobian approximation as a
    preconditioner to Krylov solvers.
    """

    def __init__(self, jacobian):
        self.jacobian = jacobian
        # J^-1 * v is computed by the wrapped Jacobian's solve.
        self.matvec = jacobian.solve
        self.update = jacobian.update
        if hasattr(jacobian, 'setup'):
            self.setup = jacobian.setup
        if hasattr(jacobian, 'rsolve'):
            self.rmatvec = jacobian.rsolve

    @property
    def shape(self):
        return self.jacobian.shape

    @property
    def dtype(self):
        return self.jacobian.dtype
def asjacobian(J):
    """
    Convert given object to one suitable for use as a Jacobian.

    Accepts: a `Jacobian` instance or subclass, a dense ndarray, a sparse
    matrix, a duck-typed object with shape/dtype/solve, a callable
    ``J(x) -> matrix``, or the name of a builtin approximation.
    """
    spsolve = scipy.sparse.linalg.spsolve
    if isinstance(J, Jacobian):
        return J
    elif inspect.isclass(J) and issubclass(J, Jacobian):
        return J()
    elif isinstance(J, np.ndarray):
        # Dense, constant Jacobian matrix.
        if J.ndim > 2:
            raise ValueError('array must have rank <= 2')
        J = np.atleast_2d(np.asarray(J))
        if J.shape[0] != J.shape[1]:
            raise ValueError('array must be square')

        return Jacobian(matvec=lambda v: dot(J, v),
                        rmatvec=lambda v: dot(J.conj().T, v),
                        solve=lambda v: solve(J, v),
                        rsolve=lambda v: solve(J.conj().T, v),
                        dtype=J.dtype, shape=J.shape)
    elif scipy.sparse.isspmatrix(J):
        # Sparse, constant Jacobian matrix.
        if J.shape[0] != J.shape[1]:
            raise ValueError('matrix must be square')
        return Jacobian(matvec=lambda v: J*v,
                        rmatvec=lambda v: J.conj().T * v,
                        solve=lambda v: spsolve(J, v),
                        rsolve=lambda v: spsolve(J.conj().T, v),
                        dtype=J.dtype, shape=J.shape)
    elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
        # Duck-typed Jacobian-like object.
        return Jacobian(matvec=getattr(J, 'matvec'),
                        rmatvec=getattr(J, 'rmatvec'),
                        solve=J.solve,
                        rsolve=getattr(J, 'rsolve'),
                        update=getattr(J, 'update'),
                        setup=getattr(J, 'setup'),
                        dtype=J.dtype,
                        shape=J.shape)
    elif callable(J):
        # Assume it's a function J(x) that returns the Jacobian
        class Jac(Jacobian):
            def update(self, x, F):
                self.x = x

            def solve(self, v, tol=0):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return solve(m, v)
                elif scipy.sparse.isspmatrix(m):
                    return spsolve(m, v)
                else:
                    raise ValueError("Unknown matrix type")

            def matvec(self, v):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return dot(m, v)
                elif scipy.sparse.isspmatrix(m):
                    return m*v
                else:
                    raise ValueError("Unknown matrix type")

            def rsolve(self, v, tol=0):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return solve(m.conj().T, v)
                elif scipy.sparse.isspmatrix(m):
                    return spsolve(m.conj().T, v)
                else:
                    raise ValueError("Unknown matrix type")

            def rmatvec(self, v):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return dot(m.conj().T, v)
                elif scipy.sparse.isspmatrix(m):
                    return m.conj().T * v
                else:
                    raise ValueError("Unknown matrix type")
        return Jac()
    elif isinstance(J, str):
        # Name of a builtin Jacobian approximation.
        return dict(broyden1=BroydenFirst,
                    broyden2=BroydenSecond,
                    anderson=Anderson,
                    diagbroyden=DiagBroyden,
                    linearmixing=LinearMixing,
                    excitingmixing=ExcitingMixing,
                    krylov=KrylovJacobian)[J]()
    else:
        raise TypeError('Cannot convert object to a Jacobian')
#------------------------------------------------------------------------------
# Broyden
#------------------------------------------------------------------------------
class GenericBroyden(Jacobian):
    """Base class for Broyden-type approximations.

    Remembers the last (x, f) pair, autoscales the initial mixing
    parameter `alpha`, and defers the actual secant update to `_update`.
    """

    def setup(self, x0, f0, func):
        Jacobian.setup(self, x0, f0, func)
        self.last_f = f0
        self.last_x = x0

        if hasattr(self, 'alpha') and self.alpha is None:
            # Autoscale the initial Jacobian parameter
            # unless we have already guessed the solution.
            normf0 = norm(f0)
            if normf0:
                self.alpha = 0.5*max(norm(x0), 1) / normf0
            else:
                self.alpha = 1.0

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # Subclasses implement the secant update here.
        raise NotImplementedError

    def update(self, x, f):
        # Compute the step/residual differences and delegate to `_update`.
        df = f - self.last_f
        dx = x - self.last_x
        self._update(x, f, dx, df, norm(dx), norm(df))
        self.last_f = f
        self.last_x = x
class LowRankMatrix(object):
    r"""
    A matrix represented as

    .. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger

    However, if the rank of the matrix reaches the dimension of the vectors,
    full matrix representation will be used thereon.
    """

    def __init__(self, alpha, n, dtype):
        self.alpha = alpha
        self.cs = []          # column vectors c_n
        self.ds = []          # column vectors d_n
        self.n = n            # matrix dimension
        self.dtype = dtype
        self.collapsed = None  # dense representation, once rank == n

    @staticmethod
    def _matvec(v, alpha, cs, ds):
        # w = alpha*v + sum_n c_n (d_n^H v), using BLAS for the updates.
        axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'],
                                          cs[:1] + [v])
        w = alpha * v
        for c, d in zip(cs, ds):
            a = dotc(d, v)
            w = axpy(c, w, w.size, a)
        return w

    @staticmethod
    def _solve(v, alpha, cs, ds):
        """Evaluate w = M^-1 v"""
        if len(cs) == 0:
            return v/alpha

        # (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
        axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])

        c0 = cs[0]
        A = alpha * np.identity(len(cs), dtype=c0.dtype)
        for i, d in enumerate(ds):
            for j, c in enumerate(cs):
                A[i,j] += dotc(d, c)

        q = np.zeros(len(cs), dtype=c0.dtype)
        for j, d in enumerate(ds):
            q[j] = dotc(d, v)
        q /= alpha
        q = solve(A, q)

        w = v/alpha
        for c, qc in zip(cs, q):
            w = axpy(c, w, w.size, -qc)

        return w

    def matvec(self, v):
        """Evaluate w = M v"""
        if self.collapsed is not None:
            return np.dot(self.collapsed, v)
        return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)

    def rmatvec(self, v):
        """Evaluate w = M^H v"""
        if self.collapsed is not None:
            return np.dot(self.collapsed.T.conj(), v)
        # M^H swaps the roles of cs and ds and conjugates alpha.
        return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)

    def solve(self, v, tol=0):
        """Evaluate w = M^-1 v"""
        if self.collapsed is not None:
            return solve(self.collapsed, v)
        return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)

    def rsolve(self, v, tol=0):
        """Evaluate w = M^-H v"""
        if self.collapsed is not None:
            return solve(self.collapsed.T.conj(), v)
        return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)

    def append(self, c, d):
        """Add a rank-1 term c d^H; collapse to dense at full rank."""
        if self.collapsed is not None:
            self.collapsed += c[:,None] * d[None,:].conj()
            return

        self.cs.append(c)
        self.ds.append(d)

        if len(self.cs) > c.size:
            self.collapse()

    def __array__(self):
        # Dense form of the matrix (used by collapse/todense).
        if self.collapsed is not None:
            return self.collapsed

        Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
        for c, d in zip(self.cs, self.ds):
            Gm += c[:,None]*d[None,:].conj()
        return Gm

    def collapse(self):
        """Collapse the low-rank matrix to a full-rank one."""
        self.collapsed = np.array(self)
        self.cs = None
        self.ds = None
        self.alpha = None

    def restart_reduce(self, rank):
        """
        Reduce the rank of the matrix by dropping all vectors.
        """
        if self.collapsed is not None:
            return
        assert rank > 0
        if len(self.cs) > rank:
            del self.cs[:]
            del self.ds[:]

    def simple_reduce(self, rank):
        """
        Reduce the rank of the matrix by dropping oldest vectors.
        """
        if self.collapsed is not None:
            return
        assert rank > 0
        while len(self.cs) > rank:
            del self.cs[0]
            del self.ds[0]

    def svd_reduce(self, max_rank, to_retain=None):
        """
        Reduce the rank of the matrix by retaining some SVD components.

        This corresponds to the \"Broyden Rank Reduction Inverse\"
        algorithm described in [1]_.

        Note that the SVD decomposition can be done by solving only a
        problem whose size is the effective rank of this matrix, which
        is viable even for large problems.

        Parameters
        ----------
        max_rank : int
            Maximum rank of this matrix after reduction.
        to_retain : int, optional
            Number of SVD components to retain when reduction is done
            (ie. rank > max_rank). Default is ``max_rank - 2``.

        References
        ----------
        .. [1] B.A. van der Rotten, PhD thesis,
           \"A limited memory Broyden method to solve high-dimensional
           systems of nonlinear equations\". Mathematisch Instituut,
           Universiteit Leiden, The Netherlands (2003).
           http://www.math.leidenuniv.nl/scripties/Rotten.pdf
        """
        if self.collapsed is not None:
            return

        p = max_rank
        if to_retain is not None:
            q = to_retain
        else:
            q = p - 2

        if self.cs:
            p = min(p, len(self.cs[0]))
        q = max(0, min(q, p-1))

        m = len(self.cs)
        if m < p:
            # nothing to do
            return

        # SVD of C via an economic QR of D, working at the effective rank.
        C = np.array(self.cs).T
        D = np.array(self.ds).T

        D, R = qr(D, mode='economic')
        C = dot(C, R.T.conj())

        U, S, WH = svd(C, full_matrices=False, compute_uv=True)

        C = dot(C, inv(WH))
        D = dot(D, WH.T.conj())

        for k in xrange(q):
            self.cs[k] = C[:,k].copy()
            self.ds[k] = D[:,k].copy()

        del self.cs[q:]
        del self.ds[q:]
_doc_parts['broyden_params'] = """
alpha : float, optional
Initial guess for the Jacobian is ``(-1/alpha)``.
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden matrix
stays low. Can either be a string giving the name of the method,
or a tuple of the form ``(method, param1, param2, ...)``
that gives the name of the method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no extra parameters.
- ``simple``: drop oldest matrix column. Has no extra parameters.
- ``svd``: keep only the most significant SVD components.
Takes an extra parameter, ``to_retain``, which determines the
number of SVD components to retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (ie., no rank reduction).
""".strip()
class BroydenFirst(GenericBroyden):
    r"""
    Find a root of a function, using Broyden's first Jacobian approximation.

    This method is also known as \"Broyden's good method\".

    Parameters
    ----------
    %(params_basic)s
    %(broyden_params)s
    %(params_extra)s

    Notes
    -----
    This algorithm implements the inverse Jacobian Quasi-Newton update

    .. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)

    which corresponds to Broyden's first Jacobian update

    .. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx

    References
    ----------
    .. [1] B.A. van der Rotten, PhD thesis,
       \"A limited memory Broyden method to solve high-dimensional
       systems of nonlinear equations\". Mathematisch Instituut,
       Universiteit Leiden, The Netherlands (2003).
       http://www.math.leidenuniv.nl/scripties/Rotten.pdf
    """

    def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
        GenericBroyden.__init__(self)
        self.alpha = alpha
        self.Gm = None  # inverse-Jacobian approximation (LowRankMatrix)

        if max_rank is None:
            max_rank = np.inf
        self.max_rank = max_rank

        # reduction_method may be a name or a (name, param1, ...) tuple.
        if isinstance(reduction_method, str):
            reduce_params = ()
        else:
            reduce_params = reduction_method[1:]
            reduction_method = reduction_method[0]
        reduce_params = (max_rank - 1,) + reduce_params

        if reduction_method == 'svd':
            self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
        elif reduction_method == 'simple':
            self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
        elif reduction_method == 'restart':
            self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
        else:
            raise ValueError("Unknown rank reduction method '%s'" %
                             reduction_method)

    def setup(self, x, F, func):
        GenericBroyden.setup(self, x, F, func)
        # Initial H = -alpha * I (Gm approximates the *inverse* Jacobian).
        self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)

    def todense(self):
        # Gm is H = J^-1, so the dense Jacobian is its inverse.
        return inv(self.Gm)

    def solve(self, f, tol=0):
        r = self.Gm.matvec(f)
        if not np.isfinite(r).all():
            # singular; reset the Jacobian approximation
            self.setup(self.last_x, self.last_f, self.func)
        return self.Gm.matvec(f)

    def matvec(self, f):
        return self.Gm.solve(f)

    def rsolve(self, f, tol=0):
        return self.Gm.rmatvec(f)

    def rmatvec(self, f):
        return self.Gm.rsolve(f)

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        self._reduce()  # reduce first to preserve secant condition

        # Rank-1 secant update: H += (dx - H df) (dx^H H) / (dx^H H df).
        v = self.Gm.rmatvec(dx)
        c = dx - self.Gm.matvec(df)
        d = v / vdot(df, v)

        self.Gm.append(c, d)
class BroydenSecond(BroydenFirst):
    """
    Find a root of a function, using Broyden\'s second Jacobian approximation.

    This method is also known as \"Broyden's bad method\".

    Parameters
    ----------
    %(params_basic)s
    %(broyden_params)s
    %(params_extra)s

    Notes
    -----
    This algorithm implements the inverse Jacobian Quasi-Newton update

    .. math:: H_+ = H + (dx - H df) df^\\dagger / ( df^\\dagger df)

    corresponding to Broyden's second method.

    References
    ----------
    .. [1] B.A. van der Rotten, PhD thesis,
       \"A limited memory Broyden method to solve high-dimensional
       systems of nonlinear equations\". Mathematisch Instituut,
       Universiteit Leiden, The Netherlands (2003).
       http://www.math.leidenuniv.nl/scripties/Rotten.pdf
    """

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        self._reduce()  # reduce first to preserve secant condition

        # Rank-1 update: H += (dx - H df) df^H / |df|^2.
        v = df
        c = dx - self.Gm.matvec(df)
        d = v / df_norm**2

        self.Gm.append(c, d)
#------------------------------------------------------------------------------
# Broyden-like (restricted memory)
#------------------------------------------------------------------------------
class Anderson(GenericBroyden):
    """
    Find a root of a function, using (extended) Anderson mixing.

    The Jacobian is formed by for a 'best' solution in the space
    spanned by last `M` vectors. As a result, only a MxM matrix
    inversions and MxN multiplications are required. [Ey]_

    Parameters
    ----------
    %(params_basic)s
    alpha : float, optional
        Initial guess for the Jacobian is (-1/alpha).
    M : float, optional
        Number of previous vectors to retain. Defaults to 5.
    w0 : float, optional
        Regularization parameter for numerical stability.
        Compared to unity, good values of the order of 0.01.
    %(params_extra)s

    References
    ----------
    .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
    """

    # Note:
    #
    # Anderson method maintains a rank M approximation of the inverse Jacobian,
    #
    #     J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
    #     A      = W + dF^H dF
    #     W      = w0^2 diag(dF^H dF)
    #
    # so that for w0 = 0 the secant condition applies for last M iterates, ie.,
    #
    #     J^-1 df_j = dx_j
    #
    # for all j = 0 ... M-1.
    #
    # Moreover, (from Sherman-Morrison-Woodbury formula)
    #
    #     J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
    #     C   = (dX + alpha dF) A^-1
    #     b   = -1/alpha
    #
    # and after simplification
    #
    #     J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
    #

    def __init__(self, alpha=None, w0=0.01, M=5):
        GenericBroyden.__init__(self)
        self.alpha = alpha
        self.M = M
        self.dx = []     # history of solution differences
        self.df = []     # history of residual differences
        self.gamma = None
        self.w0 = w0

    def solve(self, f, tol=0):
        # J^-1 f, per the formula in the class note above.
        dx = -self.alpha*f

        n = len(self.dx)
        if n == 0:
            return dx

        df_f = np.empty(n, dtype=f.dtype)
        for k in xrange(n):
            df_f[k] = vdot(self.df[k], f)

        try:
            gamma = solve(self.a, df_f)
        except LinAlgError:
            # singular; reset the Jacobian approximation
            del self.dx[:]
            del self.df[:]
            return dx

        for m in xrange(n):
            dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
        return dx

    def matvec(self, f):
        # J f, per the simplified SMW expression in the class note above.
        dx = -f/self.alpha

        n = len(self.dx)
        if n == 0:
            return dx

        df_f = np.empty(n, dtype=f.dtype)
        for k in xrange(n):
            df_f[k] = vdot(self.df[k], f)

        b = np.empty((n, n), dtype=f.dtype)
        for i in xrange(n):
            for j in xrange(n):
                b[i,j] = vdot(self.df[i], self.dx[j])
                if i == j and self.w0 != 0:
                    b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
        gamma = solve(b, df_f)

        for m in xrange(n):
            dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
        return dx

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        if self.M == 0:
            return

        self.dx.append(dx)
        self.df.append(df)

        # Keep only the last M history vectors.
        while len(self.dx) > self.M:
            self.dx.pop(0)
            self.df.pop(0)

        # Rebuild the (regularized) Gram matrix A = W + dF^H dF; only the
        # upper triangle is computed, then mirrored.
        n = len(self.dx)
        a = np.zeros((n, n), dtype=f.dtype)

        for i in xrange(n):
            for j in xrange(i, n):
                if i == j:
                    wd = self.w0**2
                else:
                    wd = 0
                a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])

        a += np.triu(a, 1).T.conj()
        self.a = a
#------------------------------------------------------------------------------
# Simple iterations
#------------------------------------------------------------------------------
class DiagBroyden(GenericBroyden):
    """
    Find a root of a function, using diagonal Broyden Jacobian approximation.

    The Jacobian approximation is derived from previous iterations, by
    retaining only the diagonal of Broyden matrices.

    .. warning::

       This algorithm may be useful for specific problems, but whether
       it will work may depend strongly on the problem.

    Parameters
    ----------
    %(params_basic)s
    alpha : float, optional
        Initial guess for the Jacobian is (-1/alpha).
    %(params_extra)s
    """

    def __init__(self, alpha=None):
        GenericBroyden.__init__(self)
        self.alpha = alpha

    def setup(self, x, F, func):
        GenericBroyden.setup(self, x, F, func)
        # `d` holds the (negated, inverted) diagonal: J = -diag(d).
        self.d = np.ones((self.shape[0],), dtype=self.dtype) / self.alpha

    def solve(self, f, tol=0):
        return -f / self.d

    def matvec(self, f):
        return -f * self.d

    def rsolve(self, f, tol=0):
        return -f / self.d.conj()

    def rmatvec(self, f):
        return -f * self.d.conj()

    def todense(self):
        return np.diag(-self.d)

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # Diagonal-only Broyden secant update.
        self.d -= (df + self.d*dx)*dx/dx_norm**2
class LinearMixing(GenericBroyden):
    """
    Find a root of a function, using a scalar Jacobian approximation.

    .. warning::

       This algorithm may be useful for specific problems, but whether
       it will work may depend strongly on the problem.

    Parameters
    ----------
    %(params_basic)s
    alpha : float, optional
        The Jacobian approximation is (-1/alpha).
    %(params_extra)s
    """

    def __init__(self, alpha=None):
        GenericBroyden.__init__(self)
        self.alpha = alpha

    def solve(self, f, tol=0):
        # J^-1 f = -alpha * f
        return -f*self.alpha

    def matvec(self, f):
        # J f = -f / alpha
        return -f/self.alpha

    def rsolve(self, f, tol=0):
        return -f*np.conj(self.alpha)

    def rmatvec(self, f):
        return -f/np.conj(self.alpha)

    def todense(self):
        return np.diag(-np.ones(self.shape[0])/self.alpha)

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # The scalar approximation is never updated.
        pass
class ExcitingMixing(GenericBroyden):
    """
    Find a root of a function, using a tuned diagonal Jacobian approximation.

    The Jacobian matrix is diagonal and is tuned on each iteration.

    .. warning::

       This algorithm may be useful for specific problems, but whether
       it will work may depend strongly on the problem.

    Parameters
    ----------
    %(params_basic)s
    alpha : float, optional
        Initial Jacobian approximation is (-1/alpha).
    alphamax : float, optional
        The entries of the diagonal Jacobian are kept in the range
        ``[alpha, alphamax]``.
    %(params_extra)s
    """

    def __init__(self, alpha=None, alphamax=1.0):
        GenericBroyden.__init__(self)
        self.alpha = alpha
        self.alphamax = alphamax
        self.beta = None  # per-component mixing parameters

    def setup(self, x, F, func):
        GenericBroyden.setup(self, x, F, func)
        self.beta = self.alpha * np.ones((self.shape[0],), dtype=self.dtype)

    def solve(self, f, tol=0):
        return -f*self.beta

    def matvec(self, f):
        return -f/self.beta

    def rsolve(self, f, tol=0):
        return -f*self.beta.conj()

    def rmatvec(self, f):
        return -f/self.beta.conj()

    def todense(self):
        return np.diag(-1/self.beta)

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # Grow beta where the residual kept its sign, reset it where the
        # sign flipped, then clip to [0, alphamax].
        incr = f*self.last_f > 0
        self.beta[incr] += self.alpha
        self.beta[~incr] = self.alpha
        np.clip(self.beta, 0, self.alphamax, out=self.beta)
#------------------------------------------------------------------------------
# Iterative/Krylov approximated Jacobians
#------------------------------------------------------------------------------
class KrylovJacobian(Jacobian):
    r"""
    Find a root of a function, using Krylov approximation for inverse Jacobian.

    This method is suitable for solving large-scale problems.

    Parameters
    ----------
    %(params_basic)s
    rdiff : float, optional
        Relative step size to use in numerical differentiation.
    method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
        Krylov method to use to approximate the Jacobian.
        Can be a string, or a function implementing the same interface as
        the iterative solvers in `scipy.sparse.linalg`.

        The default is `scipy.sparse.linalg.lgmres`.
    inner_M : LinearOperator or InverseJacobian
        Preconditioner for the inner Krylov iteration.
        Note that you can use also inverse Jacobians as (adaptive)
        preconditioners. For example,

        >>> from scipy.optimize.nonlin import BroydenFirst, KrylovJacobian
        >>> from scipy.optimize.nonlin import InverseJacobian
        >>> jac = BroydenFirst()
        >>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac))

        If the preconditioner has a method named 'update', it will be called
        as ``update(x, f)`` after each nonlinear step, with ``x`` giving
        the current point, and ``f`` the current function value.
    inner_tol, inner_maxiter, ...
        Parameters to pass on to the \"inner\" Krylov solver.
        See `scipy.sparse.linalg.gmres` for details.
    outer_k : int, optional
        Size of the subspace kept across LGMRES nonlinear iterations.
        See `scipy.sparse.linalg.lgmres` for details.
    %(params_extra)s

    See Also
    --------
    scipy.sparse.linalg.gmres
    scipy.sparse.linalg.lgmres

    Notes
    -----
    This function implements a Newton-Krylov solver. The basic idea is
    to compute the inverse of the Jacobian with an iterative Krylov
    method. These methods require only evaluating the Jacobian-vector
    products, which are conveniently approximated by a finite difference:

    .. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega

    Due to the use of iterative matrix inverses, these methods can
    deal with large nonlinear problems.

    Scipy's `scipy.sparse.linalg` module offers a selection of Krylov
    solvers to choose from. The default here is `lgmres`, which is a
    variant of restarted GMRES iteration that reuses some of the
    information obtained in the previous Newton steps to invert
    Jacobians in subsequent steps.

    For a review on Newton-Krylov methods, see for example [1]_,
    and for the LGMRES sparse inverse method, see [2]_.

    References
    ----------
    .. [1] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004).
           :doi:`10.1016/j.jcp.2003.08.010`
    .. [2] A.H. Baker and E.R. Jessup and T. Manteuffel,
           SIAM J. Matrix Anal. Appl. 26, 962 (2005).
           :doi:`10.1137/S0895479803422014`
    """

    def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
                 inner_M=None, outer_k=10, **kw):
        self.preconditioner = inner_M
        self.rdiff = rdiff
        # Resolve a method name to the corresponding scipy solver; a
        # callable is passed through unchanged.
        self.method = dict(
            bicgstab=scipy.sparse.linalg.bicgstab,
            gmres=scipy.sparse.linalg.gmres,
            lgmres=scipy.sparse.linalg.lgmres,
            cgs=scipy.sparse.linalg.cgs,
            minres=scipy.sparse.linalg.minres,
            ).get(method, method)

        self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)

        if self.method is scipy.sparse.linalg.gmres:
            # Replace GMRES's outer iteration with Newton steps
            self.method_kw['restrt'] = inner_maxiter
            self.method_kw['maxiter'] = 1
        elif self.method is scipy.sparse.linalg.lgmres:
            self.method_kw['outer_k'] = outer_k
            # Replace LGMRES's outer iteration with Newton steps
            self.method_kw['maxiter'] = 1
            # Carry LGMRES's `outer_v` vectors across nonlinear iterations
            self.method_kw.setdefault('outer_v', [])
            self.method_kw.setdefault('prepend_outer_v', True)
            # But don't carry the corresponding Jacobian*v products, in case
            # the Jacobian changes a lot in the nonlinear step
            #
            # XXX: some trust-region inspired ideas might be more efficient...
            #      See eg. Brown & Saad. But needs to be implemented separately
            #      since it's not an inexact Newton method.
            self.method_kw.setdefault('store_outer_Av', False)

        for key, value in kw.items():
            # Forward `inner_*` keyword arguments to the inner solver.
            if not key.startswith('inner_'):
                raise ValueError("Unknown parameter %s" % key)
            self.method_kw[key[6:]] = value

    def _update_diff_step(self):
        # Finite-difference step omega, scaled to current |x| and |f|.
        mx = abs(self.x0).max()
        mf = abs(self.f0).max()
        self.omega = self.rdiff * max(1, mx) / max(1, mf)

    def matvec(self, v):
        # Approximate J v by a forward difference of func along v.
        nv = norm(v)
        if nv == 0:
            return 0*v
        sc = self.omega / nv
        r = (self.func(self.x0 + sc*v) - self.f0) / sc
        if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
            raise ValueError('Function returned non-finite results')
        return r

    def solve(self, rhs, tol=0):
        # Only pass `tol` when the caller didn't pin it via inner_tol.
        if 'tol' in self.method_kw:
            sol, info = self.method(self.op, rhs, **self.method_kw)
        else:
            sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw)
        return sol

    def update(self, x, f):
        self.x0 = x
        self.f0 = f
        self._update_diff_step()

        # Update also the preconditioner, if possible
        if self.preconditioner is not None:
            if hasattr(self.preconditioner, 'update'):
                self.preconditioner.update(x, f)

    def setup(self, x, f, func):
        Jacobian.setup(self, x, f, func)
        self.x0 = x
        self.f0 = f
        self.op = scipy.sparse.linalg.aslinearoperator(self)

        if self.rdiff is None:
            self.rdiff = np.finfo(x.dtype).eps ** (1./2)

        self._update_diff_step()

        # Setup also the preconditioner, if possible
        if self.preconditioner is not None:
            if hasattr(self.preconditioner, 'setup'):
                self.preconditioner.setup(x, f, func)
#------------------------------------------------------------------------------
# Wrapper functions
#------------------------------------------------------------------------------
def _nonlin_wrapper(name, jac):
    """
    Construct a solver wrapper with given name and jacobian approx.

    It inspects the keyword arguments of ``jac.__init__``, and allows to
    use the same arguments in the wrapper function, in addition to the
    keyword arguments of `nonlin_solve`
    """
    args, varargs, varkw, defaults = _getargspec(jac.__init__)
    # Keyword arguments of the Jacobian class, as (name, default) pairs.
    kwargs = list(zip(args[-len(defaults):], defaults))
    kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs])
    if kw_str:
        kw_str = ", " + kw_str
    kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs])
    if kwkw_str:
        kwkw_str = kwkw_str + ", "

    # Construct the wrapper function so that its keyword arguments
    # are visible in pydoc.help etc.
    wrapper = """
def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
             f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
             tol_norm=None, line_search='armijo', callback=None, **kw):
    jac = %(jac)s(%(kwkw)s **kw)
    return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
                        f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
                        callback)
"""

    wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
                             kwkw=kwkw_str)
    # Execute the generated source in a copy of the module globals so the
    # wrapper can see nonlin_solve and the Jacobian classes.
    ns = {}
    ns.update(globals())
    exec_(wrapper, ns)

    func = ns[name]
    func.__doc__ = jac.__doc__
    _set_doc(func)
    return func
# Public solver entry points: thin wrappers that pair `nonlin_solve` with a
# specific builtin Jacobian approximation.
broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
anderson = _nonlin_wrapper('anderson', Anderson)
linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@scipy@optimize@nonlin.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/splom/unselected/marker/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# On Python < 3.7 there is no module-level __getattr__ (PEP 562), so the
# validator classes must be imported eagerly; the same applies during
# static type checking.  Otherwise, install lazy importers so importing
# this package stays cheap.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._size import SizeValidator
    from ._opacity import OpacityValidator
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._size.SizeValidator",
            "._opacity.OpacityValidator",
            "._color.ColorValidator",
        ],
    )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@splom@unselected@marker@__init__.py@.PATH_END.py
|
{
"filename": "test_verifyStats.py",
"repo_name": "lsst/cp_verify",
"repo_path": "cp_verify_extracted/cp_verify-main/tests/test_verifyStats.py",
"type": "Python"
}
|
# This file is part of cp_verify.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
import unittest
import lsst.utils.tests
import lsst.ip.isr.isrMock as isrMock
import lsst.cp.verify as cpVerify
import lsst.ip.isr.isrFunctions as isrFunctions
def updateMockExp(exposure, addCR=True):
    """Attach mask and variance planes (and optionally a cosmic ray) to a
    mock exposure.

    Parameters
    ----------
    exposure : `lsst.afw.image.Exposure`
        Exposure to be modified in place.
    addCR : `bool`
        Whether a known cosmic ray should be added to ``exposure``.
    """
    if addCR:
        # Inject a single hot pixel that mimics a cosmic-ray hit.
        exposure.getImage().getArray()[50, 50] = 10000.0
    # Flag one full column in the mask, then populate the variance plane.
    exposure.getMask().getArray()[:, 10] = 1
    isrFunctions.updateVariance(exposure.getMaskedImage(), 1.0, 5.0)
class ToySubClass(cpVerify.CpVerifyStatsTask):
    """Minimal concrete subclass: CpVerifyStatsTask requires an
    implementation of ``verify``.
    """

    def verify(self, inputExp, outputStats):
        # Docstring inherited from CpVerifyStatsTask.verify()
        # Fixed results: one passing and one failing check, overall success.
        return {'A REAL TEST': True, 'A BAD TEST': False}, True
class VerifyStatsTestCase(lsst.utils.tests.TestCase):
    """Unit test for stats code.
    """

    def setUp(self):
        """Generate a mock exposure/camera to test."""
        self.inputExp = isrMock.CalibratedRawMock().run()
        self.camera = isrMock.IsrMock().getCamera()
        self.dimensions = {'instrument': self.camera.getName(),
                           'exposure': 1234,
                           'detector': self.camera[10].getName(),
                           }
        # Add mask/variance planes and the synthetic cosmic ray.
        updateMockExp(self.inputExp)

    def test_failures(self):
        """Test that all the NotImplementedError methods fail correctly."""
        results = None
        with self.assertRaises(NotImplementedError):
            # We have not implemented a verify method
            config = cpVerify.CpVerifyStatsConfig()
            config.numSigmaClip = 3.0
            task = cpVerify.CpVerifyStatsTask(config=config)
            results = task.run(self.inputExp, camera=self.camera, dimensions=self.dimensions)
            # Or the catalog stats
            config.catalogStatKeywords = {'CAT_MEAN', 'MEDIAN'}
            task = cpVerify.CpVerifyStatsTask(config=config)
            results = task.run(self.inputExp, camera=self.camera, dimensions=self.dimensions)
            # Or the detector stats
            config.catalogStatKeywords = {}
            config.detectorStatKeywords = {'DET_SIGMA', 'STDEV'}
            task = cpVerify.CpVerifyStatsTask(config=config)
            results = task.run(self.inputExp, camera=self.camera, dimensions=self.dimensions)
        # The first run() raised, so `results` was never reassigned.
        self.assertIsNone(results)

    def test_generic(self):
        """Test a subset of the output values to identify that the
        image stat methods haven't changed.
        """
        config = cpVerify.CpVerifyStatsConfig()
        # Request the same four statistics from each image view
        # (masked, unmasked, CR-rejected, exposure-time normalized).
        config.imageStatKeywords = {'MEAN': 'MEAN', 'MEDIAN': 'MEDIAN', 'CLIPPED': 'MEANCLIP',
                                    'SIGMA': 'STDEV'}
        config.unmaskedImageStatKeywords = {'un_MEAN': 'MEAN', 'un_MEDIAN': 'MEDIAN',
                                            'un_CLIPPED': 'MEANCLIP',
                                            'un_SIGMA': 'STDEV'}
        config.crImageStatKeywords = {'cr_MEAN': 'MEAN', 'cr_MEDIAN': 'MEDIAN', 'cr_CLIPPED': 'MEANCLIP',
                                      'cr_SIGMA': 'STDEV'}
        config.normImageStatKeywords = {'norm_MEAN': 'MEAN', 'norm_MEDIAN': 'MEDIAN',
                                        'norm_CLIPPED': 'MEANCLIP',
                                        'norm_SIGMA': 'STDEV'}
        config.numSigmaClip = 3.0
        task = ToySubClass(config=config)
        results = task.run(self.inputExp, camera=self.camera, dimensions=self.dimensions)
        resultStats = results.outputStats
        # Regression values pinned against the seeded mock exposure.
        self.assertAlmostEqual(resultStats['AMP']['C:0,0']['MEAN'], 1506.06976, 4)
        self.assertAlmostEqual(resultStats['AMP']['C:0,0']['un_MEAN'], 1501.0299, 4)
        self.assertAlmostEqual(resultStats['AMP']['C:0,0']['norm_MEAN'], 301.213957, 4)
        self.assertAlmostEqual(resultStats['AMP']['C:0,0']['cr_MEAN'], 1504.2776, 4)
        self.assertTrue(resultStats['VERIFY']['A REAL TEST'])
        self.assertFalse(resultStats['VERIFY']['A BAD TEST'])
        self.assertTrue(resultStats['SUCCESS'])
class VerifyBiasTestCase(lsst.utils.tests.TestCase):
    """Unit test for stats code - bias cases."""

    def setUp(self):
        """Generate a mock exposure/camera to test."""
        config = isrMock.IsrMockConfig()
        config.isTrimmed = True
        config.rngSeed = 12345
        biasExposure = isrMock.BiasMock(config=config).run()
        # A second mock with a different seed acts as the "calibration"
        # bias that is subtracted from the first.
        config.rngSeed = 54321
        fakeBias = isrMock.BiasMock(config=config).run()
        self.inputExp = biasExposure.clone()
        mi = self.inputExp.getMaskedImage()
        mi.scaledMinus(1.0, fakeBias.getMaskedImage())
        updateMockExp(self.inputExp)
        self.camera = isrMock.IsrMock().getCamera()
        detector = self.camera[20]
        self.inputExp.setDetector(detector)
        self.dimensions = {'instrument': self.camera.getName(),
                           'exposure': 1234,
                           'detector': detector.getName(),
                           }
        # This is here to test accessing metadata info from the
        # exposure header.
        md = self.inputExp.getMetadata()
        for amp in detector.getAmplifiers():
            md[f"LSST ISR OVERSCAN RESIDUAL SERIAL STDEV {amp.getName()}"] = 4.25

    def test_bias(self):
        """Test a subset of the output values to identify that the
        image stat methods haven't changed.
        """
        config = cpVerify.CpVerifyBiasConfig()
        config.numSigmaClip = 3.0
        config.ampCornerBoxSize = 15
        task = cpVerify.CpVerifyBiasTask(config=config)
        results = task.run(self.inputExp, camera=self.camera, dimensions=self.dimensions)
        biasStats = results.outputStats
        # Regression values pinned against the seeded mocks above.
        self.assertAlmostEqual(biasStats['AMP']['C:0,0']['MEAN'], 2.08672, 4)
        self.assertAlmostEqual(biasStats['AMP']['C:0,0']['NOISE'], 13.99547, 4)
        self.assertAlmostEqual(biasStats['AMP']['C:0,0']['CR_NOISE'], 14.11526, 4)
        # This order swap in intended. :sad-panda-emoji:
        self.assertAlmostEqual(biasStats['METADATA']['READ_NOISE']['C:0,0'], 4.25)
        self.assertIn(biasStats['SUCCESS'], [True, False])
class VerifyDarkTestCase(lsst.utils.tests.TestCase):
    """Unit test for stats code - dark cases.
    """

    def setUp(self):
        """Generate a mock exposure/camera to test."""
        config = isrMock.IsrMockConfig()
        config.isTrimmed = True
        config.rngSeed = 12345
        darkExposure = isrMock.DarkMock(config=config).run()
        # A second mock with a different seed acts as the "calibration"
        # dark that is subtracted from the first.
        config.rngSeed = 54321
        fakeDark = isrMock.DarkMock(config=config).run()
        self.inputExp = darkExposure.clone()
        mi = self.inputExp.getMaskedImage()
        mi.scaledMinus(1.0, fakeDark.getMaskedImage())
        updateMockExp(self.inputExp)
        self.camera = isrMock.IsrMock().getCamera()
        detector = self.camera[20]
        self.inputExp.setDetector(detector)
        self.dimensions = {'instrument': self.camera.getName(),
                           'exposure': 1234,
                           'detector': detector.getName(),
                           }
        # Populate the READ_NOISE into the exposure header
        md = self.inputExp.getMetadata()
        for amp in detector.getAmplifiers():
            md[f"LSST ISR OVERSCAN RESIDUAL SERIAL STDEV {amp.getName()}"] = 5.24

    def test_dark(self):
        """Test a subset of the output values to identify that the
        image stat methods haven't changed.
        """
        config = cpVerify.CpVerifyDarkConfig()
        config.numSigmaClip = 3.0
        task = cpVerify.CpVerifyDarkTask(config=config)
        results = task.run(self.inputExp,
                           camera=self.camera,
                           dimensions=self.dimensions)
        darkStats = results.outputStats
        # Regression values pinned against the seeded mocks above.
        self.assertAlmostEqual(darkStats['AMP']['C:0,0']['MEAN'], 2.0043, 4)
        self.assertAlmostEqual(darkStats['AMP']['C:0,0']['NOISE'], 3.12948, 4)
        self.assertAlmostEqual(darkStats['AMP']['C:0,0']['CR_NOISE'], 3.15946, 4)
        # This order swap in intended. :sad-panda-emoji:
        self.assertAlmostEqual(darkStats['METADATA']['READ_NOISE']['C:0,0'], 5.24)
        self.assertIn(darkStats['SUCCESS'], [True, False])
class VerifyDefectsTestCase(lsst.utils.tests.TestCase):
    """Unit test for stats code - defect cases."""

    defectFlux = 100000  # Flux to use for simulated defect.

    def setUp(self):
        """Generate a mock exposure/camera to test."""
        config = isrMock.IsrMockConfig()
        config.isTrimmed = True
        config.doGenerateImage = True
        # Disable every signal except a flat sky so the injected defects
        # are the only anomalous pixels.
        config.doAddFringe = False
        config.doAddSource = False
        config.doAddSky = True
        config.doAddOverscan = False
        config.doAddCrosstalk = False
        config.doAddBias = False
        config.doAddDark = False
        config.doAddFlat = False
        config.doAddFringe = False
        config.skyLevel = 1000
        config.rngSeed = 12345
        self.inputExp = isrMock.IsrMock(config=config).run()
        # These are simulated defects
        self.inputExp.getImage().getArray()[0, 0] = -1.0 * self.defectFlux
        self.inputExp.getImage().getArray()[40, 50] = self.defectFlux
        self.inputExp.getImage().getArray()[75, 50] = np.nan
        # No cosmic ray here; the defects above are the only bad pixels.
        updateMockExp(self.inputExp, addCR=False)
        # Mask each simulated defect pixel.
        self.inputExp.getMask().getArray()[0, 0] = 1
        self.inputExp.getMask().getArray()[40, 50] = 1
        self.inputExp.getMask().getArray()[75, 50] = 1
        self.camera = isrMock.IsrMock().getCamera()
        self.dimensions = {'instrument': self.camera.getName(),
                           'exposure': 1234,
                           'visit': 1234,
                           'detector': self.camera[10].getName(),
                           }

    def test_defects(self):
        """Test a subset of the output values to identify that the
        image stat methods haven't changed.
        """
        config = cpVerify.CpVerifyDefectsConfig()
        config.numSigmaClip = 3.0
        # The catalog objects are `lsst.afw.table.SourceCatalog`
        # but the task catalog tests only check number of
        # detections before and after applying defects, so
        # arrays will do in this case.
        # With defects applied
        inputCatalogMock = np.arange(1, 100)
        # Without defects applied
        uncorrectedCatalogMock = np.arange(1, 200)
        task = cpVerify.CpVerifyDefectsTask(config=config)
        # Also use the inputExp as uncorrectedExposure.
        results = task.run(self.inputExp,
                           camera=self.camera,
                           uncorrectedExp=self.inputExp,
                           inputCatalog=inputCatalogMock,
                           uncorrectedCatalog=uncorrectedCatalogMock,
                           dimensions=self.dimensions)
        defectStats = results.outputStats
        # Regression values pinned against the seeded mock above.
        self.assertEqual(defectStats['AMP']['C:0,0']['DEFECT_PIXELS'], 53)
        self.assertEqual(defectStats['AMP']['C:0,0']['OUTLIERS'], 17)
        self.assertEqual(defectStats['AMP']['C:0,0']['STAT_OUTLIERS'], 3)
        self.assertAlmostEqual(defectStats['AMP']['C:0,0']['MEDIAN'], 999.466, 4)
        self.assertAlmostEqual(defectStats['AMP']['C:0,0']['STDEV'], 30.96303, 4)
        self.assertAlmostEqual(defectStats['AMP']['C:0,0']['MIN'], 881.56146, 4)
        self.assertAlmostEqual(defectStats['AMP']['C:0,0']['MAX'], 1124.19934, 4)
        self.assertEqual(defectStats['AMP']['C:0,0']['UNMASKED_MIN'], -1.0 * self.defectFlux, 4)
        self.assertEqual(defectStats['AMP']['C:0,0']['UNMASKED_MAX'], self.defectFlux, 4)
        self.assertEqual(defectStats['CATALOG']['NUM_OBJECTS_BEFORE'], 199)
        self.assertEqual(defectStats['CATALOG']['NUM_OBJECTS_AFTER'], 99)
        self.assertEqual(defectStats['DET']['NUM_COSMICS_BEFORE'], 0)
        self.assertEqual(defectStats['DET']['NUM_COSMICS_AFTER'], 0)
        self.assertIn(defectStats['SUCCESS'], [True, False])
class MemoryTester(lsst.utils.tests.MemoryTestCase):
    # Standard LSST leak checker; no extra configuration needed.
    pass
def setup_module(module):
    # pytest hook: initialize the LSST test framework before tests run.
    lsst.utils.tests.init()
# Allow running this test file directly as a script.
if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()
|
lsstREPO_NAMEcp_verifyPATH_START.@cp_verify_extracted@cp_verify-main@tests@test_verifyStats.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/indicator/delta/decreasing/__init__.py",
"type": "Python"
}
|
import sys

# On Python < 3.7 there is no module-level __getattr__ (PEP 562), so the
# validator classes must be imported eagerly; otherwise install lazy
# importers so importing this package stays cheap.
if sys.version_info < (3, 7):
    from ._symbol import SymbolValidator
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._symbol.SymbolValidator", "._color.ColorValidator"]
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@indicator@delta@decreasing@__init__.py@.PATH_END.py
|
{
"filename": "test_theil_sen.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/sklearn/linear_model/tests/test_theil_sen.py",
"type": "Python"
}
|
"""
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import os
import re
import sys
from contextlib import contextmanager
import numpy as np
import pytest
from numpy.testing import (
assert_array_almost_equal,
assert_array_equal,
assert_array_less,
)
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model._theil_sen import (
_breakdown_point,
_modified_weiszfeld_step,
_spatial_median,
)
from sklearn.utils._testing import assert_almost_equal
@contextmanager
def no_stdout_stderr():
    """Temporarily silence stdout and stderr by redirecting them to os.devnull.

    The original streams are restored in a ``finally`` block, so an
    exception raised inside the ``with`` body cannot leave the process
    writing to a closed devnull handle (the original version skipped
    restoration on error).
    """
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    with open(os.devnull, "w") as devnull:
        sys.stdout = devnull
        sys.stderr = devnull
        try:
            yield
            devnull.flush()
        finally:
            # Always restore, even if the wrapped block raised.
            sys.stdout = old_stdout
            sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
    """Toy 1-d regression y = 3*x + c + noise with a handful of outliers."""
    rng = np.random.RandomState(0)
    w = 3.0
    # Smaller data set with a real intercept, larger one with a tiny offset.
    c, n_samples = (2.0, 50) if intercept else (0.1, 100)
    x = rng.normal(size=n_samples)
    y = w * x + c + 0.1 * rng.normal(size=n_samples)
    # Overwrite a few points with gross outliers.
    if intercept:
        outliers = [(42, -2, 4), (43, -2.5, 8), (33, 2.5, 1), (49, 2.1, 2)]
    else:
        outliers = [(42, -2, 4), (43, -2.5, 8), (53, 2.5, 1),
                    (60, 2.1, 2), (72, 1.8, -7)]
    for i, xi, yi in outliers:
        x[i], y[i] = xi, yi
    return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
    """Toy 2-d regression: y = X @ [5, 10] + 1 + noise, with 10% outliers."""
    rng = np.random.RandomState(0)
    n_samples = 100
    X = rng.normal(size=(n_samples, 2))
    w = np.array([5.0, 10.0])
    c = 1.0
    y = X @ w + c + 0.1 * rng.normal(size=n_samples)
    # Corrupt a random 10% of the targets with large noise.
    n_outliers = n_samples // 10
    bad = rng.randint(0, n_samples, size=n_outliers)
    y[bad] = 50 * rng.normal(size=n_outliers)
    return X, y, w, c
def gen_toy_problem_4d():
    """Toy 4-d regression: y = X @ [5, 10, 42, 7] + 1 + noise, 10% outliers."""
    rng = np.random.RandomState(0)
    n_samples = 10000
    X = rng.normal(size=(n_samples, 4))
    w = np.array([5.0, 10.0, 42.0, 7.0])
    c = 1.0
    y = X @ w + c + 0.1 * rng.normal(size=n_samples)
    # Corrupt a random 10% of the targets with large noise.
    n_outliers = n_samples // 10
    bad = rng.randint(0, n_samples, size=n_outliers)
    y[bad] = 50 * rng.normal(size=n_outliers)
    return X, y, w, c
def test_modweiszfeld_step_1d():
    pts = np.array([1.0, 2.0, 3.0]).reshape(3, 1)
    median = 2.0
    # A start value that already is the spatial median is a fixed point.
    assert_array_almost_equal(_modified_weiszfeld_step(pts, median), median)
    # Starting above the median (off-sample or on-sample), the step must
    # move strictly toward the median without overshooting.
    for start in (2.5, 3.0):
        stepped = _modified_weiszfeld_step(pts, start)
        assert_array_less(median, stepped)
        assert_array_less(stepped, start)
    # A data set holding a single vector maps that vector to itself.
    single = np.array([1.0, 2.0, 3.0]).reshape(1, 3)
    assert_array_equal(single[0], _modified_weiszfeld_step(single, single[0]))
def test_modweiszfeld_step_2d():
    pts = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 1.0]).reshape(3, 2)
    start = np.array([0.5, 0.5])
    # First two Weiszfeld iterations against precomputed values.
    step1 = _modified_weiszfeld_step(pts, start)
    assert_array_almost_equal(step1, np.array([1 / 3, 2 / 3]))
    step2 = _modified_weiszfeld_step(pts, step1)
    assert_array_almost_equal(step2, np.array([0.2792408, 0.7207592]))
    # The known fixed point stays put.
    fixed = np.array([0.21132505, 0.78867497])
    assert_array_almost_equal(_modified_weiszfeld_step(pts, fixed), fixed)
def test_spatial_median_1d():
    pts = np.array([1.0, 2.0, 3.0]).reshape(3, 1)
    _, median = _spatial_median(pts)
    assert_array_almost_equal(median, 2.0)
    # In one dimension the spatial median must agree exactly with the
    # ordinary median on a larger random data set.
    rng = np.random.RandomState(0)
    big = rng.randint(100, size=(1000, 1))
    _, median = _spatial_median(big)
    assert_array_equal(median, np.median(big.ravel()))
def test_spatial_median_2d():
    pts = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 1.0]).reshape(3, 2)
    _, median = _spatial_median(pts, max_iter=100, tol=1.0e-6)

    def fermat_weber_cost(center):
        # Sum of Euclidean distances from `center` to all points.
        return np.sum(np.array([norm(p - center) for p in pts]))

    # The spatial median minimizes the Fermat-Weber cost; cross-check
    # against a generic BFGS minimizer started from the same point.
    reference = fmin_bfgs(fermat_weber_cost, median, disp=False)
    assert_array_almost_equal(median, reference)
    # Exceeding max_iter must emit a ConvergenceWarning.
    warning_message = "Maximum number of iterations 30 reached in spatial median."
    with pytest.warns(ConvergenceWarning, match=warning_message):
        _spatial_median(pts, max_iter=30, tol=0.0)
def test_theil_sen_1d():
    X, y, w, c = gen_toy_problem_1d()
    # Ordinary least squares is thrown off by the outliers...
    ols = LinearRegression().fit(X, y)
    assert np.abs(ols.coef_ - w) > 0.9
    # ...while Theil-Sen recovers slope and intercept.
    estimator = TheilSenRegressor(random_state=0).fit(X, y)
    assert_array_almost_equal(estimator.coef_, w, 1)
    assert_array_almost_equal(estimator.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
    X, y, w, c = gen_toy_problem_1d(intercept=False)
    # Least squares without an intercept is biased by the outliers...
    ols = LinearRegression(fit_intercept=False).fit(X, y)
    assert np.abs(ols.coef_ - w - c) > 0.5
    # ...while Theil-Sen absorbs the small offset into the slope.
    estimator = TheilSenRegressor(fit_intercept=False, random_state=0).fit(X, y)
    assert_array_almost_equal(estimator.coef_, w + c, 1)
    assert_almost_equal(estimator.intercept_, 0.0)
    # non-regression test for #18104
    estimator.score(X, y)
def test_theil_sen_2d():
    X, y, w, c = gen_toy_problem_2d()
    # Least squares is thrown off by the outliers...
    ols = LinearRegression().fit(X, y)
    assert norm(ols.coef_ - w) > 1.0
    # ...while Theil-Sen recovers coefficients and intercept.
    estimator = TheilSenRegressor(max_subpopulation=1e3, random_state=0).fit(X, y)
    assert_array_almost_equal(estimator.coef_, w, 1)
    assert_array_almost_equal(estimator.intercept_, c, 1)
def test_calc_breakdown_point():
    # For n -> inf with 2 features the breakdown point tends to 1 - 1/sqrt(2).
    expected = 1 - 1 / np.sqrt(2)
    assert np.abs(_breakdown_point(1e10, 2) - expected) < 1.0e-6
@pytest.mark.parametrize(
    "param, ExceptionCls, match",
    [
        (
            {"n_subsamples": 1},
            ValueError,
            re.escape("Invalid parameter since n_features+1 > n_subsamples (2 > 1)"),
        ),
        (
            {"n_subsamples": 101},
            ValueError,
            re.escape("Invalid parameter since n_subsamples > n_samples (101 > 50)"),
        ),
    ],
)
def test_checksubparams_invalid_input(param, ExceptionCls, match):
    # Out-of-range n_subsamples must be rejected at fit time.
    X, y, _, _ = gen_toy_problem_1d()
    estimator = TheilSenRegressor(**param, random_state=0)
    with pytest.raises(ExceptionCls, match=match):
        estimator.fit(X, y)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
    # In the wide-data case, n_subsamples below n_features is invalid.
    rng = np.random.RandomState(0)
    n_samples, n_features = 10, 20
    X = rng.normal(size=(n_samples, n_features))
    y = rng.normal(size=n_samples)
    estimator = TheilSenRegressor(n_subsamples=9, random_state=0)
    with pytest.raises(ValueError):
        estimator.fit(X, y)
def test_subpopulation():
    # Limiting the subpopulation size must still recover the true model.
    X, y, w, c = gen_toy_problem_4d()
    estimator = TheilSenRegressor(max_subpopulation=250, random_state=0).fit(X, y)
    assert_array_almost_equal(estimator.coef_, w, 1)
    assert_array_almost_equal(estimator.intercept_, c, 1)
def test_subsamples():
    # With n_subsamples == n_samples, Theil-Sen degenerates to least squares.
    X, y, w, c = gen_toy_problem_4d()
    estimator = TheilSenRegressor(n_subsamples=X.shape[0], random_state=0).fit(X, y)
    ols = LinearRegression().fit(X, y)
    assert_array_almost_equal(estimator.coef_, ols.coef_, 9)
def test_verbosity():
    # Verbose mode must run without errors; its output is swallowed.
    X, y, _, _ = gen_toy_problem_1d()
    with no_stdout_stderr():
        TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
        TheilSenRegressor(verbose=True, max_subpopulation=10, random_state=0).fit(X, y)
def test_theil_sen_parallel():
    X, y, w, c = gen_toy_problem_2d()
    # Least squares is thrown off by the outliers...
    ols = LinearRegression().fit(X, y)
    assert norm(ols.coef_ - w) > 1.0
    # ...while a parallel (n_jobs=2) Theil-Sen fit still recovers the model.
    estimator = TheilSenRegressor(
        n_jobs=2, random_state=0, max_subpopulation=2e3
    ).fit(X, y)
    assert_array_almost_equal(estimator.coef_, w, 1)
    assert_array_almost_equal(estimator.intercept_, c, 1)
def test_less_samples_than_features():
    rng = np.random.RandomState(0)
    n_samples, n_features = 10, 20
    X = rng.normal(size=(n_samples, n_features))
    y = rng.normal(size=n_samples)
    # Without an intercept, Theil-Sen falls back to the least-squares fit.
    estimator = TheilSenRegressor(fit_intercept=False, random_state=0).fit(X, y)
    ols = LinearRegression(fit_intercept=False).fit(X, y)
    assert_array_almost_equal(estimator.coef_, ols.coef_, 12)
    # With an intercept the solution differs from least squares (the
    # intercept is computed differently), but in this underdetermined
    # setting the fit must interpolate the data exactly.
    estimator = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
    assert_array_almost_equal(estimator.predict(X), y, 12)
# TODO(1.8): Remove
def test_copy_X_deprecated():
    # Passing copy_X must raise the deprecation warning until removal.
    X, y, _, _ = gen_toy_problem_1d()
    estimator = TheilSenRegressor(copy_X=True, random_state=0)
    with pytest.warns(FutureWarning, match="`copy_X` was deprecated"):
        estimator.fit(X, y)
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@sklearn@linear_model@tests@test_theil_sen.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "PhaseTracer/PhaseTracer",
"repo_path": "PhaseTracer_extracted/PhaseTracer-master/README.md",
"type": "Markdown"
}
|
<h1 align="center">
PhaseTracer
</h1>
<div align="center">
<i>Trace cosmological phases, phase transitions, and gravitational waves in scalar-field theories</i>
</div>
<br>
<div align="center">
<img alt="GitHub Actions Workflow Status" src="https://img.shields.io/github/actions/workflow/status/PhaseTracer/PhaseTracer/cmake-single-platform.yml">
<img alt="GitHub License" src="https://img.shields.io/github/license/PhaseTracer/PhaseTracer">
<img alt="Static Badge" src="https://img.shields.io/badge/arXiv-2003.02859-blue?link=https%3A%2F%2Farxiv.org%2Fabs%2F2003.02859">
</div>
<br>
**PhaseTracer** is a C++14 software package for tracing cosmological phases, finding potential phase transitions, computing the bounce action, and plotting the gravitational wave spectrum for Standard Model extensions with any number of scalar fields.
## Dependencies
You need a C++14 compliant compiler and our dependencies. The dependencies can be installed by
*Ubuntu/Debian*
sudo apt install libalglib-dev libnlopt-cxx-dev libeigen3-dev libboost-filesystem-dev libboost-log-dev libgsl-dev
*Fedora*
sudo dnf install alglib-devel nlopt-devel eigen3-devel boost-devel gsl-devel
*Mac*
brew install alglib nlopt eigen boost gsl
If alglib is not found, see https://github.com/S-Dafarra/alglib-cmake
## Building
To build the shared library and the examples:
git clone https://github.com/PhaseTracer/PhaseTracer
cd PhaseTracer
mkdir build
cd build
cmake ..
make
## Running
If the build was successful, run the examples and tests with:
cd ..
./bin/run_1D_test_model
./bin/run_2D_test_model
./bin/scan_Z2_scalar_singlet_model
./bin/unit_tests
If you want to see debugging information or obtain plots of the phases and potential for the first two examples above you can add the -d flag, i.e.
./bin/run_1D_test_model -d
./bin/run_2D_test_model -d
## BubbleProfiler
<details>
<summary>Click me</summary>
To use `BubbleProfiler` for calculation of bounce action:
cmake -D BUILD_WITH_BP=ON ..
make
Then run the example with:
cd ..
./bin/run_BP_2d
./bin/run_BP_scale 1 0.6 200
or in other examples by setting
PhaseTracer::ActionCalculator ac(model);
ac.set_action_calculator(PhaseTracer::ActionMethod::BubbleProfiler);
</details>
## FlexibleSUSY
<details>
<summary>Click me</summary>
To build the example `THDMIISNMSSMBCsimple` with FlexibleSUSY:
cmake -D BUILD_WITH_FS=ON ..
make
Then run the example with:
cd ..
./bin/run_THDMIISNMSSMBCsimple
FlexibleSUSY has additional dependencies and will report errors if
these are not present. See the FlexibleSUSY documentation for details
and/or follow the suggestions from the cmake output.
</details>
## BSMPT
<details>
<summary>Click me</summary>
To build the examples with BSMPT:
cmake -D BUILD_WITH_BSMPT=ON ..
make
Then run the examples with:
cd ..
./bin/run_R2HDM
./bin/run_C2HDM
./bin/run_N2HDM
Please note that the BSMPT examples in PhaseTracer are just for checking that PhaseTracer and BSMPT can give consistent results. Unsuccessful compilation of BSMPT will not affect other examples and BSMPT is not necessary for PhaseTracer users unless they wish to use potentials from BSMPT.
</details>
## Citing
If you use PhaseTracer, please cite the accompanying manual
@article{Athron:2020sbe,
author = "Athron, Peter and Bal\'azs, Csaba and Fowlie, Andrew and Zhang, Yang",
title = "{PhaseTracer: tracing cosmological phases and calculating transition properties}",
eprint = "2003.02859",
archivePrefix = "arXiv",
primaryClass = "hep-ph",
reportNumber = "CoEPP-MN-20-3",
doi = "10.1140/epjc/s10052-020-8035-2",
journal = "Eur. Phys. J. C",
volume = "80",
number = "6",
pages = "567",
year = "2020"
}
|
PhaseTracerREPO_NAMEPhaseTracerPATH_START.@PhaseTracer_extracted@PhaseTracer-master@README.md@.PATH_END.py
|
{
"filename": "_labels.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/pie/_labels.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class LabelsValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``labels`` data array of ``pie`` traces."""

    def __init__(self, plotly_name="labels", parent_name="pie", **kwargs):
        # Callers may override the defaults through **kwargs.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "data")
        super(LabelsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@pie@_labels.py@.PATH_END.py
|
{
"filename": "test_config.py",
"repo_name": "mikecokina/elisa",
"repo_path": "elisa_extracted/elisa-master/unittests/test_config.py",
"type": "Python"
}
|
# keep it first
# due to stupid astropy units/constants implementation
from unittests import set_astropy_units
import numpy as np
from numpy.testing import assert_array_equal
from elisa import settings
from unittests.utils import ElisaTestCase
set_astropy_units()
class TestConfig(ElisaTestCase):
    """Sanity checks for the limb-darkening and atmosphere configuration
    tables exposed via ``settings``."""

    def test_LD_LAW_TO_FILE_PREFIX(self):
        laws = "linear", "cosine", "logarithmic", "square_root"
        prefixes = [settings.LD_LAW_TO_FILE_PREFIX[law] for law in laws]
        # "linear" and "cosine" intentionally share the "lin" prefix.
        assert_array_equal(["lin", "lin", "log", "sqrt"], prefixes)

    def test_LD_LAW_CFS_COLUMNS(self):
        laws = "linear", "cosine", "logarithmic", "square_root"
        columns = [settings.LD_LAW_CFS_COLUMNS[law] for law in laws]
        assert_array_equal([["xlin"], ["xlin"], ["xlog", "ylog"], ["xsqrt", "ysqrt"]], columns)

    def test_LD_DOMAIN_COLS(self):
        assert_array_equal(["temperature", "gravity"], settings.LD_DOMAIN_COLS)

    def test_atm_dataframe_main_cols(self):
        observed = [settings.ATM_MODEL_DATAFRAME_FLUX, settings.ATM_MODEL_DATAFRAME_WAVE]
        assert_array_equal(["flux", "wave"], observed)

    def test_passband_main_cols(self):
        observed = [settings.PASSBAND_DATAFRAME_THROUGHPUT, settings.PASSBAND_DATAFRAME_WAVE]
        assert_array_equal(["throughput", "wavelength"], observed)

    def test__update_atlas_to_base_dir(self):
        # Reconfiguring the CK04 table path must propagate to every
        # Castelli-Kurucz entry of ATLAS_TO_BASE_DIR.
        settings.configure(CK04_ATM_TABLES="x")
        settings._update_atlas_to_base_dir()
        ck_values = [v for k, v in settings.ATLAS_TO_BASE_DIR.items() if str(k).startswith("c")]
        self.assertTrue(np.all(ck_values == ['x'] * len(ck_values)))
|
mikecokinaREPO_NAMEelisaPATH_START.@elisa_extracted@elisa-master@unittests@test_config.py@.PATH_END.py
|
{
"filename": "_text.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/title/_text.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class TextValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``text`` property of ``layout.title``."""

    def __init__(self, plotly_name="text", parent_name="layout.title", **kwargs):
        # Callers may override the default edit type through **kwargs.
        edit_type = kwargs.pop("edit_type", "layoutstyle")
        super(TextValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@title@_text.py@.PATH_END.py
|
{
"filename": "test_geometry.py",
"repo_name": "cta-observatory/ctapipe",
"repo_path": "ctapipe_extracted/ctapipe-main/src/ctapipe/instrument/camera/tests/test_geometry.py",
"type": "Python"
}
|
""" Tests for CameraGeometry """
from copy import deepcopy
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import AltAz, SkyCoord
from ctapipe.instrument import CameraGeometry, PixelShape
from ctapipe.instrument.warnings import FromNameWarning
def test_construct():
    """Check we can make a CameraGeometry from scratch"""
    xs = np.linspace(-10, 10, 100)
    ys = np.linspace(-10, 10, 100)

    def build(pix_type):
        # All other arguments held fixed; only the pixel-shape spec varies.
        return CameraGeometry(
            name="Unknown",
            pix_id=np.arange(100),
            pix_x=xs * u.m,
            pix_y=ys * u.m,
            pix_area=xs * u.m**2,
            pix_type=pix_type,
            pix_rotation="10d",
            cam_rotation="12d",
        )

    geom = build(PixelShape.SQUARE)
    assert geom.name == "Unknown"
    assert geom.pix_area is not None
    assert (geom.pix_rotation.deg - 10) < 1e-5
    assert (geom.cam_rotation.deg - 10) < 1e-5

    # An unknown pixel-shape spec must be rejected.
    with pytest.raises(TypeError):
        build("foo")

    # A recognised string spelling is accepted too.
    build("rectangular")
def test_make_rectangular_camera_geometry():
    """Check that we can construct a dummy camera with square geometry"""
    dummy = CameraGeometry.make_rectangular()
    # x and y pixel coordinate arrays must line up one-to-one.
    assert dummy.pix_x.shape == dummy.pix_y.shape
def test_load_lst_camera(prod5_lst):
    """test that a specific camera has the expected attributes"""
    geometry = prod5_lst.camera.geometry
    # The prod5 LST camera has 1855 hexagonal pixels.
    assert len(geometry.pix_x) == 1855
    assert geometry.pix_type == PixelShape.HEXAGON
def test_position_to_pix_index(prod5_lst):
    """test that we can lookup a pixel from a coordinate"""
    geometry = prod5_lst.camera.geometry
    # Scalar lookup.
    assert geometry.position_to_pix_index(0.80 * u.m, 0.79 * u.m) == 1575
    # Vectorized lookup returns one id per coordinate pair.
    found = geometry.position_to_pix_index([0.8, 0.8] * u.m, [0.79, 0.79] * u.m)
    np.testing.assert_array_equal(found, [1575, 1575])
    # Empty input yields an empty result.
    assert len(geometry.position_to_pix_index([] * u.m, [] * u.m)) == 0
    # Coordinates outside the camera map to the integer sentinel value.
    assert geometry.position_to_pix_index(5 * u.m, 5 * u.m) == np.iinfo(int).min
def test_find_neighbor_pixels():
    """test basic neighbor functionality"""
    grid_side = 5
    xx, yy = u.Quantity(
        np.meshgrid(np.linspace(-5, 5, grid_side), np.linspace(-5, 5, grid_side)),
        u.cm,
    )
    xx = xx.ravel()
    yy = yy.ravel()
    n_pixels = len(xx)
    geom = CameraGeometry(
        "test",
        pix_id=np.arange(n_pixels),
        pix_area=u.Quantity(np.full(n_pixels, 4), u.cm**2),
        pix_x=xx.ravel(),
        pix_y=yy.ravel(),
        pix_type="rectangular",
    )
    # Pixel 11 sits in the interior of the 5x5 grid: four direct neighbors.
    assert set(geom.neighbors[11]) == {16, 6, 10, 12}
def test_neighbor_pixels(camera_geometry):
    """
    Each camera should have a sensible number of neighbors per pixel:
    4 for rectangular grids and 6 for hexagonal ones. Away from the
    camera edge, the majority of pixels share that count.
    """
    geom = camera_geometry
    n_pix = len(geom.pix_id)
    counts = [len(nbrs) for nbrs in geom.neighbors]

    if geom.pix_type == PixelShape.HEXAGON:
        assert counts.count(6) > 0.5 * n_pix
        assert counts.count(6) > counts.count(4)

    if geom.pix_type == PixelShape.SQUARE:
        assert counts.count(4) > 0.5 * n_pix
        assert counts.count(5) == 0
        assert counts.count(6) == 0

    # Whipple has inhomogenious pixels that mess with the pixel
    # neighborhood calculation, so skip the symmetry check there.
    if not geom.name.startswith("Whipple"):
        assert np.all(geom.neighbor_matrix == geom.neighbor_matrix.T)

    # no pixel should have a single neighbor
    assert counts.count(1) == 0
def test_calc_pixel_neighbors_square():
    """Edge and interior neighbors on a 20x20 square grid are correct."""
    xx, yy = np.meshgrid(np.arange(20), np.arange(20))
    cam = CameraGeometry(
        name="test",
        pix_id=np.arange(400),
        pix_type="rectangular",
        pix_x=u.Quantity(xx.ravel(), u.cm),
        pix_y=u.Quantity(yy.ravel(), u.cm),
        pix_area=u.Quantity(np.ones(400), u.cm**2),
    )
    # corner pixel has two neighbors, an interior pixel has four
    assert set(cam.neighbors[0]) == {1, 20}
    assert set(cam.neighbors[21]) == {1, 20, 22, 41}
def test_calc_pixel_neighbors_square_diagonal():
    """
    Neighbors of square pixels include the diagonals when requested
    via ``calc_pixel_neighbors(diagonal=True)``.
    """
    xx, yy = np.meshgrid(np.arange(20), np.arange(20))
    cam = CameraGeometry(
        name="test",
        pix_id=np.arange(400),
        pix_type="rectangular",
        pix_x=u.Quantity(xx.ravel(), u.cm),
        pix_y=u.Quantity(yy.ravel(), u.cm),
        pix_area=u.Quantity(np.ones(400), u.cm**2),
    )
    cam._neighbors = cam.calc_pixel_neighbors(diagonal=True)
    # interior pixel 21 now has all 8 surrounding pixels as neighbors
    assert set(cam.neighbors[21]) == {0, 1, 2, 20, 22, 40, 41, 42}
def test_to_and_from_table(prod5_lst):
    """Converting a geometry to an astropy Table and back loses nothing."""
    original = prod5_lst.camera.geometry
    restored = original.from_table(original.to_table())

    assert original.name == restored.name
    assert (original.pix_x == restored.pix_x).all()
    assert (original.pix_y == restored.pix_y).all()
    assert (original.pix_area == restored.pix_area).all()
    assert original.pix_type == restored.pix_type
def test_write_read(tmpdir, prod5_lst):
    """Round-tripping a geometry through a FITS file loses no info."""
    path = str(tmpdir.join("testcamera.fits.gz"))
    original = prod5_lst.camera.geometry

    original.to_table().write(path, overwrite=True)
    restored = original.from_table(path)

    assert original.name == restored.name
    assert (original.pix_x == restored.pix_x).all()
    assert (original.pix_y == restored.pix_y).all()
    assert (original.pix_area == restored.pix_area).all()
    assert original.pix_type == restored.pix_type
def test_precal_neighbors():
    """
    Pre-calculated neighbor lists passed to the constructor must not be
    overwritten by the automatic neighbor computation.
    """
    geom = CameraGeometry(
        name="TestCam",
        pix_id=np.arange(3),
        pix_x=np.arange(3) * u.deg,
        pix_y=np.arange(3) * u.deg,
        pix_area=np.ones(3) * u.deg**2,
        neighbors=[[1], [0, 2], [1]],
        pix_type="rectangular",
        pix_rotation="0deg",
        cam_rotation="0deg",
    )

    assert len(geom.neighbors) == len(geom.pix_x)

    matrix = geom.neighbor_matrix
    assert matrix.shape == (len(geom.pix_x), len(geom.pix_x))
    # the neighborhood relation must be symmetric
    assert np.all(matrix.T == matrix)
def test_slicing(prod5_mst_nectarcam):
    """A camera geometry can be sliced down to a subset of pixels."""
    geom = prod5_mst_nectarcam.camera.geometry

    # contiguous slice
    contiguous = geom[100:200]
    assert len(contiguous.pix_x) == 100
    assert len(contiguous.pix_y) == 100
    assert len(contiguous.pix_area) == 100
    assert len(contiguous.pix_id) == 100

    # fancy indexing with an explicit pixel list
    picked = geom[[5, 7, 8, 9, 10]]
    assert picked.pix_id[0] == 5
    assert picked.pix_id[1] == 7
    assert len(picked.pix_x) == 5
def test_slicing_rotation(camera_geometry):
    """Slicing still works after the geometry has been rotated."""
    camera_geometry.rotate("25d")
    subset = camera_geometry[5:10]
    # the sliced coordinates must match the rotated originals
    assert subset.pix_x[0] == camera_geometry.pix_x[5]
def test_rectangle_patch_neighbors():
    """Test that a simple rectangular camera has the expected neighbors."""
    # slightly jittered 3x3 grid of rectangular pixels
    pix_x = np.array([-1.1, 0.1, 0.9, -1, 0, 1, -0.9, -0.1, 1.1]) * u.m
    pix_y = np.array([1.1, 1, 0.9, -0.1, 0, 0.1, -0.9, -1, -1.1]) * u.m
    pix_area = np.full(len(pix_x), 0.01) * u.m**2

    cam = CameraGeometry(
        name="testcam",
        pix_id=np.arange(pix_x.size),
        pix_x=pix_x,
        pix_y=pix_y,
        pix_area=pix_area,
        pix_type="rectangular",
    )

    # the neighborhood is symmetric; the center pixel has 4 neighbors
    # while the corner pixels have only 2
    assert np.all(cam.neighbor_matrix.T == cam.neighbor_matrix)
    assert cam.neighbor_matrix.sum(axis=0).max() == 4
    assert cam.neighbor_matrix.sum(axis=0).min() == 2
def test_border_pixels(prod5_lst, prod3_astri):
    """Border-pixel masks report the expected counts and membership."""
    lst_cam = prod5_lst.camera.geometry
    assert np.sum(lst_cam.get_border_pixel_mask(1)) == 168
    assert np.sum(lst_cam.get_border_pixel_mask(2)) == 330

    astri_cam = prod3_astri.camera.geometry
    assert np.sum(astri_cam.get_border_pixel_mask(1)) == 212
    assert np.sum(astri_cam.get_border_pixel_mask(2)) == 408

    # spot-check membership: the first and last pixels are on the
    # border of the square ASTRI camera, pixel 521 is not
    assert astri_cam.get_border_pixel_mask(1)[0]
    assert astri_cam.get_border_pixel_mask(1)[2351]
    assert not astri_cam.get_border_pixel_mask(1)[521]
def test_equals(prod5_lst, prod5_mst_nectarcam):
    """The ``==`` operator compares geometry content, not identity."""
    cam = prod5_lst.camera.geometry
    clone = deepcopy(prod5_lst.camera.geometry)
    other = prod5_mst_nectarcam.camera.geometry

    assert cam is not clone
    assert cam == clone
    assert cam != other
def test_hashing(prod5_lst, prod5_mst_nectarcam):
    """Check that hashes are correctly computed."""
    cam1 = prod5_lst.camera.geometry
    cam2 = deepcopy(prod5_lst.camera.geometry)
    cam3 = prod5_mst_nectarcam.camera.geometry

    # cam1 and cam2 are equal and must share a hash, so the set
    # collapses them into a single element; cam3 differs
    assert len({cam1, cam2, cam3}) == 2
def test_camera_from_name(camera_geometry):
    """Every known camera can be reconstructed from its name."""
    # from_name is deprecated-style lookup, hence the expected warning
    with pytest.warns(FromNameWarning):
        rebuilt = CameraGeometry.from_name(camera_geometry.name)
    assert str(rebuilt) == camera_geometry.name
def test_camera_coordinate_transform(camera_geometry):
    """Geometry coordinates transform correctly between camera frames."""
    from ctapipe.coordinates import (
        CameraFrame,
        EngineeringCameraFrame,
        NominalFrame,
        TelescopeFrame,
    )

    geom = camera_geometry

    # the engineering frame flips and swaps the cartesian axes
    eng_geom = geom.transform_to(EngineeringCameraFrame())
    unit = geom.pix_x.unit
    assert np.allclose(geom.pix_x.to_value(unit), -eng_geom.pix_y.to_value(unit))
    assert np.allclose(geom.pix_y.to_value(unit), -eng_geom.pix_x.to_value(unit))

    # also test converting into a spherical (telescope) frame
    focal_length = 1.2 * u.m
    geom.frame = CameraFrame(focal_length=focal_length)
    pointing = SkyCoord(alt=70 * u.deg, az=0 * u.deg, frame=AltAz())
    tel_frame = TelescopeFrame(telescope_pointing=pointing)
    geom_tel = geom.transform_to(tel_frame)
    assert len(geom_tel.pix_x.to_value(u.deg)) == len(geom.pix_x)

    # nominal frame with a large offset, regression test for #2028:
    # pixel areas must stay (nearly) unchanged, i.e. the calculation
    # takes the translation into account
    origin = pointing.directional_offset_by(0 * u.deg, 5 * u.deg)
    geom_nominal = geom_tel.transform_to(NominalFrame(origin=origin))
    assert u.allclose(geom_nominal.pix_area, geom_tel.pix_area, rtol=0.01)

    # and test going backward from spherical to cartesian
    geom_cam = geom_tel.transform_to(CameraFrame(focal_length=focal_length))
    assert np.allclose(geom_cam.pix_x.to_value(unit), geom.pix_x.to_value(unit))
def test_guess_width():
    """Pixel width is guessed from the spacing of the coordinates."""
    xs = u.Quantity([0, 1, 2], u.cm)
    ys = u.Quantity([0, 0, 0], u.cm)
    # evenly spaced by 1 cm along x -> guessed width of 1 cm
    assert u.isclose(CameraGeometry.guess_pixel_width(xs, ys), 1 * u.cm)
def test_pixel_width():
    """Pixel width is derived from the area for hex and square pixels."""
    hex_geom = CameraGeometry(
        "test",
        pix_id=[1],
        pix_area=[2] * u.cm**2,
        pix_x=[0] * u.m,
        pix_y=[0] * u.m,
        pix_type="hex",
    )
    # flat-to-flat width of a regular hexagon with area 2 cm^2
    assert np.isclose(
        hex_geom.pixel_width.to_value(u.cm), [2 * np.sqrt(1 / np.sqrt(3))]
    )

    square_geom = CameraGeometry(
        "test",
        pix_id=[1],
        pix_area=[2] * u.cm**2,
        pix_x=[0] * u.m,
        pix_y=[0] * u.m,
        pix_type="rect",
    )
    # side length of a square with area 2 cm^2 is sqrt(2) cm
    assert np.isclose(square_geom.pixel_width.to_value(u.cm), [np.sqrt(2)])
def test_guess_radius(prod5_lst, prod5_sst):
    """Camera radius is estimated from the pixel positions."""
    lst_cam = prod5_lst.camera.geometry
    assert u.isclose(lst_cam.guess_radius(), 1.1 * u.m, rtol=0.05)

    chec_cam = prod5_sst.camera.geometry
    assert u.isclose(chec_cam.guess_radius(), 0.16 * u.m, rtol=0.05)
def test_single_pixel(prod5_lst):
    """Regression test for #2316: a one-pixel geometry is consistent."""
    single = prod5_lst.camera.geometry[[0]]
    assert single.neighbor_matrix.shape == (1, 1)
    assert single.neighbor_matrix[0, 0]
def test_empty(prod5_lst):
    """Selecting zero pixels yields an empty but valid geometry."""
    geom = prod5_lst.camera.geometry
    nothing = geom[np.zeros(len(geom), dtype=bool)]
    assert nothing.neighbor_matrix.shape == (0, 0)
|
cta-observatoryREPO_NAMEctapipePATH_START.@ctapipe_extracted@ctapipe-main@src@ctapipe@instrument@camera@tests@test_geometry.py@.PATH_END.py
|
{
"filename": "test_gsl_splines.py",
"repo_name": "LSSTDESC/CCL",
"repo_path": "CCL_extracted/CCL-master/pyccl/tests/test_gsl_splines.py",
"type": "Python"
}
|
import pyccl as ccl
import numpy as np
def test_spline1d():
    """The 1D GSL spline arrays round-trip the distance spline."""
    cosmo = ccl.CosmologyVanillaLCDM()
    cosmo.compute_distances()

    gsl_spline = cosmo.cosmo.data.chi
    a_arr, chi_arr = ccl.pyutils._get_spline1d_arrays(gsl_spline)

    # re-evaluating the distance at the spline knots must reproduce
    # the stored spline values
    expected = ccl.comoving_radial_distance(cosmo, a_arr)
    assert np.allclose(chi_arr, expected)
def test_spline2d():
    """The 2D GSL spline arrays round-trip a Pk2D power spectrum."""
    a_in = np.linspace(0.1, 1, 10)
    lk_in = np.linspace(-3, 1, 20)
    pk_in = np.outer(a_in, np.exp(lk_in))

    pk2d = ccl.Pk2D(a_arr=a_in, lk_arr=lk_in, pk_arr=pk_in,
                    is_logp=False)

    # pull the raw arrays back out of the wrapped gsl_spline2d
    a_out, lk_out, pk_out = ccl.pyutils._get_spline2d_arrays(pk2d.psp.fka)

    # also evaluate the spline at the last scale factor
    cosmo = ccl.CosmologyVanillaLCDM()
    pk_eval = pk2d(k=np.exp(lk_in), a=a_in[-1], cosmo=cosmo)

    assert np.allclose(a_in, a_out)
    assert np.allclose(lk_in, lk_out)
    assert np.allclose(pk_in, pk_out)
    assert np.allclose(pk_in[-1], pk_eval)
|
LSSTDESCREPO_NAMECCLPATH_START.@CCL_extracted@CCL-master@pyccl@tests@test_gsl_splines.py@.PATH_END.py
|
{
"filename": "_text.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/ohlc/_text.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``text`` property of ``ohlc`` traces."""

    def __init__(self, plotly_name="text", parent_name="ohlc", **kwargs):
        # Default to array-capable, recalc-on-edit behaviour unless the
        # caller overrides these options explicitly.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "calc")
        super(TextValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@ohlc@_text.py@.PATH_END.py
|
{
"filename": "driver.py",
"repo_name": "California-Planet-Search/rvsearch",
"repo_path": "rvsearch_extracted/rvsearch-master/rvsearch/driver.py",
"type": "Python"
}
|
"""
Driver functions for the rvsearch pipeline.\
These functions are meant to be used only with\
the `cli.py` command line interface.
"""
from __future__ import print_function
import warnings
import os
import copy
import pandas as pd
import pickle
import radvel
from radvel.utils import working_directory
import rvsearch
def run_search(args):
    """Run a planet search from a given RadVel setup file.

    Args:
        args (ArgumentParser): command line arguments
    """
    config_file = args.setupfn
    conf_base = os.path.basename(config_file).split('.')[0]
    P, post = radvel.utils.initialize_posterior(config_file)

    if args.mstar is None:
        # Fall back on the stellar mass recorded in the setup file, if any.
        try:
            args.mstar = (P.stellar['mstar'], P.stellar['mstar_err'])
        except (AttributeError, KeyError):
            pass
    else:
        args.mstar = [float(x) for x in args.mstar]

    # output directory name is derived from the setup file basename
    starname = conf_base
    data = P.data

    if args.known and P.nplanets > 0:
        # Start from a max-likelihood fit of the known planets.
        # (An unused deep copy of the posterior was removed here.)
        post = radvel.fitting.maxlike_fitting(post, verbose=True)
    else:
        post = None

    searcher = rvsearch.search.Search(data, starname=starname,
                                      min_per=args.minP,
                                      workers=args.num_cpus,
                                      post=post,
                                      trend=args.trend,
                                      verbose=args.verbose,
                                      mcmc=args.mcmc,
                                      mstar=args.mstar,
                                      max_planets=args.maxplanets)
    searcher.run_search(outdir=args.output_dir)
def injections(args):
    """Run injection-recovery tests on a completed search.

    Args:
        args (ArgumentParser): command line arguments
    """
    plim = (args.minP, args.maxP)
    klim = (args.minK, args.maxK)
    elim = (args.minE, args.maxE)
    # NOTE: the unused locals ``rstar`` and ``teff`` were removed; they
    # were read from args but never referenced.

    sdir = args.search_dir
    with working_directory(sdir):
        sfile = os.path.abspath('search.pkl')
        if not os.path.exists(sfile):
            print("No search file found in {}".format(sdir))
            os._exit(1)

        if not os.path.exists('recoveries.csv') or args.overwrite:
            try:
                inj = rvsearch.inject.Injections(sfile, plim, klim, elim,
                                                 num_sim=args.num_inject,
                                                 full_grid=args.full_grid,
                                                 verbose=args.verbose,
                                                 beta_e=args.betaE)
                recoveries = inj.run_injections(num_cpus=args.num_cpus)
                inj.save()
            except IOError:
                print("WARNING: Problem with {}".format(sfile))
                os._exit(1)
        else:
            # results already exist and we are not overwriting
            recoveries = pd.read_csv('recoveries.csv')
def plots(args):
    """Generate plots for a completed search.

    Args:
        args (ArgumentParser): command line arguments
    """
    sdir = args.search_dir
    with working_directory(sdir):
        sfile = os.path.abspath('search.pkl')
        run_name = sfile.split('/')[-2]
        if not os.path.exists(sfile):
            print("No search file found in {}".format(sdir))
            os._exit(1)
        else:
            # use a context manager so the pickle file handle is closed
            # (the original leaked an open file object)
            with open(sfile, 'rb') as f:
                searcher = pickle.load(f)

        for ptype in args.type:
            print("Creating {} plot for {}".format(ptype, run_name))

            if ptype == 'recovery':
                rfile = os.path.abspath('recoveries.csv')
                if not os.path.exists(rfile):
                    print("No recovery file found in {}".format(sdir))
                    os._exit(1)
                xcol = 'inj_au'
                ycol = 'inj_msini'
                xlabel = '$a$ [AU]'
                ylabel = r'M$\sin{i_p}$ [M$_\oplus$]'
                print("Plotting {} vs. {}".format(ycol, xcol))

                mstar = searcher.mstar
                comp = rvsearch.Completeness.from_csv(rfile, xcol=xcol,
                                                      ycol=ycol, mstar=mstar)
                cplt = rvsearch.plots.CompletenessPlots(comp,
                                                        searches=[searcher])
                fig = cplt.completeness_plot(title=run_name,
                                             xlabel=xlabel,
                                             ylabel=ylabel)

                # a single-argument os.path.join was a no-op; build the
                # output filename directly
                saveto = '{}_recoveries.{}'.format(run_name, args.fmt)
                fig.savefig(saveto, dpi=200)
                print("Recovery plot saved to {}".format(
                    os.path.abspath(saveto)))

            if ptype == 'summary':
                plotter = rvsearch.plots.PeriodModelPlot(
                    searcher,
                    saveplot='{}_summary.{}'.format(searcher.starname,
                                                    args.fmt))
                plotter.plot_summary()
|
California-Planet-SearchREPO_NAMErvsearchPATH_START.@rvsearch_extracted@rvsearch-master@rvsearch@driver.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "PrincetonUniversity/athena",
"repo_path": "athena_extracted/athena-master/tst/regression/scripts/tests/chemistry/__init__.py",
"type": "Python"
}
|
PrincetonUniversityREPO_NAMEathenaPATH_START.@athena_extracted@athena-master@tst@regression@scripts@tests@chemistry@__init__.py@.PATH_END.py
|
|
{
"filename": "training.ipynb",
"repo_name": "b-biswas/MADNESS",
"repo_path": "MADNESS_extracted/MADNESS-main/docs/tutorial_notebooks/training.ipynb",
"type": "Jupyter Notebook"
}
|
# Training
## Load modules
```python
import os
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from madness_deblender.callbacks import define_callbacks
from madness_deblender.FlowVAEnet import FlowVAEnet
from madness_deblender.losses import (
deblender_encoder_loss_wrapper,
deblender_loss_fn_wrapper,
)
from madness_deblender.utils import get_data_dir_path
tfd = tfp.distributions
```
2024-06-20 17:23:53.186556: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F AVX512_VNNI FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-06-20 17:23:53.412473: I tensorflow/core/util/port.cc:104] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2024-06-20 17:23:53.420171: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /pbs/throng/lsst/users/bbiswas/miniconda3/envs/madness/lib/:
2024-06-20 17:23:53.420211: I tensorflow/compiler/xla/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
2024-06-20 17:23:56.401902: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /pbs/throng/lsst/users/bbiswas/miniconda3/envs/madness/lib/:
2024-06-20 17:23:56.402465: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /pbs/throng/lsst/users/bbiswas/miniconda3/envs/madness/lib/:
2024-06-20 17:23:56.402483: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.
2024-06-20 17:24:07.657709: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /pbs/throng/lsst/users/bbiswas/miniconda3/envs/madness/lib/:
2024-06-20 17:24:07.657765: W tensorflow/compiler/xla/stream_executor/cuda/cuda_driver.cc:265] failed call to cuInit: UNKNOWN ERROR (303)
2024-06-20 17:24:07.657802: I tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (jns-bbiswas.cc.in2p3.fr): /proc/driver/nvidia/version does not exist
2024-06-20 17:24:07.660142: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F AVX512_VNNI FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
## Creating toy data
```python
isolated_noisy_galaxies = np.random.rand(8, 11, 11, 6)
noiseless_galaxies = np.random.rand(8, 11, 11, 6)
blended_galaxies = np.random.rand(8, 11, 11, 6)
```
## Define the model
```python
kl_prior = tfd.Independent(
tfd.Normal(loc=tf.zeros(1), scale=1), reinterpreted_batch_ndims=1
)
f_net = FlowVAEnet(
stamp_shape=11,
latent_dim=4,
filters_encoder=[1, 1, 1, 1],
filters_decoder=[1, 1, 1],
kernels_encoder=[1, 1, 1, 1],
kernels_decoder=[1, 1, 1],
dense_layer_units=1,
num_nf_layers=1,
kl_prior=kl_prior,
kl_weight=1,
)
```
## Train VAE as a denoiser
```python
vae_epochs = 2
data = np.random.rand(8, 11, 11, 6)
# Keras Callbacks
data_path = get_data_dir_path()
path_weights = os.path.join(data_path, "test_temp")
callbacks = define_callbacks(
os.path.join(path_weights, "vae"),
lr_scheduler_epochs=1,
patience=1,
)
_ = f_net.train_vae(
(isolated_noisy_galaxies[:6], noiseless_galaxies[:6]), # training
(isolated_noisy_galaxies[6:], noiseless_galaxies[6:]), # validation
callbacks=callbacks,
epochs=int(0.5 * vae_epochs),
train_encoder=True,
train_decoder=True,
track_kl=True,
optimizer=tf.keras.optimizers.Adam(1e-5, clipvalue=0.1),
loss_function=deblender_loss_fn_wrapper(
sigma_cutoff=np.array([1] * 6), # Noise level in the data
linear_norm_coeff=1, # coefficient of linear normalization
),
verbose=2,
# loss_function=vae_loss_fn_wrapper(sigma=noise_sigma, linear_norm_coeff=linear_norm_coeff),
)
```
Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_5 (InputLayer) [(None, 11, 11, 6)] 0
encoder (Functional) (None, 14) 94
latent_space (MultivariateN ((None, 4), 0
ormalTriL) (None, 4))
decoder (Functional) (None, 11, 11, 6) 420
=================================================================
Total params: 514
Trainable params: 514
Non-trainable params: 0
_________________________________________________________________
--- Training only VAE network ---
Encoder status: True
Decoder status: True
Number of epochs: 1
Epoch 1: val_mse improved from inf to 0.33792, saving model to /pbs/throng/lsst/users/bbiswas/FlowDeblender/madness_deblender/data/test_temp/vae/val_mse/weights.ckpt
Epoch 1: val_loss improved from inf to 142.60703, saving model to /pbs/throng/lsst/users/bbiswas/FlowDeblender/madness_deblender/data/test_temp/vae/val_loss/weights.ckpt
1/1 - 8s - loss: 139.7931 - mse: 0.3308 - kl_metric: 0.6579 - val_loss: 142.6070 - val_mse: 0.3379 - val_kl_metric: 1.1026 - lr: 4.0000e-06 - 8s/epoch - 8s/step
## Train Normalizing Flow
```python
f_net.load_vae_weights(os.path.join(path_weights, "vae", "val_loss"))
```
```python
flow_epochs = 2
callbacks = define_callbacks(
os.path.join(path_weights, "flow"),
lr_scheduler_epochs=1,
patience=1,
)
hist_flow = f_net.train_flow(
(
isolated_noisy_galaxies[:6],
np.zeros_like(isolated_noisy_galaxies[:6]),
), # training
(
isolated_noisy_galaxies[6:],
np.zeros_like(isolated_noisy_galaxies[6:]),
), # validation
callbacks=callbacks,
optimizer=tf.keras.optimizers.Adam(1e-4, clipvalue=0.01),
epochs=flow_epochs,
verbose=2,
)
```
Model: "model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_5 (InputLayer) [(None, 11, 11, 6)] 0
encoder (Functional) (None, 14) 94
latent_space (MultivariateN ((None, 4), 0
ormalTriL) (None, 4))
flow (Functional) (None,) 1480
=================================================================
Total params: 1,574
Trainable params: 1,480
Non-trainable params: 94
_________________________________________________________________
--- Training only FLOW network ---
Number of epochs: 2
Epoch 1/2
2024-06-20 17:24:21.309233: I tensorflow/compiler/xla/service/service.cc:173] XLA service 0x7f99cc040ee0 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2024-06-20 17:24:21.309292: I tensorflow/compiler/xla/service/service.cc:181] StreamExecutor device (0): Host, Default Version
2024-06-20 17:24:24.947340: I tensorflow/compiler/jit/xla_compilation_cache.cc:477] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.
WARNING:tensorflow:Can save best model only with val_mse available, skipping.
Can save best model only with val_mse available, skipping.
Epoch 1: val_loss improved from inf to 5.81680, saving model to /pbs/throng/lsst/users/bbiswas/FlowDeblender/madness_deblender/data/test_temp/flow/val_loss/weights.ckpt
1/1 - 9s - loss: 5.4337 - val_loss: 5.8168 - lr: 4.0000e-05 - 9s/epoch - 9s/step
Epoch 2/2
WARNING:tensorflow:Can save best model only with val_mse available, skipping.
Can save best model only with val_mse available, skipping.
Epoch 2: val_loss improved from 5.81680 to 5.41266, saving model to /pbs/throng/lsst/users/bbiswas/FlowDeblender/madness_deblender/data/test_temp/flow/val_loss/weights.ckpt
1/1 - 0s - loss: 5.3843 - val_loss: 5.4127 - lr: 1.6000e-05 - 390ms/epoch - 390ms/step
## Train VAE-deblender
```python
f_net_original = FlowVAEnet(
stamp_shape=11,
latent_dim=4,
filters_encoder=[1, 1, 1, 1],
filters_decoder=[1, 1, 1],
kernels_encoder=[1, 1, 1, 1],
kernels_decoder=[1, 1, 1],
dense_layer_units=1,
num_nf_layers=1,
kl_prior=kl_prior,
kl_weight=1,
)
f_net_original.load_vae_weights(os.path.join(path_weights, "vae", "val_loss"))
```
```python
callbacks = define_callbacks(
os.path.join(path_weights, "deblender"),
lr_scheduler_epochs=1,
patience=1,
)
```
```python
hist_deblender = f_net.train_encoder(
(blended_galaxies[:6], isolated_noisy_galaxies[:6]), # training
(blended_galaxies[6:], isolated_noisy_galaxies[6:]), # validation
callbacks=callbacks,
epochs=2,
optimizer=tf.keras.optimizers.Adam(1e-5, clipvalue=0.1),
loss_function=deblender_encoder_loss_wrapper(
original_encoder=f_net_original.encoder,
noise_sigma=np.array([1] * 6),
latent_dim=4,
),
verbose=2,
# loss_function=vae_loss_fn_wrapper(sigma=noise_sigma, linear_norm_coeff=linear_norm_coeff),
)
```
Model: "encoder"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 11, 11, 6)] 0
conv2d (Conv2D) (None, 6, 6, 1) 7
p_re_lu (PReLU) (None, 6, 6, 1) 36
conv2d_1 (Conv2D) (None, 3, 3, 1) 2
p_re_lu_1 (PReLU) (None, 3, 3, 1) 9
conv2d_2 (Conv2D) (None, 2, 2, 1) 2
p_re_lu_2 (PReLU) (None, 2, 2, 1) 4
conv2d_3 (Conv2D) (None, 1, 1, 1) 2
p_re_lu_3 (PReLU) (None, 1, 1, 1) 1
flatten (Flatten) (None, 1) 0
dense (Dense) (None, 1) 2
p_re_lu_4 (PReLU) (None, 1) 1
dense_1 (Dense) (None, 14) 28
=================================================================
Total params: 94
Trainable params: 0
Non-trainable params: 94
_________________________________________________________________
--- Training only encoder network ---
Number of epochs: 2
Epoch 1/2
WARNING:tensorflow:Can save best model only with val_mse available, skipping.
Can save best model only with val_mse available, skipping.
Epoch 1: val_loss improved from inf to 3.59225, saving model to /pbs/throng/lsst/users/bbiswas/FlowDeblender/madness_deblender/data/test_temp/deblender/val_loss/weights.ckpt
1/1 - 3s - loss: 4.7975 - val_loss: 3.5923 - lr: 4.0000e-06 - 3s/epoch - 3s/step
Epoch 2/2
WARNING:tensorflow:Can save best model only with val_mse available, skipping.
Can save best model only with val_mse available, skipping.
Epoch 2: val_loss improved from 3.59225 to 2.49609, saving model to /pbs/throng/lsst/users/bbiswas/FlowDeblender/madness_deblender/data/test_temp/deblender/val_loss/weights.ckpt
1/1 - 0s - loss: 5.8734 - val_loss: 2.4961 - lr: 1.6000e-06 - 110ms/epoch - 110ms/step
```python
```
|
b-biswasREPO_NAMEMADNESSPATH_START.@MADNESS_extracted@MADNESS-main@docs@tutorial_notebooks@training.ipynb@.PATH_END.py
|
{
"filename": "test_scaling_converter.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/src/amuse/test/suite/core_tests/test_scaling_converter.py",
"type": "Python"
}
|
from amuse.units import scaling_converter
from amuse.units import nbody_system
from amuse.test import amusetest
class TestScalingConverter(amusetest.TestCase):
    """Unit tests for :class:`scaling_converter.ScalingConverter`."""

    def test1(self):
        """A plain time quantity is scaled by the time factor."""
        converter = scaling_converter.ScalingConverter(
            length=0.2,
            time=0.1,
        )
        # renamed from ``input`` to avoid shadowing the builtin
        quantity = 1 | nbody_system.time
        output = converter.convert(quantity)
        self.assertAlmostRelativeEquals(output, 0.1 | nbody_system.time)

    def test2(self):
        """A squared length quantity is scaled by the squared factor."""
        converter = scaling_converter.ScalingConverter(
            length=0.2,
            time=0.1,
        )
        quantity = 1 | nbody_system.length ** 2
        output = converter.convert(quantity)
        self.assertAlmostRelativeEquals(
            output, 0.2 * 0.2 | nbody_system.length ** 2)
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@src@amuse@test@suite@core_tests@test_scaling_converter.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.