| text (string, length 12-1.05M) | repo_name (string, length 5-86) | path (string, length 4-191) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12-1.05M) | keyword (list, length 1-23) | text_hash (string, length 64) |
|---|---|---|---|---|---|---|---|
"""
Implementation of Regression on Order Statistics for imputing left-
censored (non-detect data)
Method described in *Nondetects and Data Analysis* by Dennis R.
Helsel (John Wiley, 2005) to estimate the left-censored (non-detect)
values of a dataset.
Author: Paul M. Hobson
Company: Geosyntec Consultants (Portland, OR)
Date: 2016-06-14
"""
from __future__ import division
import warnings
import numpy
from scipy import stats
import pandas
from statsmodels.compat.pandas import sort_values
def _ros_sort(df, observations, censorship, warn=False):
"""
    Prepares a dataframe for ROS by sorting it in ascending order,
    with the left-censored observations first. Censored observations
    larger than the maximum uncensored observation are removed from
    the dataframe.
Parameters
----------
df : pandas.DataFrame
observations : str
Name of the column in the dataframe that contains observed
values. Censored values should be set to the detection (upper)
limit.
censorship : str
        Name of the column in the dataframe that indicates whether an
        observation is left-censored (i.e., True -> censored,
        False -> uncensored).
Returns
    -------
sorted_df : pandas.DataFrame
The sorted dataframe with all columns dropped except the
observation and censorship columns.
"""
# separate uncensored data from censored data
censored = sort_values(df[df[censorship]], observations, axis=0)
uncensored = sort_values(df[~df[censorship]], observations, axis=0)
if censored[observations].max() > uncensored[observations].max():
censored = censored[censored[observations] <= uncensored[observations].max()]
if warn:
msg = ("Dropping censored observations greater than "
"the max uncensored observation.")
warnings.warn(msg)
return censored.append(uncensored)[[observations, censorship]].reset_index(drop=True)
def cohn_numbers(df, observations, censorship):
"""
Computes the Cohn numbers for the detection limits in the dataset.
The Cohn Numbers are:
- :math:`A_j =` the number of uncensored obs above the jth
threshold.
- :math:`B_j =` the number of observations (cen & uncen) below
the jth threshold.
- :math:`C_j =` the number of censored observations at the jth
threshold.
- :math:`\mathrm{PE}_j =` the probability of exceeding the jth
threshold
- :math:`\mathrm{DL}_j =` the unique, sorted detection limits
- :math:`\mathrm{DL}_{j+1} = \mathrm{DL}_j` shifted down a
single index (row)
Parameters
----------
    df : pandas.DataFrame
observations : str
Name of the column in the dataframe that contains observed
values. Censored values should be set to the detection (upper)
limit.
censorship : str
        Name of the column in the dataframe that indicates whether an
        observation is left-censored (i.e., True -> censored,
        False -> uncensored).
Returns
-------
cohn : pandas.DataFrame
"""
def nuncen_above(row):
""" A, the number of uncensored obs above the given threshold.
"""
# index of observations above the lower_dl DL
above = df[observations] >= row['lower_dl']
# index of observations below the upper_dl DL
below = df[observations] < row['upper_dl']
# index of non-detect observations
detect = df[censorship] == False
# return the number of observations where all conditions are True
return df[above & below & detect].shape[0]
def nobs_below(row):
""" B, the number of observations (cen & uncen) below the given
threshold
"""
# index of data less than the lower_dl DL
less_than = df[observations] < row['lower_dl']
# index of data less than or equal to the lower_dl DL
less_thanequal = df[observations] <= row['lower_dl']
# index of detects, non-detects
uncensored = df[censorship] == False
censored = df[censorship] == True
# number observations less than or equal to lower_dl DL and non-detect
LTE_censored = df[less_thanequal & censored].shape[0]
# number of observations less than lower_dl DL and detected
LT_uncensored = df[less_than & uncensored].shape[0]
# return the sum
return LTE_censored + LT_uncensored
def ncen_equal(row):
""" C, the number of censored observations at the given
threshold.
"""
censored_index = df[censorship]
censored_data = df[observations][censored_index]
censored_below = censored_data == row['lower_dl']
return censored_below.sum()
def set_upper_limit(cohn):
""" Sets the upper_dl DL for each row of the Cohn dataframe. """
if cohn.shape[0] > 1:
return cohn['lower_dl'].shift(-1).fillna(value=numpy.inf)
else:
return [numpy.inf]
def compute_PE(A, B):
""" Computes the probability of excedance for each row of the
Cohn dataframe. """
N = len(A)
PE = numpy.empty(N, dtype='float64')
PE[-1] = 0.0
for j in range(N-2, -1, -1):
PE[j] = PE[j+1] + (1 - PE[j+1]) * A[j] / (A[j] + B[j])
return PE
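    # Worked sketch of the recursion above: with A = [3, 2] and B = [1, 4]
    # (plus the terminal padding row appended below), PE is 0 for the last
    # row, then PE[1] = 0 + (1 - 0)*2/(2 + 4) = 1/3 and
    # PE[0] = 1/3 + (2/3)*3/(3 + 1) = 5/6.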
    # unique, sorted detection limits
censored_data = df[censorship]
DLs = pandas.unique(df.loc[censored_data, observations])
DLs.sort()
    # if there is an observation smaller than the minimum detection limit,
# add that value to the array
if DLs.shape[0] > 0:
if df[observations].min() < DLs.min():
DLs = numpy.hstack([df[observations].min(), DLs])
# create a dataframe
    # (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
cohn = pandas.DataFrame(DLs, columns=['lower_dl'])
cohn.loc[:, 'upper_dl'] = set_upper_limit(cohn)
cohn.loc[:, 'nuncen_above'] = cohn.apply(nuncen_above, axis=1)
cohn.loc[:, 'nobs_below'] = cohn.apply(nobs_below, axis=1)
cohn.loc[:, 'ncen_equal'] = cohn.apply(ncen_equal, axis=1)
cohn = cohn.reindex(range(DLs.shape[0] + 1))
cohn.loc[:, 'prob_exceedance'] = compute_PE(cohn['nuncen_above'], cohn['nobs_below'])
else:
dl_cols = ['lower_dl', 'upper_dl', 'nuncen_above',
'nobs_below', 'ncen_equal', 'prob_exceedance']
cohn = pandas.DataFrame(numpy.empty((0, len(dl_cols))), columns=dl_cols)
return cohn
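# Illustrative sketch (not part of the public API): Cohn numbers for a tiny
# synthetic dataset with detection limits at 0.5 and 1.0. The result has one
# row per detection limit plus a padding row, with the A, B, C, and
# prob_exceedance columns described in the docstring above.
#
#   example = pandas.DataFrame({
#       'conc': [0.5, 0.5, 1.0, 1.5, 2.0, 2.5],
#       'cen':  [True, True, True, False, False, False],
#   })
#   cohn = cohn_numbers(example, observations='conc', censorship='cen')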
def _detection_limit_index(obs, cohn):
"""
Locates the corresponding detection limit for each observation.
Basically, creates an array of indices for the detection limits
(Cohn numbers) corresponding to each data point.
Parameters
----------
obs : float
A single observation from the larger dataset.
cohn : pandas.DataFrame
Dataframe of Cohn numbers.
Returns
-------
det_limit_index : int
The index of the corresponding detection limit in `cohn`
See also
--------
cohn_numbers
"""
if cohn.shape[0] > 0:
index, = numpy.where(cohn['lower_dl'] <= obs)
det_limit_index = index[-1]
else:
det_limit_index = 0
return det_limit_index
def _ros_group_rank(df, dl_idx, censorship):
"""
Ranks each observation within the data groups.
In this case, the groups are defined by the record's detection
limit index and censorship status.
Parameters
----------
df : pandas.DataFrame
dl_idx : str
        Name of the column in the dataframe that contains the index of
        each observation's corresponding detection limit in the `cohn`
        dataframe.
censorship : str
        Name of the column in the dataframe that indicates whether an
        observation is left-censored (i.e., True -> censored,
        False -> uncensored).
Returns
-------
ranks : numpy.array
Array of ranks for the dataset.
"""
    # (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
ranks = df.copy()
ranks.loc[:, 'rank'] = 1
ranks = (
ranks.groupby(by=[dl_idx, censorship])['rank']
.transform(lambda g: g.cumsum())
)
return ranks
def _ros_plot_pos(row, censorship, cohn):
"""
ROS-specific plotting positions.
Computes the plotting position for an observation based on its rank,
censorship status, and detection limit index.
Parameters
----------
row : pandas.Series or dict-like
Full observation (row) from a censored dataset. Requires a
'rank', 'detection_limit', and `censorship` column.
censorship : str
        Name of the column in the dataframe that indicates whether an
        observation is left-censored (i.e., True -> censored,
        False -> uncensored).
cohn : pandas.DataFrame
Dataframe of Cohn numbers.
Returns
-------
plotting_position : float
See also
--------
cohn_numbers
"""
DL_index = row['det_limit_index']
rank = row['rank']
censored = row[censorship]
dl_1 = cohn.iloc[DL_index]
dl_2 = cohn.iloc[DL_index + 1]
if censored:
return (1 - dl_1['prob_exceedance']) * rank / (dl_1['ncen_equal']+1)
else:
return (1 - dl_1['prob_exceedance']) + (dl_1['prob_exceedance'] - dl_2['prob_exceedance']) * \
rank / (dl_1['nuncen_above']+1)
def _norm_plot_pos(observations):
"""
Computes standard normal (Gaussian) plotting positions using scipy.
Parameters
----------
observations : array-like
Sequence of observed quantities.
Returns
-------
plotting_position : array of floats
"""
ppos, sorted_res = stats.probplot(observations, fit=False)
return stats.norm.cdf(ppos)
def plotting_positions(df, censorship, cohn):
"""
Compute the plotting positions for the observations.
    The ROS-specific plotting positions are based on the observations'
rank, censorship status, and corresponding detection limit.
Parameters
----------
df : pandas.DataFrame
censorship : str
        Name of the column in the dataframe that indicates whether an
        observation is left-censored (i.e., True -> censored,
        False -> uncensored).
cohn : pandas.DataFrame
Dataframe of Cohn numbers.
Returns
-------
plotting_position : array of float
See also
--------
cohn_numbers
"""
plot_pos = df.apply(lambda r: _ros_plot_pos(r, censorship, cohn), axis=1)
# correctly sort the plotting positions of the ND data:
ND_plotpos = plot_pos[df[censorship]]
ND_plotpos.values.sort()
plot_pos[df[censorship]] = ND_plotpos
return plot_pos
def _impute(df, observations, censorship, transform_in, transform_out):
"""
    Executes the basic regression on order statistics (ROS) procedure.
    Uses ROS to impute censored values from the best-fit line of a
    probability plot of the uncensored values.
Parameters
----------
df : pandas.DataFrame
observations : str
Name of the column in the dataframe that contains observed
values. Censored values should be set to the detection (upper)
limit.
censorship : str
        Name of the column in the dataframe that indicates whether an
        observation is left-censored (i.e., True -> censored,
        False -> uncensored).
transform_in, transform_out : callable
        Transformations applied to the data before fitting the line,
        and to the values estimated from that line, respectively.
        Typically, `numpy.log` and `numpy.exp` are used.
Returns
-------
estimated : pandas.DataFrame
A new dataframe with two new columns: "estimated" and "final".
The "estimated" column contains of the values inferred from the
best-fit line. The "final" column contains the estimated values
only where the original observations were censored, and the original
observations everwhere else.
"""
# detect/non-detect selectors
uncensored_mask = df[censorship] == False
censored_mask = df[censorship] == True
# fit a line to the logs of the detected data
fit_params = stats.linregress(
df['Zprelim'][uncensored_mask],
transform_in(df[observations][uncensored_mask])
)
# pull out the slope and intercept for use later
slope, intercept = fit_params[:2]
# model the data based on the best-fit curve
    # (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
df.loc[:, 'estimated'] = transform_out(slope * df['Zprelim'][censored_mask] + intercept)
df.loc[:, 'final'] = numpy.where(df[censorship], df['estimated'], df[observations])
return df
def _do_ros(df, observations, censorship, transform_in, transform_out):
"""
    Dataframe-centric function to impute censored values with ROS.
    Prepares a dataframe for, and then estimates the values of, a
    censored dataset using Regression on Order Statistics.
Parameters
----------
df : pandas.DataFrame
observations : str
Name of the column in the dataframe that contains observed
values. Censored values should be set to the detection (upper)
limit.
censorship : str
        Name of the column in the dataframe that indicates whether an
        observation is left-censored (i.e., True -> censored,
        False -> uncensored).
transform_in, transform_out : callable
        Transformations applied to the data before fitting the line,
        and to the values estimated from that line, respectively.
        Typically, `numpy.log` and `numpy.exp` are used.
Returns
-------
estimated : pandas.DataFrame
A new dataframe with two new columns: "estimated" and "final".
The "estimated" column contains of the values inferred from the
best-fit line. The "final" column contains the estimated values
only where the original observations were censored, and the original
observations everwhere else.
"""
# compute the Cohn numbers
cohn = cohn_numbers(df, observations=observations, censorship=censorship)
    # (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
modeled = _ros_sort(df, observations=observations, censorship=censorship)
modeled.loc[:, 'det_limit_index'] = modeled[observations].apply(_detection_limit_index, args=(cohn,))
modeled.loc[:, 'rank'] = _ros_group_rank(modeled, 'det_limit_index', censorship)
modeled.loc[:, 'plot_pos'] = plotting_positions(modeled, censorship, cohn)
modeled.loc[:, 'Zprelim'] = stats.norm.ppf(modeled['plot_pos'])
return _impute(modeled, observations, censorship, transform_in, transform_out)
def impute_ros(observations, censorship, df=None, min_uncensored=2,
max_fraction_censored=0.8, substitution_fraction=0.5,
transform_in=numpy.log, transform_out=numpy.exp,
as_array=True):
"""
Impute censored dataset using Regression on Order Statistics (ROS).
Method described in *Nondetects and Data Analysis* by Dennis R.
Helsel (John Wiley, 2005) to estimate the left-censored (non-detect)
    values of a dataset. When there is insufficient non-censored data,
simple substitution is used.
Parameters
----------
observations : str or array-like
Label of the column or the float array of censored observations
censorship : str
Label of the column or the bool array of the censorship
status of the observations.
* True if censored,
* False if uncensored
df : pandas.DataFrame, optional
If `observations` and `censorship` are labels, this is the
DataFrame that contains those columns.
min_uncensored : int (default is 2)
The minimum number of uncensored values required before ROS
can be used to impute the censored observations. When this
        criterion is not met, simple substitution is used instead.
max_fraction_censored : float (default is 0.8)
The maximum fraction of censored data below which ROS can be
used to impute the censored observations. When this fraction is
        exceeded, simple substitution is used instead.
substitution_fraction : float (default is 0.5)
The fraction of the detection limit to be used during simple
substitution of the censored values.
transform_in : callable (default is numpy.log)
Transformation to be applied to the values prior to fitting a
line to the plotting positions vs. uncensored values.
transform_out : callable (default is numpy.exp)
Transformation to be applied to the imputed censored values
estimated from the previously computed best-fit line.
as_array : bool (default is True)
When True, a numpy array of the imputed observations is
returned. Otherwise, a modified copy of the original dataframe
with all of the intermediate calculations is returned.
Returns
-------
imputed : numpy.array (default) or pandas.DataFrame
The final observations where the censored values have either been
imputed through ROS or substituted as a fraction of the
detection limit.
Notes
-----
This function requires pandas 0.14 or more recent.
"""
# process arrays into a dataframe, if necessary
if df is None:
df = pandas.DataFrame({'obs': observations, 'cen': censorship})
observations = 'obs'
censorship = 'cen'
# basic counts/metrics of the dataset
N_observations = df.shape[0]
N_censored = df[censorship].astype(int).sum()
N_uncensored = N_observations - N_censored
fraction_censored = N_censored / N_observations
# add plotting positions if there are no censored values
    # (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
if N_censored == 0:
output = df[[observations, censorship]].copy()
output.loc[:, 'final'] = df[observations]
# substitute w/ fraction of the DLs if there's insufficient
# uncensored data
    # (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
elif (N_uncensored < min_uncensored) or (fraction_censored > max_fraction_censored):
output = df[[observations, censorship]].copy()
output.loc[:, 'final'] = df[observations]
output.loc[df[censorship], 'final'] *= substitution_fraction
# normal ROS stuff
else:
output = _do_ros(df, observations, censorship, transform_in, transform_out)
# convert to an array if necessary
if as_array:
output = output['final'].values
return output
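# Minimal usage sketch (hypothetical data; the column names are arbitrary):
#
#   data = pandas.DataFrame({
#       'res': [2.0, 4.2, 4.62, 5.0, 5.0, 5.5, 5.57, 5.66],
#       'cen': [True, True, False, True, True, False, False, False],
#   })
#   imputed = impute_ros('res', 'cen', df=data, as_array=True)
#
# `imputed` is a numpy array in which the censored entries are replaced by
# values estimated from the best-fit line; with too few uncensored values
# (or too large a censored fraction) they would instead be replaced by
# `substitution_fraction` times the detection limit.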
| yl565/statsmodels | statsmodels/imputation/ros.py | Python | bsd-3-clause | 19,098 | ["Gaussian"] | a5a6d6f2f1bd3b5b8448e1e30f5ebc4a1425870a5148a32b74bceb3ac933eb0d |
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.simpleapi import mtd
from mantid.simpleapi import Abins, DeleteWorkspace
from AbinsModules import AbinsParameters, AbinsTestHelpers
try:
from pathos.multiprocessing import ProcessingPool
PATHOS_FOUND = True
except ImportError:
PATHOS_FOUND = False
class AbinsAdvancedParametersTest(unittest.TestCase):
def setUp(self):
# set up input for Abins
self._Si2 = "Si2-sc_AbinsAdvancedParameters"
self._wrk_name = self._Si2 + "_ref"
# before each test set AbinsParameters to default values
AbinsParameters.fwhm = 3.0
AbinsParameters.delta_width = 0.0005
AbinsParameters.tosca_final_neutron_energy = 32.0
AbinsParameters.tosca_cos_scattering_angle = -0.7069
AbinsParameters.tosca_a = 0.0000001
AbinsParameters.tosca_b = 0.005
AbinsParameters.tosca_c = 2.5
AbinsParameters.ab_initio_group = "PhononAB"
AbinsParameters.powder_data_group = "Powder"
AbinsParameters.crystal_data_group = "Crystal"
AbinsParameters.s_data_group = "S"
AbinsParameters.pkt_per_peak = 50
AbinsParameters.bin_width = 1.0
AbinsParameters.max_wavenumber = 4100.0
AbinsParameters.min_wavenumber = 0.0
AbinsParameters.s_relative_threshold = 0.001
AbinsParameters.s_absolute_threshold = 10e-8
AbinsParameters.optimal_size = 5000000
AbinsParameters.threads = 1
def tearDown(self):
AbinsTestHelpers.remove_output_files(list_of_names=["AbinsAdvanced"])
mtd.clear()
def test_wrong_fwhm(self):
# fwhm should be positive
AbinsParameters.fwhm = -1.0
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
# fwhm should be larger than 0
AbinsParameters.fwhm = 0.0
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
# fwhm should be smaller than 10
AbinsParameters.fwhm = 10.0
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
def test_wrong_delta_width(self):
# delta_width should be a number
AbinsParameters.delta_width = "fd"
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
        # delta_width must be positive, so it cannot be negative
AbinsParameters.delta_width = -0.01
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
# delta_width should have non-zero value
AbinsParameters.delta_width = 0.0
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
# delta_width should be smaller than one
AbinsParameters.delta_width = 1.0
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
# Tests for TOSCA parameters
def test_wrong_tosca_final_energy(self):
# final energy should be a float not str
AbinsParameters.tosca_final_neutron_energy = "0"
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
# final energy should be of float type not integer
AbinsParameters.tosca_final_neutron_energy = 1
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
# final energy should be positive
AbinsParameters.tosca_final_neutron_energy = -1.0
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
def test_wrong_tosca_cos_scattering_angle(self):
        # cosine of the scattering angle should be a float, not a str
AbinsParameters.tosca_cos_scattering_angle = "0.0334"
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
# TOSCA_cos_scattering_angle cannot be integer
AbinsParameters.tosca_cos_scattering_angle = 1
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
def test_wrong_tosca_resolution_constant_A(self):
# TOSCA constant should be float
AbinsParameters.tosca_a = "wrong"
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
def test_wrong_tosca_resolution_constant_B(self):
AbinsParameters.tosca_b = "wrong"
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
def test_wrong_tosca_resolution_constant_C(self):
AbinsParameters.tosca_c = "wrong"
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
# tests for folders
def test_wrong_dft_group(self):
# name should be of type str
AbinsParameters.ab_initio_group = 2
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
# name of group cannot be an empty string
AbinsParameters.ab_initio_group = ""
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
def test_wrong_powder_data_group(self):
# name should be of type str
AbinsParameters.powder_data_group = 2
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
# name of group cannot be an empty string
AbinsParameters.powder_data_group = ""
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
def test_wrong_crystal_data_group(self):
# name should be of type str
AbinsParameters.crystal_data_group = 2
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
# name of group cannot be an empty string
AbinsParameters.crystal_data_group = ""
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
def test_wrong_powder_s_data_group(self):
# name should be of type str
AbinsParameters.s_data_group = 2
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
# name of group cannot be an empty string
AbinsParameters.s_data_group = ""
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
def test_doubled_name(self):
# Wrong scenario: two groups with the same name
AbinsParameters.ab_initio_group = "NiceName"
AbinsParameters.powder_data_group = "NiceName"
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
def test_wrong_min_wavenumber(self):
# minimum wavenumber cannot be negative
AbinsParameters.min_wavenumber = -0.001
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
# minimum wavenumber cannot be int
AbinsParameters.min_wavenumber = 1
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
def test_wrong_max_wavenumber(self):
# maximum wavenumber cannot be negative
AbinsParameters.max_wavenumber = -0.01
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
# maximum wavenumber cannot be integer
AbinsParameters.max_wavenumber = 10
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
def test_wrong_energy_window(self):
# min_wavenumber must be smaller than max_wavenumber
AbinsParameters.min_wavenumber = 1000.0
AbinsParameters.max_wavenumber = 10.0
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
def test_wrong_s_absolute_threshold(self):
AbinsParameters.s_absolute_threshold = 1
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
AbinsParameters.s_absolute_threshold = -0.01
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
AbinsParameters.s_absolute_threshold = "Wrong value"
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
def test_wrong_s_relative_threshold(self):
AbinsParameters.s_relative_threshold = 1
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
AbinsParameters.s_relative_threshold = -0.01
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
AbinsParameters.s_relative_threshold = "Wrong value"
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
def test_wrong_optimal_size(self):
# optimal size cannot be negative
AbinsParameters.optimal_size = -10000
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
# optimal size must be of type int
AbinsParameters.optimal_size = 50.0
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
def test_wrong_threads(self):
if PATHOS_FOUND:
AbinsParameters.threads = -1
self.assertRaises(RuntimeError, Abins, VibrationalOrPhononFile=self._Si2 + ".phonon",
OutputWorkspace=self._wrk_name)
def test_good_case(self):
good_names = [self._wrk_name, self._wrk_name + "_Si", self._wrk_name + "_Si_total"]
Abins(VibrationalOrPhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
names = mtd.getObjectNames()
# Builtin cmp has been removed in Python 3
def _cmp(a, b):
return (a > b) - (a < b)
self.assertAlmostEqual(0, _cmp(good_names, names))
if __name__ == "__main__":
unittest.main()
| ScreamingUdder/mantid | Framework/PythonInterface/test/python/plugins/algorithms/AbinsAdvancedParametersTest.py | Python | gpl-3.0 | 12,086 | ["CRYSTAL"] | 74f3781a8155d25c7d8ee9fa050f60b37937d007bb833af0ecf7a1cdc3acd9c7 |
#!/bin/env python
"""Create and put 'ReplicateAndRegister' request."""
__RCSID__ = "$Id$"
import os
from DIRAC.Core.Base import Script
from DIRAC import gLogger
import DIRAC
Script.setUsageMessage( '\n'.join( [ __doc__,
'Usage:',
' %s [option|cfgfile] requestName LFNs targetSE1 [targetSE2 ...]' % Script.scriptName,
'Arguments:',
' requestName: a request name',
' LFNs: single LFN or file with LFNs',
' targetSE: target SE' ] ) )
catalog = None
Script.registerSwitch("C:", "Catalog=", "Catalog to use")
Script.parseCommandLine()
for switch in Script.getUnprocessedSwitches():
if switch[0] == "C" or switch[0].lower() == "catalog":
catalog = switch[1]
def getLFNList( arg ):
""" get list of LFNs """
lfnList = []
if os.path.exists( arg ):
        lfnList = [ line.split()[0] for line in open( arg ).read().splitlines() if line.strip() ]
else:
lfnList = [ arg ]
return list( set ( lfnList ) )
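# Example (hypothetical): getLFNList( "/vo/user/file.dat" ) returns that
# single LFN in a list, while getLFNList( "lfns.txt" ) returns the unique
# first token of each non-empty line in that file.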
# # execution
if __name__ == "__main__":
args = Script.getPositionalArgs()
requestName = None
targetSEs = None
if len( args ) < 3:
Script.showHelp()
DIRAC.exit( 1 )
requestName = args[0]
lfnList = getLFNList( args[1] )
targetSEs = list( set( [ se for targetSE in args[2:] for se in targetSE.split( ',' ) ] ) )
gLogger.info( "Will create request '%s' with 'ReplicateAndRegister' "\
"operation using %s lfns and %s target SEs" % ( requestName, len( lfnList ), len( targetSEs ) ) )
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.List import breakListIntoChunks
lfnChunks = breakListIntoChunks( lfnList, 100 )
multiRequests = len( lfnChunks ) > 1
error = 0
count = 0
reqClient = ReqClient()
fc = FileCatalog()
requestIDs = []
for lfnChunk in lfnChunks:
metaDatas = fc.getFileMetadata( lfnChunk )
if not metaDatas["OK"]:
gLogger.error( "unable to read metadata for lfns: %s" % metaDatas["Message"] )
error = -1
continue
metaDatas = metaDatas["Value"]
for failedLFN, reason in metaDatas["Failed"].items():
gLogger.error( "skipping %s: %s" % ( failedLFN, reason ) )
lfnChunk = set( metaDatas["Successful"] )
if not lfnChunk:
gLogger.error( "LFN list is empty!!!" )
error = -1
continue
if len( lfnChunk ) > Operation.MAX_FILES:
gLogger.error( "too many LFNs, max number of files per operation is %s" % Operation.MAX_FILES )
error = -1
continue
count += 1
request = Request()
request.RequestName = requestName if not multiRequests else '%s_%d' % ( requestName, count )
replicateAndRegister = Operation()
replicateAndRegister.Type = "ReplicateAndRegister"
replicateAndRegister.TargetSE = ",".join( targetSEs )
if catalog is not None:
replicateAndRegister.Catalog = catalog
for lfn in lfnChunk:
metaDict = metaDatas["Successful"][lfn]
opFile = File()
opFile.LFN = lfn
opFile.Size = metaDict["Size"]
if "Checksum" in metaDict:
        # # should check the checksum type; for now assume Adler32 (metaDict["ChecksumType"] == 'AD')
opFile.Checksum = metaDict["Checksum"]
opFile.ChecksumType = "ADLER32"
replicateAndRegister.addFile( opFile )
request.addOperation( replicateAndRegister )
putRequest = reqClient.putRequest( request )
if not putRequest["OK"]:
gLogger.error( "unable to put request '%s': %s" % ( request.RequestName, putRequest["Message"] ) )
error = -1
continue
requestIDs.append( str( putRequest["Value"] ) )
if not multiRequests:
gLogger.always( "Request '%s' has been put to ReqDB for execution." % request.RequestName )
if multiRequests:
gLogger.always( "%d requests have been put to ReqDB for execution, with name %s_<num>" % ( count, requestName ) )
if requestIDs:
gLogger.always( "RequestID(s): %s" % " ".join( requestIDs ) )
gLogger.always("You can monitor requests' status using command: 'dirac-rms-request <requestName/ID>'")
DIRAC.exit( error )
| fstagni/DIRAC | DataManagementSystem/scripts/dirac-dms-replicate-and-register-request.py | Python | gpl-3.0 | 4,496 | ["DIRAC"] | 521d952a76e95deaf29e22990a1f86634cbf9abd3f30e101b6873400bf6408b7 |
"""Represent a Sequence Feature holding info about a part of a sequence.
This is heavily modeled after the Biocorba SeqFeature objects, and
may be pretty biased towards GenBank stuff since I'm writing it
for the GenBank parser output...
What's here:
Base class to hold a Feature.
----------------------------
classes:
o SeqFeature
Hold information about a Reference.
----------------------------------
This is an attempt to create a General class to hold Reference type
information.
classes:
o Reference
Specify locations of a feature on a Sequence.
---------------------------------------------
This aims to handle, in Ewan's words, 'the dreaded fuzziness issue' in
much the same way as Biocorba. This has the advantages of allowing us
to handle fuzzy stuff in case anyone needs it, and also be compatible
with Biocorba.
classes:
o FeatureLocation - Specify the start and end location of a feature.
o ExactPosition - Specify the position as being exact.
o WithinPosition - Specify a position occurring within some range.
o BetweenPosition - Specify a position occurring between a range (OBSOLETE?).
o BeforePosition - Specify the position as being found before some base.
o AfterPosition - Specify the position as being found after some base.
o OneOfPosition - Specify a position where the location can be multiple positions.
"""
from Bio.Seq import MutableSeq, reverse_complement
class SeqFeature(object):
"""Represent a Sequence Feature on an object.
Attributes:
o location - the location of the feature on the sequence (FeatureLocation)
o type - the specified type of the feature (ie. CDS, exon, repeat...)
o location_operator - a string specifying how this SeqFeature may
be related to others. For example, in the example GenBank feature
shown below, the location_operator would be "join"
o strand - A value specifying on which strand (of a DNA sequence, for
instance) the feature deals with. 1 indicates the plus strand, -1
indicates the minus strand, 0 indicates both strands, and None indicates
that strand doesn't apply (ie. for proteins) or is not known.
o id - A string identifier for the feature.
o ref - A reference to another sequence. This could be an accession
number for some different sequence.
o ref_db - A different database for the reference accession number.
o qualifiers - A dictionary of qualifiers on the feature. These are
    analogous to the qualifiers from a GenBank feature table. The keys of
the dictionary are qualifier names, the values are the qualifier
values.
o sub_features - Additional SeqFeatures which fall under this 'parent'
    feature. For instance, if we have something like:
CDS join(1..10,30..40,50..60)
    Then the top level feature would be a CDS from 1 to 60, and the sub
features would be of 'CDS_join' type and would be from 1 to 10, 30 to
40 and 50 to 60, respectively.
To get the nucleotide sequence for this CDS, you would need to take the
parent sequence and do seq[0:10]+seq[29:40]+seq[49:60] (Python counting).
Things are more complicated with strands and fuzzy positions. To save you
dealing with all these special cases, the SeqFeature provides an extract
method to do this for you.
"""
def __init__(self, location = None, type = '', location_operator = '',
strand = None, id = "<unknown id>",
qualifiers = None, sub_features = None,
ref = None, ref_db = None):
"""Initialize a SeqFeature on a Sequence.
location can either be a FeatureLocation (with strand argument also
given if required), or a Python slice (with strand given as the step).
e.g. With no strand, on the forward strand, and on the reverse strand:
>>> from Bio.SeqFeature import SeqFeature, FeatureLocation
>>> f1 = SeqFeature(FeatureLocation(5,10), type="domain")
>>> f2 = SeqFeature(FeatureLocation(7,110), strand=1, type="CDS")
>>> f3 = SeqFeature(FeatureLocation(9,108), strand=-1, type="CDS")
An invalid strand will trigger an exception:
>>> f4 = SeqFeature(FeatureLocation(50,60), strand=2)
Traceback (most recent call last):
...
ValueError: Strand should be +1, -1, 0 or None, not 2
For exact start/end positions, an integer can be used (as shown above)
as shorthand for the ExactPosition object. For non-exact locations, the
FeatureLocation must be specified via the appropriate position objects.
"""
if strand not in [-1, 0, 1, None] :
raise ValueError("Strand should be +1, -1, 0 or None, not %s" \
% repr(strand))
if location and not isinstance(location, FeatureLocation):
raise TypeError("FeatureLocation (or None) required for the location")
self.location = location
self.type = type
self.location_operator = location_operator
self.strand = strand
self.id = id
if qualifiers is None:
qualifiers = {}
self.qualifiers = qualifiers
if sub_features is None:
sub_features = []
self.sub_features = sub_features
self.ref = ref
self.ref_db = ref_db
def __repr__(self):
"""A string representation of the record for debugging."""
answer = "%s(%s" % (self.__class__.__name__, repr(self.location))
if self.type:
answer += ", type=%s" % repr(self.type)
if self.location_operator:
answer += ", location_operator=%s" % repr(self.location_operator)
if self.strand:
answer += ", strand=%s" % repr(self.strand)
if self.id and self.id != "<unknown id>":
answer += ", id=%s" % repr(self.id)
if self.ref:
answer += ", ref=%s" % repr(self.ref)
if self.ref_db:
answer += ", ref_db=%s" % repr(self.ref_db)
answer += ")"
return answer
def __str__(self):
"""A readable summary of the feature intended to be printed to screen.
"""
out = "type: %s\n" % self.type
out += "location: %s\n" % self.location
out += "ref: %s:%s\n" % (self.ref, self.ref_db)
out += "strand: %s\n" % self.strand
out += "qualifiers: \n"
qualifier_keys = self.qualifiers.keys()
qualifier_keys.sort()
for qual_key in qualifier_keys:
out += " Key: %s, Value: %s\n" % (qual_key,
self.qualifiers[qual_key])
if len(self.sub_features) != 0:
out += "Sub-Features\n"
for sub_feature in self.sub_features:
out +="%s\n" % sub_feature
return out
def _shift(self, offset):
"""Returns a copy of the feature with its location shifted (PRIVATE).
        The annotation qualifiers are copied."""
answer = SeqFeature(location = self.location._shift(offset),
type = self.type,
location_operator = self.location_operator,
strand = self.strand,
id = self.id,
#qualifiers = dict(self.qualifiers.iteritems()),
#sub_features = [f._shift(offset) for f in self.sub_features],
ref = self.ref,
ref_db = self.ref_db)
#TODO - Sort out the use of sub_feature and qualifiers in __init___
answer.sub_features = [f._shift(offset) for f in self.sub_features]
answer.qualifiers = dict(self.qualifiers.iteritems())
return answer
def extract(self, parent_sequence):
"""Extract feature sequence from the supplied parent sequence.
The parent_sequence can be a Seq like object or a string, and will
generally return an object of the same type. The exception to this is
a MutableSeq as the parent sequence will return a Seq object.
This should cope with complex locations including complements, joins
and fuzzy positions. Even mixed strand features should work! This
also covers features on protein sequences (e.g. domains), although
here reverse strand features are not permitted.
>>> from Bio.Seq import Seq
>>> from Bio.Alphabet import generic_protein
>>> from Bio.SeqFeature import SeqFeature, FeatureLocation
>>> seq = Seq("MKQHKAMIVALIVICITAVVAAL", generic_protein)
>>> f = SeqFeature(FeatureLocation(8,15), type="domain")
>>> f.extract(seq)
Seq('VALIVIC', ProteinAlphabet())
Note - currently only sub-features of type "join" are supported.
"""
if isinstance(parent_sequence, MutableSeq):
#This avoids complications with reverse complements
#(the MutableSeq reverse complement acts in situ)
parent_sequence = parent_sequence.toseq()
if self.sub_features:
if self.location_operator!="join":
                raise ValueError(self.location_operator)
if self.strand == -1:
#This is a special case given how the GenBank parser works.
#Must avoid doing the reverse complement twice.
parts = []
for f_sub in self.sub_features:
assert f_sub.strand==-1
parts.append(parent_sequence[f_sub.location.nofuzzy_start:\
f_sub.location.nofuzzy_end])
else:
#This copes with mixed strand features:
parts = [f_sub.extract(parent_sequence) \
for f_sub in self.sub_features]
#We use addition rather than a join to avoid alphabet issues:
f_seq = parts[0]
for part in parts[1:] : f_seq += part
else:
f_seq = parent_sequence[self.location.nofuzzy_start:\
self.location.nofuzzy_end]
if self.strand == -1:
#TODO - MutableSeq?
try:
f_seq = f_seq.reverse_complement()
except AttributeError:
assert isinstance(f_seq, str)
f_seq = reverse_complement(f_seq)
return f_seq
# --- References
# TODO -- Will this hold PubMed and Medline information decently?
class Reference(object):
"""Represent a Generic Reference object.
Attributes:
o location - A list of Location objects specifying regions of
the sequence that the references correspond to. If no locations are
specified, the entire sequence is assumed.
o authors - A big old string, or a list split by author, of authors
for the reference.
o title - The title of the reference.
o journal - Journal the reference was published in.
o medline_id - A medline reference for the article.
o pubmed_id - A pubmed reference for the article.
o comment - A place to stick any comments about the reference.
"""
def __init__(self):
self.location = []
self.authors = ''
self.consrtm = ''
self.title = ''
self.journal = ''
self.medline_id = ''
self.pubmed_id = ''
self.comment = ''
def __str__(self):
"""Output an informative string for debugging.
"""
out = ""
for single_location in self.location:
out += "location: %s\n" % single_location
out += "authors: %s\n" % self.authors
if self.consrtm:
out += "consrtm: %s\n" % self.consrtm
out += "title: %s\n" % self.title
out += "journal: %s\n" % self.journal
out += "medline id: %s\n" % self.medline_id
out += "pubmed id: %s\n" % self.pubmed_id
out += "comment: %s\n" % self.comment
return out
def __repr__(self):
        #TODO - Update this if __init__ later accepts values
return "%s(title=%s, ...)" % (self.__class__.__name__,
repr(self.title))
# --- Handling feature locations
class FeatureLocation(object):
"""Specify the location of a feature along a sequence.
This attempts to deal with fuzziness of position ends, but also
make it easy to get the start and end in the 'normal' case (no
fuzziness).
You should access the start and end attributes with
your_location.start and your_location.end. If the start and
end are exact, this will return the positions, if not, we'll return
    the appropriate Fuzzy class with info about the position and fuzziness.
Note that the start and end location numbering follow Python's scheme,
thus a GenBank entry of 123..150 (one based counting) becomes a location
of [122:150] (zero based counting).
"""
def __init__(self, start, end):
"""Specify the start and end of a sequence feature.
start and end arguments specify the values where the feature begins
        and ends. These can either be any of the *Position objects that
        inherit from AbstractPosition, or can just be integers specifying the
        position. In the case of integers, the values are assumed to be
        exact and are converted into ExactPosition arguments. This is meant
to make it easy to deal with non-fuzzy ends.
i.e. Short form:
>>> from Bio.SeqFeature import FeatureLocation
>>> loc = FeatureLocation(5,10)
Explicit form:
>>> from Bio.SeqFeature import FeatureLocation, ExactPosition
>>> loc = FeatureLocation(ExactPosition(5),ExactPosition(10))
Other fuzzy positions are used similarly,
>>> from Bio.SeqFeature import FeatureLocation
>>> from Bio.SeqFeature import BeforePosition, AfterPosition
>>> loc2 = FeatureLocation(BeforePosition(5),AfterPosition(10))
"""
if isinstance(start, AbstractPosition):
self._start = start
else:
self._start = ExactPosition(start)
if isinstance(end, AbstractPosition):
self._end = end
else:
self._end = ExactPosition(end)
def __str__(self):
"""Returns a representation of the location (with python counting).
For the simple case this uses the python splicing syntax, [122:150]
(zero based counting) which GenBank would call 123..150 (one based
counting).
"""
return "[%s:%s]" % (self._start, self._end)
def __repr__(self):
"""A string representation of the location for debugging."""
return "%s(%s,%s)" \
% (self.__class__.__name__, repr(self.start), repr(self.end))
def _shift(self, offset):
"""Returns a copy of the location shifted by the offset (PRIVATE)."""
return FeatureLocation(start = self._start._shift(offset),
end = self._end._shift(offset))
start = property(fget= lambda self : self._start,
doc="Start location (possibly a fuzzy position, read only).")
end = property(fget= lambda self : self._end,
doc="End location (possibly a fuzzy position, read only).")
def _get_nofuzzy_start(self):
#TODO - Do we still use the BetweenPosition class?
if ((self._start == self._end) and isinstance(self._start,
BetweenPosition)):
return self._start.position
else:
return min(self._start.position,
self._start.position + self._start.extension)
nofuzzy_start = property(fget=_get_nofuzzy_start,
doc="""Start position (integer, approximated if fuzzy, read only).
To get non-fuzzy attributes (ie. the position only) ask for
'location.nofuzzy_start', 'location.nofuzzy_end'. These should return
the largest range of the fuzzy position. So something like:
(10.20)..(30.40) should return 10 for start, and 40 for end.
""")
def _get_nofuzzy_end(self):
#TODO - Do we still use the BetweenPosition class?
if ((self._start == self._end) and isinstance(self._start,
BetweenPosition)):
return self._end.position
else:
return max(self._end.position,
self._end.position + self._end.extension)
nofuzzy_end = property(fget=_get_nofuzzy_end,
doc="""End position (integer, approximated if fuzzy, read only).
To get non-fuzzy attributes (ie. the position only) ask for
'location.nofuzzy_start', 'location.nofuzzy_end'. These should return
the largest range of the fuzzy position. So something like:
(10.20)..(30.40) should return 10 for start, and 40 for end.
""")
class AbstractPosition(object):
"""Abstract base class representing a position.
"""
def __init__(self, position, extension):
self.position = position
self.extension = extension
def __repr__(self):
"""String representation of the location for debugging."""
return "%s(%s,%s)" % (self.__class__.__name__, \
repr(self.position), repr(self.extension))
def __cmp__(self, other):
"""A simple comparison function for positions.
This is very simple-minded and just compares the position attribute
of the features; extensions are not considered at all. This could
potentially be expanded to try to take advantage of extensions.
"""
assert isinstance(other, AbstractPosition), \
"We can only do comparisons between Biopython Position objects."
return cmp(self.position, other.position)
def _shift(self, offset):
#We want this to maintain the subclass when called from a subclass
return self.__class__(self.position + offset, self.extension)
class ExactPosition(AbstractPosition):
"""Specify the specific position of a boundary.
o position - The position of the boundary.
o extension - An optional argument which must be zero since we don't
have an extension. The argument is provided so that the same number of
arguments can be passed to all position types.
In this case, there is no fuzziness associated with the position.
"""
def __init__(self, position, extension = 0):
if extension != 0:
raise AttributeError("Non-zero extension %s for exact position."
% extension)
AbstractPosition.__init__(self, position, 0)
def __repr__(self):
"""String representation of the ExactPosition location for debugging."""
assert self.extension == 0
return "%s(%s)" % (self.__class__.__name__, repr(self.position))
def __str__(self):
return str(self.position)
class WithinPosition(AbstractPosition):
"""Specify the position of a boundary within some coordinates.
Arguments:
o position - The start position of the boundary
o extension - The range to which the boundary can extend.
This allows dealing with a position like ((1.4)..100). This
indicates that the start of the sequence is somewhere between 1
and 4. To represent that with this class we would set position as
1 and extension as 3.
"""
def __init__(self, position, extension = 0):
AbstractPosition.__init__(self, position, extension)
def __str__(self):
return "(%s.%s)" % (self.position, self.position + self.extension)
class BetweenPosition(AbstractPosition):
"""Specify the position of a boundary between two coordinates (OBSOLETE?).
Arguments:
o position - The start position of the boundary.
o extension - The range to the other position of a boundary.
This specifies a coordinate which is found between the two positions.
So this allows us to deal with a position like ((1^2)..100). To
represent that with this class we set position as 1 and the
extension as 1.
"""
def __init__(self, position, extension = 0):
AbstractPosition.__init__(self, position, extension)
def __str__(self):
return "(%s^%s)" % (self.position, self.position + self.extension)
class BeforePosition(AbstractPosition):
"""Specify a position where the actual location occurs before it.
Arguments:
o position - The upper boundary of where the location can occur.
o extension - An optional argument which must be zero since we don't
have an extension. The argument is provided so that the same number of
arguments can be passed to all position types.
This is used to specify positions like (<10..100) where the location
occurs somewhere before position 10.
"""
def __init__(self, position, extension = 0):
if extension != 0:
raise AttributeError("Non-zero extension %s for exact position."
% extension)
AbstractPosition.__init__(self, position, 0)
def __repr__(self):
"""A string representation of the location for debugging."""
assert self.extension == 0
return "%s(%s)" % (self.__class__.__name__, repr(self.position))
def __str__(self):
return "<%s" % self.position
class AfterPosition(AbstractPosition):
"""Specify a position where the actual location is found after it.
Arguments:
o position - The lower boundary of where the location can occur.
o extension - An optional argument which must be zero since we don't
have an extension. The argument is provided so that the same number of
arguments can be passed to all position types.
This is used to specify positions like (>10..100) where the location
occurs somewhere after position 10.
"""
def __init__(self, position, extension = 0):
if extension != 0:
raise AttributeError("Non-zero extension %s for exact position."
% extension)
AbstractPosition.__init__(self, position, 0)
def __repr__(self):
"""A string representation of the location for debugging."""
assert self.extension == 0
return "%s(%s)" % (self.__class__.__name__, repr(self.position))
def __str__(self):
return ">%s" % self.position
class OneOfPosition(AbstractPosition):
"""Specify a position where the location can be multiple positions.
This models the GenBank 'one-of(1888,1901)' function, and tries
to make this fit within the Biopython Position models. In our case
the position of the "one-of" is set as the lowest choice, and the
extension is the range to the highest choice.
"""
def __init__(self, position_list):
"""Initialize with a set of posssible positions.
position_list is a list of AbstractPosition derived objects,
specifying possible locations.
"""
# unique attribute for this type of positions
self.position_choices = position_list
# find the smallest and largest position in the choices
smallest = None
largest = None
for position_choice in self.position_choices:
assert isinstance(position_choice, AbstractPosition), \
"Expected position objects, got %r" % position_choice
if smallest is None and largest is None:
smallest = position_choice.position
largest = position_choice.position
elif position_choice.position > largest:
largest = position_choice.position
elif position_choice.position < smallest:
smallest = position_choice.position
# initialize with our definition of position and extension
AbstractPosition.__init__(self, smallest, largest - smallest)
def __repr__(self):
"""String representation of the OneOfPosition location for debugging."""
return "%s(%s)" % (self.__class__.__name__, \
repr(self.position_choices))
def __str__(self):
out = "one-of("
for position in self.position_choices:
out += "%s," % position
# replace the last comma with the closing parenthesis
out = out[:-1] + ")"
return out
def _shift(self, offset):
return self.__class__([position_choice._shift(offset) \
for position_choice in self.position_choices])
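# Illustrative sketch: OneOfPosition([ExactPosition(1888), ExactPosition(1901)])
# stores position 1888 with extension 13, and its __str__ renders it as
# "one-of(1888,1901)", mirroring the GenBank 'one-of' function.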
class PositionGap(object):
"""Simple class to hold information about a gap between positions.
"""
def __init__(self, gap_size):
"""Intialize with a position object containing the gap information.
"""
self.gap_size = gap_size
def __repr__(self):
"""A string representation of the position gap for debugging."""
return "%s(%s)" % (self.__class__.__name__, repr(self.gap_size))
def __str__(self):
out = "gap(%s)" % self.gap_size
return out
def _test():
"""Run the Bio.SeqFeature module's doctests."""
print "Runing doctests..."
import doctest
doctest.testmod()
print "Done"
if __name__ == "__main__":
_test()
| NirBenTalLab/proorigami-cde-package | cde-root/usr/lib64/python2.4/site-packages/Bio/SeqFeature.py | Python | mit | 25,126 | ["Biopython"] | 5c0b12f8a9aaf390db37e6eaa005ec844a06a55143e3111aa6d5910348cbcf41 |
#A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* copyright 1998-2000 by Warren Lyford Delano of DeLano Scientific.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-* Scott Dixon, Metaphorics, LLC
#-*
#-*
#Z* -------------------------------------------------------------------
"""
Author: Scott Dixon, Metaphorics, LLC
This source code is contributed to the public domain and may be freely
copied and distributed for research, profit, fun or any other reason,
with these restrictions: (1) unmodified or functionally equivalent code
derived from this code must contain this notice, (2) all derived code
must acknowledge the author and institution, and (3) no liability is
assumed by the author(s) for any use or misuse of this software.
CEX input routines. Reads each CEX object into a text based tree.
Provides a CEX smiles interpreter class which can be specialized to create
appropriate molecule object """
import string
class CEXstream:
"""Input stream which read from file object"""
(START, COMMENT, QUOTE, NOTQUOTE, GOTQUOTE, TAG, VALUE, END) = range(8)
TAG_CHAR = string.letters + string.digits + "$_/"
def __init__(self,file):
self.file = file
self.dt=None
self.oldch = 0
self.buff = ""
self.p = 0
self.len = 0
def readEntry(self):
"""Read one tag<value> entry from stream"""
# find nonblank character
str = ""
p = 0
while 1:
try:
if self.buff[p] not in string.whitespace:
break
p = p + 1
except IndexError:
self.buff = self.file.read(1000)
p = 0
if len(self.buff) == 0:
return (None, None)
self.buff = self.buff[p:]
if self.buff[0] == "|":
self.buff = self.buff[1:]
return ("|","")
while 1:
try:
while 1:
p = string.index(self.buff,">") + 1
str = str + self.buff[:p]
self.buff = self.buff[p:]
if string.count(str,'"') %2 == 0:
break
except (ValueError, IndexError):
str = str + self.buff
self.buff = self.file.read(1000)
if len(self.buff)==0:
if string.find(str,"|") >= 0:
return ("|","")
else:
return (None, None)
else: break
s = string.find(str,"<")
if s < 0:
return (None, None)
else:
return (str[:s],str[s+1:-1])
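# Example (sketch): given a stream containing '$MOL<CC>/NAM<ethane>|',
# successive readEntry() calls return ("$MOL", "CC"), ("/NAM", "ethane"),
# and ("|", ""), then (None, None) once the stream is exhausted.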
class CEXsmilesError(Exception):
def __init__(self,smiles,p,msg):
self.args="Smiles error: " + msg + "\n" + smiles + "\n" + p*" " + "^"
class CEXsmilesParser:
"""A simple CEX smiles parser adapted from Dave Weininger's C version in the
CEX toolkit"""
MX_NESTING=4096
MX_RINGS=1000
ptab = {"*":0,
"H":1, "He":2, "Li":3, "Be":4, "B":5, "C":6, "N":7, "O":8, "F":9, "Ne":10,
"Na":11, "Mg":12, "Al":13, "Si":14, "P":15, "S":16, "Cl":17, "Ar":18, "K":19, "Ca":20,
"Sc:":21, "Ti":22, "V":23, "Cr":24, "Mn":25, "Fe":26, "Co":27, "Ni":28, "Cu":29, "Zn":30,
"Ga":31, "Ge":32, "As":33, "Se":34, "Br":35, "Kr":36, "Rb":37, "Sr":38, "Y":39, "Zr":40,
"Nb":41, "Mo":42, "Tc":43, "Ru":44, "Rh":45, "Pd":46, "Ag":47, "Cd":48, "In":49, "Sn":50,
"Sb":51, "Te":52, "I":53, "Xe":54, "Cs":55, "Ba":56, "La":57, "Ce":58, "Pr":59, "Nd":60,
"Pm":61, "Sm":62, "Eu":63, "Gd":64, "Tb":65, "Dy":66, "Ho":67, "Er":68, "Tm":69, "Yb":70,
"Lu":71, "Hf":72, "Ta":73, "W":74, "Re":75, "Os":76, "Ir":77, "Pt":78, "Au":79, "Hg":80,
"Tl":81, "Pb":82, "Bi":83, "Po":84, "At":85, "Rn":86, "Fr":87, "Ra":88, "Ac":89, "Th":90,
"Pa":91, "U":92, "Np":93, "Pu":94, "Am":95, "Cm":96, "Bk":97, "Cf":98, "Es":99, "Fm":100,
"Md":101, "No":102, "Lr":103, "Rf":104, "Ha":105}
stab = {0:"*",
1:"H", 2:"He", 3:"Li", 4:"Be", 5:"B", 6:"C", 7:"N", 8:"O", 9:"F", 10:"Ne",
11:"Na", 12:"Mg", 13:"Al", 14:"Si", 15:"P", 16:"S", 17:"Cl", 18:"Ar", 19:"K", 20:"Ca",
21:"Sc:", 22:"Ti", 23:"V", 24:"Cr", 25:"Mn", 26:"Fe", 27:"Co", 28:"Ni", 29:"Cu", 30:"Zn",
31:"Ga", 32:"Ge", 33:"As", 34:"Se", 35:"Br", 36:"Kr", 37:"Rb", 38:"Sr", 39:"Y", 40:"Zr",
41:"Nb", 42:"Mo", 43:"Tc", 44:"Ru", 45:"Rh", 46:"Pd", 47:"Ag", 48:"Cd", 49:"In", 50:"Sn",
51:"Sb", 52:"Te", 53:"I", 54:"Xe", 55:"Cs", 56:"Ba", 57:"La", 58:"Ce", 59:"Pr", 60:"Nd",
61:"Pm", 62:"Sm", 63:"Eu", 64:"Gd", 65:"Tb", 66:"Dy", 67:"Ho", 68:"Er", 69:"Tm", 70:"Yb",
71:"Lu", 72:"Hf", 73:"Ta", 74:"W", 75:"Re", 76:"Os", 77:"Ir", 78:"Pt", 79:"Au", 80:"Hg",
81:"Tl", 82:"Pb", 83:"Bi", 84:"Po", 85:"At", 86:"Rn", 87:"Fr", 88:"Ra", 89:"Ac", 90:"Th",
91:"Pa", 92:"U", 93:"Np", 94:"Pu", 95:"Am", 96:"Cm", 97:"Bk", 98:"Cf", 99:"Es", 100:"Fm",
101:"Md", 102:"No", 103:"Lr", 104:"Rf", 105:"Ha"}
def sym2num(self,sym):
try:
return CEXsmilesParser.ptab[sym]
except KeyError:
return -1
def num2sym(self,num):
try:
return CEXsmilesParser.stab[num]
except KeyError:
return ""
def needquote(self,atnum):
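        # atomic numbers of the SMILES "organic subset" (*, B, C, N, O, F,
        # P, S, Cl, Br and I): these may appear unbracketed; every other
        # symbol must be written inside []'s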
if atnum in (0,5,6,7,8,9,15,16,17,35,53): return 0
else: return 1
def __init__(self):
self.atomN = 0
def MakeAtom(self, atnum):
print "Atom %d, atomic number %d" % (self.atomN, atnum)
self.atomN = self.atomN + 1
return self.atomN-1
def MakeBond(self, at1, at2, bo):
print "Bond between %d and %d, order %d" % (at1, at2,bo)
def SetHcount(self, atom, count):
print "Explicit H count %d for atom %d" % (count, atom)
def SetFormalCharge(self, atom, charge):
print "Charge for atom %d is %d" % (atom, charge)
def SetAtomicMass(self, atom, mass):
print "Mass from atom %d is %d" % (atom, mass)
def parse(self,smiles):
self.smiles=smiles + 3*"\0" # guard zone for illegal smiles
self.__init__()
        self.ringat = [None]*CEXsmilesParser.MX_RINGS    # open ring-closure atom, by ring number
        self.fromat = [None]*CEXsmilesParser.MX_NESTING  # previous atom at each nesting level
        self.ringbo = [0]*CEXsmilesParser.MX_RINGS       # bond order recorded at ring opening
self.molname = ""
lev = 0
atnum = -1
imph = -1
bo = 0
charge = 0
quoted = 0
mass = 0
        # adapted from Dave Weininger's code in the CEX toolkit
p = 0
while p < len(self.smiles):
pp = p + 1
ch = self.smiles[p]
if ch == "(":
self.fromat[lev + 1] = self.fromat[lev]
lev = lev + 1
elif ch == ")": lev = lev - 1
elif ch == "[":
if quoted:
# error, no closing ]
raise CEXsmilesError(smiles,p,"No closing ]")
else:
quoted = 1
if self.smiles[pp] in string.digits:
p = pp
while self.smiles[p+1] in string.digits:
p = p + 1
mass = string.atoi(self.smiles[pp:p+1])
elif ch == "]":
if not quoted:
# error, no opening ]
raise CEXsmilesError(smiles,p,"No opening ]")
else:
quoted = 0
elif ch == ".":
self.fromat[lev] = None # disconnected parts
# bond types
elif ch == "=": bo = 2
elif ch == "#": bo = 3
elif ch == "-" and not quoted: bo = 1
# atom charge
elif ch == "-" or ch == "+":
if not quoted:
# error charge not in []
raise CEXsmilesError(smiles,p,"Charge not in []")
elif self.fromat[lev] is None:
# error charge precedes atomic symbol
raise CEXsmilesError(smiles,p,"Charge precedes atomic symbol")
else:
charge = 0
sign = 1
if ch == "-": sign = -1
while self.smiles[p+1] in string.digits:
charge = 10*charge + string.atoi(self.smiles[p+1])
p = p + 1
if charge == 0: charge = 1
charge = sign*charge
# allow for multiple + and - specifiers
while self.smiles[p+1] == "+":
charge = charge + 1
p = p + 1
while self.smiles[p+1] == "-":
charge = charge - 1
p = p + 1
if charge != 0: self.SetFormalCharge(atom, charge)
elif ch in string.digits or ch == "%" or ch == "^":
# deal with ring closures
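                # e.g. "C1CC1" (single-digit ring number), "C%12CC%12"
                # (two digits after %), "C^123CC^123" (three digits after ^)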
if ch == "%":
if self.smiles[p+1] in string.digits and self.smiles[p+2] in string.digits:
ir = string.atoi(self.smiles[p+1:p+3])
p = p + 2
else:
# error expect 2 digits after %
raise CEXsmilesError(smiles,p,"Expect 2 digits after %")
elif ch == "^":
if self.smiles[p+1] in string.digits and self.smiles[p+2] in string.digits and self.smiles[p+3] in string.digits:
ir = string.atoi(self.smiles[p+1:p+4])
p = p + 3
else:
#error expect 3 digits after ^
raise CEXsmilesError(smiles,p,"Expect 3 digits after ^")
else:
ir = string.atoi(ch)
if self.ringat[ir] is None:
self.ringat[ir] = self.fromat[lev]
self.ringbo[ir] = bo
elif bo and self.ringbo[ir] and bo != self.ringbo[ir]:
#error conflicting closure bond orders
raise CEXsmilesError(smiles,p,"Conflicting closure bond orders")
else:
if not bo: bo = 1
if self.ringbo[ir]: bo = self.ringbo[ir]
self.MakeBond(self.fromat[lev],self.ringat[ir],bo)
self.ringat[ir] = None
self.ringbo[ir] = 0
bo = 0
elif ch in "*ABCDEFGHIKLMNOPRSTUVWXYZ":
# recognize atomic symbols
atnum = -1
if self.smiles[pp] in string.lowercase:
atnum = self.sym2num(self.smiles[p:p+2])
if atnum > -1: p = p + 1
else: atnum = self.sym2num(self.smiles[p])
if atnum < 0:
#error bad atomic symbol
raise CEXsmilesError(smiles,p,"Bad atomic symbol")
if not quoted and self.needquote(atnum):
# error symbol needs []'s
raise CEXsmilesError(smiles,p,"Symbol needs []")
atom = self.MakeAtom(atnum)
if not bo: bo = 1
if (self.fromat[lev] is not None) and atom != self.fromat[lev]:
self.MakeBond(atom,self.fromat[lev],bo)
self.fromat[lev] = atom
if not quoted: imph = -1
if mass > 0: self.SetAtomicMass(atom, mass)
if quoted and atom is not None:
                # deal with explicit hydrogen counts
if self.smiles[p+1] != "H":
imph = 0
else:
imph = 1
p = p + 1
j = p
while self.smiles[p+1] in string.digits:
p = p + 1
if j < p: imph = string.atoi(self.smiles[j+1:p+1])
if imph >= 0: self.SetHcount(atom,imph)
# reset default attributes to undefined
bo = 0
charge = 0
mass = 0
imph = -1
elif ch in string.whitespace:
                # extract molecule name from the following text
self.molname = self.smiles[p+1:-3]
break
elif ch == "\0":
pass #ignore guard characters
else:
# everything else is an error
# error invalid character
raise CEXsmilesError(smiles,p,"Invalid character")
# end of while
p = p + 1
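class MoleculeBuilder(CEXsmilesParser):
    """Illustrative sketch only (not part of the original toolkit): one way
    CEXsmilesParser could be specialized to accumulate a molecule object
    instead of printing; the attribute names atoms/bonds are assumptions."""
    def __init__(self):
        CEXsmilesParser.__init__(self)
        self.atoms = []  # one dict per atom: atomic number, charge, H count, mass
        self.bonds = []  # (atom1, atom2, bond order) triples
    def MakeAtom(self, atnum):
        self.atoms.append({"atnum": atnum, "charge": 0, "hcount": -1, "mass": 0})
        return len(self.atoms) - 1
    def MakeBond(self, at1, at2, bo):
        self.bonds.append((at1, at2, bo))
    def SetHcount(self, atom, count):
        self.atoms[atom]["hcount"] = count
    def SetFormalCharge(self, atom, charge):
        self.atoms[atom]["charge"] = charge
    def SetAtomicMass(self, atom, mass):
        self.atoms[atom]["mass"] = mass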
class CEXprop:
def __init__(self, tag, value):
self.name = tag
self.value = value
def __str__(self):
return self.name + "<" + self.value + ">"
class CEXchild(CEXprop):
def __init__(self, tag, value):
CEXprop.__init__(self, tag, value)
self.proplist = []
def __str__(self):
str = self.name + "<" + self.value + ">"
for p in self.properties():
str = str + "\n" + p.__str__()
return str
def addProp(self, prop):
self.proplist.append(prop)
def properties(self):
return self.proplist
class CEXroot(CEXchild):
def __init__(self, tag, value):
CEXchild.__init__(self, tag, value)
self.childlist = []
def addChild(self, child):
self.childlist.append(child)
def children(self):
return self.childlist
def __str__(self):
str = self.name + "<" + self.value + ">"
for p in self.properties():
str = str + "\n" + p.__str__()
for p in self.children():
str = str + "\n" + p.__str__()
return str
def readTree(cxstream):
"""Read tree of CEX object from stream"""
(tag, value) = cxstream.readEntry()
if not tag: return None
if tag[0] != "$": return None
root = CEXroot(tag,value)
(tag, value) = cxstream.readEntry()
if tag == None: return None
while 1:
if tag == "|": break
if tag == None: break
if tag[0] == "/":
root.addProp(CEXprop(tag, value))
(tag, value) = cxstream.readEntry()
else:
# Hardwired for root/child two level hierarchy
child = CEXchild(tag, value)
while 1:
(tag, value) = cxstream.readEntry()
if tag == "|": break
if tag == None: break
if tag[0] == "/":
child.addProp(CEXprop(tag, value))
continue
else: break
root.addChild(child)
return root
def __follow_child(rec):
print " " + rec.name, rec.value
for prop in rec.properties():
print " " + prop.name, prop.value
def spew(rec):
print rec.name, rec.value
for prop in rec.properties():
print prop.name, prop.value
for child in rec.children():
__follow_child(child)
def selectChildren(rec, string):
return filter(lambda x, string=string: x.name==string, rec.children())
def selectProperty(rec, string):
for prop in rec.properties():
if prop.name == string: return prop
if __name__ == "__main__":
import StringIO
def test(string):
print "test: ",string
s = StringIO.StringIO(string)
c = CEXstream(s)
print c.readEntry()
s.close()
test("|")
test("tag<value>")
test(" tag<value>")
test("$tag<value>")
test("/tag<value>")
test("/tag_tag<value>")
test('tag<"value">')
test('tag<"value>">')
test('tag<"""value>">')
def test2(string):
print "test2: ", string
s = StringIO.StringIO(string)
c = CEXstream(s)
tree = readTree(c)
spew(tree)
test2("$root<test>|")
test2("$root<test>/prop<value>|")
test2("$root<test>child<value>|")
test2("$root<test>/prop<value>/prop2<value2>|")
test2("$root<test>/prop<value>/prop2<value2>child<valuec>|")
test2("$root<test>/prop<value>/prop2<value2>child<valuec>/cprop<cv>|")
def test2a(string):
print "test2a: ", string
s = StringIO.StringIO(string)
c = CEXstream(s)
tree = readTree(c)
spew(tree)
tree = readTree(c)
spew(tree)
test2a("$root<test>/prop<value>/prop2<value2>child<valuec>/cprop<cv>|$root2<test2>/prop<val>child<val>|")
def test3(string):
print "test3: ",string
parser = CEXsmilesParser()
try:
parser.parse(string)
print parser.molname
except CEXsmilesError, data:
print data
test3("[C+2]")
test3("[C++]")
test3("[C+-]")
test3("[C-2]")
test3("[C--]")
test3("[C-+]")
test3("[CH3+2]")
test3("N1#CC1")
test3("N1#[CH3+2]C=1")
test3("C%12CC%12")
test3("C^123CC^123")
test3("N1#[13CH3+2]C=1 test")
test3("[N+1]C")
test3("[N+]C")
test3("N=[N+]=[N-]")
test3("CC[[N]")
test3("C=1CC-1")
test3("[C]]")
test3("C@1")
test3("C+2")
test3("[+2C]")
test3("Si")
test3("[Tx]")
test3("C%1CC%1")
test3("C^12CC^12")
test3("[NH2+]")
|
gratefulfrog/lib
|
python/chempy/cex.py
|
Python
|
gpl-2.0
| 17,584
|
[
"PyMOL"
] |
24b3c6bb9fd2938565940181073143c89c53d60aebf825f99a2c001a0ea10e88
|
from __future__ import print_function
import sys
sys.path.insert(1, "../../../../")
import random
import os
import math
import numpy as np
import h2o
import time
from builtins import range
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.grid.grid_search import H2OGridSearch
from scipy import stats
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
class TestGLMBinomial:
"""
This class is created to test the GLM algo with Binomial family. In this case, the relationship
between the response Y and predictor vector X is assumed to be
Prob(Y = 1|X) = exp(W^T * X + E)/(1+exp(W^T * X + E)) where E is unknown Gaussian noise. We
generate random data set using the exact formula. To evaluate the H2O GLM Model, we run the sklearn
logistic regression with the same data sets and compare the performance of the two. If they are close
enough within a certain tolerance, we declare the H2O model working. When regularization and other
    parameters are enabled, we can evaluate H2O GLM model performance by comparing the logloss/accuracy
    of the H2O model to those of the H2O model generated without regularization. As long as they do not deviate
    too much, we consider the H2O model performance satisfactory.
    In particular, the following tests were written in the hope of exercising as many parameter settings of the
    GLM algo with Binomial distribution as possible. Tomas has requested 2 tests to be added to test his new
    feature of missing_values_handling with predictors with both categorical/real columns. Here is a list
    of all test descriptions:
test1_glm_no_regularization(): sklearn logistic regression model is built.
H2O GLM is built for Binomial family with the same random data sets. We observe
the weights, confusion matrices from the two models. We compare the logloss, prediction
accuracy from the two models to determine if H2O GLM model shall pass the test.
test2_glm_lambda_search(): test lambda search with alpha set to 0.5 per Tomas's
        suggestion. Make sure the logloss and prediction accuracy generated here are comparable in
        value to H2O GLM with no regularization.
test3_glm_grid_search_over_params(): test grid search over
various alpha values while lambda is set to be the best value obtained
from test 2. Cross validation with k=5 and random assignment is enabled
as well. The best model performance hopefully will generate logloss and
prediction accuracies close to H2O with no regularization in test 1.
test4_glm_remove_collinear_columns(): test parameter remove_collinear_columns=True
with lambda set to best lambda from test 2, alpha set to best alpha from Gridsearch
and solver set to the one which generate the smallest validation logloss. The same dataset
is used here except that we randomly choose predictor columns to repeat and scale.
        Make sure the logloss and prediction accuracies generated here are comparable in value
to H2O GLM model with no regularization.
test5_missing_values(): Test parameter missing_values_handling="MeanImputation" with
        only real value predictors. The same data sets as before are used. However, we
go into the predictor matrix and randomly decide to replace a value with
nan and create missing values. Sklearn logistic regression model is built using the
data set where we have imputed the missing values. This Sklearn model will be used to
compare our H2O models with.
test6_enum_missing_values(): Test parameter missing_values_handling="MeanImputation" with
mixed predictors (categorical/real value columns). We first generate a data set that
contains a random number of columns of categorical and real value columns. Next, we
encode the categorical columns. Then, we generate the random data set using the formula
as before. Next, we go into the predictor matrix and randomly
decide to change a value to be nan and create missing values. Again, we build a Sklearn
logistic regression model and compare our H2O model with it.
    test7_missing_enum_values_lambda_search(): Test parameter missing_values_handling="MeanImputation"
        with mixed predictors (categorical/real value columns) and lambda search set to True.
We use the same prediction data with missing values from test6. Next, we encode the categorical columns using
true one hot encoding since Lambda-search will be enabled with alpha set to 0.5. Since the encoding
is different in this case from test6, we will build a brand new Sklearn logistic regression model and
compare the best H2O model logloss/prediction accuracy with it.
"""
# parameters set by users, change with care
    max_col_count = 50          # set maximum column count for the training/test data sets
max_col_count_ratio = 500 # set max row count to be multiples of col_count to avoid overfitting
min_col_count_ratio = 100 # set min row count to be multiples of col_count to avoid overfitting
max_p_value = 2 # set maximum predictor value
min_p_value = -2 # set minimum predictor value
max_w_value = 2 # set maximum weight value
min_w_value = -2 # set minimum weight value
enum_levels = 5 # maximum number of levels for categorical variables not counting NAs
class_method = 'probability' # can be 'probability' or 'threshold', control how discrete response is generated
test_class_method = 'probability' # for test data set
margin = 0.0 # only used when class_method = 'threshold'
test_class_margin = 0.2 # for test data set
family = 'binomial' # this test is for Binomial GLM
curr_time = str(round(time.time()))
# parameters denoting filenames of interested that store training/validation/test data sets
training_filename = family+"_"+curr_time+"_training_set.csv"
training_filename_duplicate = family+"_"+curr_time+"_training_set_duplicate.csv"
training_filename_nans = family+"_"+curr_time+"_training_set_NA.csv"
training_filename_enum = family+"_"+curr_time+"_training_set_enum.csv"
training_filename_enum_true_one_hot = family+"_"+curr_time+"_training_set_enum_trueOneHot.csv"
training_filename_enum_nans = family+"_"+curr_time+"_training_set_enum_NAs.csv"
training_filename_enum_nans_true_one_hot = family+"_"+curr_time+"_training_set_enum_NAs_trueOneHot.csv"
validation_filename = family+"_"+curr_time+"_validation_set.csv"
validation_filename_enum = family+"_"+curr_time+"_validation_set_enum.csv"
validation_filename_enum_true_one_hot = family+"_"+curr_time+"_validation_set_enum_trueOneHot.csv"
validation_filename_enum_nans = family+"_"+curr_time+"_validation_set_enum_NAs.csv"
validation_filename_enum_nans_true_one_hot = family+"_"+curr_time+"_validation_set_enum_NAs_trueOneHot.csv"
test_filename = family+"_"+curr_time+"_test_set.csv"
test_filename_duplicate = family+"_"+curr_time+"_test_set_duplicate.csv"
test_filename_nans = family+"_"+curr_time+"_test_set_NA.csv"
test_filename_enum = family+"_"+curr_time+"_test_set_enum.csv"
test_filename_enum_true_one_hot = family+"_"+curr_time+"_test_set_enum_trueOneHot.csv"
test_filename_enum_nans = family+"_"+curr_time+"_test_set_enum_NAs.csv"
test_filename_enum_nans_true_one_hot = family+"_"+curr_time+"_test_set_enum_NAs_trueOneHot.csv"
weight_filename = family+"_"+curr_time+"_weight.csv"
weight_filename_enum = family+"_"+curr_time+"_weight_enum.csv"
total_test_number = 7 # total number of tests being run for GLM Binomial family
    ignored_eps = 1e-15         # if p-values are smaller than this value, no comparison is performed; only for Gaussian
allowed_diff = 2e-2 # tolerance of comparison for logloss/prediction accuracy
duplicate_col_counts = 5 # maximum number of times to duplicate a column
duplicate_threshold = 0.2 # for each column, a coin is tossed to see if we duplicate that column or not
duplicate_max_scale = 2 # maximum scale factor for duplicated columns
nan_fraction = 0.2 # denote maximum fraction of NA's to be inserted into a column
# System parameters, do not change. Dire consequences may follow if you do
current_dir = os.path.dirname(os.path.realpath(sys.argv[1])) # directory of this test file
enum_col = 0 # set maximum number of categorical columns in predictor
enum_level_vec = [] # vector containing number of levels for each categorical column
    noise_std = 0               # noise standard deviation in Binomial noise generation added to response
train_row_count = 0 # training data row count, randomly generated later
train_col_count = 0 # training data column count, randomly generated later
    class_number = 2            # actual number of response classes in the data set (2 for binomial)
data_type = 2 # determine data type of data set and weight, 1: integers, 2: real
# parameters denoting filenames with absolute paths
training_data_file = os.path.join(current_dir, training_filename)
training_data_file_duplicate = os.path.join(current_dir, training_filename_duplicate)
training_data_file_nans = os.path.join(current_dir, training_filename_nans)
training_data_file_enum = os.path.join(current_dir, training_filename_enum)
training_data_file_enum_true_one_hot = os.path.join(current_dir, training_filename_enum_true_one_hot)
training_data_file_enum_nans = os.path.join(current_dir, training_filename_enum_nans)
training_data_file_enum_nans_true_one_hot = os.path.join(current_dir, training_filename_enum_nans_true_one_hot)
validation_data_file = os.path.join(current_dir, validation_filename)
validation_data_file_enum = os.path.join(current_dir, validation_filename_enum)
validation_data_file_enum_true_one_hot = os.path.join(current_dir, validation_filename_enum_true_one_hot)
validation_data_file_enum_nans = os.path.join(current_dir, validation_filename_enum_nans)
validation_data_file_enum_nans_true_one_hot = os.path.join(current_dir, validation_filename_enum_nans_true_one_hot)
test_data_file = os.path.join(current_dir, test_filename)
test_data_file_duplicate = os.path.join(current_dir, test_filename_duplicate)
test_data_file_nans = os.path.join(current_dir, test_filename_nans)
test_data_file_enum = os.path.join(current_dir, test_filename_enum)
test_data_file_enum_true_one_hot = os.path.join(current_dir, test_filename_enum_true_one_hot)
test_data_file_enum_nans = os.path.join(current_dir, test_filename_enum_nans)
test_data_file_enum_nans_true_one_hot = os.path.join(current_dir, test_filename_enum_nans_true_one_hot)
weight_data_file = os.path.join(current_dir, weight_filename)
weight_data_file_enum = os.path.join(current_dir, weight_filename_enum)
# store template model performance values for later comparison
test1_model = None # store template model for later comparison
test1_model_metrics = None # store template model test metrics for later comparison
best_lambda = 0.0 # store best lambda obtained using lambda search
test_name = "pyunit_glm_binomial.py" # name of this test
sandbox_dir = "" # sandbox directory where we are going to save our failed test data sets
# store information about training data set, validation and test data sets that are used
    # by many tests. We do not want to keep reloading them for each test, in the hope of
# saving time. Trading off memory and speed here.
x_indices = [] # store predictor indices in the data set
y_index = [] # store response index in the data set
training_data = [] # store training data set
test_data = [] # store test data set
valid_data = [] # store validation data set
training_data_grid = [] # store combined training and validation data set for cross validation
best_alpha = -1 # store best alpha value found
    best_grid_logloss = -1      # store lowest validation logloss found from grid search
test_failed_array = [0]*total_test_number # denote test results for all tests run. 1 error, 0 pass
test_num = 0 # index representing which test is being run
duplicate_col_indices = [] # denote column indices when column duplication is applied
duplicate_col_scales = [] # store scaling factor for all columns when duplication is applied
noise_var = noise_std*noise_std # Binomial noise variance
test_failed = 0 # count total number of tests that have failed
sklearn_class_weight = {} # used to make sure Sklearn will know the correct number of classes
def __init__(self):
self.setup()
def setup(self):
"""
This function performs all initializations necessary:
        1. generate all the random values for our dynamic tests like the Binomial
        noise std, column count and row count for the training data set;
        2. generate the training/validation/test data sets with only real values;
        3. insert missing values into the training/validation/test data sets;
        4. taking the training/validation/test data sets, duplicate certain randomly chosen columns;
        each duplicated column is repeated a random number of times and randomly scaled;
        5. generate the training/validation/test data sets with predictors containing enum
        and real values as well***;
        6. insert missing values into the training/validation/test data sets with predictors
        containing enum and real values as well.
        *** according to Tomas, when working with mixed predictors (containing both enum/real
value columns), the encoding used is different when regularization is enabled or disabled.
When regularization is enabled, true one hot encoding is enabled to encode the enum
values to binary bits. When regularization is disabled, a reference level plus one hot encoding
is enabled when encoding the enum values to binary bits. One data set is generated
when we work with mixed predictors.
"""
# clean out the sandbox directory first
self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
# randomly set Binomial noise standard deviation as a fraction of actual predictor standard deviation
self.noise_std = random.uniform(0, math.sqrt(pow((self.max_p_value - self.min_p_value), 2) / 12))
self.noise_var = self.noise_std*self.noise_std
# randomly determine data set size in terms of column and row counts
self.train_col_count = random.randint(3, self.max_col_count) # account for enum columns later
self.train_row_count = round(self.train_col_count*random.uniform(self.min_col_count_ratio,
self.max_col_count_ratio))
# # DEBUGGING setup_data, remember to comment them out once done.
# self.train_col_count = 3
# self.train_row_count = 500
# end DEBUGGING
# randomly set number of enum and real columns in the data set
self.enum_col = random.randint(1, self.train_col_count-1)
# randomly set number of levels for each categorical column
self.enum_level_vec = np.random.random_integers(2, self.enum_levels-1, [self.enum_col, 1])
# generate real value weight vector and training/validation/test data sets for GLM
pyunit_utils.write_syn_floating_point_dataset_glm(self.training_data_file,
self.validation_data_file,
self.test_data_file, self.weight_data_file,
self.train_row_count, self.train_col_count, self.data_type,
self.max_p_value, self.min_p_value, self.max_w_value,
self.min_w_value, self.noise_std, self.family,
self.train_row_count, self.train_row_count,
class_number=self.class_number,
class_method=[self.class_method, self.class_method,
self.test_class_method],
class_margin=[self.margin, self.margin,
self.test_class_margin])
# randomly generate the duplicated and scaled columns
(self.duplicate_col_indices, self.duplicate_col_scales) = \
pyunit_utils.random_col_duplication(self.train_col_count, self.duplicate_threshold,
self.duplicate_col_counts, True, self.duplicate_max_scale)
# apply the duplication and scaling to training and test set
# need to add the response column to the end of duplicated column indices and scale
dup_col_indices = self.duplicate_col_indices
dup_col_indices.append(self.train_col_count)
dup_col_scale = self.duplicate_col_scales
dup_col_scale.append(1.0)
# print out duplication information for easy debugging
print("duplication column and duplication scales are: ")
print(dup_col_indices)
print(dup_col_scale)
pyunit_utils.duplicate_scale_cols(dup_col_indices, dup_col_scale, self.training_data_file,
self.training_data_file_duplicate)
pyunit_utils.duplicate_scale_cols(dup_col_indices, dup_col_scale, self.test_data_file,
self.test_data_file_duplicate)
# insert NAs into training/test data sets
pyunit_utils.insert_nan_in_data(self.training_data_file, self.training_data_file_nans, self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.test_data_file, self.test_data_file_nans, self.nan_fraction)
# generate data sets with enum as well as real values
pyunit_utils.write_syn_mixed_dataset_glm(self.training_data_file_enum,
self.training_data_file_enum_true_one_hot,
self.validation_data_file_enum,
self.validation_data_file_enum_true_one_hot,
self.test_data_file_enum, self.test_data_file_enum_true_one_hot,
self.weight_data_file_enum, self.train_row_count, self.train_col_count,
self.max_p_value, self.min_p_value, self.max_w_value, self.min_w_value,
self.noise_std, self.family, self.train_row_count,
self.train_row_count, self.enum_col, self.enum_level_vec,
class_number=self.class_number,
class_method=[self.class_method,
self.class_method,
self.test_class_method],
class_margin=[self.margin, self.margin, self.test_class_margin])
# insert NAs into data set with categorical columns
pyunit_utils.insert_nan_in_data(self.training_data_file_enum, self.training_data_file_enum_nans,
self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.validation_data_file_enum, self.validation_data_file_enum_nans,
self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.test_data_file_enum, self.test_data_file_enum_nans,
self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.training_data_file_enum_true_one_hot,
self.training_data_file_enum_nans_true_one_hot, self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.validation_data_file_enum_true_one_hot,
self.validation_data_file_enum_nans_true_one_hot, self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.test_data_file_enum_true_one_hot,
self.test_data_file_enum_nans_true_one_hot,
self.nan_fraction)
# only preload data sets that will be used for multiple tests and change the response to enums
self.training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file))
# set indices for response and predictor columns in data set for H2O GLM model to use
self.y_index = self.training_data.ncol-1
self.x_indices = list(range(self.y_index))
# added the round() so that this will work on win8.
self.training_data[self.y_index] = self.training_data[self.y_index].round().asfactor()
# check to make sure all response classes are represented, otherwise, quit
if self.training_data[self.y_index].nlevels()[0] < self.class_number:
print("Response classes are not represented in training dataset.")
sys.exit(0)
self.valid_data = h2o.import_file(pyunit_utils.locate(self.validation_data_file))
self.valid_data[self.y_index] = self.valid_data[self.y_index].round().asfactor()
self.test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file))
self.test_data[self.y_index] = self.test_data[self.y_index].round().asfactor()
# make a bigger training set for grid search by combining data from validation data set
self.training_data_grid = self.training_data.rbind(self.valid_data)
        # set up sklearn class weights of all ones; used only to make sure sklearn knows the correct number of classes
for ind in range(self.class_number):
self.sklearn_class_weight[ind] = 1.0
        # save the training data files just in case the code crashes.
pyunit_utils.remove_csv_files(self.current_dir, ".csv", action='copy', new_dir_path=self.sandbox_dir)
def teardown(self):
"""
        This function performs teardown after the dynamic test is completed. If all tests
        passed, it will delete all generated data sets since they can be quite large. If any
        test failed, it will move the training/validation/test data sets into a Rsandbox
        directory so that we can re-run the failed test.
"""
if self.test_failed: # some tests have failed. Need to save data sets for later re-runs
# create Rsandbox directory to keep data sets and weight information
self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
# Do not want to save all data sets. Only save data sets that are needed for failed tests
            if sum(self.test_failed_array[0:3]):    # tests 1-3 use the base training/validation/test sets
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file, self.training_filename)
pyunit_utils.move_files(self.sandbox_dir, self.validation_data_file, self.validation_filename)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file, self.test_filename)
            if sum(self.test_failed_array[0:5]):    # tests 1-5 use the real-valued weight file
pyunit_utils.move_files(self.sandbox_dir, self.weight_data_file, self.weight_filename)
            if self.test_failed_array[3]:   # test 4 uses the duplicated-column data sets
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file, self.training_filename)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file, self.test_filename)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file_duplicate, self.test_filename_duplicate)
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file_duplicate,
self.training_filename_duplicate)
            if self.test_failed_array[4]:   # test 5 uses the data sets with NAs inserted
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file, self.training_filename)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file, self.test_filename)
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file_nans, self.training_filename_nans)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file_nans, self.test_filename_nans)
            if self.test_failed_array[5]:   # test 6 uses the enum data sets with NAs
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file_enum_nans,
self.training_filename_enum_nans)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file_enum_nans, self.test_filename_enum_nans)
pyunit_utils.move_files(self.sandbox_dir, self.weight_data_file_enum, self.weight_filename_enum)
            if self.test_failed_array[6]:   # test 7 uses the true one-hot enum data sets with NAs
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file_enum_nans_true_one_hot,
self.training_filename_enum_nans_true_one_hot)
pyunit_utils.move_files(self.sandbox_dir, self.validation_data_file_enum_nans_true_one_hot,
self.validation_filename_enum_nans_true_one_hot)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file_enum_nans_true_one_hot,
self.test_filename_enum_nans_true_one_hot)
pyunit_utils.move_files(self.sandbox_dir, self.weight_data_file_enum, self.weight_filename_enum)
        else:   # all tests have passed. Delete sandbox if it was not wiped before
pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, False)
        # leave any csv files in the test directory alone (shared computing resources); hence the call below stays commented out
#pyunit_utils.remove_csv_files(self.current_dir, ".csv")
def test1_glm_no_regularization(self):
"""
In this test, a sklearn logistic regression model and a H2O GLM are built for Binomial family with the same
random data sets. We observe the weights, confusion matrices from the two models. We compare the logloss,
prediction accuracy from the two models to determine if H2O GLM model shall pass the test.
"""
print("*******************************************************************************************")
print("Test1: build H2O GLM with Binomial with no regularization.")
h2o.cluster_info()
# training result from python Sklearn logistic regression model
(p_weights, p_logloss_train, p_cm_train, p_accuracy_training, p_logloss_test, p_cm_test, p_accuracy_test) = \
self.sklearn_binomial_result(self.training_data_file, self.test_data_file, False, False)
# build our H2O model
self.test1_model = H2OGeneralizedLinearEstimator(family=self.family, Lambda=0)
self.test1_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training_data)
# calculate test metrics
self.test1_model_metrics = self.test1_model.model_performance(test_data=self.test_data)
num_test_failed = self.test_failed # used to determine if the current test has failed
# print out comparison results for weight/logloss/prediction accuracy
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(self.test1_model,
self.test1_model_metrics,
self.family, "\nTest1 Done!",
compare_att_str=[
"\nComparing intercept and "
"weights ....",
"\nComparing logloss from training "
"dataset ....",
"\nComparing logloss from"
" test dataset ....",
"\nComparing confusion matrices from "
"training dataset ....",
"\nComparing confusion matrices from "
"test dataset ...",
"\nComparing accuracy from training "
"dataset ....",
"\nComparing accuracy from test "
"dataset ...."],
h2o_att_str=[
"H2O intercept and weights: \n",
"H2O logloss from training dataset: ",
"H2O logloss from test dataset",
"H2O confusion matrix from training "
"dataset: \n",
"H2O confusion matrix from test"
" dataset: \n",
"H2O accuracy from training dataset: ",
"H2O accuracy from test dataset: "],
template_att_str=[
"Sklearn intercept and weights: \n",
"Sklearn logloss from training "
"dataset: ",
"Sklearn logloss from test dataset: ",
"Sklearn confusion matrix from"
" training dataset: \n",
"Sklearn confusion matrix from test "
"dataset: \n",
"Sklearn accuracy from training "
"dataset: ",
"Sklearn accuracy from test "
"dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ "
"too much!",
"Logloss from test dataset differ too "
"much!", "", "",
"Accuracies from training dataset "
"differ too much!",
"Accuracies from test dataset differ "
"too much!"],
att_str_success=[
"Intercept and weights are close"
" enough!",
"Logloss from training dataset are "
"close enough!",
"Logloss from test dataset are close "
"enough!", "", "",
"Accuracies from training dataset are "
"close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
template_params=[
p_weights, p_logloss_train, p_cm_train,
p_accuracy_training, p_logloss_test,
p_cm_test, p_accuracy_test],
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if this test has failed
self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test1_glm_no_regularization",
num_test_failed, self.test_failed)
self.test_num += 1 # update test index
def test2_glm_lambda_search(self):
"""
        This test exercises the lambda search. Recall that lambda search enables an efficient and
automatic search for the optimal value of the lambda parameter. When lambda search is enabled,
GLM will first fit a model with maximum regularization and then keep decreasing it until
over-fitting occurs. The resulting model is based on the best lambda value. According to Tomas,
set alpha = 0.5 and enable validation but not cross-validation.
"""
print("*******************************************************************************************")
print("Test2: tests the lambda search.")
h2o.cluster_info()
# generate H2O model with lambda search enabled
model_h2o_0p5 = H2OGeneralizedLinearEstimator(family=self.family, lambda_search=True, alpha=0.5,
lambda_min_ratio=1e-20)
model_h2o_0p5.train(x=self.x_indices, y=self.y_index, training_frame=self.training_data,
validation_frame=self.valid_data)
# get best lambda here
self.best_lambda = pyunit_utils.get_train_glm_params(model_h2o_0p5, 'best_lambda')
# get test performance here
h2o_model_0p5_test_metrics = model_h2o_0p5.model_performance(test_data=self.test_data)
num_test_failed = self.test_failed
# print out comparison results for our H2O GLM and test1 H2O model
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(model_h2o_0p5, h2o_model_0p5_test_metrics,
self.family, "\nTest2 Done!",
test_model=self.test1_model,
test_model_metric=self.test1_model_metrics,
compare_att_str=[
"\nComparing intercept and"
" weights ....",
"\nComparing logloss from training "
"dataset ....",
"\nComparing logloss from test"
" dataset ....",
"\nComparing confusion matrices from "
"training dataset ....",
"\nComparing confusion matrices from "
"test dataset ...",
"\nComparing accuracy from training "
"dataset ....",
"\nComparing accuracy from test"
" dataset ...."],
h2o_att_str=[
"H2O lambda search intercept and "
"weights: \n",
"H2O lambda search logloss from"
" training dataset: ",
"H2O lambda search logloss from test "
"dataset",
"H2O lambda search confusion matrix "
"from training dataset: \n",
"H2O lambda search confusion matrix "
"from test dataset: \n",
"H2O lambda search accuracy from "
"training dataset: ",
"H2O lambda search accuracy from test"
" dataset: "],
template_att_str=[
"H2O test1 template intercept and"
" weights: \n",
"H2O test1 template logloss from "
"training dataset: ",
"H2O test1 template logloss from "
"test dataset: ",
"H2O test1 template confusion"
" matrix from training dataset: \n",
"H2O test1 template confusion"
" matrix from test dataset: \n",
"H2O test1 template accuracy from "
"training dataset: ",
"H2O test1 template accuracy from"
" test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ "
"too much!",
"Logloss from test dataset differ too"
" much!", "", "",
"Accuracies from training dataset"
" differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close "
"enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, False, True, True, True,
True, True],
just_print=[True, False, False, True, True,
True, False],
failed_test_number=self.test_failed,
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if this test has failed
self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test2_glm_lambda_search",
num_test_failed, self.test_failed)
self.test_num += 1
def test3_glm_grid_search(self):
"""
        This test exercises GridSearch with the following parameters:
        1. Lambda = best_lambda value from test2
        2. alpha = [0, 0.5, 0.99]
3. cross-validation with k = 5, fold_assignment = "Random"
We will look at the best results from the grid search and compare it with H2O model built in test 1.
:return: None
"""
print("*******************************************************************************************")
print("Test3: explores various parameter settings in training the GLM using GridSearch using solver ")
h2o.cluster_info()
hyper_parameters = {'alpha': [0, 0.5, 0.99]} # set hyper_parameters for grid search
# train H2O GLM model with grid search
model_h2o_gridsearch = \
H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, Lambda=self.best_lambda, nfolds=5,
fold_assignment='Random'), hyper_parameters)
model_h2o_gridsearch.train(x=self.x_indices, y=self.y_index, training_frame=self.training_data_grid)
# print out the model sequence ordered by the best validation logloss values, thanks Ludi!
temp_model = model_h2o_gridsearch.sort_by("logloss(xval=True)")
        # obtain the model ID of the best model (with the smallest validation logloss) and use that for our evaluation
best_model_id = temp_model['Model Id'][0]
self.best_grid_logloss = temp_model['logloss(xval=True)'][0]
self.best_alpha = model_h2o_gridsearch.get_hyperparams(best_model_id)
best_model = h2o.get_model(best_model_id)
best_model_test_metrics = best_model.model_performance(test_data=self.test_data)
num_test_failed = self.test_failed
# print out comparison results for our H2O GLM with H2O model from test 1
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(best_model, best_model_test_metrics,
self.family,
"\nTest3 " + " Done!",
test_model=self.test1_model,
test_model_metric=self.test1_model_metrics,
compare_att_str=[
"\nComparing intercept and"
" weights ....",
"\nComparing logloss from training "
"dataset ....",
"\nComparing logloss from test dataset"
" ....",
"\nComparing confusion matrices from "
"training dataset ....",
"\nComparing confusion matrices from "
"test dataset ...",
"\nComparing accuracy from training "
"dataset ....",
"\nComparing accuracy from test "
" sdataset ...."],
h2o_att_str=[
"H2O grid search intercept and "
"weights: \n",
"H2O grid search logloss from training"
" dataset: ",
"H2O grid search logloss from test "
"dataset",
"H2O grid search confusion matrix from"
" training dataset: \n",
"H2O grid search confusion matrix from"
" test dataset: \n",
"H2O grid search accuracy from"
" training dataset: ",
"H2O grid search accuracy from test "
"dataset: "],
template_att_str=[
"H2O test1 template intercept and"
" weights: \n",
"H2O test1 template logloss from"
" training dataset: ",
"H2O test1 template logloss from"
" test dataset: ",
"H2O test1 template confusion"
" matrix from training dataset: \n",
"H2O test1 template confusion"
" matrix from test dataset: \n",
"H2O test1 template accuracy from"
" training dataset: ",
"H2O test1 template accuracy from"
" test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ"
" too much!",
"Logloss from test dataset differ too"
" much!", "", "",
"Accuracies from training dataset"
" differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close"
" enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[
True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if this test has failed
self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test_glm_grid_search_over_params",
num_test_failed, self.test_failed)
self.test_num += 1
def test4_glm_remove_collinear_columns(self):
"""
        With the best parameters obtained from the test 3 grid search, we will train a GLM
        with duplicated columns and remove_collinear_columns enabled, and see if the
        algorithm catches the duplicated columns. We will compare the results with the test
        1 results.
"""
print("*******************************************************************************************")
print("Test4: test the GLM remove_collinear_columns.")
h2o.cluster_info()
# read in training data sets with duplicated columns
training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file_duplicate))
test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file_duplicate))
y_index = training_data.ncol-1
x_indices = list(range(y_index))
# change response variable to be categorical
training_data[y_index] = training_data[y_index].round().asfactor()
test_data[y_index] = test_data[y_index].round().asfactor()
# train H2O model with remove_collinear_columns=True
model_h2o = H2OGeneralizedLinearEstimator(family=self.family, Lambda=self.best_lambda, alpha=self.best_alpha,
remove_collinear_columns=True)
model_h2o.train(x=x_indices, y=y_index, training_frame=training_data)
print("Best lambda is {0}, best alpha is {1}".format(self.best_lambda, self.best_alpha))
# evaluate model over test data set
model_h2o_metrics = model_h2o.model_performance(test_data=test_data)
num_test_failed = self.test_failed
# print out comparison results our H2O GLM and test1 H2O model
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(model_h2o, model_h2o_metrics, self.family,
"\nTest3 Done!",
test_model=self.test1_model,
test_model_metric=self.test1_model_metrics,
compare_att_str=[
"\nComparing intercept and weights"
" ....",
"\nComparing logloss from training "
"dataset ....",
"\nComparing logloss from test"
" dataset ....",
"\nComparing confusion matrices from"
" training dataset ....",
"\nComparing confusion matrices from"
" test dataset ...",
"\nComparing accuracy from training"
" dataset ....",
"\nComparing accuracy from test"
" dataset ...."],
h2o_att_str=[
"H2O remove_collinear_columns "
"intercept and weights: \n",
"H2O remove_collinear_columns"
" logloss from training dataset: ",
"H2O remove_collinear_columns"
" logloss from test dataset",
"H2O remove_collinear_columns"
" confusion matrix from "
"training dataset: \n",
"H2O remove_collinear_columns"
" confusion matrix from"
" test dataset: \n",
"H2O remove_collinear_columns"
" accuracy from"
" training dataset: ",
"H2O remove_collinear_columns"
" accuracy from test"
" dataset: "],
template_att_str=[
"H2O test1 template intercept and"
" weights: \n",
"H2O test1 template logloss from"
" training dataset: ",
"H2O test1 template logloss from"
" test dataset: ",
"H2O test1 template confusion"
" matrix from training dataset: \n",
"H2O test1 template confusion"
" matrix from test dataset: \n",
"H2O test1 template accuracy from"
" training dataset: ",
"H2O test1 template accuracy from"
" test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ"
" too much!",
"Logloss from test dataset differ too"
" much!", "", "",
"Accuracies from training dataset"
" differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close"
" enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if this test has failed
self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test4_glm_remove_collinear_columns",
num_test_failed, self.test_failed)
self.test_num += 1
def test5_missing_values(self):
"""
Test parameter missing_values_handling="MeanImputation" with
        only real value predictors. The same data sets as before are used. However, we
go into the predictor matrix and randomly decide to replace a value with
nan and create missing values. Sklearn logistic regression model is built using the
data set where we have imputed the missing values. This Sklearn model will be used to
compare our H2O models with.
"""
print("*******************************************************************************************")
print("Test5: test the GLM with imputation of missing values with column averages.")
h2o.cluster_info()
# training result from python sklearn
(p_weights, p_logloss_train, p_cm_train, p_accuracy_training, p_logloss_test, p_cm_test, p_accuracy_test) = \
self.sklearn_binomial_result(self.training_data_file_nans, self.test_data_file_nans, False, False)
# import training set and test set
training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file_nans))
test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file_nans))
# change the response columns to be categorical
training_data[self.y_index] = training_data[self.y_index].round().asfactor()
test_data[self.y_index] = test_data[self.y_index].round().asfactor()
# train H2O models with missing_values_handling="MeanImputation"
model_h2o = H2OGeneralizedLinearEstimator(family=self.family, Lambda=0,
missing_values_handling="MeanImputation")
model_h2o.train(x=self.x_indices, y=self.y_index, training_frame=training_data)
# calculate H2O model performance with test data set
h2o_model_test_metrics = model_h2o.model_performance(test_data=test_data)
num_test_failed = self.test_failed
# print out comparison results our H2O GLM and Sklearn model
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(model_h2o, h2o_model_test_metrics,
self.family, "\nTest5 Done!",
compare_att_str=[
"\nComparing intercept and weights"
" ....",
"\nComparing logloss from training"
" dataset ....",
"\nComparing logloss from test"
" dataset ....",
"\nComparing confusion matrices from"
" training dataset ....",
"\nComparing confusion matrices from"
" test dataset ...",
"\nComparing accuracy from training"
" dataset ....",
"\nComparing accuracy from test"
" dataset ...."],
h2o_att_str=[
"H2O missing values intercept and"
" weights: \n",
"H2O missing values logloss from"
" training dataset: ",
"H2O missing values logloss from"
" test dataset",
"H2O missing values confusion matrix"
" from training dataset: \n",
"H2O missing values confusion matrix"
" from test dataset: \n",
"H2O missing values accuracy from"
" training dataset: ",
"H2O missing values accuracy from"
" test dataset: "],
template_att_str=[
"Sklearn missing values intercept"
" and weights: \n",
"Sklearn missing values logloss from"
" training dataset: ",
"Sklearn missing values logloss from"
" test dataset: ",
"Sklearn missing values confusion"
" matrix from training dataset: \n",
"Sklearn missing values confusion"
" matrix from test dataset: \n",
"Sklearn missing values accuracy"
" from training dataset: ",
"Sklearn missing values accuracy"
" from test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ"
" too much!",
"Logloss from test dataset differ"
" too much!", "", "",
"Accuracies from training dataset"
" differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close "
"enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[
True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
template_params=[
p_weights, p_logloss_train, p_cm_train,
p_accuracy_training, p_logloss_test,
p_cm_test, p_accuracy_test],
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if tests have failed
self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test5_missing_values",
num_test_failed, self.test_failed)
self.test_num += 1
def test6_enum_missing_values(self):
"""
Test parameter missing_values_handling="MeanImputation" with
mixed predictors (categorical/real value columns). We first generate a data set that
contains a random number of columns of categorical and real value columns. Next, we
encode the categorical columns. Then, we generate the random data set using the formula
as before. Next, we go into the predictor matrix and randomly
decide to change a value to be nan and create missing values. Again, we build a Sklearn
        logistic regression model and compare our H2O models with it.
"""
# no regularization in this case, use reference level plus one-hot-encoding
print("*******************************************************************************************")
print("Test6: test the GLM with enum/real values.")
h2o.cluster_info()
# training result from python sklearn
(p_weights, p_logloss_train, p_cm_train, p_accuracy_training, p_logloss_test, p_cm_test, p_accuracy_test) = \
self.sklearn_binomial_result(self.training_data_file_enum_nans, self.test_data_file_enum_nans, True, False)
# import training set and test set with missing values
training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file_enum_nans))
test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file_enum_nans))
# change the categorical data using .asfactor()
for ind in range(self.enum_col):
training_data[ind] = training_data[ind].round().asfactor()
test_data[ind] = test_data[ind].round().asfactor()
num_col = training_data.ncol
y_index = num_col - 1
x_indices = list(range(y_index))
# change response variables to be categorical
training_data[y_index] = training_data[y_index].round().asfactor()
# check to make sure all response classes are represented, otherwise, quit
if training_data[y_index].nlevels()[0] < self.class_number:
print("Response classes are not represented in training dataset.")
sys.exit(0)
test_data[y_index] = test_data[y_index].round().asfactor()
# generate H2O model
model_h2o = H2OGeneralizedLinearEstimator(family=self.family, Lambda=0,
missing_values_handling="MeanImputation")
model_h2o.train(x=x_indices, y=y_index, training_frame=training_data)
h2o_model_test_metrics = model_h2o.model_performance(test_data=test_data)
num_test_failed = self.test_failed
# print out comparison results our H2O GLM with Sklearn model
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(model_h2o, h2o_model_test_metrics,
self.family, "\nTest6 Done!",
compare_att_str=[
"\nComparing intercept and "
"weights ....",
"\nComparing logloss from training"
" dataset ....",
"\nComparing logloss from test"
" dataset ....",
"\nComparing confusion matrices from"
" training dataset ....",
"\nComparing confusion matrices from"
" test dataset ...",
"\nComparing accuracy from training"
" dataset ....",
"\nComparing accuracy from test"
" dataset ...."],
h2o_att_str=[
"H2O with enum/real values, "
"no regularization and missing values"
" intercept and weights: \n",
"H2O with enum/real values, no "
"regularization and missing values"
" logloss from training dataset: ",
"H2O with enum/real values, no"
" regularization and missing values"
" logloss from test dataset",
"H2O with enum/real values, no"
" regularization and missing values"
" confusion matrix from training"
" dataset: \n",
"H2O with enum/real values, no"
" regularization and missing values"
" confusion matrix from test"
" dataset: \n",
"H2O with enum/real values, no"
" regularization and missing values "
"accuracy from training dataset: ",
"H2O with enum/real values, no "
"regularization and missing values"
" accuracy from test dataset: "],
template_att_str=[
"Sklearn missing values intercept "
"and weights: \n",
"Sklearn with enum/real values, no"
" regularization and missing values"
" logloss from training dataset: ",
"Sklearn with enum/real values, no "
"regularization and missing values"
" logloss from test dataset: ",
"Sklearn with enum/real values, no "
"regularization and missing values "
"confusion matrix from training"
" dataset: \n",
"Sklearn with enum/real values, no "
"regularization and missing values "
"confusion matrix from test "
"dataset: \n",
"Sklearn with enum/real values, no "
"regularization and missing values "
"accuracy from training dataset: ",
"Sklearn with enum/real values, no "
"regularization and missing values "
"accuracy from test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ"
" too much!",
"Logloss from test dataset differ too"
" much!", "", "",
"Accuracies from training dataset"
" differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close"
" enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[
True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
template_params=[
p_weights, p_logloss_train, p_cm_train,
p_accuracy_training, p_logloss_test,
p_cm_test, p_accuracy_test],
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
h2o.cluster_info()
# print out test results and update test_failed_array status to reflect if this test has failed
self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test6_enum_missing_values",
num_test_failed, self.test_failed)
self.test_num += 1
def test7_missing_enum_values_lambda_search(self):
"""
        Test parameter missing_values_handling="MeanImputation" with
        mixed predictors (categorical/real value columns) and lambda search set to True.
We use the same predictors with missing values from test6. Next, we encode the categorical columns using
true one hot encoding since Lambda-search will be enabled with alpha set to 0.5. Since the encoding
is different in this case from test6, we will build a brand new Sklearn logistic regression model and
compare the best H2O model logloss/prediction accuracy with it.
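As an added illustration (assuming a single enum column with levels {A, B, C}):
true one-hot encoding -> A=[1,0,0], B=[0,1,0], C=[0,0,1] (one indicator column per level)
reference level + one-hot -> A=[0,0], B=[1,0], C=[0,1] (level A serves as the reference level)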
"""
# perform lambda_search, regularization and one hot encoding.
print("*******************************************************************************************")
print("Test7: test the GLM with imputation of missing enum/real values under lambda search.")
h2o.cluster_info()
# training result from python sklearn
(p_weights, p_logloss_train, p_cm_train, p_accuracy_training, p_logloss_test, p_cm_test, p_accuracy_test) = \
self.sklearn_binomial_result(self.training_data_file_enum_nans,
self.test_data_file_enum_nans_true_one_hot, True, True,
validation_data_file=self.validation_data_file_enum_nans_true_one_hot)
# import training set and test set with missing values and true one hot encoding
training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file_enum_nans_true_one_hot))
validation_data = h2o.import_file(pyunit_utils.locate(self.validation_data_file_enum_nans_true_one_hot))
test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file_enum_nans_true_one_hot))
# change the categorical data using .asfactor()
for ind in range(self.enum_col):
training_data[ind] = training_data[ind].round().asfactor()
validation_data[ind] = validation_data[ind].round().asfactor()
test_data[ind] = test_data[ind].round().asfactor()
num_col = training_data.ncol
y_index = num_col - 1
x_indices = list(range(y_index))
# change response column to be categorical
training_data[y_index] = training_data[y_index].round().asfactor()
# check to make sure all response classes are represented, otherwise, quit
if training_data[y_index].nlevels()[0] < self.class_number:
print("Response classes are not represented in training dataset.")
sys.exit(0)
validation_data[y_index] = validation_data[y_index].round().asfactor()
test_data[y_index] = test_data[y_index].round().asfactor()
# train H2O model
model_h2o_0p5 = H2OGeneralizedLinearEstimator(family=self.family, lambda_search=True, alpha=0.5,
lambda_min_ratio=1e-20, missing_values_handling="MeanImputation")
model_h2o_0p5.train(x=x_indices, y=y_index, training_frame=training_data, validation_frame=validation_data)
h2o_model_0p5_test_metrics = model_h2o_0p5.model_performance(test_data=test_data)
num_test_failed = self.test_failed
# print out comparison results for our H2O GLM with Sklearn model
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(model_h2o_0p5, h2o_model_0p5_test_metrics,
self.family, "\nTest7 Done!",
compare_att_str=[
"\nComparing intercept and "
"weights ....",
"\nComparing logloss from training"
" dataset ....",
"\nComparing logloss from test"
" dataset ....",
"\nComparing confusion matrices from"
" training dataset ....",
"\nComparing confusion matrices from"
" test dataset ...",
"\nComparing accuracy from training"
" dataset ....",
"\nComparing accuracy from test"
" dataset ...."],
h2o_att_str=[
"H2O with enum/real values, lambda "
"search and missing values intercept"
" and weights: \n",
"H2O with enum/real values, lambda "
"search and missing values logloss "
"from training dataset: ",
"H2O with enum/real values, lambda "
"search and missing values logloss "
"from test dataset: ",
"H2O with enum/real values, lambda "
"search and missing values confusion "
"matrix from training dataset: \n",
"H2O with enum/real values, lambda "
"search and missing values confusion "
"matrix from test dataset: \n",
"H2O with enum/real values, lambda "
"search and missing values accuracy "
"from training dataset: ",
"H2O with enum/real values, lambda "
"search and missing values accuracy "
"from test dataset: "],
template_att_str=[
"Sklearn with enum/real values, lambda"
" search and missing values intercept"
" and weights: \n",
"Sklearn with enum/real values, lambda"
" search and missing values logloss "
"from training dataset: ",
"Sklearn with enum/real values, lambda"
" search and missing values logloss "
"from test dataset: ",
"Sklearn with enum/real values, lambda"
" search and missing values confusion"
" matrix from training dataset: \n",
"Sklearn with enum/real values, lambda"
" search and missing values confusion"
" matrix from test dataset: \n",
"Sklearn with enum/real values, lambda"
" search and missing values accuracy"
" from training dataset: ",
"Sklearn with enum/real values, lambda"
" search and missing values accuracy"
" from test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ "
"too much!",
"Logloss from test dataset differ too"
" much!", "", "", "Accuracies from"
" training dataset differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close "
"enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[
True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
template_params=[
p_weights, p_logloss_train, p_cm_train,
p_accuracy_training, p_logloss_test,
p_cm_test, p_accuracy_test],
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if this test has failed
self.test_failed_array[self.test_num] += \
pyunit_utils.show_test_results("test7_missing_enum_values_lambda_search", num_test_failed, self.test_failed)
self.test_num += 1
def sklearn_binomial_result(self, training_data_file, test_data_file, has_categorical, true_one_hot,
validation_data_file=""):
"""
This function will generate a Sklearn logistic model using the same set of data sets we have used to build
our H2O models. The purpose here is to be able to compare the performance of H2O
models with the Sklearn model built here. This is useful in cases where theoretical solutions
do not exist. If the data contains missing values, mean imputation is applied to the data set before
a Sklearn model is built. In addition, if there are enum columns in predictors and also missing values,
the same encoding and missing value imputation method used by H2O is applied to the data set before we build
the Sklearn model.
:param training_data_file: string storing training data set filename with directory path.
:param test_data_file: string storing test data set filename with directory path.
:param has_categorical: bool indicating if the data set contains mixed predictors (both enum and real)
:param true_one_hot: bool True: true one hot encoding is used. False: reference level plus one hot encoding
is used
:param validation_data_file: optional string denoting the validation file so that we can concatenate
the training and validation data sets into one big training set, since the H2O model uses both a
training and a validation data set.
:return: a tuple containing the weights, logloss, confusion matrix, prediction accuracy calculated on training
data set and test data set respectively.
"""
# read in the training data into a matrix
training_data_xy = np.asmatrix(np.genfromtxt(training_data_file, delimiter=',', dtype=None))
test_data_xy = np.asmatrix(np.genfromtxt(test_data_file, delimiter=',', dtype=None))
if len(validation_data_file) > 0: # validation data set exists; add it to training_data
temp_data_xy = np.asmatrix(np.genfromtxt(validation_data_file, delimiter=',', dtype=None))
training_data_xy = np.concatenate((training_data_xy, temp_data_xy), axis=0)
# if predictor contains categorical data, perform encoding of enums to binary bits
# for missing categorical enums, a new level is created for the nans
if has_categorical:
training_data_xy = pyunit_utils.encode_enum_dataset(training_data_xy, self.enum_level_vec, self.enum_col,
true_one_hot, np.any(training_data_xy))
test_data_xy = pyunit_utils.encode_enum_dataset(test_data_xy, self.enum_level_vec, self.enum_col,
true_one_hot, np.any(training_data_xy))
# replace missing values for real value columns with column mean before proceeding for training/test data sets
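# A hedged numpy sketch of this column-mean imputation (illustrative values only):
#   x = np.array([[1.0, np.nan], [3.0, 4.0]])
#   col_means = np.nanmean(x, axis=0)          # -> array([2.0, 4.0])
#   rows, cols = np.where(np.isnan(x))
#   x[rows, cols] = np.take(col_means, cols)   # each nan replaced by its column mean
# Note that the test set below reuses the training column means so both sets are imputed consistently.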
if np.isnan(training_data_xy).any():
inds = np.where(np.isnan(training_data_xy))
col_means = stats.nanmean(training_data_xy, axis=0)
training_data_xy[inds] = np.take(col_means, inds[1])
if np.isnan(test_data_xy).any():
# replace the actual means with column means from training
inds = np.where(np.isnan(test_data_xy))
test_data_xy = pyunit_utils.replace_nan_with_mean(test_data_xy, inds, col_means)
# now data is ready to be massaged into format that sklearn can use
(response_y, x_mat) = pyunit_utils.prepare_data_sklearn_multinomial(training_data_xy)
(t_response_y, t_x_mat) = pyunit_utils.prepare_data_sklearn_multinomial(test_data_xy)
# train the sklearn Model
sklearn_model = LogisticRegression(class_weight=self.sklearn_class_weight)
sklearn_model = sklearn_model.fit(x_mat, response_y)
# grab the performance metrics on training data set
accuracy_training = sklearn_model.score(x_mat, response_y)
weights = sklearn_model.coef_
p_response_y = sklearn_model.predict(x_mat)
log_prob = sklearn_model.predict_log_proba(x_mat)
logloss_training = self.logloss_sklearn(response_y, log_prob)
cm_train = metrics.confusion_matrix(response_y, p_response_y)
# grab the performance metrics on the test data set
p_response_y = sklearn_model.predict(t_x_mat)
log_prob = sklearn_model.predict_log_proba(t_x_mat)
logloss_test = self.logloss_sklearn(t_response_y, log_prob)
cm_test = metrics.confusion_matrix(t_response_y, p_response_y)
accuracy_test = metrics.accuracy_score(t_response_y, p_response_y)
return weights, logloss_training, cm_train, accuracy_training, logloss_test, cm_test, accuracy_test
def logloss_sklearn(self, true_y, log_prob):
"""
This function calculates the average logloss for the Sklearn model given the true response (true_y) and
the log probabilities (log_prob).
:param true_y: array denoting the true class label
:param log_prob: matrix containing the log of Prob(Y=0) and Prob(Y=1)
:return: average logloss.
"""
(num_row, num_class) = log_prob.shape
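# Added note: the loop below computes logloss = -(1/N) * sum_i log P(Y = y_i | x_i),
# where log_prob[i, int(true_y[i])] is the log-probability assigned to the true class of row i.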
logloss = 0.0
for ind in range(num_row):
logloss += log_prob[ind, int(true_y[ind])]
return -1.0 * logloss / num_row
def test_glm_binomial():
"""
Create and instantiate TestGLMBinomial class and perform tests specified for GLM
Binomial family.
:return: None
"""
test_glm_binomial = TestGLMBinomial()
test_glm_binomial.test1_glm_no_regularization()
test_glm_binomial.test2_glm_lambda_search()
test_glm_binomial.test3_glm_grid_search()
test_glm_binomial.test4_glm_remove_collinear_columns()
# test_glm_binomial.test_num += 1
test_glm_binomial.test5_missing_values()
test_glm_binomial.test6_enum_missing_values()
test_glm_binomial.test7_missing_enum_values_lambda_search()
test_glm_binomial.teardown()
sys.stdout.flush()
if test_glm_binomial.test_failed: # exit with error if any tests have failed
sys.exit(1)
if __name__ == "__main__":
pyunit_utils.standalone_test(test_glm_binomial)
else:
test_glm_binomial()
| YzPaul3/h2o-3 | h2o-py/tests/testdir_dynamic_tests/testdir_algos/glm/pyunit_glm_binomial_large.py | Python | apache-2.0 | 114,537 | ["Gaussian"] | 9b77ae9815265ea73644dc1d534bb96ac36fc849ce4b0987c95d51af029eddea |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import warnings
import numpy as np
from pymatgen.command_line.bader_caller import (
BaderAnalysis,
bader_analysis_from_path,
which,
)
from pymatgen.util.testing import PymatgenTest
@unittest.skipIf(not which("bader"), "bader executable not present.")
class BaderAnalysisTest(unittest.TestCase):
_multiprocess_shared_ = True
def setUp(self):
warnings.catch_warnings()
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_init(self):
# test with reference file
analysis = BaderAnalysis(
chgcar_filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "CHGCAR.Fe3O4"),
potcar_filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "POTCAR.Fe3O4"),
chgref_filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "CHGCAR.Fe3O4_ref"),
)
self.assertEqual(len(analysis.data), 14)
self.assertAlmostEqual(analysis.data[0]["charge"], 6.6136782, 3)
self.assertEqual(analysis.data[0]["charge"], analysis.get_charge(0))
self.assertAlmostEqual(analysis.nelectrons, 96)
self.assertAlmostEqual(analysis.vacuum_charge, 0)
ans = [
-1.3863218,
-1.3812175,
-1.3812175,
-1.2615902,
-1.3812175,
-1.3862971,
1.021523,
1.024357,
1.021523,
1.021523,
1.021523,
1.021523,
1.021523,
1.024357,
]
for i in range(14):
self.assertAlmostEqual(ans[i], analysis.get_charge_transfer(i), 3)
self.assertEqual(analysis.get_partial_charge(0), -analysis.get_charge_transfer(0))
s = analysis.get_oxidation_state_decorated_structure()
self.assertAlmostEqual(s[0].specie.oxi_state, 1.3863218, 3)
# make sure bader still runs without reference file
analysis = BaderAnalysis(chgcar_filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "CHGCAR.Fe3O4"))
self.assertEqual(len(analysis.data), 14)
# Test Cube file format parsing
analysis = BaderAnalysis(cube_filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "bader/elec.cube.gz"))
self.assertEqual(len(analysis.data), 9)
def test_from_path(self):
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "bader")
analysis = BaderAnalysis.from_path(test_dir)
chgcar = os.path.join(test_dir, "CHGCAR.gz")
chgref = os.path.join(test_dir, "_CHGCAR_sum.gz")
analysis0 = BaderAnalysis(chgcar_filename=chgcar, chgref_filename=chgref)
charge = np.array(analysis.summary["charge"])
charge0 = np.array(analysis0.summary["charge"])
self.assertTrue(np.allclose(charge, charge0))
if os.path.exists("CHGREF"):
os.remove("CHGREF")
def test_automatic_runner(self):
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "bader")
summary = bader_analysis_from_path(test_dir)
"""
Reference summary dict (with bader 1.0)
summary_ref = {
'magmom': [4.298761, 4.221997, 4.221997, 3.816685, 4.221997, 4.298763, 0.36292,
0.370516, 0.36292, 0.36292, 0.36292, 0.36292, 0.36292, 0.370516],
'min_dist': [0.835789, 0.92947, 0.92947, 0.973007, 0.92947, 0.835789, 0.94067,
0.817381, 0.94067, 0.94067, 0.94067, 0.94067, 0.94067, 0.817381],
'vacuum_charge': 0.0,
'vacuum_volume': 0.0,
'atomic_volume': [9.922887, 8.175158, 8.175158, 9.265802, 8.175158, 9.923233, 12.382546,
12.566972, 12.382546, 12.382546, 12.382546, 12.382546, 12.382546, 12.566972],
'charge': [12.248132, 12.26177, 12.26177, 12.600596, 12.26177, 12.248143, 7.267303,
7.256998, 7.267303, 7.267303, 7.267303, 7.267303, 7.267303, 7.256998],
'bader_version': 1.0,
'reference_used': True
}
"""
self.assertEqual(
set(summary.keys()),
{
"magmom",
"min_dist",
"vacuum_charge",
"vacuum_volume",
"atomic_volume",
"charge",
"bader_version",
"reference_used",
},
)
self.assertTrue(summary["reference_used"])
self.assertAlmostEqual(sum(summary["magmom"]), 28, places=1)
def test_atom_parsing(self):
# test with reference file
analysis = BaderAnalysis(
chgcar_filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "CHGCAR.Fe3O4"),
potcar_filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "POTCAR.Fe3O4"),
chgref_filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "CHGCAR.Fe3O4_ref"),
parse_atomic_densities=True,
)
self.assertEqual(len(analysis.atomic_densities), len(analysis.chgcar.structure))
self.assertAlmostEqual(
np.sum(analysis.chgcar.data["total"]),
np.sum([np.sum(d["data"]) for d in analysis.atomic_densities]),
)
if __name__ == "__main__":
unittest.main()
| materialsproject/pymatgen | pymatgen/command_line/tests/test_bader_caller.py | Python | mit | 5,344 | ["pymatgen"] | dd2c887e63cdd72edef58b00f19e31b3931f58008a59c14878964237fdb4ca9d |
# -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from html5lib import treebuilders, inputstream
from xhtml2pdf.default import TAGS, STRING, INT, BOOL, SIZE, COLOR, FILE
from xhtml2pdf.default import BOX, POS, MUST, FONT
from xhtml2pdf.util import getSize, getBool, toList, getColor, getAlign
from xhtml2pdf.util import getBox, getPos, pisaTempFile
from reportlab.platypus.doctemplate import NextPageTemplate, FrameBreak
from reportlab.platypus.flowables import PageBreak, KeepInFrame
from xhtml2pdf.xhtml2pdf_reportlab import PmlRightPageBreak, PmlLeftPageBreak
from xhtml2pdf.tags import * # TODO: Kill wild import!
from xhtml2pdf.tables import * # TODO: Kill wild import!
from xhtml2pdf.util import * # TODO: Kill wild import!
from xml.dom import Node
import copy
import html5lib
import logging
import re
import types
import xhtml2pdf.w3c.cssDOMElementInterface as cssDOMElementInterface
import xml.dom.minidom
CSSAttrCache = {}
log = logging.getLogger("xhtml2pdf")
rxhttpstrip = re.compile("https?://[^/]+(.*)", re.M | re.I)
class AttrContainer(dict):
def __getattr__(self, name):
try:
return dict.__getattr__(self, name)
except:
return self[name]
def pisaGetAttributes(c, tag, attributes):
global TAGS
attrs = {}
if attributes:
for k, v in attributes.items():
try:
attrs[str(k)] = str(v) # XXX no Unicode! Reportlab fails with template names
except:
attrs[k] = v
nattrs = {}
if tag in TAGS:
block, adef = TAGS[tag]
adef["id"] = STRING
# print block, adef
for k, v in adef.iteritems():
nattrs[k] = None
# print k, v
# defaults, if present
if type(v) == types.TupleType:
if v[1] == MUST:
if k not in attrs:
log.warn(c.warning("Attribute '%s' must be set!", k))
nattrs[k] = None
continue
nv = attrs.get(k, v[1])
dfl = v[1]
v = v[0]
else:
nv = attrs.get(k, None)
dfl = None
if nv is not None:
if type(v) == types.ListType:
nv = nv.strip().lower()
if nv not in v:
#~ raise PML_EXCEPTION, "attribute '%s' of wrong value, allowed is one of: %s" % (k, repr(v))
log.warn(c.warning("Attribute '%s' of wrong value, allowed is one of: %s", k, repr(v)))
nv = dfl
elif v == BOOL:
nv = nv.strip().lower()
nv = nv in ("1", "y", "yes", "true", str(k))
elif v == SIZE:
try:
nv = getSize(nv)
except:
log.warn(c.warning("Attribute '%s' expects a size value", k))
elif v == BOX:
nv = getBox(nv, c.pageSize)
elif v == POS:
nv = getPos(nv, c.pageSize)
elif v == INT:
nv = int(nv)
elif v == COLOR:
nv = getColor(nv)
elif v == FILE:
nv = c.getFile(nv)
elif v == FONT:
nv = c.getFontName(nv)
nattrs[k] = nv
return AttrContainer(nattrs)
attrNames = '''
color
font-family
font-size
font-weight
font-style
text-decoration
line-height
letter-spacing
background-color
display
margin-left
margin-right
margin-top
margin-bottom
padding-left
padding-right
padding-top
padding-bottom
border-top-color
border-top-style
border-top-width
border-bottom-color
border-bottom-style
border-bottom-width
border-left-color
border-left-style
border-left-width
border-right-color
border-right-style
border-right-width
text-align
vertical-align
width
height
zoom
page-break-after
page-break-before
list-style-type
list-style-image
white-space
text-indent
-pdf-page-break
-pdf-frame-break
-pdf-next-page
-pdf-keep-with-next
-pdf-outline
-pdf-outline-level
-pdf-outline-open
-pdf-line-spacing
-pdf-keep-in-frame-mode
-pdf-word-wrap
'''.strip().split()
def getCSSAttr(self, cssCascade, attrName, default=NotImplemented):
if attrName in self.cssAttrs:
return self.cssAttrs[attrName]
try:
result = cssCascade.findStyleFor(self.cssElement, attrName, default)
except LookupError:
result = None
# XXX Workaround for inline styles
try:
style = self.cssStyle
except:
style = self.cssStyle = cssCascade.parser.parseInline(self.cssElement.getStyleAttr() or '')[0]
if attrName in style:
result = style[attrName]
if result == 'inherit':
if hasattr(self.parentNode, 'getCSSAttr'):
result = self.parentNode.getCSSAttr(cssCascade, attrName, default)
elif default is not NotImplemented:
return default
raise LookupError("Could not find inherited CSS attribute value for '%s'" % (attrName,))
if result is not None:
self.cssAttrs[attrName] = result
return result
#TODO: Monkeypatching standard lib should go away.
xml.dom.minidom.Element.getCSSAttr = getCSSAttr
def getCSSAttrCacheKey(node):
_cl = _id = _st = ''
for k, v in node.attributes.items():
if k == 'class':
_cl = v
elif k == 'id':
_id = v
elif k == 'style':
_st = v
return "%s#%s#%s#%s#%s" % (id(node.parentNode), node.tagName.lower(), _cl, _id, _st)
def CSSCollect(node, c):
#node.cssAttrs = {}
#return node.cssAttrs
if c.css:
_key = getCSSAttrCacheKey(node)
if hasattr(node.parentNode, "tagName"):
if node.parentNode.tagName.lower() != "html":
CachedCSSAttr = CSSAttrCache.get(_key, None)
if CachedCSSAttr is not None:
node.cssAttrs = CachedCSSAttr
return CachedCSSAttr
node.cssElement = cssDOMElementInterface.CSSDOMElementInterface(node)
node.cssAttrs = {}
# node.cssElement.onCSSParserVisit(c.cssCascade.parser)
cssAttrMap = {}
for cssAttrName in attrNames:
try:
cssAttrMap[cssAttrName] = node.getCSSAttr(c.cssCascade, cssAttrName)
#except LookupError:
# pass
except Exception: # TODO: Kill this catch-all!
log.debug("CSS error '%s'", cssAttrName, exc_info=1)
CSSAttrCache[_key] = node.cssAttrs
return node.cssAttrs
def CSS2Frag(c, kw, isBlock):
# COLORS
if "color" in c.cssAttr:
c.frag.textColor = getColor(c.cssAttr["color"])
if "background-color" in c.cssAttr:
c.frag.backColor = getColor(c.cssAttr["background-color"])
# FONT SIZE, STYLE, WEIGHT
if "font-family" in c.cssAttr:
c.frag.fontName = c.getFontName(c.cssAttr["font-family"])
if "font-size" in c.cssAttr:
# XXX inherit
c.frag.fontSize = max(getSize("".join(c.cssAttr["font-size"]), c.frag.fontSize, c.baseFontSize), 1.0)
if "line-height" in c.cssAttr:
leading = "".join(c.cssAttr["line-height"])
c.frag.leading = getSize(leading, c.frag.fontSize)
c.frag.leadingSource = leading
else:
c.frag.leading = getSize(c.frag.leadingSource, c.frag.fontSize)
if "letter-spacing" in c.cssAttr:
c.frag.letterSpacing = c.cssAttr["letter-spacing"]
if "-pdf-line-spacing" in c.cssAttr:
c.frag.leadingSpace = getSize("".join(c.cssAttr["-pdf-line-spacing"]))
# print "line-spacing", c.cssAttr["-pdf-line-spacing"], c.frag.leading
if "font-weight" in c.cssAttr:
value = c.cssAttr["font-weight"].lower()
if value in ("bold", "bolder", "500", "600", "700", "800", "900"):
c.frag.bold = 1
else:
c.frag.bold = 0
for value in toList(c.cssAttr.get("text-decoration", "")):
if "underline" in value:
c.frag.underline = 1
if "line-through" in value:
c.frag.strike = 1
if "none" in value:
c.frag.underline = 0
c.frag.strike = 0
if "font-style" in c.cssAttr:
value = c.cssAttr["font-style"].lower()
if value in ("italic", "oblique"):
c.frag.italic = 1
else:
c.frag.italic = 0
if "white-space" in c.cssAttr:
# normal | pre | nowrap
c.frag.whiteSpace = str(c.cssAttr["white-space"]).lower()
# ALIGN & VALIGN
if "text-align" in c.cssAttr:
c.frag.alignment = getAlign(c.cssAttr["text-align"])
if "vertical-align" in c.cssAttr:
c.frag.vAlign = c.cssAttr["vertical-align"]
# HEIGHT & WIDTH
if "height" in c.cssAttr:
c.frag.height = "".join(toList(c.cssAttr["height"])) # XXX Relative is not correct!
if c.frag.height in ("auto",):
c.frag.height = None
if "width" in c.cssAttr:
c.frag.width = "".join(toList(c.cssAttr["width"])) # XXX Relative is not correct!
if c.frag.width in ("auto",):
c.frag.width = None
# ZOOM
if "zoom" in c.cssAttr:
zoom = "".join(toList(c.cssAttr["zoom"])) # XXX Relative is not correct!
if zoom.endswith("%"):
zoom = float(zoom[: - 1]) / 100.0
c.frag.zoom = float(zoom)
# MARGINS & LIST INDENT, STYLE
if isBlock:
if "margin-top" in c.cssAttr:
c.frag.spaceBefore = getSize(c.cssAttr["margin-top"], c.frag.fontSize)
if "margin-bottom" in c.cssAttr:
c.frag.spaceAfter = getSize(c.cssAttr["margin-bottom"], c.frag.fontSize)
if "margin-left" in c.cssAttr:
c.frag.bulletIndent = kw["margin-left"] # For lists
kw["margin-left"] += getSize(c.cssAttr["margin-left"], c.frag.fontSize)
c.frag.leftIndent = kw["margin-left"]
if "margin-right" in c.cssAttr:
kw["margin-right"] += getSize(c.cssAttr["margin-right"], c.frag.fontSize)
c.frag.rightIndent = kw["margin-right"]
if "text-indent" in c.cssAttr:
c.frag.firstLineIndent = getSize(c.cssAttr["text-indent"], c.frag.fontSize)
if "list-style-type" in c.cssAttr:
c.frag.listStyleType = str(c.cssAttr["list-style-type"]).lower()
if "list-style-image" in c.cssAttr:
c.frag.listStyleImage = c.getFile(c.cssAttr["list-style-image"])
# PADDINGS
if isBlock:
if "padding-top" in c.cssAttr:
c.frag.paddingTop = getSize(c.cssAttr["padding-top"], c.frag.fontSize)
if "padding-bottom" in c.cssAttr:
c.frag.paddingBottom = getSize(c.cssAttr["padding-bottom"], c.frag.fontSize)
if "padding-left" in c.cssAttr:
c.frag.paddingLeft = getSize(c.cssAttr["padding-left"], c.frag.fontSize)
if "padding-right" in c.cssAttr:
c.frag.paddingRight = getSize(c.cssAttr["padding-right"], c.frag.fontSize)
# BORDERS
if isBlock:
if "border-top-width" in c.cssAttr:
c.frag.borderTopWidth = getSize(c.cssAttr["border-top-width"], c.frag.fontSize)
if "border-bottom-width" in c.cssAttr:
c.frag.borderBottomWidth = getSize(c.cssAttr["border-bottom-width"], c.frag.fontSize)
if "border-left-width" in c.cssAttr:
c.frag.borderLeftWidth = getSize(c.cssAttr["border-left-width"], c.frag.fontSize)
if "border-right-width" in c.cssAttr:
c.frag.borderRightWidth = getSize(c.cssAttr["border-right-width"], c.frag.fontSize)
if "border-top-style" in c.cssAttr:
c.frag.borderTopStyle = c.cssAttr["border-top-style"]
if "border-bottom-style" in c.cssAttr:
c.frag.borderBottomStyle = c.cssAttr["border-bottom-style"]
if "border-left-style" in c.cssAttr:
c.frag.borderLeftStyle = c.cssAttr["border-left-style"]
if "border-right-style" in c.cssAttr:
c.frag.borderRightStyle = c.cssAttr["border-right-style"]
if "border-top-color" in c.cssAttr:
c.frag.borderTopColor = getColor(c.cssAttr["border-top-color"])
if "border-bottom-color" in c.cssAttr:
c.frag.borderBottomColor = getColor(c.cssAttr["border-bottom-color"])
if "border-left-color" in c.cssAttr:
c.frag.borderLeftColor = getColor(c.cssAttr["border-left-color"])
if "border-right-color" in c.cssAttr:
c.frag.borderRightColor = getColor(c.cssAttr["border-right-color"])
def pisaPreLoop(node, context, collect=False):
"""
Collect all CSS definitions
"""
data = u""
if node.nodeType == Node.TEXT_NODE and collect:
data = node.data
elif node.nodeType == Node.ELEMENT_NODE:
name = node.tagName.lower()
if name in ("style", "link"):
attr = pisaGetAttributes(context, name, node.attributes)
media = [x.strip() for x in attr.media.lower().split(",") if x.strip()]
if attr.get("type", "").lower() in ("", "text/css") and \
(not media or "all" in media or "print" in media or "pdf" in media):
if name == "style":
for node in node.childNodes:
data += pisaPreLoop(node, context, collect=True)
context.addCSS(data)
return u""
if name == "link" and attr.href and attr.rel.lower() == "stylesheet":
# print "CSS LINK", attr
context.addCSS('\n@import "%s" %s;' % (attr.href, ",".join(media)))
for node in node.childNodes:
result = pisaPreLoop(node, context, collect=collect)
if collect:
data += result
return data
def pisaLoop(node, context, path=None, **kw):
if path is None:
path = []
# Initialize KW
if not kw:
kw = {
"margin-top": 0,
"margin-bottom": 0,
"margin-left": 0,
"margin-right": 0,
}
else:
kw = copy.copy(kw)
#indent = len(path) * " " # only used for debug print statements
# TEXT
if node.nodeType == Node.TEXT_NODE:
# print indent, "#", repr(node.data) #, context.frag
context.addFrag(node.data)
# context.text.append(node.value)
# ELEMENT
elif node.nodeType == Node.ELEMENT_NODE:
node.tagName = node.tagName.replace(":", "").lower()
if node.tagName in ("style", "script"):
return
path = copy.copy(path) + [node.tagName]
# Prepare attributes
attr = pisaGetAttributes(context, node.tagName, node.attributes)
#log.debug(indent + "<%s %s>" % (node.tagName, attr) + repr(node.attributes.items())) #, path
# Calculate styles
context.cssAttr = CSSCollect(node, context)
context.node = node
# Block?
PAGE_BREAK = 1
PAGE_BREAK_RIGHT = 2
PAGE_BREAK_LEFT = 3
pageBreakAfter = False
frameBreakAfter = False
display = context.cssAttr.get("display", "inline").lower()
# print indent, node.tagName, display, context.cssAttr.get("background-color", None), attr
isBlock = (display == "block")
if isBlock:
context.addPara()
# Page break by CSS
if "-pdf-next-page" in context.cssAttr:
context.addStory(NextPageTemplate(str(context.cssAttr["-pdf-next-page"])))
if "-pdf-page-break" in context.cssAttr:
if str(context.cssAttr["-pdf-page-break"]).lower() == "before":
context.addStory(PageBreak())
if "-pdf-frame-break" in context.cssAttr:
if str(context.cssAttr["-pdf-frame-break"]).lower() == "before":
context.addStory(FrameBreak())
if str(context.cssAttr["-pdf-frame-break"]).lower() == "after":
frameBreakAfter = True
if "page-break-before" in context.cssAttr:
if str(context.cssAttr["page-break-before"]).lower() == "always":
context.addStory(PageBreak())
if str(context.cssAttr["page-break-before"]).lower() == "right":
context.addStory(PageBreak())
context.addStory(PmlRightPageBreak())
if str(context.cssAttr["page-break-before"]).lower() == "left":
context.addStory(PageBreak())
context.addStory(PmlLeftPageBreak())
if "page-break-after" in context.cssAttr:
if str(context.cssAttr["page-break-after"]).lower() == "always":
pageBreakAfter = PAGE_BREAK
if str(context.cssAttr["page-break-after"]).lower() == "right":
pageBreakAfter = PAGE_BREAK_RIGHT
if str(context.cssAttr["page-break-after"]).lower() == "left":
pageBreakAfter = PAGE_BREAK_LEFT
if display == "none":
# print "none!"
return
# Translate CSS to frags
# Save previous frag styles
context.pushFrag()
# Map styles to Reportlab fragment properties
CSS2Frag(context, kw, isBlock)
# EXTRAS
if "-pdf-keep-with-next" in context.cssAttr:
context.frag.keepWithNext = getBool(context.cssAttr["-pdf-keep-with-next"])
if "-pdf-outline" in context.cssAttr:
context.frag.outline = getBool(context.cssAttr["-pdf-outline"])
if "-pdf-outline-level" in context.cssAttr:
context.frag.outlineLevel = int(context.cssAttr["-pdf-outline-level"])
if "-pdf-outline-open" in context.cssAttr:
context.frag.outlineOpen = getBool(context.cssAttr["-pdf-outline-open"])
if "-pdf-word-wrap" in context.cssAttr:
context.frag.wordWrap = context.cssAttr["-pdf-word-wrap"]
# handle keep-in-frame
keepInFrameMode = None
keepInFrameMaxWidth = 0
keepInFrameMaxHeight = 0
if "-pdf-keep-in-frame-mode" in context.cssAttr:
value = str(context.cssAttr["-pdf-keep-in-frame-mode"]).strip().lower()
if value in ("shrink", "error", "overflow", "truncate"):
keepInFrameMode = value
if "-pdf-keep-in-frame-max-width" in context.cssAttr:
keepInFrameMaxWidth = getSize("".join(context.cssAttr["-pdf-keep-in-frame-max-width"]))
if "-pdf-keep-in-frame-max-height" in context.cssAttr:
keepInFrameMaxHeight = getSize("".join(context.cssAttr["-pdf-keep-in-frame-max-height"]))
# ignore nested keep-in-frames, tables have their own KIF handling
keepInFrame = keepInFrameMode is not None and context.keepInFrameIndex is None
if keepInFrame:
# keep track of current story index, so we can wrap everything
# added after this point in a KeepInFrame
context.keepInFrameIndex = len(context.story)
# BEGIN tag
klass = globals().get("pisaTag%s" % node.tagName.replace(":", "").upper(), None)
obj = None
# Static block
elementId = attr.get("id", None)
staticFrame = context.frameStatic.get(elementId, None)
if staticFrame:
context.frag.insideStaticFrame += 1
oldStory = context.swapStory()
# Tag specific operations
if klass is not None:
obj = klass(node, attr)
obj.start(context)
# Visit child nodes
context.fragBlock = fragBlock = copy.copy(context.frag)
for nnode in node.childNodes:
pisaLoop(nnode, context, path, **kw)
context.fragBlock = fragBlock
# END tag
if obj:
obj.end(context)
# Block?
if isBlock:
context.addPara()
# XXX Buggy!
# Page break by CSS
if pageBreakAfter:
context.addStory(PageBreak())
if pageBreakAfter == PAGE_BREAK_RIGHT:
context.addStory(PmlRightPageBreak())
if pageBreakAfter == PAGE_BREAK_LEFT:
context.addStory(PmlLeftPageBreak())
if frameBreakAfter:
context.addStory(FrameBreak())
if keepInFrame:
# get all content added after start of -pdf-keep-in-frame and wrap
# it in a KeepInFrame
substory = context.story[context.keepInFrameIndex:]
context.story = context.story[:context.keepInFrameIndex]
context.story.append(
KeepInFrame(
content=substory,
maxWidth=keepInFrameMaxWidth,
maxHeight=keepInFrameMaxHeight))
context.keepInFrameIndex = None
# Static block, END
if staticFrame:
context.addPara()
for frame in staticFrame:
frame.pisaStaticStory = context.story
context.swapStory(oldStory)
context.frag.insideStaticFrame -= 1
# context.debug(1, indent, "</%s>" % (node.tagName))
# Reset frag style
context.pullFrag()
# Unknown or not handled
else:
# context.debug(1, indent, "???", node, node.nodeType, repr(node))
# Loop over children
for node in node.childNodes:
pisaLoop(node, context, path, **kw)
def pisaParser(src, context, default_css="", xhtml=False, encoding=None, xml_output=None):
"""
- Parse HTML and get miniDOM
- Extract CSS informations, add default CSS, parse CSS
- Handle the document DOM itself and build reportlab story
- Return Context object
"""
global CSSAttrCache
CSSAttrCache = {}
if xhtml:
#TODO: XHTMLParser doesn't seem to exist...
parser = html5lib.XHTMLParser(tree=treebuilders.getTreeBuilder("dom"))
else:
parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
if type(src) in types.StringTypes:
if type(src) is types.UnicodeType:
# If an encoding was provided, do not change it.
if not encoding:
encoding = "utf-8"
src = src.encode(encoding)
src = pisaTempFile(src, capacity=context.capacity)
# Test for the restrictions of html5lib
if encoding:
# Workaround for html5lib<0.11.1
if hasattr(inputstream, "isValidEncoding"):
if encoding.strip().lower() == "utf8":
encoding = "utf-8"
if not inputstream.isValidEncoding(encoding):
log.error("%r is not a valid encoding e.g. 'utf8' is not valid but 'utf-8' is!", encoding)
else:
if inputstream.codecName(encoding) is None:
log.error("%r is not a valid encoding", encoding)
document = parser.parse(
src,
encoding=encoding)
if xml_output:
if encoding:
xml_output.write(document.toprettyxml(encoding=encoding))
else:
xml_output.write(document.toprettyxml(encoding="utf8"))
if default_css:
context.addDefaultCSS(default_css)
pisaPreLoop(document, context)
#try:
context.parseCSS()
#except:
# context.cssText = DEFAULT_CSS
# context.parseCSS()
# context.debug(9, pprint.pformat(context.css))
pisaLoop(document, context)
return context
# Shortcuts
HTML2PDF = pisaParser
def XHTML2PDF(*a, **kw):
kw["xhtml"] = True
return HTML2PDF(*a, **kw)
XML2PDF = XHTML2PDF
| redclov3r/xhtml2pdf | xhtml2pdf/parser.py | Python | apache-2.0 | 24,459 | ["VisIt"] | 97a7f09e51e23122751404dfdc3cabdf6d2b89fcfcc6903c433aa35fedb1b981 |
""" Python test discovery, setup and run of test functions. """
import re
import fnmatch
import functools
import py
import inspect
import sys
import pytest
from _pytest.mark import MarkDecorator, MarkerError
from py._code.code import TerminalRepr
try:
import enum
except ImportError: # pragma: no cover
# Only available in Python 3.4+ or as a backport
enum = None
import _pytest
import pluggy
cutdir2 = py.path.local(_pytest.__file__).dirpath()
cutdir1 = py.path.local(pluggy.__file__.rstrip("oc"))
NoneType = type(None)
NOTSET = object()
isfunction = inspect.isfunction
isclass = inspect.isclass
callable = py.builtin.callable
# used to work around a python2 exception info leak
exc_clear = getattr(sys, 'exc_clear', lambda: None)
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(''))
def filter_traceback(entry):
return entry.path != cutdir1 and not entry.path.relto(cutdir2)
def get_real_func(obj):
""" gets the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial.
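For example (an added note): get_real_func(functools.partial(f, 1)) returns f, and a
functools.wraps-decorated wrapper is unwound through its __wrapped__ attribute.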
"""
while hasattr(obj, "__wrapped__"):
obj = obj.__wrapped__
if isinstance(obj, functools.partial):
obj = obj.func
return obj
def getfslineno(obj):
# xxx let decorators etc specify a sane ordering
obj = get_real_func(obj)
if hasattr(obj, 'place_as'):
obj = obj.place_as
fslineno = py.code.getfslineno(obj)
assert isinstance(fslineno[1], int), obj
return fslineno
def getimfunc(func):
try:
return func.__func__
except AttributeError:
try:
return func.im_func
except AttributeError:
return func
def safe_getattr(object, name, default):
""" Like getattr but return default upon any Exception.
Attribute access can potentially fail for 'evil' Python objects.
See issue214
"""
try:
return getattr(object, name, default)
except Exception:
return default
class FixtureFunctionMarker:
def __init__(self, scope, params,
autouse=False, yieldctx=False, ids=None):
self.scope = scope
self.params = params
self.autouse = autouse
self.yieldctx = yieldctx
self.ids = ids
def __call__(self, function):
if isclass(function):
raise ValueError(
"class fixtures not supported (may be in the future)")
function._pytestfixturefunction = self
return function
def fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a fixture factory function.
This decorator can be used (with or without parameters) to define
a fixture function. The name of the fixture function can later be
referenced to cause its invocation ahead of running tests: test
modules or classes can use the pytest.mark.usefixtures(fixturename)
marker. Test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
:arg scope: the scope for which this fixture is shared, one of
"function" (default), "class", "module", "session".
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture function and all of the tests
using it.
:arg autouse: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
reference is needed to activate the fixture.
:arg ids: list of string ids each corresponding to the params
so that they are part of the test id. If no ids are provided
they will be generated automatically from the params.
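A minimal usage sketch (added illustration, not part of the original docstring):

    @pytest.fixture(scope="module", params=[1, 2], ids=["one", "two"])
    def number(request):
        return request.param

    def test_number(number):  # the fixture value is injected by argument name
        assert number in (1, 2)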
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse)(scope)
if params is not None and not isinstance(params, (list, tuple)):
params = list(params)
return FixtureFunctionMarker(scope, params, autouse, ids=ids)
def yield_fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a yield-fixture factory function
(EXPERIMENTAL).
This takes the same arguments as :py:func:`pytest.fixture` but
expects a fixture function to use a ``yield`` instead of a ``return``
statement to provide a fixture. See
http://pytest.org/en/latest/yieldfixture.html for more info.
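A minimal sketch (added illustration; open_resource is a hypothetical setup helper):

    @pytest.yield_fixture
    def resource():
        handle = open_resource()   # setup
        yield handle               # value provided to the test
        handle.close()             # teardown runs after the test finishes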
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse, yieldctx=True)(scope)
else:
return FixtureFunctionMarker(scope, params, autouse,
yieldctx=True, ids=ids)
defaultfuncargprefixmarker = fixture()
def pyobj_property(name):
def get(self):
node = self.getparent(getattr(pytest, name))
if node is not None:
return node.obj
doc = "python %s object this node was collected from (can be None)." % (
name.lower(),)
return property(get, None, None, doc)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption('--fixtures', '--funcargs',
action="store_true", dest="showfixtures", default=False,
help="show available fixtures, sorted by plugin appearance")
parser.addini("usefixtures", type="args", default=[],
help="list of default fixtures to be used with this project")
parser.addini("python_files", type="args",
default=['test_*.py', '*_test.py'],
help="glob-style file patterns for Python test module discovery")
parser.addini("python_classes", type="args", default=["Test",],
help="prefixes or glob names for Python test class discovery")
parser.addini("python_functions", type="args", default=["test",],
help="prefixes or glob names for Python test function and "
"method discovery")
def pytest_cmdline_main(config):
if config.option.showfixtures:
showfixtures(config)
return 0
def pytest_generate_tests(metafunc):
# those alternative spellings are common - raise a specific error to alert
# the user
alt_spellings = ['parameterize', 'parametrise', 'parameterise']
for attr in alt_spellings:
if hasattr(metafunc.function, attr):
msg = "{0} has '{1}', spelling should be 'parametrize'"
raise MarkerError(msg.format(metafunc.function.__name__, attr))
try:
markers = metafunc.function.parametrize
except AttributeError:
return
for marker in markers:
metafunc.parametrize(*marker.args, **marker.kwargs)
def pytest_configure(config):
config.addinivalue_line("markers",
"parametrize(argnames, argvalues): call a test function multiple "
"times passing in different arguments in turn. argvalues generally "
"needs to be a list of values if argnames specifies only one name "
"or a list of tuples of values if argnames specifies multiple names. "
"Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
"decorated test function, one with arg1=1 and another with arg1=2."
"see http://pytest.org/latest/parametrize.html for more info and "
"examples."
)
config.addinivalue_line("markers",
"usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
"all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
)
def pytest_sessionstart(session):
session._fixturemanager = FixtureManager(session)
@pytest.hookimpl(trylast=True)
def pytest_namespace():
raises.Exception = pytest.fail.Exception
return {
'fixture': fixture,
'yield_fixture': yield_fixture,
'raises' : raises,
'collect': {
'Module': Module, 'Class': Class, 'Instance': Instance,
'Function': Function, 'Generator': Generator,
'_fillfuncargs': fillfixtures}
}
@fixture(scope="session")
def pytestconfig(request):
""" the pytest config object with access to command line opts."""
return request.config
@pytest.hookimpl(trylast=True)
def pytest_pyfunc_call(pyfuncitem):
testfunction = pyfuncitem.obj
if pyfuncitem._isyieldedfunction():
testfunction(*pyfuncitem._args)
else:
funcargs = pyfuncitem.funcargs
testargs = {}
for arg in pyfuncitem._fixtureinfo.argnames:
testargs[arg] = funcargs[arg]
testfunction(**testargs)
return True
def pytest_collect_file(path, parent):
ext = path.ext
if ext == ".py":
if not parent.session.isinitpath(path):
for pat in parent.config.getini('python_files'):
if path.fnmatch(pat):
break
else:
return
ihook = parent.session.gethookproxy(path)
return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
def pytest_pycollect_makemodule(path, parent):
return Module(path, parent)
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector, name, obj):
outcome = yield
res = outcome.get_result()
if res is not None:
raise StopIteration
# nothing was collected elsewhere, let's do it here
if isclass(obj):
if collector.istestclass(obj, name):
Class = collector._getcustomclass("Class")
outcome.force_result(Class(name, parent=collector))
elif collector.istestfunction(obj, name):
# mock seems to store unbound methods (issue473), normalize it
obj = getattr(obj, "__func__", obj)
if not isfunction(obj):
collector.warn(code="C2", message=
"cannot collect %r because it is not a function."
% name, )
if getattr(obj, "__test__", True):
if is_generator(obj):
res = Generator(name, parent=collector)
else:
res = list(collector._genfunctions(name, obj))
outcome.force_result(res)
def is_generator(func):
try:
return py.code.getrawcode(func).co_flags & 32 # generator function
except AttributeError: # builtin functions have no bytecode
# assume them to not be generators
return False
class PyobjContext(object):
module = pyobj_property("Module")
cls = pyobj_property("Class")
instance = pyobj_property("Instance")
class PyobjMixin(PyobjContext):
def obj():
def fget(self):
try:
return self._obj
except AttributeError:
self._obj = obj = self._getobj()
return obj
def fset(self, value):
self._obj = value
return property(fget, fset, None, "underlying python object")
obj = obj()
def _getobj(self):
return getattr(self.parent.obj, self.name)
def getmodpath(self, stopatmodule=True, includemodule=False):
""" return python path relative to the containing module. """
chain = self.listchain()
chain.reverse()
parts = []
for node in chain:
if isinstance(node, Instance):
continue
name = node.name
if isinstance(node, Module):
assert name.endswith(".py")
name = name[:-3]
if stopatmodule:
if includemodule:
parts.append(name)
break
parts.append(name)
parts.reverse()
s = ".".join(parts)
return s.replace(".[", "[")
def _getfslineno(self):
return getfslineno(self.obj)
def reportinfo(self):
# XXX caching?
obj = self.obj
if hasattr(obj, 'compat_co_firstlineno'):
# nose compatibility
fspath = sys.modules[obj.__module__].__file__
if fspath.endswith(".pyc"):
fspath = fspath[:-1]
lineno = obj.compat_co_firstlineno
else:
fspath, lineno = getfslineno(obj)
modpath = self.getmodpath()
assert isinstance(lineno, int)
return fspath, lineno, modpath
class PyCollector(PyobjMixin, pytest.Collector):
def funcnamefilter(self, name):
return self._matches_prefix_or_glob_option('python_functions', name)
def isnosetest(self, obj):
""" Look for the __test__ attribute, which is applied by the
@nose.tools.istest decorator
"""
return safe_getattr(obj, '__test__', False)
def classnamefilter(self, name):
return self._matches_prefix_or_glob_option('python_classes', name)
def istestfunction(self, obj, name):
return (
(self.funcnamefilter(name) or self.isnosetest(obj))
and safe_getattr(obj, "__call__", False) and getfixturemarker(obj) is None
)
def istestclass(self, obj, name):
return self.classnamefilter(name) or self.isnosetest(obj)
def _matches_prefix_or_glob_option(self, option_name, name):
"""
checks if the given name matches the prefix or glob-pattern defined
in ini configuration.
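Example (added note): with python_classes = ["Test", "*Suite"], the name "TestFoo"
matches by prefix while "AcceptanceSuite" matches via fnmatch on the glob pattern.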
"""
for option in self.config.getini(option_name):
if name.startswith(option):
return True
# check that name looks like a glob-string before calling fnmatch
# because this is called for every name in each collected module,
# and fnmatch is somewhat expensive to call
elif ('*' in option or '?' in option or '[' in option) and \
fnmatch.fnmatch(name, option):
return True
return False
def collect(self):
if not getattr(self.obj, "__test__", True):
return []
# NB. we avoid random getattrs and peek in the __dict__ instead
# (XXX originally introduced from a PyPy need, still true?)
dicts = [getattr(self.obj, '__dict__', {})]
for basecls in inspect.getmro(self.obj.__class__):
dicts.append(basecls.__dict__)
seen = {}
l = []
for dic in dicts:
for name, obj in dic.items():
if name in seen:
continue
seen[name] = True
res = self.makeitem(name, obj)
if res is None:
continue
if not isinstance(res, list):
res = [res]
l.extend(res)
l.sort(key=lambda item: item.reportinfo()[:2])
return l
def makeitem(self, name, obj):
#assert self.ihook.fspath == self.fspath, self
return self.ihook.pytest_pycollect_makeitem(
collector=self, name=name, obj=obj)
def _genfunctions(self, name, funcobj):
module = self.getparent(Module).obj
clscol = self.getparent(Class)
cls = clscol and clscol.obj or None
transfer_markers(funcobj, cls, module)
fm = self.session._fixturemanager
fixtureinfo = fm.getfixtureinfo(self, funcobj, cls)
metafunc = Metafunc(funcobj, fixtureinfo, self.config,
cls=cls, module=module)
methods = []
if hasattr(module, "pytest_generate_tests"):
methods.append(module.pytest_generate_tests)
if hasattr(cls, "pytest_generate_tests"):
methods.append(cls().pytest_generate_tests)
if methods:
self.ihook.pytest_generate_tests.call_extra(methods,
dict(metafunc=metafunc))
else:
self.ihook.pytest_generate_tests(metafunc=metafunc)
Function = self._getcustomclass("Function")
if not metafunc._calls:
yield Function(name, parent=self, fixtureinfo=fixtureinfo)
else:
# add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
add_funcarg_pseudo_fixture_def(self, metafunc, fm)
for callspec in metafunc._calls:
subname = "%s[%s]" %(name, callspec.id)
yield Function(name=subname, parent=self,
callspec=callspec, callobj=funcobj,
fixtureinfo=fixtureinfo,
keywords={callspec.id:True})
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
# this function will transform all collected calls to a functions
# if they use direct funcargs (i.e. direct parametrization)
# because we want later test execution to be able to rely on
# an existing FixtureDef structure for all arguments.
# XXX we can probably avoid this algorithm if we modify CallSpec2
# to directly care for creating the fixturedefs within its methods.
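# Added example: @pytest.mark.parametrize("x", [0, 1]) on a test with no "x" fixture yields
# callspecs whose funcargs {"x": 0} / {"x": 1} are moved into params here and backed by an
# artificial FixtureDef, so later fixture setup can treat them uniformly.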
if not metafunc._calls[0].funcargs:
return # this function call does not have direct parametrization
# collect funcargs of all callspecs into a list of values
arg2params = {}
arg2scope = {}
for callspec in metafunc._calls:
for argname, argvalue in callspec.funcargs.items():
assert argname not in callspec.params
callspec.params[argname] = argvalue
arg2params_list = arg2params.setdefault(argname, [])
callspec.indices[argname] = len(arg2params_list)
arg2params_list.append(argvalue)
if argname not in arg2scope:
scopenum = callspec._arg2scopenum.get(argname,
scopenum_function)
arg2scope[argname] = scopes[scopenum]
callspec.funcargs.clear()
# register artificial FixtureDef's so that later at test execution
# time we can rely on a proper FixtureDef to exist for fixture setup.
arg2fixturedefs = metafunc._arg2fixturedefs
for argname, valuelist in arg2params.items():
# if we have a scope that is higher than function we need
# to make sure we only ever create an according fixturedef on
# a per-scope basis. We thus store and cache the fixturedef on the
# node related to the scope.
scope = arg2scope[argname]
node = None
if scope != "function":
node = get_scope_node(collector, scope)
if node is None:
assert scope == "class" and isinstance(collector, Module)
# use module-level collector for class-scope (for now)
node = collector
if node and argname in node._name2pseudofixturedef:
arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
else:
fixturedef = FixtureDef(fixturemanager, '', argname,
get_direct_param_fixture_func,
arg2scope[argname],
valuelist, False, False)
arg2fixturedefs[argname] = [fixturedef]
if node is not None:
node._name2pseudofixturedef[argname] = fixturedef
def get_direct_param_fixture_func(request):
return request.param
class FuncFixtureInfo:
def __init__(self, argnames, names_closure, name2fixturedefs):
self.argnames = argnames
self.names_closure = names_closure
self.name2fixturedefs = name2fixturedefs
def _marked(func, mark):
""" Returns True if :func: is already marked with :mark:, False otherwise.
This can happen if marker is applied to class and the test file is
invoked more than once.
"""
try:
func_mark = getattr(func, mark.name)
except AttributeError:
return False
return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs
def transfer_markers(funcobj, cls, mod):
# XXX this should rather be code in the mark plugin or the mark
# plugin should merge with the python plugin.
for holder in (cls, mod):
try:
pytestmark = holder.pytestmark
except AttributeError:
continue
if isinstance(pytestmark, list):
for mark in pytestmark:
if not _marked(funcobj, mark):
mark(funcobj)
else:
if not _marked(funcobj, pytestmark):
pytestmark(funcobj)
class Module(pytest.File, PyCollector):
""" Collector for test classes and functions. """
def _getobj(self):
return self._memoizedcall('_obj', self._importtestmodule)
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Module, self).collect()
def _importtestmodule(self):
# we assume we are only called once per module
try:
mod = self.fspath.pyimport(ensuresyspath="append")
except SyntaxError:
raise self.CollectError(
py.code.ExceptionInfo().getrepr(style="short"))
except self.fspath.ImportMismatchError:
e = sys.exc_info()[1]
raise self.CollectError(
"import file mismatch:\n"
"imported module %r has this __file__ attribute:\n"
" %s\n"
"which is not the same as the test file we want to collect:\n"
" %s\n"
"HINT: remove __pycache__ / .pyc files and/or use a "
"unique basename for your test file modules"
% e.args
)
#print "imported test module", mod
self.config.pluginmanager.consider_module(mod)
return mod
def setup(self):
setup_module = xunitsetup(self.obj, "setUpModule")
if setup_module is None:
setup_module = xunitsetup(self.obj, "setup_module")
if setup_module is not None:
#XXX: nose compat hack, move to nose plugin
# if it takes a positional arg, it's probably a pytest style one
# so we pass the current module object
if inspect.getargspec(setup_module)[0]:
setup_module(self.obj)
else:
setup_module()
fin = getattr(self.obj, 'tearDownModule', None)
if fin is None:
fin = getattr(self.obj, 'teardown_module', None)
if fin is not None:
#XXX: nose compat hack, move to nose plugin
# if it takes a positional arg, it's probably a pytest style one
# so we pass the current module object
if inspect.getargspec(fin)[0]:
finalizer = lambda: fin(self.obj)
else:
finalizer = fin
self.addfinalizer(finalizer)
class Class(PyCollector):
""" Collector for test methods. """
def collect(self):
if hasinit(self.obj):
self.warn("C1", "cannot collect test class %r because it has a "
"__init__ constructor" % self.obj.__name__)
return []
return [self._getcustomclass("Instance")(name="()", parent=self)]
def setup(self):
setup_class = xunitsetup(self.obj, 'setup_class')
if setup_class is not None:
setup_class = getattr(setup_class, 'im_func', setup_class)
setup_class = getattr(setup_class, '__func__', setup_class)
setup_class(self.obj)
fin_class = getattr(self.obj, 'teardown_class', None)
if fin_class is not None:
fin_class = getattr(fin_class, 'im_func', fin_class)
fin_class = getattr(fin_class, '__func__', fin_class)
self.addfinalizer(lambda: fin_class(self.obj))
class Instance(PyCollector):
def _getobj(self):
obj = self.parent.obj()
return obj
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Instance, self).collect()
def newinstance(self):
self.obj = self._getobj()
return self.obj
class FunctionMixin(PyobjMixin):
""" mixin for the code common to Function and Generator.
"""
def setup(self):
""" perform setup for this test function. """
if hasattr(self, '_preservedparent'):
obj = self._preservedparent
elif isinstance(self.parent, Instance):
obj = self.parent.newinstance()
self.obj = self._getobj()
else:
obj = self.parent.obj
if inspect.ismethod(self.obj):
setup_name = 'setup_method'
teardown_name = 'teardown_method'
else:
setup_name = 'setup_function'
teardown_name = 'teardown_function'
setup_func_or_method = xunitsetup(obj, setup_name)
if setup_func_or_method is not None:
setup_func_or_method(self.obj)
fin = getattr(obj, teardown_name, None)
if fin is not None:
self.addfinalizer(lambda: fin(self.obj))
def _prunetraceback(self, excinfo):
if hasattr(self, '_obj') and not self.config.option.fulltrace:
code = py.code.Code(get_real_func(self.obj))
path, firstlineno = code.path, code.firstlineno
traceback = excinfo.traceback
ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
if ntraceback == traceback:
ntraceback = ntraceback.cut(path=path)
if ntraceback == traceback:
#ntraceback = ntraceback.cut(excludepath=cutdir2)
ntraceback = ntraceback.filter(filter_traceback)
if not ntraceback:
ntraceback = traceback
excinfo.traceback = ntraceback.filter()
# issue364: mark all but first and last frames to
# only show a single-line message for each frame
if self.config.option.tbstyle == "auto":
if len(excinfo.traceback) > 2:
for entry in excinfo.traceback[1:-1]:
entry.set_repr_style('short')
def _repr_failure_py(self, excinfo, style="long"):
if excinfo.errisinstance(pytest.fail.Exception):
if not excinfo.value.pytrace:
return str(excinfo.value)
return super(FunctionMixin, self)._repr_failure_py(excinfo,
style=style)
def repr_failure(self, excinfo, outerr=None):
assert outerr is None, "XXX outerr usage is deprecated"
style = self.config.option.tbstyle
if style == "auto":
style = "long"
return self._repr_failure_py(excinfo, style=style)
class Generator(FunctionMixin, PyCollector):
def collect(self):
# test generators are seen as collectors but they also
# invoke setup/teardown on popular request
# (induced by the common "test_*" naming shared with normal tests)
self.session._setupstate.prepare(self)
# see FunctionMixin.setup and test_setupstate_is_preserved_134
self._preservedparent = self.parent.obj
l = []
seen = {}
for i, x in enumerate(self.obj()):
name, call, args = self.getcallargs(x)
if not callable(call):
raise TypeError("%r yielded non callable test %r" %(self.obj, call,))
if name is None:
name = "[%d]" % i
else:
name = "['%s']" % name
if name in seen:
raise ValueError("%r generated tests with non-unique name %r" %(self, name))
seen[name] = True
l.append(self.Function(name, self, args=args, callobj=call))
return l
def getcallargs(self, obj):
if not isinstance(obj, (tuple, list)):
obj = (obj,)
        # explicit naming
if isinstance(obj[0], py.builtin._basestring):
name = obj[0]
obj = obj[1:]
else:
name = None
call, args = obj[0], obj[1:]
return name, call, args
def hasinit(obj):
init = getattr(obj, '__init__', None)
if init:
if init != object.__init__:
return True
def fillfixtures(function):
""" fill missing funcargs for a test function. """
try:
request = function._request
except AttributeError:
# XXX this special code path is only expected to execute
# with the oejskit plugin. It uses classes with funcargs
# and we thus have to work a bit to allow this.
fm = function.session._fixturemanager
fi = fm.getfixtureinfo(function.parent, function.obj, None)
function._fixtureinfo = fi
request = function._request = FixtureRequest(function)
request._fillfixtures()
# prune out funcargs for jstests
newfuncargs = {}
for name in fi.argnames:
newfuncargs[name] = function.funcargs[name]
function.funcargs = newfuncargs
else:
request._fillfixtures()
_notexists = object()
class CallSpec2(object):
def __init__(self, metafunc):
self.metafunc = metafunc
self.funcargs = {}
self._idlist = []
self.params = {}
self._globalid = _notexists
self._globalid_args = set()
self._globalparam = _notexists
self._arg2scopenum = {} # used for sorting parametrized resources
self.keywords = {}
self.indices = {}
def copy(self, metafunc):
cs = CallSpec2(self.metafunc)
cs.funcargs.update(self.funcargs)
cs.params.update(self.params)
cs.keywords.update(self.keywords)
cs.indices.update(self.indices)
cs._arg2scopenum.update(self._arg2scopenum)
cs._idlist = list(self._idlist)
cs._globalid = self._globalid
cs._globalid_args = self._globalid_args
cs._globalparam = self._globalparam
return cs
def _checkargnotcontained(self, arg):
if arg in self.params or arg in self.funcargs:
raise ValueError("duplicate %r" %(arg,))
def getparam(self, name):
try:
return self.params[name]
except KeyError:
if self._globalparam is _notexists:
raise ValueError(name)
return self._globalparam
@property
def id(self):
return "-".join(map(str, filter(None, self._idlist)))
def setmulti(self, valtype, argnames, valset, id, keywords, scopenum,
param_index):
for arg,val in zip(argnames, valset):
self._checkargnotcontained(arg)
getattr(self, valtype)[arg] = val
self.indices[arg] = param_index
self._arg2scopenum[arg] = scopenum
if val is _notexists:
self._emptyparamspecified = True
self._idlist.append(id)
self.keywords.update(keywords)
def setall(self, funcargs, id, param):
for x in funcargs:
self._checkargnotcontained(x)
self.funcargs.update(funcargs)
if id is not _notexists:
self._idlist.append(id)
if param is not _notexists:
assert self._globalparam is _notexists
self._globalparam = param
for arg in funcargs:
self._arg2scopenum[arg] = scopenum_function
class FuncargnamesCompatAttr:
""" helper class so that Metafunc, Function and FixtureRequest
don't need to each define the "funcargnames" compatibility attribute.
"""
@property
def funcargnames(self):
""" alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
return self.fixturenames
class Metafunc(FuncargnamesCompatAttr):
"""
Metafunc objects are passed to the ``pytest_generate_tests`` hook.
They help to inspect a test function and to generate tests according to
test configuration or values specified in the class or module where a
test function is defined.
:ivar fixturenames: set of fixture names required by the test function
:ivar function: underlying python test function
:ivar cls: class object where the test function is defined in or ``None``.
:ivar module: the module object where the test function is defined in.
:ivar config: access to the :class:`_pytest.config.Config` object for the
test session.
:ivar funcargnames:
.. deprecated:: 2.3
Use ``fixturenames`` instead.
"""
def __init__(self, function, fixtureinfo, config, cls=None, module=None):
self.config = config
self.module = module
self.function = function
self.fixturenames = fixtureinfo.names_closure
self._arg2fixturedefs = fixtureinfo.name2fixturedefs
self.cls = cls
self._calls = []
self._ids = py.builtin.set()
def parametrize(self, argnames, argvalues, indirect=False, ids=None,
scope=None):
""" Add new invocations to the underlying test function using the list
of argvalues for the given argnames. Parametrization is performed
        during the collection phase. If you need to set up expensive resources,
        consider setting indirect=True so that the setup happens at test setup
        time rather than at collection time.
:arg argnames: a comma-separated string denoting one or more argument
names, or a list/tuple of argument strings.
:arg argvalues: The list of argvalues determines how often a
test is invoked with different argument values. If only one
argname was specified argvalues is a list of simple values. If N
argnames were specified, argvalues must be a list of N-tuples,
where each tuple-element specifies a value for its respective
argname.
:arg indirect: if True each argvalue corresponding to an argname will
be passed as request.param to its respective argname fixture
function so that it can perform more expensive setups during the
setup phase of a test rather than at collection time.
        :arg ids: list of string ids, or a callable.
            If strings, each corresponds to an entry in argvalues and becomes
            part of the test id.
            If callable, it should take one argument (a single argvalue) and
            return a string or None. If it returns None, the automatically
            generated id for that argument will be used.
If no ids are provided they will be generated automatically from
the argvalues.
:arg scope: if specified it denotes the scope of the parameters.
The scope is used for grouping tests by parameter instances.
It will also override any fixture-function defined scope, allowing
to set a dynamic scope using test context or configuration.
"""
# individual parametrized argument sets can be wrapped in a series
# of markers in which case we unwrap the values and apply the mark
# at Function init
newkeywords = {}
unwrapped_argvalues = []
for i, argval in enumerate(argvalues):
while isinstance(argval, MarkDecorator):
newmark = MarkDecorator(argval.markname,
argval.args[:-1], argval.kwargs)
newmarks = newkeywords.setdefault(i, {})
newmarks[newmark.markname] = newmark
argval = argval.args[-1]
unwrapped_argvalues.append(argval)
argvalues = unwrapped_argvalues
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if len(argnames) == 1:
argvalues = [(val,) for val in argvalues]
if not argvalues:
argvalues = [(_notexists,) * len(argnames)]
if scope is None:
scope = "function"
scopenum = scopes.index(scope)
if not indirect:
#XXX should we also check for the opposite case?
for arg in argnames:
if arg not in self.fixturenames:
raise ValueError("%r uses no fixture %r" %(
self.function, arg))
valtype = indirect and "params" or "funcargs"
idfn = None
if callable(ids):
idfn = ids
ids = None
if ids and len(ids) != len(argvalues):
raise ValueError('%d tests specified with %d ids' %(
len(argvalues), len(ids)))
if not ids:
ids = idmaker(argnames, argvalues, idfn)
newcalls = []
for callspec in self._calls or [CallSpec2(self)]:
for param_index, valset in enumerate(argvalues):
assert len(valset) == len(argnames)
newcallspec = callspec.copy(self)
newcallspec.setmulti(valtype, argnames, valset, ids[param_index],
newkeywords.get(param_index, {}), scopenum,
param_index)
newcalls.append(newcallspec)
self._calls = newcalls
def addcall(self, funcargs=None, id=_notexists, param=_notexists):
""" (deprecated, use parametrize) Add a new call to the underlying
test function during the collection phase of a test run. Note that
        request.addcall() is called during the test collection phase, prior to
        and independently of actual test execution. You should only use addcall()
if you need to specify multiple arguments of a test function.
:arg funcargs: argument keyword dictionary used when invoking
the test function.
:arg id: used for reporting and identification purposes. If you
don't supply an `id` an automatic unique id will be generated.
:arg param: a parameter which will be exposed to a later fixture function
invocation through the ``request.param`` attribute.
"""
assert funcargs is None or isinstance(funcargs, dict)
if funcargs is not None:
for name in funcargs:
if name not in self.fixturenames:
pytest.fail("funcarg %r not used in this function." % name)
else:
funcargs = {}
if id is None:
raise ValueError("id=None not allowed")
if id is _notexists:
id = len(self._calls)
id = str(id)
if id in self._ids:
raise ValueError("duplicate id %r" % id)
self._ids.add(id)
cs = CallSpec2(self)
cs.setall(funcargs, id, param)
self._calls.append(cs)
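def _example_parametrize_via_hook(metafunc):
    # Illustrative sketch, not part of the original module: the body of a
    # conftest.py ``pytest_generate_tests(metafunc)`` hook that drives
    # Metafunc.parametrize as documented above.  The fixture name
    # "db_backend" and its values are assumptions invented for this example.
    if "db_backend" in metafunc.fixturenames:
        metafunc.parametrize("db_backend", ["sqlite", "postgres"],
                             ids=["lite", "pg"], scope="module")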
def _idval(val, argname, idx, idfn):
if idfn:
try:
s = idfn(val)
if s:
return s
except Exception:
pass
if isinstance(val, (float, int, str, bool, NoneType)):
return str(val)
elif isinstance(val, REGEX_TYPE):
return val.pattern
elif enum is not None and isinstance(val, enum.Enum):
return str(val)
elif isclass(val) and hasattr(val, '__name__'):
return val.__name__
return str(argname)+str(idx)
def _idvalset(idx, valset, argnames, idfn):
this_id = [_idval(val, argname, idx, idfn)
for val, argname in zip(valset, argnames)]
return "-".join(this_id)
def idmaker(argnames, argvalues, idfn=None):
ids = [_idvalset(valindex, valset, argnames, idfn)
for valindex, valset in enumerate(argvalues)]
if len(set(ids)) < len(ids):
# user may have provided a bad idfn which means the ids are not unique
ids = [str(i) + testid for i, testid in enumerate(ids)]
return ids
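def _example_idmaker():
    # Illustrative sketch, not part of the original module: idmaker() builds
    # one id string per value set by joining the per-argument ids with "-";
    # the argument names and values below are invented for this example.
    ids = idmaker(("a", "b"), [(1, "x"), (2.0, "y")])
    assert ids == ["1-x", "2.0-y"]
    return ids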
def showfixtures(config):
from _pytest.main import wrap_session
return wrap_session(config, _showfixtures_main)
def _showfixtures_main(config, session):
import _pytest.config
session.perform_collect()
curdir = py.path.local()
tw = _pytest.config.create_terminal_writer(config)
verbose = config.getvalue("verbose")
fm = session._fixturemanager
available = []
for argname, fixturedefs in fm._arg2fixturedefs.items():
assert fixturedefs is not None
if not fixturedefs:
continue
fixturedef = fixturedefs[-1]
loc = getlocation(fixturedef.func, curdir)
available.append((len(fixturedef.baseid),
fixturedef.func.__module__,
curdir.bestrelpath(loc),
fixturedef.argname, fixturedef))
available.sort()
currentmodule = None
for baseid, module, bestrel, argname, fixturedef in available:
if currentmodule != module:
if not module.startswith("_pytest."):
tw.line()
tw.sep("-", "fixtures defined from %s" %(module,))
currentmodule = module
if verbose <= 0 and argname[0] == "_":
continue
if verbose > 0:
funcargspec = "%s -- %s" %(argname, bestrel,)
else:
funcargspec = argname
tw.line(funcargspec, green=True)
loc = getlocation(fixturedef.func, curdir)
doc = fixturedef.func.__doc__ or ""
if doc:
for line in doc.strip().split("\n"):
tw.line(" " + line.strip())
else:
tw.line(" %s: no docstring available" %(loc,),
red=True)
def getlocation(function, curdir):
import inspect
fn = py.path.local(inspect.getfile(function))
lineno = py.builtin._getcode(function).co_firstlineno
if fn.relto(curdir):
fn = fn.relto(curdir)
return "%s:%d" %(fn, lineno+1)
# builtin pytest.raises helper
def raises(expected_exception, *args, **kwargs):
""" assert that a code block/function call raises @expected_exception
and raise a failure exception otherwise.
This helper produces a ``py.code.ExceptionInfo()`` object.
If using Python 2.5 or above, you may use this function as a
context manager::
>>> with raises(ZeroDivisionError):
... 1/0
Or you can specify a callable by passing a to-be-called lambda::
>>> raises(ZeroDivisionError, lambda: 1/0)
<ExceptionInfo ...>
or you can specify an arbitrary callable with arguments::
>>> def f(x): return 1/x
...
>>> raises(ZeroDivisionError, f, 0)
<ExceptionInfo ...>
>>> raises(ZeroDivisionError, f, x=0)
<ExceptionInfo ...>
A third possibility is to use a string to be executed::
>>> raises(ZeroDivisionError, "f(0)")
<ExceptionInfo ...>
Performance note:
-----------------
Similar to caught exception objects in Python, explicitly clearing
local references to returned ``py.code.ExceptionInfo`` objects can
help the Python interpreter speed up its garbage collection.
Clearing those references breaks a reference cycle
(``ExceptionInfo`` --> caught exception --> frame stack raising
the exception --> current frame stack --> local variables -->
``ExceptionInfo``) which makes Python keep all objects referenced
from that cycle (including all local variables in the current
frame) alive until the next cyclic garbage collection run. See the
official Python ``try`` statement documentation for more detailed
information.
"""
__tracebackhide__ = True
if expected_exception is AssertionError:
        # we want to catch an AssertionError
# replace our subclass with the builtin one
# see https://github.com/pytest-dev/pytest/issues/176
from _pytest.assertion.util import BuiltinAssertionError \
as expected_exception
msg = ("exceptions must be old-style classes or"
" derived from BaseException, not %s")
if isinstance(expected_exception, tuple):
for exc in expected_exception:
if not isclass(exc):
raise TypeError(msg % type(exc))
elif not isclass(expected_exception):
raise TypeError(msg % type(expected_exception))
if not args:
return RaisesContext(expected_exception)
elif isinstance(args[0], str):
code, = args
assert isinstance(code, str)
frame = sys._getframe(1)
loc = frame.f_locals.copy()
loc.update(kwargs)
#print "raises frame scope: %r" % frame.f_locals
try:
code = py.code.Source(code).compile()
py.builtin.exec_(code, frame.f_globals, loc)
            # XXX didn't f_globals == f_locals mean something special?
# this is destroyed here ...
except expected_exception:
return py.code.ExceptionInfo()
else:
func = args[0]
try:
func(*args[1:], **kwargs)
except expected_exception:
return py.code.ExceptionInfo()
pytest.fail("DID NOT RAISE")
class RaisesContext(object):
def __init__(self, expected_exception):
self.expected_exception = expected_exception
self.excinfo = None
def __enter__(self):
self.excinfo = object.__new__(py.code.ExceptionInfo)
return self.excinfo
def __exit__(self, *tp):
__tracebackhide__ = True
if tp[0] is None:
pytest.fail("DID NOT RAISE")
if sys.version_info < (2, 7):
# py26: on __exit__() exc_value often does not contain the
# exception value.
# http://bugs.python.org/issue7853
if not isinstance(tp[1], BaseException):
exc_type, value, traceback = tp
tp = exc_type, exc_type(value), traceback
self.excinfo.__init__(tp)
return issubclass(self.excinfo.type, self.expected_exception)
#
# the basic pytest Function item
#
class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
""" a Function Item is responsible for setting up and executing a
Python test function.
"""
_genid = None
def __init__(self, name, parent, args=None, config=None,
callspec=None, callobj=NOTSET, keywords=None, session=None,
fixtureinfo=None):
super(Function, self).__init__(name, parent, config=config,
session=session)
self._args = args
if callobj is not NOTSET:
self.obj = callobj
self.keywords.update(self.obj.__dict__)
if callspec:
self.callspec = callspec
self.keywords.update(callspec.keywords)
if keywords:
self.keywords.update(keywords)
if fixtureinfo is None:
fixtureinfo = self.session._fixturemanager.getfixtureinfo(
self.parent, self.obj, self.cls,
funcargs=not self._isyieldedfunction())
self._fixtureinfo = fixtureinfo
self.fixturenames = fixtureinfo.names_closure
self._initrequest()
def _initrequest(self):
self.funcargs = {}
if self._isyieldedfunction():
assert not hasattr(self, "callspec"), (
"yielded functions (deprecated) cannot have funcargs")
else:
if hasattr(self, "callspec"):
callspec = self.callspec
assert not callspec.funcargs
self._genid = callspec.id
if hasattr(callspec, "param"):
self.param = callspec.param
self._request = FixtureRequest(self)
@property
def function(self):
"underlying python 'function' object"
return getattr(self.obj, 'im_func', self.obj)
def _getobj(self):
name = self.name
i = name.find("[") # parametrization
if i != -1:
name = name[:i]
return getattr(self.parent.obj, name)
@property
def _pyfuncitem(self):
"(compatonly) for code expecting pytest-2.2 style request objects"
return self
def _isyieldedfunction(self):
return getattr(self, "_args", None) is not None
def runtest(self):
""" execute the underlying test function. """
self.ihook.pytest_pyfunc_call(pyfuncitem=self)
def setup(self):
        # check if parametrization happened with an empty list
try:
self.callspec._emptyparamspecified
except AttributeError:
pass
else:
fs, lineno = self._getfslineno()
pytest.skip("got empty parameter set, function %s at %s:%d" %(
self.function.__name__, fs, lineno))
super(Function, self).setup()
fillfixtures(self)
scope2props = dict(session=())
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance", )
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
def decoratescope(func):
scopename = name or func.__name__
def provide(self):
if func.__name__ in scope2props[self.scope]:
return func(self)
raise AttributeError("%s not available in %s-scoped context" % (
scopename, self.scope))
return property(provide, None, None, func.__doc__)
return decoratescope
class FixtureRequest(FuncargnamesCompatAttr):
""" A request for a fixture from a test or fixture function.
A request object gives access to the requesting test context
and has an optional ``param`` attribute in case
the fixture is parametrized indirectly.
"""
def __init__(self, pyfuncitem):
self._pyfuncitem = pyfuncitem
#: fixture for which this request is being performed
self.fixturename = None
#: Scope string, one of "function", "cls", "module", "session"
self.scope = "function"
self._funcargs = {}
self._fixturedefs = {}
fixtureinfo = pyfuncitem._fixtureinfo
self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
self._arg2index = {}
self.fixturenames = fixtureinfo.names_closure
self._fixturemanager = pyfuncitem.session._fixturemanager
@property
def node(self):
""" underlying collection node (depends on current request scope)"""
return self._getscopeitem(self.scope)
def _getnextfixturedef(self, argname):
fixturedefs = self._arg2fixturedefs.get(argname, None)
if fixturedefs is None:
            # we arrive here because of a dynamic call to
            # getfuncargvalue(argname) which was naturally
            # not known at parsing/collection time
fixturedefs = self._fixturemanager.getfixturedefs(
argname, self._pyfuncitem.parent.nodeid)
self._arg2fixturedefs[argname] = fixturedefs
# fixturedefs list is immutable so we maintain a decreasing index
index = self._arg2index.get(argname, 0) - 1
if fixturedefs is None or (-index > len(fixturedefs)):
raise FixtureLookupError(argname, self)
self._arg2index[argname] = index
return fixturedefs[index]
@property
def config(self):
""" the pytest config object associated with this request. """
return self._pyfuncitem.config
@scopeproperty()
def function(self):
""" test function object if the request has a per-function scope. """
return self._pyfuncitem.obj
@scopeproperty("class")
def cls(self):
""" class (can be None) where the test function was collected. """
clscol = self._pyfuncitem.getparent(pytest.Class)
if clscol:
return clscol.obj
@property
def instance(self):
""" instance (can be None) on which test function was collected. """
# unittest support hack, see _pytest.unittest.TestCaseFunction
try:
return self._pyfuncitem._testcase
except AttributeError:
function = getattr(self, "function", None)
if function is not None:
return py.builtin._getimself(function)
@scopeproperty()
def module(self):
""" python module object where the test function was collected. """
return self._pyfuncitem.getparent(pytest.Module).obj
@scopeproperty()
def fspath(self):
""" the file system path of the test module which collected this test. """
return self._pyfuncitem.fspath
@property
def keywords(self):
""" keywords/markers dictionary for the underlying node. """
return self.node.keywords
@property
def session(self):
""" pytest session object. """
return self._pyfuncitem.session
def addfinalizer(self, finalizer):
""" add finalizer/teardown function to be called after the
last test within the requesting test context finished
execution. """
# XXX usually this method is shadowed by fixturedef specific ones
self._addfinalizer(finalizer, scope=self.scope)
def _addfinalizer(self, finalizer, scope):
colitem = self._getscopeitem(scope)
self._pyfuncitem.session._setupstate.addfinalizer(
finalizer=finalizer, colitem=colitem)
def applymarker(self, marker):
""" Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker
on all function invocations.
:arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
created by a call to ``pytest.mark.NAME(...)``.
"""
try:
self.node.keywords[marker.markname] = marker
except AttributeError:
raise ValueError(marker)
def raiseerror(self, msg):
""" raise a FixtureLookupError with the given message. """
raise self._fixturemanager.FixtureLookupError(None, self, msg)
def _fillfixtures(self):
item = self._pyfuncitem
fixturenames = getattr(item, "fixturenames", self.fixturenames)
for argname in fixturenames:
if argname not in item.funcargs:
item.funcargs[argname] = self.getfuncargvalue(argname)
def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
""" (deprecated) Return a testing resource managed by ``setup`` &
``teardown`` calls. ``scope`` and ``extrakey`` determine when the
``teardown`` function will be called so that subsequent calls to
``setup`` would recreate the resource. With pytest-2.3 you often
do not need ``cached_setup()`` as you can directly declare a scope
on a fixture function and register a finalizer through
``request.addfinalizer()``.
:arg teardown: function receiving a previously setup resource.
:arg setup: a no-argument function creating a resource.
:arg scope: a string value out of ``function``, ``class``, ``module``
or ``session`` indicating the caching lifecycle of the resource.
:arg extrakey: added to internal caching key of (funcargname, scope).
"""
if not hasattr(self.config, '_setupcache'):
self.config._setupcache = {} # XXX weakref?
cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
cache = self.config._setupcache
try:
val = cache[cachekey]
except KeyError:
self._check_scope(self.fixturename, self.scope, scope)
val = setup()
cache[cachekey] = val
if teardown is not None:
def finalizer():
del cache[cachekey]
teardown(val)
self._addfinalizer(finalizer, scope=scope)
return val
def getfuncargvalue(self, argname):
""" Dynamically retrieve a named fixture function argument.
As of pytest-2.3, it is easier and usually better to access other
        fixture values by stating them as input arguments in the fixture
        function. If you can only decide whether to use another fixture at test
setup time, you may use this function to retrieve it inside a fixture
function body.
"""
return self._get_active_fixturedef(argname).cached_result[0]
def _get_active_fixturedef(self, argname):
try:
return self._fixturedefs[argname]
except KeyError:
try:
fixturedef = self._getnextfixturedef(argname)
except FixtureLookupError:
if argname == "request":
class PseudoFixtureDef:
cached_result = (self, [0], None)
scope = "function"
return PseudoFixtureDef
raise
# remove indent to prevent the python3 exception
# from leaking into the call
result = self._getfuncargvalue(fixturedef)
self._funcargs[argname] = result
self._fixturedefs[argname] = fixturedef
return fixturedef
def _get_fixturestack(self):
current = self
l = []
while 1:
fixturedef = getattr(current, "_fixturedef", None)
if fixturedef is None:
l.reverse()
return l
l.append(fixturedef)
current = current._parent_request
def _getfuncargvalue(self, fixturedef):
# prepare a subrequest object before calling fixture function
# (latter managed by fixturedef)
argname = fixturedef.argname
funcitem = self._pyfuncitem
scope = fixturedef.scope
try:
param = funcitem.callspec.getparam(argname)
except (AttributeError, ValueError):
param = NOTSET
param_index = 0
else:
# indices might not be set if old-style metafunc.addcall() was used
param_index = funcitem.callspec.indices.get(argname, 0)
# if a parametrize invocation set a scope it will override
# the static scope defined with the fixture function
paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
if paramscopenum is not None:
scope = scopes[paramscopenum]
subrequest = SubRequest(self, scope, param, param_index, fixturedef)
# check if a higher-level scoped fixture accesses a lower level one
subrequest._check_scope(argname, self.scope, scope)
        # clear sys.exc_info before invoking the fixture (python bug?)
        # if it's not explicitly cleared it will leak into the call
exc_clear()
try:
# call the fixture function
val = fixturedef.execute(request=subrequest)
finally:
# if fixture function failed it might have registered finalizers
self.session._setupstate.addfinalizer(fixturedef.finish,
subrequest.node)
return val
def _check_scope(self, argname, invoking_scope, requested_scope):
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
# try to report something helpful
lines = self._factorytraceback()
pytest.fail("ScopeMismatch: You tried to access the %r scoped "
"fixture %r with a %r scoped request object, "
"involved factories\n%s" %(
(requested_scope, argname, invoking_scope, "\n".join(lines))),
pytrace=False)
def _factorytraceback(self):
lines = []
for fixturedef in self._get_fixturestack():
factory = fixturedef.func
fs, lineno = getfslineno(factory)
p = self._pyfuncitem.session.fspath.bestrelpath(fs)
args = inspect.formatargspec(*inspect.getargspec(factory))
lines.append("%s:%d: def %s%s" %(
p, lineno, factory.__name__, args))
return lines
def _getscopeitem(self, scope):
if scope == "function":
# this might also be a non-function Item despite its attribute name
return self._pyfuncitem
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope == "class":
# fallback to function item itself
node = self._pyfuncitem
assert node
return node
def __repr__(self):
return "<FixtureRequest for %r>" %(self.node)
class SubRequest(FixtureRequest):
""" a sub request for handling getting a fixture from a
test function/fixture. """
def __init__(self, request, scope, param, param_index, fixturedef):
self._parent_request = request
self.fixturename = fixturedef.argname
if param is not NOTSET:
self.param = param
self.param_index = param_index
self.scope = scope
self._fixturedef = fixturedef
self.addfinalizer = fixturedef.addfinalizer
self._pyfuncitem = request._pyfuncitem
self._funcargs = request._funcargs
self._fixturedefs = request._fixturedefs
self._arg2fixturedefs = request._arg2fixturedefs
self._arg2index = request._arg2index
self.fixturenames = request.fixturenames
self._fixturemanager = request._fixturemanager
def __repr__(self):
return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
class ScopeMismatchError(Exception):
""" A fixture function tries to use a different fixture function which
which has a lower scope (e.g. a Session one calls a function one)
"""
scopes = "session module class function".split()
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
return scopes.index(newscope) > scopes.index(currentscope)
class FixtureLookupError(LookupError):
""" could not return a requested Fixture (missing or invalid). """
def __init__(self, argname, request, msg=None):
self.argname = argname
self.request = request
self.fixturestack = request._get_fixturestack()
self.msg = msg
def formatrepr(self):
tblines = []
addline = tblines.append
stack = [self.request._pyfuncitem.obj]
stack.extend(map(lambda x: x.func, self.fixturestack))
msg = self.msg
if msg is not None:
            stack = stack[:-1]  # the last fixture raised an error; present
                                # it at the requesting side
for function in stack:
fspath, lineno = getfslineno(function)
try:
lines, _ = inspect.getsourcelines(get_real_func(function))
except IOError:
error_msg = "file %s, line %s: source code not available"
addline(error_msg % (fspath, lineno+1))
else:
addline("file %s, line %s" % (fspath, lineno+1))
for i, line in enumerate(lines):
line = line.rstrip()
addline(" " + line)
if line.lstrip().startswith('def'):
break
if msg is None:
fm = self.request._fixturemanager
available = []
for name, fixturedef in fm._arg2fixturedefs.items():
parentid = self.request._pyfuncitem.parent.nodeid
faclist = list(fm._matchfactories(fixturedef, parentid))
if faclist:
available.append(name)
msg = "fixture %r not found" % (self.argname,)
msg += "\n available fixtures: %s" %(", ".join(available),)
msg += "\n use 'py.test --fixtures [testpath]' for help on them."
return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
def __init__(self, filename, firstlineno, tblines, errorstring, argname):
self.tblines = tblines
self.errorstring = errorstring
self.filename = filename
self.firstlineno = firstlineno
self.argname = argname
def toterminal(self, tw):
#tw.line("FixtureLookupError: %s" %(self.argname), red=True)
for tbline in self.tblines:
tw.line(tbline.rstrip())
for line in self.errorstring.split("\n"):
tw.line(" " + line.strip(), red=True)
tw.line()
tw.line("%s:%d" % (self.filename, self.firstlineno+1))
class FixtureManager:
"""
    pytest fixture definitions and information are stored and managed
    by this class.
During collection fm.parsefactories() is called multiple times to parse
fixture function definitions into FixtureDef objects and internal
data structures.
During collection of test functions, metafunc-mechanics instantiate
a FuncFixtureInfo object which is cached per node/func-name.
This FuncFixtureInfo object is later retrieved by Function nodes
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
relevant for a particular function. An initial list of fixtures is
assembled like this:
- ini-defined usefixtures
- autouse-marked fixtures along the collection chain up from the function
- usefixtures markers at module/class/function level
- test function funcargs
Subsequently the funcfixtureinfo.fixturenames attribute is computed
    as the closure of the fixtures needed to set up the initial fixtures,
    i.e. fixtures needed by fixture functions themselves are appended
    to the fixturenames list.
    During the test-setup phase all fixturenames are instantiated, retrieved
    by a lookup of their FuncFixtureInfo.
"""
_argprefix = "pytest_funcarg__"
FixtureLookupError = FixtureLookupError
FixtureLookupErrorRepr = FixtureLookupErrorRepr
def __init__(self, session):
self.session = session
self.config = session.config
self._arg2fixturedefs = {}
self._holderobjseen = set()
self._arg2finish = {}
self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
session.config.pluginmanager.register(self, "funcmanage")
def getfixtureinfo(self, node, func, cls, funcargs=True):
if funcargs and not hasattr(node, "nofuncargs"):
if cls is not None:
startindex = 1
else:
startindex = None
argnames = getfuncargnames(func, startindex)
else:
argnames = ()
usefixtures = getattr(func, "usefixtures", None)
initialnames = argnames
if usefixtures is not None:
initialnames = usefixtures.args + initialnames
fm = node.session._fixturemanager
names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
node)
return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
def pytest_plugin_registered(self, plugin):
nodeid = None
try:
p = py.path.local(plugin.__file__)
except AttributeError:
pass
else:
# construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
# by their test id)
if p.basename.startswith("conftest.py"):
nodeid = p.dirpath().relto(self.config.rootdir)
if p.sep != "/":
nodeid = nodeid.replace(p.sep, "/")
self.parsefactories(plugin, nodeid)
def _getautousenames(self, nodeid):
""" return a tuple of fixture names to be used. """
autousenames = []
for baseid, basenames in self._nodeid_and_autousenames:
if nodeid.startswith(baseid):
if baseid:
i = len(baseid)
nextchar = nodeid[i:i+1]
if nextchar and nextchar not in ":/":
continue
autousenames.extend(basenames)
# make sure autousenames are sorted by scope, scopenum 0 is session
autousenames.sort(
key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
return autousenames
def getfixtureclosure(self, fixturenames, parentnode):
        # collect the closure of all fixtures, starting with the given
        # fixturenames as the initial set.  As we have to visit all
        # factory definitions anyway, we also return an arg2fixturedefs
# mapping so that the caller can reuse it and does not have
# to re-discover fixturedefs again for each fixturename
# (discovering matching fixtures for a given name/node is expensive)
parentid = parentnode.nodeid
fixturenames_closure = self._getautousenames(parentid)
def merge(otherlist):
for arg in otherlist:
if arg not in fixturenames_closure:
fixturenames_closure.append(arg)
merge(fixturenames)
arg2fixturedefs = {}
lastlen = -1
while lastlen != len(fixturenames_closure):
lastlen = len(fixturenames_closure)
for argname in fixturenames_closure:
if argname in arg2fixturedefs:
continue
fixturedefs = self.getfixturedefs(argname, parentid)
if fixturedefs:
arg2fixturedefs[argname] = fixturedefs
merge(fixturedefs[-1].argnames)
return fixturenames_closure, arg2fixturedefs
def pytest_generate_tests(self, metafunc):
for argname in metafunc.fixturenames:
faclist = metafunc._arg2fixturedefs.get(argname)
if faclist:
fixturedef = faclist[-1]
if fixturedef.params is not None:
func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
# skip directly parametrized arguments
if argname not in func_params:
metafunc.parametrize(argname, fixturedef.params,
indirect=True, scope=fixturedef.scope,
ids=fixturedef.ids)
else:
continue # will raise FixtureLookupError at setup time
def pytest_collection_modifyitems(self, items):
# separate parametrized setups
items[:] = reorder_items(items)
def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
if nodeid is not NOTSET:
holderobj = node_or_obj
else:
holderobj = node_or_obj.obj
nodeid = node_or_obj.nodeid
if holderobj in self._holderobjseen:
return
self._holderobjseen.add(holderobj)
autousenames = []
for name in dir(holderobj):
obj = getattr(holderobj, name, None)
if not callable(obj):
continue
# fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
# or are "@pytest.fixture" marked
marker = getfixturemarker(obj)
if marker is None:
if not name.startswith(self._argprefix):
continue
marker = defaultfuncargprefixmarker
name = name[len(self._argprefix):]
elif not isinstance(marker, FixtureFunctionMarker):
# magic globals with __getattr__ might have got us a wrong
# fixture attribute
continue
else:
assert not name.startswith(self._argprefix)
fixturedef = FixtureDef(self, nodeid, name, obj,
marker.scope, marker.params,
yieldctx=marker.yieldctx,
unittest=unittest, ids=marker.ids)
faclist = self._arg2fixturedefs.setdefault(name, [])
if fixturedef.has_location:
faclist.append(fixturedef)
else:
# fixturedefs with no location are at the front
# so this inserts the current fixturedef after the
# existing fixturedefs from external plugins but
# before the fixturedefs provided in conftests.
i = len([f for f in faclist if not f.has_location])
faclist.insert(i, fixturedef)
if marker.autouse:
autousenames.append(name)
if autousenames:
self._nodeid_and_autousenames.append((nodeid or '', autousenames))
def getfixturedefs(self, argname, nodeid):
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
else:
return tuple(self._matchfactories(fixturedefs, nodeid))
def _matchfactories(self, fixturedefs, nodeid):
for fixturedef in fixturedefs:
if nodeid.startswith(fixturedef.baseid):
yield fixturedef
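def _example_fixture_definition_styles():
    # Illustrative sketch, not part of the original module: the two fixture
    # definition styles that FixtureManager.parsefactories() recognizes.  In
    # real use these would live at module level in a conftest.py or test
    # module; the names and return values are invented for this example.
    def pytest_funcarg__payload(request):       # pre-2.3 prefix style
        return {"payload": 1}
    @pytest.fixture(scope="module")             # @pytest.fixture marker style
    def payload():
        return {"payload": 1}
    return pytest_funcarg__payload, payload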
def fail_fixturefunc(fixturefunc, msg):
fs, lineno = getfslineno(fixturefunc)
location = "%s:%s" % (fs, lineno+1)
source = py.code.Source(fixturefunc)
pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs, yieldctx):
if yieldctx:
if not is_generator(fixturefunc):
fail_fixturefunc(fixturefunc,
msg="yield_fixture requires yield statement in function")
iter = fixturefunc(**kwargs)
next = getattr(iter, "__next__", None)
if next is None:
next = getattr(iter, "next")
res = next()
def teardown():
try:
next()
except StopIteration:
pass
else:
fail_fixturefunc(fixturefunc,
"yield_fixture function has more than one 'yield'")
request.addfinalizer(teardown)
else:
if is_generator(fixturefunc):
fail_fixturefunc(fixturefunc,
msg="pytest.fixture functions cannot use ``yield``. "
"Instead write and return an inner function/generator "
"and let the consumer call and iterate over it.")
res = fixturefunc(**kwargs)
return res
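def _example_yield_fixture():
    # Illustrative sketch, not part of the original module: the yield-style
    # fixture that call_fixture_func() drives -- setup runs before the single
    # ``yield``, the yielded value goes to the requesting test, and everything
    # after the ``yield`` runs as teardown.  The resource (a list) is invented
    # for this example.
    @pytest.yield_fixture
    def resource():
        res = []          # setup
        yield res         # handed to the requesting test
        del res[:]        # teardown, run via the registered finalizer
    return resource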
class FixtureDef:
""" A container for a factory definition. """
def __init__(self, fixturemanager, baseid, argname, func, scope, params,
yieldctx, unittest=False, ids=None):
self._fixturemanager = fixturemanager
self.baseid = baseid or ''
self.has_location = baseid is not None
self.func = func
self.argname = argname
self.scope = scope
self.scopenum = scopes.index(scope or "function")
self.params = params
startindex = unittest and 1 or None
self.argnames = getfuncargnames(func, startindex=startindex)
self.yieldctx = yieldctx
self.unittest = unittest
self.ids = ids
self._finalizer = []
def addfinalizer(self, finalizer):
self._finalizer.append(finalizer)
def finish(self):
try:
while self._finalizer:
func = self._finalizer.pop()
func()
finally:
# even if finalization fails, we invalidate
# the cached fixture value
if hasattr(self, "cached_result"):
del self.cached_result
def execute(self, request):
# get required arguments and register our own finish()
# with their finalization
kwargs = {}
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
result, arg_cache_key, exc = fixturedef.cached_result
request._check_scope(argname, request.scope, fixturedef.scope)
kwargs[argname] = result
if argname != "request":
fixturedef.addfinalizer(self.finish)
my_cache_key = request.param_index
cached_result = getattr(self, "cached_result", None)
if cached_result is not None:
result, cache_key, err = cached_result
if my_cache_key == cache_key:
if err is not None:
py.builtin._reraise(*err)
else:
return result
# we have a previous but differently parametrized fixture instance
# so we need to tear it down before creating a new one
self.finish()
assert not hasattr(self, "cached_result")
fixturefunc = self.func
if self.unittest:
if request.instance is not None:
# bind the unbound method to the TestCase instance
fixturefunc = self.func.__get__(request.instance)
else:
# the fixture function needs to be bound to the actual
# request.instance so that code working with "self" behaves
# as expected.
if request.instance is not None:
fixturefunc = getimfunc(self.func)
if fixturefunc != self.func:
fixturefunc = fixturefunc.__get__(request.instance)
try:
result = call_fixture_func(fixturefunc, request, kwargs,
self.yieldctx)
except Exception:
self.cached_result = (None, my_cache_key, sys.exc_info())
raise
self.cached_result = (result, my_cache_key, None)
return result
def __repr__(self):
return ("<FixtureDef name=%r scope=%r baseid=%r >" %
(self.argname, self.scope, self.baseid))
def num_mock_patch_args(function):
""" return number of arguments used up by mock arguments (if any) """
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
if mock is not None:
return len([p for p in patchings
if not p.attribute_name and p.new is mock.DEFAULT])
return len(patchings)
def getfuncargnames(function, startindex=None):
# XXX merge with main.py's varnames
#assert not inspect.isclass(function)
realfunction = function
while hasattr(realfunction, "__wrapped__"):
realfunction = realfunction.__wrapped__
if startindex is None:
startindex = inspect.ismethod(function) and 1 or 0
if realfunction != function:
startindex += num_mock_patch_args(function)
function = realfunction
if isinstance(function, functools.partial):
argnames = inspect.getargs(py.code.getrawcode(function.func))[0]
partial = function
argnames = argnames[len(partial.args):]
if partial.keywords:
for kw in partial.keywords:
argnames.remove(kw)
else:
argnames = inspect.getargs(py.code.getrawcode(function))[0]
defaults = getattr(function, 'func_defaults',
getattr(function, '__defaults__', None)) or ()
numdefaults = len(defaults)
if numdefaults:
return tuple(argnames[startindex:-numdefaults])
return tuple(argnames[startindex:])
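def _example_getfuncargnames():
    # Illustrative sketch, not part of the original module: getfuncargnames()
    # returns the fixture argument names a function requests, dropping
    # trailing arguments that have defaults; the function below is invented
    # for this example.
    def test_it(tmpdir, monkeypatch, flag=True):
        pass
    assert getfuncargnames(test_it) == ("tmpdir", "monkeypatch")
    return getfuncargnames(test_it)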
# algorithm for sorting on a per-parametrized resource setup basis
# it is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes such as to minimize number of "high scope"
# setups and teardowns
def reorder_items(items):
argkeys_cache = {}
for scopenum in range(0, scopenum_function):
argkeys_cache[scopenum] = d = {}
for item in items:
keys = set(get_parametrized_fixture_keys(item, scopenum))
if keys:
d[item] = keys
return reorder_items_atscope(items, set(), argkeys_cache, 0)
def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
if scopenum >= scopenum_function or len(items) < 3:
return items
items_done = []
while 1:
items_before, items_same, items_other, newignore = \
slice_items(items, ignore, argkeys_cache[scopenum])
items_before = reorder_items_atscope(
items_before, ignore, argkeys_cache,scopenum+1)
if items_same is None:
# nothing to reorder in this scope
assert items_other is None
return items_done + items_before
items_done.extend(items_before)
items = items_same + items_other
ignore = newignore
def slice_items(items, ignore, scoped_argkeys_cache):
# we pick the first item which uses a fixture instance in the
    # requested scope and which we haven't seen yet.  We slice the input
    # items list into items_before, items_same and
    # items_other
if scoped_argkeys_cache: # do we need to do work at all?
it = iter(items)
# first find a slicing key
for i, item in enumerate(it):
argkeys = scoped_argkeys_cache.get(item)
if argkeys is not None:
argkeys = argkeys.difference(ignore)
if argkeys: # found a slicing key
slicing_argkey = argkeys.pop()
items_before = items[:i]
items_same = [item]
items_other = []
# now slice the remainder of the list
for item in it:
argkeys = scoped_argkeys_cache.get(item)
if argkeys and slicing_argkey in argkeys and \
slicing_argkey not in ignore:
items_same.append(item)
else:
items_other.append(item)
newignore = ignore.copy()
newignore.add(slicing_argkey)
return (items_before, items_same, items_other, newignore)
return items, None, None, None
def get_parametrized_fixture_keys(item, scopenum):
""" return list of keys for all parametrized arguments which match
the specified scope. """
assert scopenum < scopenum_function # function
try:
cs = item.callspec
except AttributeError:
pass
else:
        # cs.indices.items() yields argnames in arbitrary order, but
        # different functions (items) can change the order of
        # arguments anyway, so this probably does not matter much
for argname, param_index in cs.indices.items():
if cs._arg2scopenum[argname] != scopenum:
continue
if scopenum == 0: # session
key = (argname, param_index)
elif scopenum == 1: # module
key = (argname, param_index, item.fspath)
elif scopenum == 2: # class
key = (argname, param_index, item.fspath, item.cls)
yield key
def xunitsetup(obj, name):
meth = getattr(obj, name, None)
if getfixturemarker(meth) is None:
return meth
def getfixturemarker(obj):
""" return fixturemarker or None if it doesn't exist or raised
exceptions."""
try:
return getattr(obj, "_pytestfixturefunction", None)
except KeyboardInterrupt:
raise
except Exception:
# some objects raise errors like request (from flask import request)
# we don't expect them to be fixture functions
return None
scopename2class = {
'class': Class,
'module': Module,
'function': pytest.Item,
}
def get_scope_node(node, scope):
cls = scopename2class.get(scope)
if cls is None:
if scope == "session":
return node.session
raise ValueError("unknown scope")
return node.getparent(cls)
|
pelme/pytest
|
_pytest/python.py
|
Python
|
mit
| 83,887
|
[
"VisIt"
] |
2800d4077c880e8f4d456c1d2532e106f010be26f612ecbf7f603977969b1974
|
import cairocffi as cairo
import numpy as np
from subprocess import call
import os
import math
import sys
sys.path.append('../')
import deepnuc.dubiotools as dbt
#import pygtk
#pygtk.require('2.0')
#import gtk
#import gtk.gdk as gdk
class LogoSheet:
'''
A sheet object for drawing multiple SeqLogos.
Draws a set of Seqlogos onto a single sheet.
Base class
'''
def __init__(self,logo_mats,input_type ='pfm',label_list=None):
"""
Args:
logo_mats: list of matrices holding logo data
            input_type:
                'ppm' for a 4xn position probability matrix
                'pfm' for a 4xn position frequency matrix
                'heights' for 4xn specified heights
            label_list: optional list of string labels, one per logo
"""
self.base_init(logo_mats,input_type,label_list)
#Pixel dimensions of sheet
self.width = self.logo_list[0].width + 10
self.height = self.logo_list[0].height*self.num_logos
self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,self.width,self.height)
self.context = cairo.Context(self.surface)
#Draw
if self.input_type == 'heights':
self.draw_heights()
elif self.input_type == 'pfm' or self.input_type == 'ppm':
self.draw_pwms()
def base_init(self,logo_mats,input_type,label_list):
'''
I split this method off from __init__ to share
portions of initialization with subclasses - Larry
'''
self.logo_mats = logo_mats
#Labels: Place a user specified string label attached to each sequence
self.label_list = label_list
self.input_type = input_type
if self.label_list != None:
self.do_draw_label = True #Draws a numbered label #MODIFY FROM HERE
else:
self.do_draw_label = False
self.num_logos = len(self.logo_mats)
#Init logos depending on mode
if self.input_type == 'pfm':
self.logo_list = [SeqLogo(pfm) for pfm in self.logo_mats]
elif self.input_type == 'ppm':
#Assuming sample size of 10,000
self.logo_list = [SeqLogo(pfm*10000) for pfm in self.logo_mats]
elif self.input_type == 'heights':
self.logo_list = [HeightLogo(height_mat) for height_mat in self.logo_mats]
#elif self.input_type == 'nucs':
# #Note: this will only draw a single logo
# nuc_pfm = PwmTools.pfm_from_nucs(self.logo_data)
# self.draw_pwm(nuc_pfm)
else:
print "Error, inappropriate option passed."
print "Type either \'pfm\', \'ppm\', or \'heights\'."
#TODO: some logos/pfms might have different widths
#make the program check for largest width
#Initialize dynamic variables.
def draw_pwms(self):
for i,logo in enumerate(self.logo_list):
# print "Drawing index ", i, " at ", logo_list[i].height*i
#self.draw_index_label(self.width-50,logo_list[i].height*i+20,i)
logo.draw_pwm()
#set x,y position of logo
self.context.set_source_surface(logo.surface,0,logo.height*i)
if self.do_draw_label and self.label_list != None:
logo.draw_label(self.label_list[i]+' '+str(i))
self.context.paint()
def draw_heights(self):
for i,logo in enumerate(self.logo_list):
logo.draw()
self.context.set_source_surface(logo.surface,0,logo.height*i)
if self.do_draw_label and self.label_list!=None:
logo.draw_label(self.label_list[i]+' '+str(i))
self.context.paint()
def write_to_png(self,fname):
self.surface.write_to_png(fname)
def write_to_svg(self,fname):
#svg_surf = cairo.SVGSurface(fname,self.width,self.height)
svg_surf = cairo.SVGSurface(fname,self.width,self.height)
svg_context = cairo.Context(svg_surf)
svg_context.set_source_surface(self.surface,0,0)
svg_context.paint()
#svg_surf.finish()
#svg_context.show_page()
def write_to_pdf(self,fname):
pdf_surf = cairo.PDFSurface(fname,self.width,self.height)
pdf_context = cairo.Context(pdf_surf)
pdf_context.set_source_surface(self.surface,0,0)
pdf_context.paint()
#pdf_surf.finish()
#def show(self,fname):
# pass
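def _example_logo_sheet():
    #Illustrative sketch, not part of the original module: build a LogoSheet
    #from two randomly generated 4xN position frequency matrices and write it
    #to disk.  The matrix sizes and the output file name are assumptions made
    #for this example; SeqLogo (defined elsewhere in this module) does the
    #actual drawing.
    pfms = [np.random.randint(1, 100, size=(4, 12)) for _ in range(2)]
    sheet = LogoSheet(pfms, input_type='pfm', label_list=['motif_a', 'motif_b'])
    sheet.write_to_png('example_logo_sheet.png')
    return sheet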
class SimpleHeightLogoSheet:
def __init__(self,heights,nuc_seqs):
        if len(heights) != len(nuc_seqs):
            print "Error! Number of heights must match number of nuc_seqs!"
return None
self.heights = heights
self.nuc_seqs = nuc_seqs
##Specific to SimpleHeightLogoSheet
self.nuc_height = 20
#Number of pixels between logo and nuc sequence
self.nuc_spacer = 10
#Number of pixels between drawn logos
self.bottom_spacer = 30
        #Build the individual logos first so the sheet can be sized from them
        self.simple_heights_list = [SimpleHeightLogo(height,seq) for \
                                    height,seq in zip(self.heights,self.nuc_seqs) ]
        self.num_logos = len(self.simple_heights_list)
        #Pixel dimensions of sheet
        self.width = self.simple_heights_list[0].width + 10
        self.height = ((self.simple_heights_list[0].height+self.bottom_spacer)*self.num_logos)
        self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,self.width,self.height)
        self.context = cairo.Context(self.surface)
self.draw_simple_heights()
def draw_simple_heights(self):
#SimpleHeightLogoSheet
for i,simp in enumerate(self.simple_heights_list):
#Draw item
simp.draw()
            #set x,y position of logo (y increases from top to bottom)
total_height = (simp.height+
self.nuc_spacer+
self.nuc_height+
self.bottom_spacer)
self.context.set_source_surface(simp.surface,0,total_height*i)
#if self.do_draw_label:
# logo.draw_label(''+' '+str(i))
self.context.paint()
class LogoNucSheet(LogoSheet):
"""
Displays a LogoSheet with a user specified nucleotide sequence below
    each logo. This class is useful for displaying a relevance map
    together with the sequence being evaluated.
Args:
logo_mats: list of matrices holding logo data
nuc_onehots: list of one-hot matrices with nucleotide sequence (4xn onehot format)
input_type:
'ppm' for a 4xn position probability matrix
'pfm' for a 4xn position frequency matrix
'heights' for 4xn specified heights
"""
def __init__(self,logo_mats,nuc_onehots,input_type='pfm',label_list = None):
self.base_init(logo_mats,input_type,label_list)
self.nuc_onehots= nuc_onehots
##Specific to LogoNucSheet
self.nuc_height = 20
#Number of pixels between logo and nuc sequence
self.nuc_spacer = 10
#Number of pixels between drawn logos
self.bottom_spacer = 30
#Pixel dimensions of sheet
self.width = self.logo_list[0].width + 10
self.height = ((self.logo_list[0].height+
self.nuc_spacer+
self.nuc_height+
self.bottom_spacer)*self.num_logos)
self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,self.width,self.height)
self.context = cairo.Context(self.surface)
if self.input_type == 'heights':
self.draw_heights()
elif self.input_type == 'pfm' or self.input_type == 'ppm':
self.draw_pwms()
def draw_pwms(self):
#LogoNucsheet
for i,logo in enumerate(self.logo_list):
#Draw logo
logo.draw_pwm()
            #set x,y position of logo (y increases from top to bottom)
total_height = (logo.height+
self.nuc_spacer+
self.nuc_height+
self.bottom_spacer)
self.context.set_source_surface(logo.surface,0,total_height*i)
if self.do_draw_label:
logo.draw_label(''+' '+str(i))
self.context.paint()
seq = self.nuc_onehots[i]
#Draw nucleotide sequence
seq_img = SeqImg(seq)
seq_img.draw()
seq_ypos = total_height*i +self.nuc_spacer
#Set x,y pos of nucleotide sequence
self.context.set_source_surface(seq_img.surface,0,seq_ypos)
self.context.paint()
def draw_heights(self):
#LogoNucsheet
for i,logo in enumerate(self.logo_list):
#Draw logo
logo.draw()
            #set x,y position of logo (y increases from top to bottom)
total_height = (logo.height+
self.nuc_spacer+
self.nuc_height+
self.bottom_spacer)
self.context.set_source_surface(logo.surface,0,total_height*i)
if self.do_draw_label:
logo.draw_label(''+' '+str(i))
self.context.paint()
seq = self.nuc_onehots[i]
#Draw nucleotide sequence
seq_img = SeqImg(seq)
seq_img.draw()
seq_ypos = total_height*i + logo.height+ self.nuc_spacer
#Set x,y pos of nucleotide sequence
self.context.set_source_surface(seq_img.surface,0,seq_ypos)
self.context.paint()
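def _example_logo_nuc_sheet():
    #Illustrative sketch, not part of the original module: pair a 4xN heights
    #matrix (e.g. a relevance map) with the one-hot encoding of the sequence
    #it was computed for.  The values and sequence length are invented for
    #this example; HeightLogo, SeqImg and deepnuc.dubiotools (imported above
    #as dbt) are relied on for the rendering.
    heights = np.random.randn(4, 8) * 20
    onehot = np.eye(4)[:, np.random.randint(0, 4, size=8)]  #random 4x8 one-hot
    sheet = LogoNucSheet([heights], [onehot], input_type='heights')
    sheet.write_to_png('example_logo_nuc_sheet.png')
    return sheet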
class BaseLogo(object):
#These are cairo glyph indices for specific letters
BIT_SCALE = 50 #Where to draw the x-axis tick
A_IND = 36
T_IND = 55
G_IND = 42
C_IND = 38
glyph_dict = { 'A':A_IND,'T':T_IND,'G':G_IND,'C':C_IND}
NUC_FREQ = 0.25
def draw_label(self,label):
label_x=self.x+self.width-80
label_y=self.y+20
self.context.save()
self.context.set_source_rgb(0,0,0)
self.context.select_font_face("Arial",cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_BOLD)
self.context.move_to(label_x,label_y)
#self.context.rectangle(self.x,self.y,30,self.height)
#self.context.fill()
#self.context.rectangle(self.x,self.y,5,5)
#self.context.fill()
self.context.set_font_size(18)
self.context.show_text(label)
self.context.restore()
#save scale show_glyphs restore
def draw_axes(self):
self.context.save()
#Draw x-axis line
self.context.set_line_width(0.5)
self.context.move_to(self.x_offset,self.x_axis_line)
self.context.line_to(self.width,self.x_axis_line)
self.context.stroke()
#Draw y axis line
self.context.move_to(self.x_offset,self.x_axis_line)
self.context.line_to(self.x_offset,0)
self.context.stroke()
self.context.restore()
num_yticks = int(np.floor(self.height/self.ytick))
#Draw positive ticks
for i in range(num_yticks+1):
self.context.set_line_width(0.5)
self.context.move_to(self.x_offset,self.x_axis_line-i*self.ytick)
self.context.line_to(self.x_offset+2,self.x_axis_line-i*self.ytick)
self.context.stroke()
def write_to_png(self,fname):
self.surface.write_to_png(fname)
def write_to_svg(self,fname):
#svg_surf = cairo.SVGSurface(fname,self.width,self.height)
svg_surf = cairo.SVGSurface(fname,self.width,self.height)
svg_context = cairo.Context(svg_surf)
svg_context.set_source_surface(self.surface,0,0)
svg_context.paint()
#svg_context.show_page()
def write_to_pdf(self,fname):
#ref:http://zetcode.com/gfx/pycairo/backends/
pdf_surf = cairo.PDFSurface(fname,self.width,self.height)
pdf_context = cairo.Context(pdf_surf)
pdf_context.set_source_surface(self.surface,0,0)
pdf_context.paint()
def show_imagej_unix(self,fname):
#Shows the current image by making a system call to imagej
#This only works on unix systems with imagej installed
#I wrote this for debugging.
os.system('imagej'+' '+fname+'&')
#def show_gtk(self):
# #Note: pass self.draw_pwm not self.draw_pwm()
# #print self.draw_pwm
# display_gtk = DisplayPwmGtk(self.draw_pwm,self.width,self.height)
class HeightLogo(BaseLogo):
'''
    This is similar to SeqLogo, except the values within the input logo matrix are
    treated as raw letter height values.
    This class treats a 4xn numpy matrix as a map of letter heights, with the rows
    corresponding to T,C,A,G.
    Letters are sorted by rank, with the top-ranked letter placed on top.
    All four letters are drawn.
'''
def __init__(self,logo_matrix,ytick=10):
#BaseLogo.__init__(self)
self.logo_matrix = logo_matrix
self.ytick = ytick #Set y-ticks
self.min_value = np.min(self.logo_matrix)
self.max_value = np.max(self.logo_matrix)
if self.min_value < 0:
self.has_neg_values = True
else:
self.has_neg_values = False
self.x =0
self.y =0
self.seq_len = self.logo_matrix.shape[1]
self.font_size =30
#Width of each nucleotide in the figure
self.bp_width = self.font_size +1
if self.has_neg_values:
self.height=256 #This is distinct from pwm code
else:
self.height=128
#self.height = int(self.max_value)
#print "Height of figure", self.height
#if self.height > 300:
# print "Height of HeightLogo",self.height,"is too high"
# print "Setting to 300 pixels"
# self.height=300
self.x_axis_line = self.height-5
self.x_offset = 15
self.x_axis_pad = 2
self.width = self.bp_width*self.seq_len+self.x_offset+self.x_axis_pad
self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, self.width,self.height)
self.context = cairo.Context(self.surface)
self.context.select_font_face("Monospace",
cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_BOLD)
self.context.set_font_size(self.font_size)
self.context.set_source_rgb(0.1, 0.1, 0.1)
self.context.scale(1,1)
def draw_neg_axes(self):
self.context.save()
#Draw x-axis line
self.context.set_line_width(0.5)
self.context.move_to(self.x_offset,self.x_axis_line)
self.context.line_to(self.width,self.x_axis_line)
self.context.stroke()
#Draw y axis line
self.context.move_to(self.x_offset,0)
self.context.line_to(self.x_offset,self.height)
self.context.stroke()
self.context.restore()
num_yticks = int(np.floor(self.height/self.ytick))
#Draw positive ticks
for i in range(num_yticks+1):
self.context.set_line_width(0.5)
self.context.move_to(self.x_offset,self.x_axis_line-i*self.ytick)
self.context.line_to(self.x_offset+2,self.x_axis_line-i*self.ytick)
self.context.stroke()
#Draw negative ticks
for i in range(num_yticks+1):
self.context.set_line_width(0.5)
self.context.move_to(self.x_offset,self.x_axis_line+i*self.ytick)
            self.context.line_to(self.x_offset+2,self.x_axis_line+i*self.ytick)
self.context.stroke()
def draw(self):
#HeightLogo
if self.has_neg_values:
self.draw_neg_axes()
else:
self.draw_axes()
self.context.save()
        #Get ranks of each column via argsort: the row index of the smallest
        #value comes first, so the smallest letter is drawn at the axis and
        #the largest letter is drawn last, ending up on top.
        filter_ranks = self.logo_matrix.argsort(axis=0)
nucs = self.logo_matrix.shape[1]
letters = self.logo_matrix.shape[0] #T,C,A,G
row_dict = {0:'T',1:'C',2:'A',3:'G'}
        #i is the position (column) index
for i in range(nucs):
x_start_spacer = 4
            #Note: x_axis_pad just adds some space between x=0 and the first nucleotide
xpos = self.bp_width*i+self.x_offset+self.x_axis_pad
ranks = np.ndarray.tolist(filter_ranks[:,i])
dy_pos = self.x_axis_line
dy_neg = self.x_axis_line
for rank_ind in ranks:
cur_let_str = row_dict[rank_ind]
cur_height = self.logo_matrix[rank_ind,i]
                #NucLetter.BIT_SCALE = 100
cur_let = NucLetter(self.context,
cur_let_str,
xpos,
0,
cur_height,
rank_ind)
if cur_let.signed_height>0: #If positive weight
cur_let.move(xpos,dy_pos)
dy_pos = dy_pos-cur_let.signed_height
self.context.scale(1,1)
cur_let.draw()
elif cur_let.signed_height<0: #If negative weight
dy_neg = dy_neg-cur_let.signed_height
cur_let.move(xpos,dy_neg)
self.context.scale(1,1)
cur_let.draw()
self.context.restore()
class SimpleHeightLogo(HeightLogo):
'''
Similar to height logo, except this class
takes a nucleotide string, and a 1D array of heights
and scales each letter to its corresponding height.
Only one letter is drawn per position
'''
def __init__(self,heights,nuc_seq,ytick=10):
self.nuc_seq = list(nuc_seq)
self.col_heights = np.sum(heights,axis=0)
super(SimpleHeightLogo, self).__init__(heights,ytick)
        if self.col_heights.shape[0] != len(self.nuc_seq):
            print "SimpleHeightLogo init error: dims do not match"
            print "heights.shape[1]:{}\tlength of nuc_seq:{}".\
                format(heights.shape[1],len(self.nuc_seq))
def draw(self):
#SimpleHeightLogo
if self.has_neg_values:
self.draw_neg_axes()
else:
self.draw_axes()
self.context.save()
row_dict = {0:'T',1:'C',2:'A',3:'G'}
        #i is the position (column) index
for i,nuc in enumerate(self.nuc_seq):
x_start_spacer = 4
            #Note: x_axis_pad just adds some space between x=0 and the first nucleotide
xpos = self.bp_width*i+self.x_offset+self.x_axis_pad
#ranks = np.ndarray.tolist(filter_ranks[:,i])
dy_pos = self.x_axis_line
dy_neg = self.x_axis_line
cur_let = NucLetter(self.context,
nuc,
xpos,
0,
self.col_heights[i],
0)
if cur_let.signed_height>0: #If positive weight
cur_let.move(xpos,dy_pos)
dy_pos = dy_pos-cur_let.signed_height
self.context.scale(1,1)
cur_let.draw()
elif cur_let.signed_height<0: #If negative weight
dy_neg = dy_neg-cur_let.signed_height
cur_let.move(xpos,dy_neg)
self.context.scale(1,1)
cur_let.draw()
self.context.restore()
class SeqImg(BaseLogo):
'''
A class for drawing an image of a nucleotide sequence
No height transformations. Just a simple sequence
'''
    def __init__(self,nuc_onehot_matrix,font_size=30):
self.nuc_onehot = nuc_onehot_matrix
self.nuc_string = dbt.onehot_to_nuc(self.nuc_onehot)
self.seq_len = int(self.nuc_onehot.shape[1])
if self.seq_len != int(np.sum(self.nuc_onehot)):
print "Error! nuc matrix is not one-hot for SeqImg"
self.x = 0
self.y = 0
        self.font_size = font_size
#Width of each nucleotide in the figure
self.bp_width = self.font_size +1
#the following dimension params are used to keep this sequence aligned
#with those produced by SeqLogo
self.x_offset = 15
self.x_axis_pad = 2
self.width = self.bp_width*self.seq_len+self.x_offset+self.x_axis_pad
self.height = 32
self.bp_height=.25
self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, self.width,self.height)
self.context = cairo.Context(self.surface)
self.context.select_font_face("Monospace",
cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_BOLD)
self.context.set_font_size(self.font_size)
self.context.set_source_rgb(0.1, 0.1, 0.1)
self.context.scale(1,1)
#self.draw()
def draw(self):
#SeqImg
self.context.save()
for i,letter in enumerate(list(self.nuc_string)):
xpos = self.bp_width*i+self.x_offset+self.x_axis_pad
nuclet = NucLetter(self.context,letter,xpos,20,self.bp_height)
self.context.scale(1,1)
nuclet.draw()
self.context.restore()
class SeqLogo(BaseLogo):
def __init__(self,np_pfm):
self.init_from_pfm(np_pfm)
self.ytick = BaseLogo.BIT_SCALE
@classmethod
    def init_from_nuc_list(cls,nuc_str_list):
        '''
        Initialize from a list of nucleotides (strings, not Biopython objects).
        Call logo = SeqLogo.init_from_nuc_list(my_nucs)
        to initialize in this manner
        '''
        pfm = PwmTools.pfm_from_nucs(nuc_str_list)
        return cls(pfm)
    def init_from_pfm(self,np_pfm):
        '''Initialize from a position frequency matrix.
        Tip: If passing a convolution filter as a pfm,
        multiply the convolution matrix by the number of samples
        that was used to determine the makeup of the convolution filter '''
#BaseLogo.__init__(self)
self.x =0
self.y =0
self.pfm = np_pfm
self.ppm = PwmTools.pfm_to_ppm(self.pfm)
self.pwm = PwmTools.pfm_to_pwm(self.pfm)
self.ic = PwmTools.pfm_to_ic(self.pfm)
self.seq_len = np_pfm.shape[1]
self.font_size =30
#Width of each nucleotide in the figure
self.bp_width = self.font_size +1
self.height=128
self.x_offset = 15
self.x_axis_pad = 2
self.width = self.bp_width*self.seq_len+self.x_offset+self.x_axis_pad
self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, self.width,self.height)
self.context = cairo.Context(self.surface)
self.context.select_font_face("Monospace",
cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_BOLD)
self.context.set_font_size(self.font_size)
self.context.set_source_rgb(0.1, 0.1, 0.1)
self.context.scale(1,1)
self.x_axis_line = self.height-5
def set_pfm(self,new_pfm):
self.__init__(new_pfm)
#self.pfm = new_pfm
#self.ppm = PwmTools.pfm_to_ppm(new_pfm,True)
#self.pwm = PwmTools.pfm_to_pwm(new_pfm,True)
#self.ic = PwmTools.pfm_to_ic(new_pfm,True)
def set_surface_dims(self,width,height):
self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width,height)
self.context = cairo.Context(self.surface)
def print_ic(self):
print self.ic
return self.ic
def print_pfm(self):
print self.pfm
return self.pfm
def print_ppm(self):
print self.ppm
return self.ppm
    def print_logo_ic_heights(self):
        logo_heights = self.get_logo_ic_heights()
        print logo_heights
        return logo_heights
def get_logo_ic_heights(self):
np.set_printoptions(threshold=np.nan)
logo_heights = self.ppm*self.ic
return logo_heights
def draw_pwm(self):
self.draw_axes()
self.context.save()
logo_heights = self.get_logo_ic_heights()
        #Get ranks of each column via argsort: the row index of the smallest
        #height comes first, so letters are drawn smallest-first and the
        #tallest letter ends up on top.
        logo_ranks = logo_heights.argsort(axis=0)
seq_len= logo_heights.shape[1]
#num_letters = logo_heights.shape[0]
row_dict = {0:'T',1:'C',2:'A',3:'G'}
        #i is the position (column) index
for i in range(seq_len):
x_start_spacer = 4
#Note:x_axis pad just adds some space between x=0 and the first
#nuc
xpos = self.bp_width*i+self.x_offset+self.x_axis_pad
#First number indicates index of lowest height number
#Lowest number should get drawn first
ranks = np.ndarray.tolist(logo_ranks[:,i])
dy = self.x_axis_line
for rank_ind in ranks:
                #ranks are ascending, so the smallest letter is drawn first
cur_let_str = row_dict[rank_ind]
cur_height = logo_heights[rank_ind,i]
                #Each letter must be instantiated to compute its rendered
                #height before it can be stacked on the previous letters.
cur_let = NucLetter(self.context,cur_let_str,xpos,0,cur_height,rank_ind)
cur_let.move(xpos,dy)
dy = dy-cur_let.height
self.context.scale(1,1)
cur_let.draw()
self.context.restore()
class NucLetter:
    #Cairo glyph indices for the font Monospace
A_IND = 36
T_IND = 55
G_IND = 42
C_IND = 38
glyph_dict = { 'A':A_IND,'T':T_IND,'G':G_IND,'C':C_IND}
BIT_SCALE= BaseLogo.BIT_SCALE #the height in pixels equivalent to 1 bit on our final scale
def __init__(self,cairo_context,nuc_letter,xpos=0,ypos=0, input_height = 1.0,rank=0):
self.x=xpos
self.y=ypos
self.context = cairo_context
self.nuc_letter = nuc_letter
self.letter_ind = NucLetter.glyph_dict[self.nuc_letter]
#print self.context.glyph_extents([(self.letter_ind,0,0)])
self.base_width = self.context.glyph_extents([(self.letter_ind,0,0)])[2]
self.base_height = self.context.glyph_extents([(self.letter_ind,0,0)])[3]
#print "Base_height",self.base_height
self.width = self.base_width
#Rescale letter to specified height
self.signed_height= math.floor(input_height*NucLetter.BIT_SCALE)
self.y_scale = abs(input_height*NucLetter.BIT_SCALE/self.base_height)
self.height = math.floor(self.base_height * self.y_scale)
#print "Height",self.height
#self.height=self.base_height
self.bottom = self.y+self.height
self.right = self.x+self.width
self.color = self.color_by_letter()
self.rank=rank
def move(self,xpos,ypos):
self.x = xpos
self.y = ypos
def color_by_letter(self):
if self.nuc_letter =='A':
return (1,1.0,0,0) #red
elif self.nuc_letter =='T':
return (1,0,1.0,0)#green
elif self.nuc_letter =='G':
            return (1,0,0,1.0) #blue
elif self.nuc_letter=='C':
return (1,.8,.8,0) #yellow
def draw(self):
self.context.save()
self.context.set_source_rgb(self.color[1],self.color[2],self.color[3])
if (self.y_scale>0):
self.context.scale(1.0,self.y_scale)
self.context.translate(0,-self.y*(self.y_scale-1)/self.y_scale)
#self.context.rectangle(self.x,self.y,30,self.height)
#self.context.fill()
#self.context.rectangle(self.x,self.y,5,5)
#self.context.fill()
self.context.show_glyphs([(self.letter_ind,self.x,self.y)])
self.context.restore()
#save scale show_glyphs restore
class PwmTools:
    #The pseudocount value follows a 2009 paper, which recommends
    #adding 0.8/4 to each entry of the pfm:
    #http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2647310/
PSEUDOCOUNT = 0.8
#PSEUDOCOUNT = 0.00000001
#This pseudocount can have a large
#impact if the pfm was derived from few sequences
#Decent refreshers on how to build these:
#http://biologie.univ-mrs.fr/upload/p202/01.4.PSSM_theory.pdf
#https://en.wikipedia.org/wiki/Sequence_logo
@staticmethod
def pfm_from_nucs(nuc_str_list):
num_nucs = len(nuc_str_list)
nuc_len = len(nuc_str_list[0])
pfm = np.zeros((4,nuc_len))
for nuc_str in nuc_str_list:
pfm += dbt.seq_to_onehot(nuc_str)
return pfm
@staticmethod
def pfm_to_ppm(pfm_arr,use_pseudocounts=False):
'''Convert a numpy position frequency matrix (PFM) to a
position probability matrix (PPM).
Rows are in order 'TCAG'
'''
pfm_arr = np.asarray(pfm_arr, dtype='float32')
if use_pseudocounts:
#Add pseudocounts to the ppm to avoid infinities
pseudo_sums = np.sum(pfm_arr,axis=0)+PwmTools.PSEUDOCOUNT
return np.true_divide((pfm_arr+(PwmTools.PSEUDOCOUNT/4.)),pseudo_sums)
else:
sums = np.sum(pfm_arr,axis=0)
return np.true_divide(pfm_arr,sums)
@staticmethod
def ppm_to_pwm(ppm_arr):
'''Convert a numpy position probability matrix (PPM) to a
position weight matrix (PWM).
Rows are in order 'TCAG'
'''
NUC_FREQ = 0.25
        #Clip only from below to avoid log(0); ratios above 1 must not be
        #clipped, otherwise enriched letters would lose their positive weights
        return np.log(np.clip(np.true_divide(ppm_arr, NUC_FREQ), 1e-10, None))
@staticmethod
def pfm_to_pwm(pfm_arr,use_pseudocounts = True):
'''Convert a numpy position frequency matrix (PFM) to a
position weight matrix (PWM).
        Rows are in order 'TCAG'
'''
if (use_pseudocounts):
return PwmTools.ppm_to_pwm(
PwmTools.pfm_to_ppm(pfm_arr,True))
return PwmTools.ppm_to_pwm(PwmTools.pfm_to_ppm(pfm_arr))
@staticmethod
def pfm_to_ic(pfm_arr,use_pseudocounts=True):
#ic stands for information content
        #Add pseudocounts to avoid infinities
if (use_pseudocounts):
ppm = PwmTools.pfm_to_ppm(pfm_arr,True)
else:
ppm = PwmTools.pfm_to_ppm(pfm_arr)
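        #Per-column information content (in bits):
        #  IC_j = log2(4) - (H_j + e_n)
        #where H_j = -sum_i p_ij*log2(p_ij) is the Shannon entropy of column j
        #and e_n is the small-sample correction computed below.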
        #en is the small-sample correction; it shrinks toward 0 as num_seqs grows
        #This method of calculating num_seqs might throw an error in certain cases
num_seqs = np.sum(pfm_arr[:,0],axis=0)
en = (1/np.log(2.))*((4.-1)/(2.*num_seqs))
return np.log2(ppm.shape[0]) -( -np.sum(ppm*np.log2(np.clip(ppm,1e-10,1.0)),axis=0) + en)
@staticmethod
    def ppm_to_ic(ppm,use_pseudocounts = True):
        #Note: for this function, the small-sample correction is not applied
        # since the number of sequences is unknown.
        #This is a reasonable approximation when the number of sequences is
        #large. (use_pseudocounts is accepted for API symmetry but unused here.)
return np.log2(ppm.shape[0]) -( -np.sum(ppm*np.log2(np.clip(ppm,1e-10,1.0)),axis=0))
@staticmethod
def ppm_to_logo_heights(ppm):
ic = PwmTools.ppm_to_ic(ppm)
return ppm*ic
@staticmethod
def pfm_to_logo_heights(pfm_arr):
ppm = PwmTools.pfm_to_ppm(pfm_arr)
ic = PwmTools.pfm_to_ic(pfm_arr)
return ppm*ic
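#Illustrative usage sketch (added; not part of the original module). It walks
#a tiny, made-up set of 4-mers through the PFM -> PPM -> PWM -> logo-height
#pipeline defined above; the sequences are hypothetical and this assumes dbt
#(used by pfm_from_nucs) is importable, as elsewhere in this module.
if __name__ == '__main__':
    example_seqs = ['TACG', 'TACG', 'TTCG'] #hypothetical reads
    example_pfm = PwmTools.pfm_from_nucs(example_seqs) #4xn counts, rows TCAG
    example_ppm = PwmTools.pfm_to_ppm(example_pfm, use_pseudocounts=True)
    example_pwm = PwmTools.pfm_to_pwm(example_pfm) #log-odds vs. 0.25 background
    example_heights = PwmTools.pfm_to_logo_heights(example_pfm)
    print example_heights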
'''
class DisplayPwmGtk():
#ref: http://zetcode.com/gfx/pycairo/images/
def __init__(self,draw_func,width,height):
self.draw_func =draw_func
self.width = width
self.height =height
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.resize(width,height)
self.window.set_title("PWM")
self.window.set_position(gtk.WIN_POS_CENTER)
#self.window.set_position(gtk.WindowPosition.CENTER)
gtk.set_from_pixmap
#self.draw_area = gtk.DrawingArea()
#self.draw_area.connect("draw",self.on_draw)
self.window.add(draw_area)
#self.button = gtk.Button("Close")
#self.button.connect("clicked",self.destroy)
#self.window.add(self.button)
self.window.connect("destroy",gtk.main_quit)
#self.button.show()
self.window.show()
def on_draw(self,wid,cr):
self.draw_func()
def main(self):
gtk.main()
def destroy(self,widget,data=None):
gtk.main_quit()
'''
|
LarsDu/DeepNucDecomp
|
duseqlogo/LogoTools.py
|
Python
|
gpl-3.0
| 33,504
|
[
"Biopython"
] |
bde2afbdc0797221fbf9b6720f1e73d177aa6ade7e1c4e2d3e593440dac0d004
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Special Math Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
__all__ = [
"ndtr",
"log_ndtr",
"log_cdf_laplace",
]
# log_ndtr uses different functions over the ranges
# (-infty, lower](lower, upper](upper, infty)
# Lower bound values were chosen by examining where the support of ndtr
# appears to be zero, relative to scipy's (which is always 64bit). They were
# then made more conservative just to be safe. (Conservative means use the
# expansion more than we probably need to.) See `NdtrTest` in
# special_math_test.py.
LOGNDTR_FLOAT64_LOWER = -20
LOGNDTR_FLOAT32_LOWER = -10
# Upper bound values were chosen by examining for which values of 'x'
# Log[cdf(x)] is 0, after which point we need to use the approximation
# Log[cdf(x)] = Log[1 - cdf(-x)] approx -cdf(-x). We chose a value slightly
# conservative, meaning we use the approximation earlier than needed.
LOGNDTR_FLOAT64_UPPER = 8
LOGNDTR_FLOAT32_UPPER = 5
def ndtr(x, name="ndtr"):
"""Normal distribution function.
Returns the area under the Gaussian probability density function, integrated
from minus infinity to x:
```
1 / x
ndtr(x) = ---------- | exp(-0.5 t^2) dt
sqrt(2 pi) /-inf
= 0.5 (1 + erf(x / sqrt(2)))
          = 0.5 erfc(-x / sqrt(2))
```
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="ndtr").
Returns:
ndtr: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x` is not floating-type.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.as_numpy_dtype not in [np.float32, np.float64]:
raise TypeError(
"x.dtype=%s is not handled, see docstring for supported types."
% x.dtype)
return _ndtr(x)
def _ndtr(x):
"""Implements ndtr core logic."""
half_sqrt_2 = constant_op.constant(
0.5 * math.sqrt(2.), dtype=x.dtype, name="half_sqrt_2")
w = x * half_sqrt_2
z = math_ops.abs(w)
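  # Pick the numerically stable expression for each regime:
  #   |w| < 1/sqrt(2): 1 + erf(w)     (erf is accurate near zero)
  #   w >= 1/sqrt(2):  2 - erfc(|w|)  (avoids cancellation in the upper tail)
  #   w <  -1/sqrt(2): erfc(|w|)      (erfc keeps precision in the lower tail)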
y = array_ops.where(math_ops.less(z, half_sqrt_2),
1. + math_ops.erf(w),
array_ops.where(math_ops.greater(w, 0.),
2. - math_ops.erfc(z),
math_ops.erfc(z)))
return 0.5 * y
def log_ndtr(x, series_order=3, name="log_ndtr"):
"""Log Normal distribution function.
For details of the Normal distribution function see `ndtr`.
This function calculates `(log o ndtr)(x)` by either calling `log(ndtr(x))` or
using an asymptotic series. Specifically:
- For `x > upper_segment`, use the approximation `-ndtr(-x)` based on
`log(1-x) ~= -x, x << 1`.
- For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique
and take a log.
- For `x <= lower_segment`, we use the series approximation of erf to compute
the log CDF directly.
The `lower_segment` is set based on the precision of the input:
```
lower_segment = { -20, x.dtype=float64
{ -10, x.dtype=float32
upper_segment = { 8, x.dtype=float64
{ 5, x.dtype=float32
```
When `x < lower_segment`, the `ndtr` asymptotic series approximation is:
```
ndtr(x) = scale * (1 + sum) + R_N
scale = exp(-0.5 x^2) / (-x sqrt(2 pi))
sum = Sum{(-1)^n (2n-1)!! / (x^2)^n, n=1:N}
R_N = O(exp(-0.5 x^2) (2N+1)!! / |x|^{2N+3})
```
where `(2n-1)!! = (2n-1) (2n-3) (2n-5) ... (3) (1)` is a
[double-factorial](https://en.wikipedia.org/wiki/Double_factorial).
Args:
x: `Tensor` of type `float32`, `float64`.
series_order: Positive Python `integer`. Maximum depth to
evaluate the asymptotic expansion. This is the `N` above.
name: Python string. A name for the operation (default="log_ndtr").
Returns:
log_ndtr: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled.
    TypeError: if `series_order` is not a Python `integer`.
ValueError: if `series_order` is not in `[0, 30]`.
"""
if not isinstance(series_order, int):
raise TypeError("series_order must be a Python integer.")
if series_order < 0:
raise ValueError("series_order must be non-negative.")
if series_order > 30:
raise ValueError("series_order must be <= 30.")
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.as_numpy_dtype == np.float64:
lower_segment = LOGNDTR_FLOAT64_LOWER
upper_segment = LOGNDTR_FLOAT64_UPPER
elif x.dtype.as_numpy_dtype == np.float32:
lower_segment = LOGNDTR_FLOAT32_LOWER
upper_segment = LOGNDTR_FLOAT32_UPPER
else:
raise TypeError("x.dtype=%s is not supported." % x.dtype)
# The basic idea here was ported from py/scipy/special/cephes/ndtr.c.
# We copy the main idea, with a few changes
# * For x >> 1, and X ~ Normal(0, 1),
# Log[P[X < x]] = Log[1 - P[X < -x]] approx -P[X < -x],
# which extends the range of validity of this function.
# * We use one fixed series_order for all of 'x', rather than adaptive.
# * Our docstring properly reflects that this is an asymptotic series, not a
  #   Taylor series. We also provided a correct bound on the remainder.
# * We need to use the max/min in the _log_ndtr_lower arg to avoid nan when
# x=0. This happens even though the branch is unchosen because when x=0
# the gradient of a select involves the calculation 1*dy+0*(-inf)=nan
# regardless of whether dy is finite. Note that the minimum is a NOP if
# the branch is chosen.
return array_ops.where(
math_ops.greater(x, upper_segment),
-_ndtr(-x), # log(1-x) ~= -x, x << 1
array_ops.where(math_ops.greater(x, lower_segment),
math_ops.log(_ndtr(math_ops.maximum(x, lower_segment))),
_log_ndtr_lower(math_ops.minimum(x, lower_segment),
series_order)))
def _log_ndtr_lower(x, series_order):
"""Asymptotic expansion version of `Log[cdf(x)]`, apppropriate for `x<<-1`."""
x_2 = math_ops.square(x)
# Log of the term multiplying (1 + sum)
log_scale = -0.5 * x_2 - math_ops.log(-x) - 0.5 * math.log(2. * math.pi)
return log_scale + math_ops.log(_log_ndtr_asymptotic_series(x, series_order))
def _log_ndtr_asymptotic_series(x, series_order):
"""Calculates the asymptotic series used in log_ndtr."""
if series_order <= 0:
return 1.
x_2 = math_ops.square(x)
even_sum = 0.
odd_sum = 0.
x_2n = x_2 # Start with x^{2*1} = x^{2*n} with n = 1.
for n in range(1, series_order + 1):
if n % 2:
odd_sum += _double_factorial(2 * n - 1) / x_2n
else:
even_sum += _double_factorial(2 * n - 1) / x_2n
x_2n *= x_2
return 1. + even_sum - odd_sum
def _double_factorial(n):
"""The double factorial function for small Python integer `n`."""
return np.prod(np.arange(n, 1, -2))
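# For example, _double_factorial(5) == 5 * 3 == 15 (i.e. 5!!); for n <= 1 the
# product over the empty range is 1, matching the (2n - 1)!! convention.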
def log_cdf_laplace(x, name="log_cdf_laplace"):
"""Log Laplace distribution function.
This function calculates `Log[L(x)]`, where `L(x)` is the cumulative
distribution function of the Laplace distribution, i.e.
```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```
For numerical accuracy, `L(x)` is computed in different ways depending on `x`,
```
x <= 0:
Log[L(x)] = Log[0.5] + x, which is exact
0 < x:
Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact
```
Args:
x: `Tensor` of type `float32`, `float64`.
    name: Python string. A name for the operation (default="log_cdf_laplace").
Returns:
`Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
# For x < 0, L(x) = 0.5 * exp{x} exactly, so Log[L(x)] = log(0.5) + x.
lower_solution = -np.log(2.) + x
# safe_exp_neg_x = exp{-x} for x > 0, but is
# bounded above by 1, which avoids
# log[1 - 1] = -inf for x = log(1/2), AND
# exp{-x} --> inf, for x << -1
safe_exp_neg_x = math_ops.exp(-math_ops.abs(x))
    # log1p(z) = log(1 + z) approx z for |z| << 1. This approximation is used
# internally by log1p, rather than being done explicitly here.
upper_solution = math_ops.log1p(-0.5 * safe_exp_neg_x)
return array_ops.where(x < 0., lower_solution, upper_solution)
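if __name__ == "__main__":
  # Added sketch (not part of the original module): a NumPy cross-check of the
  # piecewise Laplace log-CDF above. For x <= 0 the exact value is
  # log(0.5) + x; for x > 0 it is log1p(-0.5 * exp(-x)).
  _x = np.linspace(-5., 5., 11)
  _direct = np.log(np.where(_x <= 0., 0.5 * np.exp(_x), 1. - 0.5 * np.exp(-_x)))
  _piecewise = np.where(_x < 0., -np.log(2.) + _x,
                        np.log1p(-0.5 * np.exp(-np.abs(_x))))
  assert np.allclose(_direct, _piecewise)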
|
odejesush/tensorflow
|
tensorflow/contrib/distributions/python/ops/special_math.py
|
Python
|
apache-2.0
| 9,369
|
[
"Gaussian"
] |
2f92deb8b0bfe072e6bfdd655b70fad4d570b7356f1a4beb1cc214c8e04e3d63
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2011 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
from decimal import Decimal
from dateutil.relativedelta import relativedelta
from stoqlib.domain.taxes import (ProductIcmsTemplate,
ProductIpiTemplate,
ProductTaxTemplate,
InvoiceItemIpi)
from stoqlib.domain.test.domaintest import DomainTest
from stoqlib.lib.dateutils import localnow
__tests__ = 'stoqlib/domain/taxes.py'
class TestBaseTax(DomainTest):
def test_set_item_tax(self):
tax_template = ProductTaxTemplate(
store=self.store,
tax_type=ProductTaxTemplate.TYPE_ICMS)
icms_template = ProductIcmsTemplate(
store=self.store,
product_tax_template=tax_template)
tax_template = ProductTaxTemplate(
store=self.store,
tax_type=ProductTaxTemplate.TYPE_IPI)
ipi_template = ProductIpiTemplate(
store=self.store,
product_tax_template=tax_template)
product = self.create_product()
product.icms_template = icms_template
product.ipi_template = ipi_template
sale_item = self.create_sale_item()
sale_item.sellable.product = product
sale_item.icms_info.set_item_tax(sale_item)
sale_item.ipi_info.set_item_tax(sale_item)
class TestProductTaxTemplate(DomainTest):
def test_get_tax_model(self):
tax_template = ProductTaxTemplate(
store=self.store,
tax_type=ProductTaxTemplate.TYPE_ICMS)
self.failIf(tax_template.get_tax_model())
ProductIcmsTemplate(
store=self.store,
product_tax_template=tax_template)
self.failUnless(tax_template.get_tax_model())
def test_get_tax_type_str(self):
tax_template = ProductTaxTemplate(
store=self.store,
tax_type=ProductTaxTemplate.TYPE_ICMS)
self.assertEqual(tax_template.get_tax_type_str(), u'ICMS')
class TestProductIcmsTemplate(DomainTest):
"""Tests for ProductIcmsTemplate class"""
def test_is_p_cred_sn_valid(self):
tax_template = ProductTaxTemplate(
store=self.store,
tax_type=ProductTaxTemplate.TYPE_ICMS)
icms_template = ProductIcmsTemplate(
store=self.store,
product_tax_template=tax_template)
self.assertTrue(icms_template.is_p_cred_sn_valid())
expire_date = localnow()
icms_template.p_cred_sn_valid_until = expire_date
self.assertTrue(icms_template.is_p_cred_sn_valid())
expire_date = localnow() + relativedelta(days=+1)
icms_template.p_cred_sn_valid_until = expire_date
self.assertTrue(icms_template.is_p_cred_sn_valid())
expire_date = localnow() + relativedelta(days=-1)
icms_template.p_cred_sn_valid_until = expire_date
self.assertFalse(icms_template.is_p_cred_sn_valid())
class TestInvoiceItemIcms(DomainTest):
"""Tests for InvoiceItemIcms class"""
def _get_sale_item(self, sale_item_icms=None, quantity=1, price=10):
sale = self.create_sale()
product = self.create_product(price=price)
sale_item = sale.add_sellable(product.sellable, quantity=quantity)
if sale_item_icms:
sale_item.icms_info = sale_item_icms
return sale_item
def testVCredIcmsSnCalc(self):
"""Test for v_cred_icms_sn calculation.
This test should fail if v_cred_icms_sn get calculated wrong or gets
calculated for wrong values of csosn
"""
# Test for CSOSN 101. This should get v_cred_icms_sn calculated.
sale_item_icms = self.create_invoice_item_icms()
sale_item = self._get_sale_item(sale_item_icms, 1, 10)
sale_item_icms.csosn = 101
sale_item_icms.update_values(sale_item)
sale_item_icms.p_cred_sn = Decimal("3.10")
expected_v_cred_icms_sn = (sale_item.get_total() *
sale_item_icms.p_cred_sn / 100)
sale_item_icms.update_values(sale_item)
self.assertEqual(sale_item_icms.v_cred_icms_sn,
expected_v_cred_icms_sn)
# Test for CSOSN 201. This should get v_cred_icms_sn calculated.
sale_item_icms = self.create_invoice_item_icms()
sale_item = self._get_sale_item(sale_item_icms, 2, 30)
sale_item_icms.csosn = 201
sale_item_icms.p_cred_sn = Decimal("2.90")
expected_v_cred_icms_sn = (sale_item.get_total() *
sale_item_icms.p_cred_sn / 100)
sale_item_icms.update_values(sale_item)
self.assertEqual(sale_item_icms.v_cred_icms_sn,
expected_v_cred_icms_sn)
# Test for CSOSN 500. This should not get v_cred_icms_sn calculated.
sale_item_icms = self.create_invoice_item_icms()
sale_item = self._get_sale_item(sale_item_icms, 1, 10)
sale_item_icms.csosn = 500
sale_item_icms.p_cred_sn = Decimal("3.10")
sale_item_icms.update_values(sale_item)
self.failIf(sale_item_icms.v_cred_icms_sn)
def test_update_values_simples(self):
# Test for CSOSN 900. This should get v_icms and v_icms_st calculated
sale_item_icms = self.create_invoice_item_icms()
sale_item = self._get_sale_item(sale_item_icms, 1, 200)
sale_item_icms.csosn = 900
sale_item_icms.p_icms = 1
sale_item_icms.p_icms_st = 2
sale_item_icms.update_values(sale_item)
self.assertEquals(sale_item_icms.v_icms, Decimal("2"))
self.assertEquals(sale_item_icms.v_icms_st, Decimal("2"))
def test_update_values_normal(self):
sale_item_icms = self.create_invoice_item_icms()
sale_item = self._get_sale_item(sale_item_icms, 1, 10)
sale_item.sale.branch.crt = 0
sale_item_icms.cst = 0
sale_item_icms.update_values(sale_item)
sale_item_icms = self.create_invoice_item_icms()
sale_item = self._get_sale_item(sale_item_icms, 1, 10)
sale_item.sale.branch.crt = 0
sale_item_icms.cst = 10
sale_item_icms.p_red_bc_st = 10
sale_item_icms.p_mva_st = 10
sale_item_icms.v_bc_st = 10
sale_item_icms.p_icms_st = 10
sale_item_icms.v_icms = 10
sale_item_icms.v_icms_st = 10
sale_item_icms.p_red_bc = 10
sale_item_icms.p_icms = 10
sale_item_icms.p_v_bc = 10
sale_item_icms.p_red_bc = 10
sale_item_icms.update_values(sale_item)
sale_item_icms = self.create_invoice_item_icms()
sale_item = self._get_sale_item(sale_item_icms, 1, 10)
sale_item.sale.branch.crt = 0
sale_item_icms.cst = 20
sale_item_icms.update_values(sale_item)
sale_item_icms = self.create_invoice_item_icms()
sale_item = self._get_sale_item(sale_item_icms, 1, 10)
sale_item.sale.branch.crt = 0
sale_item_icms.cst = 30
sale_item_icms.update_values(sale_item)
sale_item_icms = self.create_invoice_item_icms()
sale_item = self._get_sale_item(sale_item_icms, 1, 10)
sale_item.sale.branch.crt = 0
sale_item_icms.cst = 40
sale_item_icms.update_values(sale_item)
sale_item_icms = self.create_invoice_item_icms()
sale_item = self._get_sale_item(sale_item_icms, 1, 10)
sale_item.sale.branch.crt = 0
sale_item_icms.cst = 51
sale_item_icms.update_values(sale_item)
sale_item_icms = self.create_invoice_item_icms()
sale_item = self._get_sale_item(sale_item_icms, 1, 10)
sale_item.sale.branch.crt = 0
sale_item_icms.cst = 60
sale_item_icms.update_values(sale_item)
sale_item_icms = self.create_invoice_item_icms()
sale_item = self._get_sale_item(sale_item_icms, 1, 10)
sale_item.sale.branch.crt = 0
sale_item_icms.cst = 70
sale_item_icms.update_values(sale_item)
class TestInvoiceItemIpi(DomainTest):
def _get_sale_item(self, sale_item_ipi=None, quantity=1, price=10):
sale = self.create_sale()
product = self.create_product(price=price)
sale_item = sale.add_sellable(product.sellable,
quantity=quantity)
if sale_item_ipi:
sale_item.ipi_info = sale_item_ipi
return sale_item
def test_set_initial_values(self):
sale_item_ipi = InvoiceItemIpi(store=self.store)
sale_item = self._get_sale_item(sale_item_ipi, 1, 10)
sale_item_ipi.cst = 0
sale_item_ipi.p_ipi = 0
sale_item_ipi.set_initial_values(sale_item)
sale_item_ipi = InvoiceItemIpi(store=self.store)
sale_item = self._get_sale_item(sale_item_ipi, 1, 10)
sale_item_ipi.cst = 0
sale_item_ipi.calculo = InvoiceItemIpi.CALC_UNIDADE
sale_item_ipi.set_initial_values(sale_item)
sale_item_ipi = InvoiceItemIpi(store=self.store)
sale_item = self._get_sale_item(sale_item_ipi, 1, 10)
sale_item_ipi.cst = 1
sale_item_ipi.calculo = InvoiceItemIpi.CALC_UNIDADE
sale_item_ipi.set_initial_values(sale_item)
|
tiagocardosos/stoq
|
stoqlib/domain/test/test_taxes.py
|
Python
|
gpl-2.0
| 10,023
|
[
"VisIt"
] |
9c0cf2c70c81b9fe292bc8ba8abfc18c0c093b263fe9e03752fea48ce4d1567d
|
# encoding: UTF-8
#
# Copyright 2012-2013 Alejandro Autalán
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check http://pygubu.web.here
from __future__ import unicode_literals
from collections import OrderedDict
import sys
import xml.etree.ElementTree as ET
import re
try:
import tkinter as tk
import tkinter.ttk as ttk
except ImportError:
import Tkinter as tk
import ttk
import pygubu
from pygubu.stockimage import StockImage
import pygubudesigner.widgets.toplevelframe
try:
basestring
except NameError:
basestring = str
RE_FONT = re.compile(r"(?P<family>\{\w+(\w|\s)*\}|\w+)\s?(?P<size>-?\d+)?\s?(?P<modifiers>\{\w+(\w|\s)*\}|\w+)?")
class BuilderForPreview(pygubu.Builder):
def _pre_process_data(self, data):
super(BuilderForPreview, self)._pre_process_data(data)
cname = data['class']
# Do not resize main window when
# Sizegrip is dragged on preview panel.
if cname == 'ttk.Sizegrip':
data['properties']['class_'] = 'DUMMY_CLASS'
class Preview(object):
def __init__(self, id_, canvas, x=0, y=0, rpaths=None):
self.id = 'preview_{0}'.format(id_)
self.x = x
self.y = y
self.w = 10
self.h = 10
self.min_w = self.w
self.min_h = self.h
self.resizer_h = 10
self.canvas = canvas
self.shapes = {}
self._create_shapes()
# --------
self.builder = None
self.canvas_window = None
self._resource_paths = rpaths if rpaths is not None else []
def width(self):
return self.w
def height(self):
return self.h + self.resizer_h
def _create_builder(self):
b = BuilderForPreview()
for p in self._resource_paths:
b.add_resource_path(p)
return b
def _create_shapes(self):
# Preview box
c = self.canvas
x, y, x2, y2 = (-1001, -1000, -1001, -1000)
s1 = c.create_rectangle(x, y, x2, y2,
width=2, outline='blue', tags=self.id)
s2 = c.create_rectangle(x, y, x2, y2, fill='blue', outline='blue',
tags=(self.id, 'resizer'))
s3 = c.create_text(x, y, text='widget_id', anchor=tk.NW,
fill='white', tags=self.id)
s4 = c.create_window(x, y, anchor=tk.NW, tags=self.id)
self.shapes = {
'outline': s1,
'resizer': s2,
'text': s3,
'window': s4
}
self.draw()
def erase(self):
self.canvas_window.destroy()
for key, sid in self.shapes.items():
self.canvas.delete(sid)
def draw(self):
c = self.canvas
x, y, x2, y2 = (self.x, self.y, self.x + self.w, self.y + self.h)
c.coords(self.shapes['outline'], x, y, x2, y2)
tbbox = c.bbox(self.shapes['text'])
tw, th = tbbox[2] - tbbox[0] + 10, tbbox[3] - tbbox[1] + 6
self.resizer_h = th
rx2 = self.x + self.w
ry2 = self.y + self.h + self.resizer_h
rx = rx2 - tw
ry = self.y + self.h
c.coords(self.shapes['resizer'], rx, ry, rx2, ry2)
tx = rx + 5
ty = ry + 3
c.coords(self.shapes['text'], tx, ty)
c.coords(self.shapes['window'], x, y)
def move_by(self, dx, dy):
self.x += dx
self.y += dy
self.draw()
def resize_to(self, w, h):
self.resize_by(w - self.w, h - self.h)
def resize_by(self, dw, dh):
new_w = self.w + dw
new_h = self.h + dh
changed = False
if new_w >= self.min_w:
self.w = new_w
changed = True
if new_h >= self.min_h:
self.h = new_h
changed = True
if changed:
self.draw()
self._resize_preview_window()
def _resize_preview_window(self):
if self.canvas_window:
self.canvas_window.configure(width=self.w, height=self.h)
def update(self, widget_id, xmlnode):
# delete current preview
# FIXME maybe do something to update preview without re-creating all ?
del self.builder
self.builder = None
self.canvas.itemconfigure(self.shapes['window'], window='')
if self.canvas_window:
self.canvas_window.destroy()
# Create preview
canvas_window = ttk.Frame(self.canvas)
canvas_window.rowconfigure(0, weight=1)
canvas_window.columnconfigure(0, weight=1)
self.canvas.itemconfigure(self.shapes['text'], text=widget_id)
self._preview_widget = \
self.create_preview_widget(canvas_window, widget_id, xmlnode)
self.canvas_window = canvas_window
self.canvas.itemconfigure(self.shapes['window'], window=canvas_window)
canvas_window.update_idletasks()
canvas_window.grid_propagate(0)
self.min_w = self._get_wreqwidth()
self.min_h = self._get_wreqheight()
self.w = self.min_w * 2
self.h = self.min_h * 2
self.resize_to(self.min_w, self.min_h)
def create_preview_widget(self, parent, widget_id, xmlnode):
self.builder = self._create_builder()
self.builder.add_from_xmlnode(xmlnode)
widget = self.builder.get_object(widget_id, parent)
return widget
def get_widget_by_id(self, widget_id):
return self.builder.get_object(widget_id)
def create_toplevel(self, widget_id, xmlnode):
# Create preview
builder = pygubu.Builder()
builder.add_from_xmlnode(xmlnode)
top = tk.Toplevel(self.canvas)
top.columnconfigure(0, weight=1)
top.rowconfigure(0, weight=1)
builder.get_object(widget_id, top)
return top
def _get_wreqwidth(self):
return self._preview_widget.winfo_reqwidth()
def _get_wreqheight(self):
return self._preview_widget.winfo_reqheight()
class DefaultMenuPreview(Preview):
def create_preview_widget(self, parent, widget_id, xmlnode):
self.builder = self._create_builder()
self.builder.add_from_xmlnode(xmlnode)
menubutton = ttk.Menubutton(parent, text='Menu preview')
menubutton.grid()
widget = self.builder.get_object(widget_id, menubutton)
menubutton.configure(menu=widget)
return menubutton
def create_toplevel(self, widget_id, xmlnode):
# Create preview
builder = pygubu.Builder()
builder.add_from_xmlnode(xmlnode)
top = tk.Toplevel(self.canvas)
top.columnconfigure(0, weight=1)
top.rowconfigure(0, weight=1)
menu = builder.get_object(widget_id, top)
top['menu'] = menu
return top
def resize_by(self, dw, hw):
return
class OnCanvasMenuPreview(Preview):
fonts = {}
def __init__(self, id_, canvas, x=0, y=0, rpaths=None):
super(OnCanvasMenuPreview, self).__init__(id_, canvas, x, y, rpaths)
self._menu = None
self._cwidth = 0
self._cheight = 0
def _get_wreqwidth(self):
return self._cwidth
def _get_wreqheight(self):
return self._cheight
def _get_font(self, font):
fontname = family = 'TkMenuFont'
size = 12
modifiers = ''
tclobject = False
if font and isinstance(font, basestring):
fontname = family = font
elif isinstance(font, tk._tkinter.Tcl_Obj):
fontname = family = str(font)
tclobject = True
elif isinstance(font, tuple):
fontname = str(font[4])
tclobject = True
if tclobject:
s = RE_FONT.search(fontname)
if s:
g = s.groupdict()
family = g['family'].replace('{', '').replace('}', '')
size = g['size']
modifiers = g['modifiers'] if g['modifiers'] else ''
if fontname not in OnCanvasMenuPreview.fonts:
weight = 'bold' if 'bold' in modifiers else 'normal'
slant = 'italic' if 'italic' in modifiers else 'roman'
underline = '1' if 'underline' in modifiers else '0'
overstrike = '1' if 'overstrike' in modifiers else '0'
kw = {'family': family, 'weight': weight, 'slant': slant,
'underline': underline, 'overstrike': overstrike}
if size:
kw['size'] = size
OnCanvasMenuPreview.fonts[fontname] = tk.font.Font(**kw)
return OnCanvasMenuPreview.fonts[fontname]
def _calculate_menu_wh(self):
""" Calculate menu widht and height."""
w = iw = 50
h = ih = 0
count = self._menu.index(tk.END) + 1
        # First calculate using the font parameters of the root menu:
font = self._menu.cget('font')
font = self._get_font(font)
for i in range(0, count):
mtype = self._menu.type(i)
if mtype == 'tearoff':
continue
label = 'default'
ifont = 'TkMenuFont'
if mtype != 'separator':
label = self._menu.entrycget(i, 'label')
ifont = self._menu.entrycget(i, 'font')
wpx = font.measure(label)
hpx = font.metrics('linespace')
w += wpx
if hpx > h:
h = hpx * 2
# Calculate using font configured for each subitem
ifont = self._get_font(ifont)
wpx = ifont.measure(label)
hpx = ifont.metrics('linespace')
iw += wpx
if hpx > ih:
ih = hpx * 2
# Then compare 2 sizes and use the greatest
w = max(w, iw, 100)
h = max(h, ih, 25)
self._cwidth = w + int(w * 0.25)
self._cheight = h + int(h * 0.25)
def create_preview_widget(self, parent, widget_id, xmlnode):
container = tk.Frame(parent, container=True, height=50)
container.grid(sticky='nswe')
container.rowconfigure(0, weight=1)
container.columnconfigure(0, weight=1)
self._top = top = tk.Toplevel(parent, use=container.winfo_id())
top.maxsize(2048, 50)
top.resizable(width=True, height=False)
top.update()
self.builder = self._create_builder()
self.builder.add_from_xmlnode(xmlnode)
self._menu = widget = self.builder.get_object(widget_id, top)
top.configure(menu=widget)
self._calculate_menu_wh()
return parent
def create_toplevel(self, widget_id, xmlnode):
# Create preview
builder = pygubu.Builder()
builder.add_from_xmlnode(xmlnode)
top = tk.Toplevel(self.canvas)
top.columnconfigure(0, weight=1)
top.rowconfigure(0, weight=1)
menu = builder.get_object(widget_id, top)
top['menu'] = menu
return top
MenuPreview = DefaultMenuPreview
if sys.platform.startswith('linux'):
MenuPreview = OnCanvasMenuPreview
class ToplevelPreview(Preview):
def create_preview_widget(self, parent, widget_id, xmlnode):
xmlnode.set('class', 'pygubudesigner.ToplevelFramePreview')
layout = ET.Element('layout')
for n, v in (('row', '0'), ('column', '0'), ('sticky', 'nsew')):
p = ET.Element('property')
p.set('name', n)
p.text = v
layout.append(p)
xmlnode.append(layout)
# print(ET.tostring(xmlnode))
self.builder = self._create_builder()
self.builder.add_from_xmlnode(xmlnode)
widget = self.builder.get_object(widget_id, parent)
return widget
def create_toplevel(self, widget_id, xmlnode):
# Create preview
builder = pygubu.Builder()
builder.add_from_xmlnode(xmlnode)
top = builder.get_object(widget_id, self.canvas)
return top
class DialogPreview(ToplevelPreview):
def create_toplevel(self, widget_id, xmlnode):
top = super(DialogPreview, self).create_toplevel(widget_id, xmlnode)
top.run()
return top
# def get_widget_by_id(self, widget_id):
# return self.canvas_window
class PreviewHelper:
indicators_tag = ('nw', 'ne', 'sw', 'se')
def __init__(self, canvas):
self.canvas = canvas
self.previews = OrderedDict()
self.padding = 20
self.indicators = None
self._sel_id = None
self._sel_widget = None
self.toplevel_previews = []
self.resource_paths = []
self._moving = False
self._last_event = None
self._objects_moving = None
canvas.bind('<Button-1>', self.click_handler)
canvas.bind('<ButtonRelease-1>', self.release_handler)
canvas.bind('<Motion>', self.motion_handler)
canvas.bind('<4>', lambda event: canvas.yview('scroll', -1, 'units'))
canvas.bind('<5>', lambda event: canvas.yview('scroll', 1, 'units'))
self._create_indicators()
def add_resource_path(self, path):
        #Must match self.resource_paths as initialized in __init__
        self.resource_paths.append(path)
def motion_handler(self, event):
if not self._moving:
c = event.widget
x = c.canvasx(event.x)
y = c.canvasy(event.y)
if self._over_resizer(x, y):
c.configure(cursor='fleur')
else:
c.configure(cursor='')
else:
dx = event.x - self._last_event.x
dy = event.y - self._last_event.y
self._last_event = event
if dx or dy:
self.resize_preview(dx, dy)
def click_handler(self, event):
c = event.widget
x = c.canvasx(event.x)
y = c.canvasy(event.y)
if self._over_resizer(x, y):
ids = c.find_overlapping(x, y, x, y)
if ids:
self._moving = True
self._objects_moving = ids
c.configure(cursor='fleur')
self._last_event = event
def release_handler(self, event):
self._objects_moving = None
self._moving = False
def _over_resizer(self, x, y):
"Returns True if mouse is over a resizer"
over_resizer = False
c = self.canvas
ids = c.find_overlapping(x, y, x, y)
if ids:
o = ids[0]
tags = c.gettags(o)
if 'resizer' in tags:
over_resizer = True
return over_resizer
def resize_preview(self, dw, dh):
"Resizes preview that is currently dragged"
# identify preview
if self._objects_moving:
id_ = self._objects_moving[0]
tags = self.canvas.gettags(id_)
for tag in tags:
if tag.startswith('preview_'):
_, ident = tag.split('preview_')
preview = self.previews[ident]
preview.resize_by(dw, dh)
self.move_previews()
break
self._update_cregion()
def _update_cregion(self):
# update canvas scrollregion
bbox = self.canvas.bbox(tk.ALL)
padd = 20
if bbox is not None:
region = (0, 0, bbox[2] + padd, bbox[3] + padd)
self.canvas.configure(scrollregion=region)
def move_previews(self):
"Move previews after a resize event"
# calculate new positions
min_y = self._calc_preview_ypos()
for idx, (key, p) in enumerate(self.previews.items()):
new_dy = min_y[idx] - p.y
self.previews[key].move_by(0, new_dy)
self._update_cregion()
self.show_selected(self._sel_id, self._sel_widget)
def _calc_preview_ypos(self):
"Calculates the previews positions on canvas"
y = 10
min_y = [y]
for k, p in self.previews.items():
y += p.height() + self.padding
min_y.append(y)
return min_y
def _get_slot(self):
"Returns the next coordinates for a preview"
x = y = 10
for k, p in self.previews.items():
y += p.height() + self.padding
return x, y
def draw(self, identifier, widget_id, xmlnode, wclass):
preview_class = Preview
if wclass == 'tk.Menu':
preview_class = MenuPreview
elif wclass == 'tk.Toplevel':
preview_class = ToplevelPreview
elif wclass == 'pygubu.builder.widgets.dialog':
preview_class = DialogPreview
if identifier not in self.previews:
x, y = self._get_slot()
self.previews[identifier] = preview \
= preview_class(identifier, self.canvas, x, y,
self.resource_paths)
else:
preview = self.previews[identifier]
preview.update(widget_id, xmlnode)
self.reset_selected(identifier)
self.move_previews()
def _create_indicators(self):
# selected indicators
self.indicators = []
anchors = {'nw': tk.SE, 'ne': tk.SW, 'sw': tk.NE, 'se': tk.NW}
        for suffix in self.indicators_tag:
            label = tk.Label(self.canvas,
                             image=StockImage.get('indicator_' + suffix))
            self.indicators.append(label)
            self.canvas.create_window(-10, -10, anchor=anchors[suffix],
                                      window=label, tags=suffix)
def _calculate_indicator_coords(self, tag, widget):
x = y = 0
wx = widget.winfo_rootx()
wy = widget.winfo_rooty()
ww = widget.winfo_width()
wh = widget.winfo_height()
cx = self.canvas.winfo_rootx()
cy = self.canvas.winfo_rooty()
if tag == 'nw':
x = wx - cx
y = wy - cy
if tag == 'ne':
x = (wx - cx) + ww
y = (wy - cy)
if tag == 'sw':
x = (wx - cx)
y = (wy - cy) + wh
if tag == 'se':
x = (wx - cx) + ww
y = (wy - cy) + wh
x, y = self.canvas.canvasx(x), self.canvas.canvasy(y)
return (x, y)
def show_selected(self, identifier, selected_id=None):
canvas = self.canvas
if selected_id is None:
for indicator in self.indicators_tag:
canvas.itemconfigure(indicator, state=tk.HIDDEN)
elif identifier in self.previews:
for indicator in self.indicators_tag:
canvas.itemconfigure(indicator, state=tk.NORMAL)
preview = self.previews[identifier]
canvas.update_idletasks()
widget = preview.get_widget_by_id(selected_id)
for indicatorw in self.indicators:
try:
indicatorw.lift(widget)
except tk.TclError:
pass
for tag in self.indicators_tag:
x, y = self._calculate_indicator_coords(tag, widget)
ox, oy = canvas.coords(tag)
canvas.move(tag, x - ox, y - oy)
self._sel_id = identifier
self._sel_widget = selected_id
def delete(self, identifier):
if identifier in self.previews:
preview = self.previews[identifier]
preview.erase()
del self.previews[identifier]
self.reset_selected(identifier)
self.move_previews()
def reset_selected(self, identifier):
if identifier == self._sel_id:
self._sel_id = None
self._sel_widget = None
def remove_all(self):
        #Iterate over a copy: delete() mutates self.previews
        for identifier in list(self.previews):
self.delete(identifier)
self.resource_paths = []
def preview_in_toplevel(self, identifier, widget_id, xmlnode):
preview = self.previews[identifier]
top = preview.create_toplevel(widget_id, xmlnode)
self.toplevel_previews.append(top)
def close_toplevel_previews(self):
for top in self.toplevel_previews:
top.destroy()
self.toplevel_previews = []
|
mhcrnl/pygubu
|
pygubudesigner/previewer.py
|
Python
|
gpl-3.0
| 20,513
|
[
"FLEUR"
] |
409726a26ee43cc8fd72976c9b23942034750ecb47bff7bbf0f8c0ca66268589
|
#-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to SciPy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
# Standard library imports.
import warnings
# SciPy imports.
from scipy import linalg, special
from scipy.special import logsumexp
from scipy._lib._util import check_random_state
from numpy import (asarray, atleast_2d, reshape, zeros, newaxis, dot, exp, pi,
sqrt, ravel, power, atleast_1d, squeeze, sum, transpose,
ones, cov)
import numpy as np
# Local imports.
from . import mvn
from ._stats import gaussian_kernel_estimate
__all__ = ['gaussian_kde']
class gaussian_kde:
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
weights : array_like, optional
weights of datapoints. This must be the same shape as dataset.
If None (default), the samples are assumed to be equally weighted
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
neff : int
Effective number of datapoints.
.. versionadded:: 1.2.0
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`. The square
of `kde.factor` multiplies the covariance matrix of the data in the kde
estimation.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
set_bandwidth
covariance_factor
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
In the case of unequally weighted points, `scotts_factor` becomes::
neff**(-1./(d+4)),
with ``neff`` the effective number of datapoints.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
or in the case of unequally weighted points::
(neff * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
With a set of weighted samples, the effective number of datapoints ``neff``
is defined by::
neff = sum(weights)^2 / sum(weights^2)
as detailed in [5]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
.. [5] Gray P. G., 1969, Journal of the Royal Statistical Society.
Series A (General), 132, 272
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None, weights=None):
self.dataset = atleast_2d(asarray(dataset))
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
if weights is not None:
self._weights = atleast_1d(weights).astype(float)
self._weights /= sum(self._weights)
if self.weights.ndim != 1:
raise ValueError("`weights` input should be one-dimensional.")
if len(self._weights) != self.n:
raise ValueError("`weights` input should be of length n")
self._neff = 1/sum(self._weights**2)
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = atleast_2d(asarray(points))
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
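        # Dispatch to the Cython kernel specialized for the common dtype of
        # the covariance matrix and the evaluation points.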
output_dtype = np.common_type(self.covariance, points)
itemsize = np.dtype(output_dtype).itemsize
if itemsize == 4:
spec = 'float'
elif itemsize == 8:
spec = 'double'
elif itemsize in (12, 16):
spec = 'long double'
else:
raise TypeError('%s has unexpected item size %d' %
(output_dtype, itemsize))
result = gaussian_kernel_estimate[spec](self.dataset.T, self.weights[:, None],
points.T, self.inv_cov, output_dtype)
return result[:, 0]
__call__ = evaluate
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
        mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
# This will raise LinAlgError if the new cov matrix is not s.p.d
# cho_factor returns (ndarray, bool) where bool is a flag for whether
# or not ndarray is upper or lower triangular
sum_cov_chol = linalg.cho_factor(sum_cov)
diff = self.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies)*self.weights, axis=0) / norm_const
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.sum(self.weights*(
special.ndtr(normalized_high) -
special.ndtr(normalized_low)))
return value
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""Computes the integral of a pdf over a rectangular interval.
Parameters
----------
low_bounds : array_like
A 1-D array containing the lower bounds of integration.
high_bounds : array_like
A 1-D array containing the upper bounds of integration.
maxpts : int, optional
The maximum number of points to use for integration.
Returns
-------
value : scalar
The result of the integral.
"""
if maxpts is not None:
extra_kwds = {'maxpts': maxpts}
else:
extra_kwds = {}
value, inform = mvn.mvnun_weighted(low_bounds, high_bounds,
self.dataset, self.weights,
self.covariance, **extra_kwds)
if inform:
msg = ('An integral in mvn.mvnun requires more points than %s' %
(self.d * 1000))
warnings.warn(msg)
return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
sum_cov_chol = linalg.cho_factor(sum_cov)
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies)*large.weights, axis=0)*small.weights[i]
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
result /= norm_const
return result
def resample(self, size=None, seed=None):
"""Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the effective number of samples in the underlying
dataset.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
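        Examples
        --------
        A minimal sketch (the shapes are what matter here, not the values):

        >>> import numpy as np
        >>> from scipy import stats
        >>> kde = stats.gaussian_kde(np.random.default_rng(0).normal(size=100))
        >>> kde.resample(size=10, seed=42).shape
        (1, 10)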
"""
if size is None:
size = int(self.neff)
random_state = check_random_state(seed)
norm = transpose(random_state.multivariate_normal(
zeros((self.d,), float), self.covariance, size=size
))
indices = random_state.choice(self.n, size=size, p=self.weights)
means = self.dataset[:, indices]
return means + norm
def scotts_factor(self):
"""Compute Scott's factor.
Returns
-------
s : float
Scott's factor.
"""
return power(self.neff, -1./(self.d+4))
def silverman_factor(self):
"""Compute the Silverman factor.
Returns
-------
s : float
            The Silverman factor.
"""
return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
multiplies the data covariance matrix to obtain the kernel covariance
matrix. The default is `scotts_factor`. A subclass can overwrite this
method to provide a different method, or set it through a call to
`kde.set_bandwidth`."""
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> import scipy.stats as stats
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x1, np.full(x1.shape, 1 / (4. * x1.size)), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, str):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
bias=False,
aweights=self.weights))
self._data_inv_cov = linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
L = linalg.cholesky(self.covariance*2*pi)
self.log_det = 2*np.log(np.diag(L)).sum()
def pdf(self, x):
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
def logpdf(self, x):
"""
Evaluate the log of the estimated pdf on a provided set of points.
"""
points = atleast_2d(x)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
if m >= self.n:
# there are more points than data, so loop over data
energy = np.empty((self.n, m), dtype=float)
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
energy[i] = sum(diff*tdiff, axis=0)
log_to_sum = 2.0 * np.log(self.weights) - self.log_det - energy.T
result = logsumexp(0.5 * log_to_sum, axis=1)
else:
# loop over points
result = np.empty((m,), dtype=float)
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0)
log_to_sum = 2.0 * np.log(self.weights) - self.log_det - energy
result[i] = logsumexp(0.5 * log_to_sum)
return result
@property
def weights(self):
try:
return self._weights
except AttributeError:
self._weights = ones(self.n)/self.n
return self._weights
@property
def neff(self):
try:
return self._neff
except AttributeError:
self._neff = 1/sum(self.weights**2)
return self._neff
| e-q/scipy | scipy/stats/kde.py | Python | bsd-3-clause | 21,517 | ["Gaussian"] | b0d20990fee456ac57bae528f27942935e8ed0772dc81adcbb5c89c099129825 |
"""
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2015 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved,
Brett G. Olivier (bgoli@users.sourceforge.net)
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Brett G. Olivier
"""
from pysces.version import __version__
__doc__ = "PySCeS JWS parser module -- uses PLY 1.5 or newer"
import os, copy
import pysces.lib.lex
import pysces.lib.yacc
from getpass import getuser
from time import sleep, strftime
from scipy import MachAr
MyMachArr = MachAr()
class JWSParser:
"""JWSParser written by Johann, based on Jannie's lexparse and integrated into PySCeS by brett -- converts PySCeS (.psc) files to JWS Online (jws) files"""
ReactionIDs = [] # List of reaction names
Names = [] # List of all reagent, parameter and function names
LexErrors = [] # List of lexing errors
NetworkDict = {} # Dictionary containing all reaction information
InitStrings = [] # Initialisation strings
InitParStrings = [] # Initialisation strings for parameters -- johann new
InitVarStrings = [] # Initialisation strings for variables -- johann new
Inits = [] # Initialised entities
Reagents = [] # All reagents found during parsing of reactions
VarReagents = [] # Variable reagents that occur in reactions
FixedReagents = [] # Fixed reagents
ReacParams = [] # Temporary list of reaction parameters
InitParams = [] # Initialised parameters
ParseErrors = []
mach_spec = MyMachArr
AllRateEqsGiven = 1 # Flag to check that all rate equations have been given
Debug = 0
##############
# Build the lexer
##############
# elementary regular expressions used as building blocks
Int = r'\d+' # Integer
    Dec = Int + r'\.' + Int # Decimal
# List of token names
tokens = ('FIXDEC',
'IRREV',
#'REAL', # johann -- now build up real in a p function since we want to make exponent explicit
'INT',
'DEC', # johann -- added explicitly since we no longer have REAL token
'PLUS',
'MINUS',
'TIMES',
'DIVIDE',
'POWER',
'LPAREN',
'RPAREN',
'EQUALS',
'COMMA',
'REACTION_ID',
'STOICH_COEF',
'NAME',
'EXP') # johann -- new EXP token
# Simple tokens
t_IRREV = r'>'
#t_REAL = Real # johann -- no longer have a real token, now a p function
t_INT = Int
t_DEC = Dec # new DEC token
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
    t_POWER = r'\*\*'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_EQUALS = r'='
t_COMMA = r','
t_ignore = ' \t\r' # Ignore spaces and tabs --- and windows return - brett 20040229
def t_comment(self,t):
r'\#.+\n' # Match from # to newline
t.lineno += 1 # Increment line number
def t_newline(self,t):
r'\n+' # Match newline
t.lineno += len(t.value) # Increment with number of consecutive newlines
def t_EXP(self,t): # johann -- need separate EXP token to replace for Mathematica
        r'\d+\.?\d*[Ee][+\-]?' # define EXP token merely as digits[.]digits[Ee][+-]
t.type = 'EXP' # parse final integer separately in 'Real' p-function to remove leading zeros
t.value = t.value.replace('e',' 10^')
t.value = t.value.replace('E',' 10^')
return t
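    # e.g. (illustrative): '1.5e-03' lexes as EXP '1.5 10^-' followed by INT '03';
    # p_real() later joins them into '1.5 10^-3' after stripping leading zeros.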
def t_FIXDEC(self,t):
r'FIX:'
t.type = 'FIXDEC'
t.value = 'FIX:'
return t
def t_REACTION_ID(self,t):
r'[a-zA-Z]\w*:' # Match any letter followed by zero or more characters
# in [a-zA-Z0-9_] up to a colon
t.type = 'REACTION_ID'
if t.value[0] == 'v' and len(t.value)>1:
t.value = t.value[1:] # remove initial 'v' if present to avoid constructions like 'v[vR1]'
t.value = 'v[' + t.value[:-1] + ']' # remove the colon and add v[] for JWS -- johann
if t.value in self.ReactionIDs:
self.LexErrors.append(('Duplicate ReactionID ', t.lineno, t.value, t.type))
else:
self.ReactionIDs.append(t.value)
return t
def t_STOICH_COEF(self,t):
r'\{\d+\}|\{\d+\.\d+\}'
t.type = 'STOICH_COEF'
t.value = t.value[1:-1]
return t
def t_NAME(self,t):
        r'[a-zA-Z][\w]*' # Match any letter followed by zero or more characters in the set [a-zA-Z0-9_]
if (t.value + '[t]' not in self.Names) and (t.value not in self.FuncNames): # Only add to list if absent in list
#self.Names.append(t.value)
self.Names.append(t.value + '[t]') # -- johann
#print self.Names[-1]
#hack! - brett
if t.value not in self.FuncNames: # make class attributes, ignore function names
#print 't value before', t.value
gt = t.value + '[t]'
t.value = gt
#print 't value after', t.value
t.type = 'NAME'
return t
def t_error(self,t):
self.LexErrors.append(('Lexer error ', t.lineno, t.value, t.type))
print 'Illegal character, Line ' + str(t.lineno) + ' :' + str(t.value[0])
t.skip(1)
##############
# The parser #
##############
FuncNames = ('acos', 'asin', 'atan', 'atan2', 'ceil', 'cos',
'cosh', 'exp', 'fabs', 'floor', 'fmod', 'frexp',
'hypot', 'ldexp', 'log', 'log10', 'modf', 'pow',
'sin', 'sinh', 'sqrt', 'tan', 'tanh')
precedence = (
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE'),
('left', 'POWER'),
('right', 'UMINUS')
)
def Show(self,name,tok):
if self.Debug:
print name,tok
def p_error(self,t):
self.ParseErrors.append(('Syntax error ', t.lineno, t.value, t.type))
print 'Syntax error, Line ' + str(t.lineno) + ' : ' + str(t.value)
tok = pysces.lib.yacc.token()
while tok and tok.type != 'REACTION_ID':
tok = pysces.lib.yacc.token()
return tok
def p_model(self,t):
'''Model : Statement
| Model Statement '''
self.Show('Model',t[0])
def p_statement(self,t):
'''Statement : Fixed
| ReactionLine
| Initialise'''
self.Show('Statement',t[0])
def p_fixed(self,t):
'''Fixed : FIXDEC FixList'''
self.Show('Fixed:',t[0])
def p_fixedreagents(self,t):
'''FixList : NAME
| NAME FixList'''
if t[1] != None:
self.FixedReagents.append(t[1][:-3]) # johann -- remove [t] off end
t[0] = [t[1]]
try:
t[0] += t[2]
except:
pass
self.Show('FixList', t[0])
def p_initialise(self,t):
'''Initialise : NAME EQUALS Expression'''
t[1] = t[1][:-3] + '[0]' # johann 20050302 -- Mathematica initialisation
t[0] = t[1] + t[2] + t[3]
## del temp
self.InitStrings.append(t[0].replace('=',' = '))
self.Inits.append(t[1])
self.Show('Initialisation',t[0])
def p_reaction_line(self,t):
'''ReactionLine : REACTION_ID ReactionEq
| REACTION_ID ReactionEq Expression'''
# global self.AllRateEqsGiven, ReacParams
ReacID = t[1]
if self.NetworkDict.has_key(ReacID):
self.ParseErrors.append(('Duplicate Reaction ', t.lineno, ReacID, None))
self.NetworkDict[ReacID] = {} # Reaction dictionary for ReacID
self.NetworkDict[ReacID]['Reagents'] = {} # Reagent dictionary within ReacID
# brett: if an index exists sum the coefficients instead of adding a new one
# this seems to deal with multiple definitions like X + X > Y and 2{X} + Y > Z + X
for i in t[2][0]: # First tuple member of ReactionEq contains list of (name,stoichcoef)
if self.NetworkDict[ReacID]['Reagents'].has_key(i[0]):
self.NetworkDict[ReacID]['Reagents'][i[0]] = self.NetworkDict[ReacID]['Reagents'][i[0]] + i[1]
else:
self.NetworkDict[ReacID]['Reagents'][i[0]] = i[1] # Key for reagent with stoichcoef value
killList = []
# brett: however for the case of X + Y > Y + Z where the sum of the coefficients
        # is zero we can delete the key (Y) out of the reaction list altogether (hopefully!)
for i in self.NetworkDict[ReacID]['Reagents']:
if abs(self.NetworkDict[ReacID]['Reagents'][i]) < self.mach_spec.eps*100.0:
killList.append(i)
#print self.mach_spec.eps*100.0, self.NetworkDict[ReacID]['Reagents']
#print killList, self.NetworkDict[ReacID]['Reagents']
# brett: and the easiest way of doing this is putting the zero keys in a list
# and deleting them out of the dictionary
if len(killList) != 0:
for i in killList:
del self.NetworkDict[ReacID]['Reagents'][i]
#print killList, self.NetworkDict[ReacID]['Reagents']
self.NetworkDict[ReacID]['Type'] = t[2][1] # Second tuple member of ReactionEq contains type
try: # Save rate equation and create parameter list
self.NetworkDict[ReacID]['RateEq'] = t[3]
self.NetworkDict[ReacID]['Params'] = self.ReacParams
self.ReacParams = [] # Reset global self.ReacParams list
except:
self.NetworkDict[ReacID]['RateEq'] = ''
self.NetworkDict[ReacID]['Params'] = []
self.AllRateEqsGiven = 0 # Set global flag to false
self.Show('ReactionLine',t[0])
self.Show('t1',t[1])
self.Show('t2',t[2])
self.Show('t3',t[3])
def p_reaction_eq(self,t):
'''ReactionEq : LeftHalfReaction EQUALS RightHalfReaction
| LeftHalfReaction IRREV RightHalfReaction'''
ReacType = ''
if t[2] == '=':
ReacType = 'Rever'
elif t[2] == '>':
ReacType = 'Irrev'
t[0] = (t[1] + t[3], ReacType)
self.Show('ReactionEq',t[0])
def p_left_half_reaction(self,t):
''' LeftHalfReaction : SubstrateTerm
| SubstrateTerm PLUS LeftHalfReaction'''
# Make a list of substrate terms
t[0] = [t[1]]
try:
t[0] += t[3]
except:
pass
# brett
# print "lhr ", t[0]
self.Show ('LeftHalfReaction', t[0])
def p_right_half_reaction(self,t):
''' RightHalfReaction : ProductTerm
| ProductTerm PLUS RightHalfReaction'''
# Make a list of product terms
t[0] = [t[1]]
try:
t[0] += t[3]
except:
pass
# brett
# print "rhr ", t[0]
self.Show ('RightHalfReaction', t[0])
def p_substrate_term(self,t):
'''SubstrateTerm : STOICH_COEF NAME
| NAME'''
# Make tuple of NAME and stoichiometric coefficient
# (< 0 because substrate)
try:
t[0] = (t[2], -float(t[1]))
if t[2] not in self.Reagents:
self.Reagents.append(t[2])
except:
t[0] = (t[1], -1.0)
if t[1] not in self.Reagents:
self.Reagents.append(t[1])
self.Show ('SubstrateTerm', t[0])
def p_product_term(self,t):
'''ProductTerm : STOICH_COEF NAME
| NAME'''
# Make tuple of NAME and stoichiometric coefficient
# (> 0 because product)
try:
t[0] = (t[2], float(t[1]))
if t[2] not in self.Reagents:
self.Reagents.append(t[2])
except:
t[0] = (t[1], 1.0)
if t[1] not in self.Reagents:
self.Reagents.append(t[1])
self.Show ('ProductTerm', t[0])
def p_rate_eq(self,t):
'''Expression : Expression PLUS Expression
| Expression MINUS Expression
| Expression TIMES Expression
| Expression DIVIDE Expression
| Power
| Number
| Func'''
# |UMINUS : add if the
# alternative for p_uminus is used
if len(t.slice)==4:
t[0] = t[1] + t[2] + t[3]
else:
t[0] = t[1]
def p_power(self,t):
'''Power : Expression POWER Expression'''
t[0] = 'Power['+ t[1] + ',' + t[3] + ']' #changed to Mathematica notation -- johann
def p_uminus(self,t):
'''Expression : MINUS Expression %prec UMINUS'''
# Alternative '''UMINUS : MINUS Expression'''
t[0] = t[1] + t[2]
def p_number(self,t):
'''Number : Real
| INT
| DEC
| NAME'''
# Build list of entities
try:
float(t[1]) # check for a number
except:
if (t[1] not in self.FuncNames) and (t[1] not in self.ReacParams) and (' 10^' not in t[1]):
# ignore function names, duplications and exponentials
self.ReacParams.append(t[1])
#self.ReacParams.append('self.' + t[1])
t[0] = t[1]
def p_real(self,t):
'''Real : EXP INT'''
loop = 1
while loop == 1: # remove leading zeros from exponent
if t[2][0] == '0' and len(t[2])>1:
t[2] = t[2][1:]
else:
loop=0
t[0] = t[1] + t[2]
def p_function(self,t):
'''Func : LPAREN ArgList RPAREN
| NAME LPAREN ArgList RPAREN'''
try:
t[0] = t[1] + t[2] + t[3] + t[4]
except:
t[0] = t[1] + t[2] + t[3]
def p_arglist(self,t):
'''ArgList : Expression
| Expression COMMA Expression'''
t[0] = t[1]
try:
t[0] += t[2] + t[3]
except:
pass
############################################
# end of lexer and parser definitions
############################################
def psc2jws(self,File,indir=None,outdir=None,quiet=1,debug=0):
"""
psc2jws(File,indir=None,outdir=None,quiet=1,debug=0)
        Convert a PySCeS (.psc) file to a JWS Online (.jws) file. Call with the input file name; the input (indir) and output (outdir) directories can optionally be specified.
Arguments:
=========
File: PSC input file
indir [default=None]: directory of PSC file
outdir [default=None]: output directory for JWS file
quiet [default=1]: turn lex/parse noise on/off
debug [default=0]: optionally output debug information
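        Example (hypothetical file and directories):
        =======
        jwp = JWSParser()
        jwp.psc2jws('model.psc', indir='/psc/models', outdir='/jws/models')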
"""
if indir == None:
indir = os.getcwd()
if outdir == None:
outdir = os.getcwd()
if os.path.exists(os.path.join(indir,File)) and File[-4:] == '.psc':
go = 1
else:
print '\nIgnoring non-PySCeS model file: ' + os.path.join(indir,File)
go = 0
if go == 1:
# clean up the modules
reload(pysces.lib.lex) # brett's bugbear code these have to be here ALWAYS!!
reload(pysces.lib.yacc)
# clean up the instance
self.ReactionIDs = [] # List of reaction names
self.Names = [] # List of all reagent, parameter and function names
self.LexErrors = [] # List of lexing errors
self.NetworkDict = {} # Dictionary containing all reaction information
self.InitStrings = [] # Initialisation strings
self.Inits = [] # Initialised entities
self.Reagents = [] # All reagents found during parsing of reactions
self.FixedReagents = [] # Fixed reagents
self.ReacParams = [] # Temporary list of reaction parameters
self.ParseErrors = []
self.InitParStrings = [] # Initialisation strings for parameters -- johann new
self.InitVarStrings = [] # Initialisation strings for variables -- johann new
self.VarReagents = [] # Variable reagents that occur in reactions
self.InitParams = [] # Initialised parameters
print '\nParsing file: '+ os.path.join(indir,File)
Data = open(os.path.join(indir,File),'r')
Model = Data.read()
Data.close()
self.Debug = debug
self.AllRateEqsGiven = 1 # Flag to check that all rate equations have been given
# try and find a temporary workspace or use cwd
if os.environ.has_key('TMP'):
tempDir = os.environ['TMP']
elif os.environ.has_key('TEMP'):
tempDir = os.environ['TEMP']
else:
tempDir = os.getcwd()
os.chdir(tempDir)
# fix filenames for intermediary files - brett
if not File[:-4].isalnum():
FileL = list(File)
FileT = ''
for let in FileL:
if let.isalnum():
FileT += let
# instantiate the lexer and parser
self.debugfile = '_jws' + FileT[:-3] + ".dbg"
self.tabmodule = '_jws' + FileT[:-3] + "_" + "parsetab"
else:
self.debugfile = '_jws' + File[:-4] + ".dbg"
self.tabmodule = '_jws' + File[:-4] + "_" + "parsetab"
if self.Debug:
print self.tabmodule
print self.debugfile
pysces.lib.lex.lex(module=self, debug=self.Debug)
pysces.lib.lex.input(Model)
pysces.lib.yacc.yacc(module=self,
debug=self.Debug,
debugfile=self.debugfile,
tabmodule=self.tabmodule)
os.chdir(outdir)
while 1:
tok = pysces.lib.lex.token()
if not tok: break
if self.LexErrors != []: print 'self.LexErrors = ', self.LexErrors, '\n'
while 1:
p = pysces.lib.yacc.parse(Model)
if not p: break
# we have the dictionary get rid of this stuff
del Model, p
# Create list of variable reagents and remove '[t]' from fixed reagents
for i in range(len(self.Reagents)): # johann -- new construction otherwise list elements not replaced
if self.Reagents[i][:-3] not in self.FixedReagents:
self.VarReagents.append(self.Reagents[i])
if self.Reagents[i][:-3] in self.FixedReagents:
self.Reagents[i] = self.Reagents[i][:-3]
# Create list of initialised parameters
for i in range(len(self.Inits)): # johann -- reworked extensively
if self.Inits[i][:-3]+'[t]' not in self.VarReagents:
self.InitStrings[i] = self.InitStrings[i].replace('[0]','')
self.InitStrings[i] = self.InitStrings[i].replace('[t]','') # capture params initialised i.t.o. other params
self.Inits[i] = self.Inits[i][:-3]
self.InitParams.append(self.Inits[i])
self.InitParStrings.append(self.InitStrings[i])
elif self.Inits[i][:-3]+'[t]' in self.VarReagents:
self.InitVarStrings.append(self.InitStrings[i])
# In self.NetworkDict, clean rate equation parameter list of variables that occur in that reaction
# Add FixedReagent to Params even if not a parameter in rate eqn (requirement to add '$' below)
for id in self.NetworkDict.keys():
for reag in self.VarReagents:
if reag in self.NetworkDict[id]['Params']:
self.NetworkDict[id]['Params'].remove(reag)
for reag in self.FixedReagents:
if (reag+'[t]' in self.NetworkDict[id]['Reagents'].keys()) and (reag not in self.NetworkDict[id]['Params']):
self.NetworkDict[id]['Params'].append(reag+'[t]')
# Warn if no reagents have been fixed
if self.FixedReagents == []:
print 'Warning: No reagents have been fixed'
else: # Warn if a fixed reagent does not occur in a reaction equation
for reag in self.FixedReagents:
if reag not in self.Reagents:
print 'Warning: ' + reag + ' (fixed) does not occur in any reaction'
# Check whether all parameters have been initialised
# johann -- remove [t] from params
for id in self.NetworkDict.keys():
for i in range(len(self.NetworkDict[id]['Params'])):
self.NetworkDict[id]['Params'][i] = self.NetworkDict[id]['Params'][i][:-3]
if self.NetworkDict[id]['Params'][i] not in self.InitParams:
print 'Warning: Parameter ' + self.NetworkDict[id]['Params'][i] + ' has not been initialised'
# Check whether all variable reagents have been initialised
for reag in self.VarReagents:
if reag[:-3]+'[0]' not in self.Inits:
print 'Warning: Variable ' + reag + ' has not been initialised'
# Check that all initialised parameters actually occur in self.Inits
known = 0
for param in self.InitParams:
for id in self.NetworkDict.keys():
if param in self.NetworkDict[id]['Params']:
known = 1
break
else:
known = 0
if not known: print 'Warning: ' + param + \
' has been initialised but does not occur in any rate equation'
# clean up rate equations in self.NetworkDict to remove [t] for Params
# clean up Reagents to remove [t] and add $ for fixed
for id in self.NetworkDict.keys():
for param in self.NetworkDict[id]['Params']:
self.NetworkDict[id]['RateEq'] = self.NetworkDict[id]['RateEq'].replace(param+'[t]',param)
for reag in self.NetworkDict[id]['Reagents'].keys():
if reag[:-3] in self.NetworkDict[id]['Params']:
saveval = self.NetworkDict[id]['Reagents'].pop(reag)
self.NetworkDict[id]['Reagents']['$'+reag[:-3]] = saveval
else:
saveval = self.NetworkDict[id]['Reagents'].pop(reag)
self.NetworkDict[id]['Reagents'][reag[:-3]] = saveval
# output errors
if self.ParseErrors != []: print 'Parse errors occurred: ', self.ParseErrors
# debugging
if debug:
print '\n\n\n'
print '\nself.ReactionIDs: ',self.ReactionIDs
print '\nself.NetworkDict: ',self.NetworkDict
print '\nself.Names: ',self.Names
print '\nself.Inits: ',self.Inits
print '\nself.InitStrings: ',self.InitStrings
print '\nself.InitParStrings: ',self.InitParStrings
print '\nself.InitVarStrings: ',self.InitVarStrings
print '\nself.InitParams: ',self.InitParams
print '\nself.Reagents: ',self.Reagents
print '\nself.FixedReagents: ',self.FixedReagents
print '\nself.VarReagents: ',self.VarReagents
print '\nParseErrors: ',self.ParseErrors
# now write the jws output file
filename = File[:-4]
filename = self.chkjws(filename)
go = 0
loop = 0
filex = ''
while loop == 0:
try:
filex = os.path.join(outdir,filename)
f = open(filex,'r')
f.close()
input = raw_input('\nFile "' + filex + '" exists.\nOverwrite? ([y]/n) ')
if input == 'y' or input == '':
go = 1
loop = 1
elif input == 'n':
                    filename = raw_input('\nFile "' + filename + '" exists. Enter a new filename: ')
                    filename = self.chkjws(filename)
                    filex = os.path.join(outdir,filename)
                    go = 1
else:
print '\nInvalid input'
except:
print '\nFile "' + filex + '" does not exist, proceeding...'
loop = 1
go = 1
if go == 1:
try:
UseR = getuser()
except:
UseR = ''
outFile = open(filex,'w')
header = ''
#header += '############################################################\n'
header += '# JWS model input file \n'
header += '# Generated by PySCeS (' + __version__ + ') (http://pysces.sourceforge.net) \n'
header += '# Pysces input file: ' + File + '\n'
header += '# This file generated: ' + strftime("%a, %d %b %Y %H:%M:%S") + ' by '+UseR+' \n'
header += '###########################################################\n\n'
outFile.write(header)
# modelname
modelname = File[:-4]
outFile.write('begin name\n' + modelname + '\nend name\n\n')
# reactions and rate equations
reaction_list = []
rateeq_list = []
nd = self.NetworkDict
reaclist = copy.copy(nd.keys()) # johann -- to sort self.ReactionIDs neatly ;-)
reaclist.sort()
for key in reaclist: # key = reaction name
reagL = []
reagR = []
Req = copy.copy(nd[key]['RateEq'])
for reagent in nd[key]['Reagents']:
if nd[key]['Reagents'][reagent] > 0:
reagR.append('{' + str(abs(nd[key]['Reagents'][reagent])) + '}' + reagent)
elif nd[key]['Reagents'][reagent] < 0:
reagL.append('{' + str(abs(nd[key]['Reagents'][reagent])) + '}' + reagent)
substring = ''
count = 0
for x in reagL:
if count != 0:
substring += ' + '
substring += x.replace(' ','')
count += 1
prodstring = ''
count = 0
for x in reagR:
if count != 0:
prodstring += ' + '
prodstring += x.replace(' ','')
count += 1
symbol = ' = '
reaction_list.append(key + '\t' + substring + symbol + prodstring)
rateeq_list.append(key + ' = ' + Req)
outFile.write('begin reactions\n')
for x in reaction_list:
outFile.write(x+'\n')
outFile.write('end reactions\n\n')
outFile.write('begin rate equations\n')
for x in rateeq_list:
outFile.write(x+'\n')
outFile.write('end rate equations\n\n')
# parameters
outFile.write('begin parameters\n')
for x in self.InitParStrings:
outFile.write(x+'\n')
outFile.write('end parameters\n\n')
# species initial values
outFile.write('begin initial conditions\n')
for x in self.InitVarStrings:
outFile.write(x+'\n')
outFile.write('end initial conditions\n\n')
# close output file
outFile.close()
# print to stdout if quiet is set to zero
if quiet == 0:
print '\nModel name: ' + modelname
print "\nReactions:"
for x in reaction_list:
print x
print "\nRate Equations:"
for x in rateeq_list:
print x
print '\nParameters:'
for x in self.InitParStrings:
print x
print '\nSpecies Initial Values:'
for x in self.InitVarStrings:
print x
def chkjws(self,File):
"""
chkjws(File)
Checks if a filename has a .jws extension and adds one to the returned filename if needed
Arguments:
=========
File: the filename to check
"""
try:
if File[-4:] == '.jws':
pass
else:
print 'Assuming extension is .jws'
File += '.jws'
except:
print 'Chkjws error'
return File
if __name__ == '__main__':
import os, sys
from time import sleep
inDiR = 'c://mypysces//pscmodels'
outDiR = 'c://mypysces//jws'
jwp = JWSParser()
for mod in os.listdir(inDiR):
jwp.psc2jws(mod,indir=inDiR,outdir=outDiR,quiet=1,debug=0)
psp = PySCeSParser(debug=0)
| asttra/pysces | pysces/PyscesJWSParse.py | Python | bsd-3-clause | 31,055 | ["PySCeS"] | be97686854406f13b98d365431eaef3be21d86488f224ce1c9c2c018e5697078 |
# -*- coding: utf-8 -*-
"""
This module only contains the forms for a patient for searching filled-in
information, editing notification settings, and editing personalia.
For managers/secretaries/healthprofessionals, forms are included for
administration of patients.
:subtitle:`Class definitions:`
"""
from django import forms
from datetime import date
from django.forms.utils import ErrorList
from django.utils.translation import ugettext_lazy as _
from core.forms import BaseModelForm, BaseForm,\
ChoiceOtherField, FormDateField, MultipleChoiceField
from apps.healthperson.patient.models import Patient,\
DIAGNOSIS_CHOICES, REGULAR_CONTROL_FREQ,\
BLOOD_SAMPLE_FREQ, CLINIC_VISIT_CHOICES
from apps.healthperson.healthprofessional.models import HealthProfessional
from apps.account.forms import BaseProfileEditForm, BasePasswordProfileEditForm
PASSWORD_CHOICES = (
('', ('---------')),
('yes', ('Ja')),
('no', ('Nee')),
)
class PatientSearchForm(BaseForm):
"""
Search for a patient.
Used by all healthpersons except for patients.
"""
BSN = forms.CharField(
max_length=128, label=_('BSN'), required=False)
local_hospital_number = forms.CharField(
max_length=128, label=_('Lokaal ziekenhuisnummer'), required=False)
last_name = forms.CharField(
max_length=128, label=_('Achternaam'), required=False)
years = list(range(date.today().year - 100, date.today().year + 1))
date_of_birth = FormDateField(
label=_('Geboortedatum'), years=years, allow_future_date=False,
future=False, required=False)
class PatientDiagnoseControleEditForm(BaseModelForm):
"""
Edit the diagnose and controle settings.
Used by a healthprofessional or manager.
"""
exclude_questionnaires = MultipleChoiceField(
label=_('Selecteer vragenlijsten'))
def __init__(self, *args, **kwargs):
instance = kwargs.get('instance', None)
super(PatientDiagnoseControleEditForm, self).__init__(*args, **kwargs)
health_professionals = []
hospital = None
if instance.user:
hospital = instance.user.hospital
health_professionals_list = HealthProfessional.objects.filter(
user__hospital=hospital)
for health_professional in health_professionals_list:
health_professionals.append(
(health_professional.id,
health_professional.user.professional_name))
health_professionals.insert(0, ('', '---------'))
self.fields['current_practitioner'].choices = health_professionals
# self.fields['exclude_questionnaires'].choices = health_professionals
if instance:
self.fields['current_practitioner'].initial =\
instance.current_practitioner.id
self.fields['diagnose'].widget.attrs.update(
{'class': 'choice_display_diagnose'})
def clean(self):
cleaned_data = super(PatientDiagnoseControleEditForm, self).clean()
del self.errors['exclude_questionnaires']
return cleaned_data
class Meta:
model = Patient
exclude = ('rc_registration_number', 'last_blood_sample',
'health_person_id', 'added_on', 'added_by',
'regular_control_start_notification',
'regular_control_reminder_notification',
'healthprofessional_handling_notification',
'message_notification')
fieldsets = (
(None, {'fields': ('diagnose',)}),
('diagnose', {'fields': ('exclude_questionnaires', )}),
(None, {'fields': ('current_practitioner',
'regular_control_frequency',
'blood_sample_frequency',
'always_clinic_visit')}),
)
class PatientNotificationEditForm(BaseModelForm):
"""
Edit the notification settings.
Used by a patient.
"""
class Meta:
model = Patient
exclude = ('health_person_id', 'rc_registration_number',
'diagnose', 'current_practitioner', 'prefix',
'regular_control_frequency',
'blood_sample_frequency', 'last_blood_sample',
'always_clinic_visit', 'excluded_questionnaires')
fieldsets = (
(None, {'fields': ('regular_control_start_notification',
'regular_control_reminder_notification',
'healthprofessional_handling_notification',
'message_notification',)}),
)
class PatientProfileEditForm(BasePasswordProfileEditForm):
'''
Edit patient profile form
    used by the patient themselves
'''
class Meta(BasePasswordProfileEditForm.Meta):
exclude = BasePasswordProfileEditForm.Meta.exclude + (
'BSN', 'local_hospital_number',
'initials', 'prefix', 'gender', 'date_of_birth',
'last_name', 'first_name',
'title', 'hospital',)
fieldsets = (
(None, {'fields': ('mobile_number', 'mobile_number2',
'email', 'email2',
'change_password')}),
('change_password', {'fields': ('password', 'password2')}),
)
class PatientPersonaliaEditForm(BasePasswordProfileEditForm):
'''
Edit patient profile form
    used by a healthprofessional or secretary
'''
class Meta(BasePasswordProfileEditForm.Meta):
exclude = BasePasswordProfileEditForm.Meta.exclude
fieldsets = (
(None, {'fields': ('BSN',
'local_hospital_number', 'hospital',
'title', 'first_name', 'initials',
'prefix', 'last_name', 'gender',
'date_of_birth')}),
(None, {'fields': ('mobile_number', 'mobile_number2',
'email', 'email2')}),
)
class PatientPersonaliaEditFormManager(BasePasswordProfileEditForm):
'''
Edit patient profile form
    used by a manager.
'''
# Validators
change_password = forms.TypedChoiceField(
choices=PASSWORD_CHOICES,
label=_('Maak wachtwoord ongeldig?'), required=False)
class Meta(BasePasswordProfileEditForm.Meta):
exclude = BasePasswordProfileEditForm.Meta.exclude
fieldsets = (
(None, {'fields': ('BSN',
'local_hospital_number',
'hospital', 'title',
'first_name', 'initials', 'prefix',
'last_name', 'gender',
'date_of_birth')}),
(None, {'fields': ('mobile_number', 'mobile_number2',
'email', 'email2',
'change_password')}),
)
class PatientAddForm(BaseProfileEditForm):
'''
Add new patient form
'''
# Diagnose
diagnose = forms.TypedChoiceField(
label=_('Diagnose'), required=True)
# current_practitioner
current_practitioner = forms.ChoiceField(
label=_('Hoofdbehandelaar'), required=True)
# Regular control freq.
    regular_control_choices = list(REGULAR_CONTROL_FREQ)
    regular_control_choices.insert(0, ('', '---------'))
    regular_control_frequency = ChoiceOtherField(
        choices=regular_control_choices,
other_field=forms.TextInput,
label=_('Frequentie reguliere controle'), required=True)
# Blood sample frequency
blood_sample_choices = list(BLOOD_SAMPLE_FREQ)
blood_sample_choices.insert(0, ('', '---------'))
blood_sample_frequency = ChoiceOtherField(
choices=blood_sample_choices,
other_field=forms.TextInput,
label=_('Frequentie bloedprikken'), required=True)
# Always clinic visit
always_clinic_choices = list(CLINIC_VISIT_CHOICES)
always_clinic_choices.insert(0, ('', '---------'))
always_clinic_visit = forms.ChoiceField(
choices=always_clinic_choices,
label=_('Volgt altijd een polikliniekbezoek?'), required=True)
exclude_questionnaires = MultipleChoiceField(
label=_('Selecteer vragenlijsten'))
def __init__(self, *args, **kwargs):
user = kwargs.pop('user', None)
super(PatientAddForm, self).__init__(*args, **kwargs)
self.user = user
# Set diagnose list
diagnose_choices = list(DIAGNOSIS_CHOICES)
diagnose_choices.insert(0, ('', '---------'))
self.fields['diagnose'].choices = diagnose_choices
# set health professionals choices
health_professionals = []
if user:
health_professionals_list = HealthProfessional.objects.filter(
user__hospital=user.hospital)
for health_professional in health_professionals_list:
health_professionals.append(
(health_professional.health_person_id,
health_professional.user.professional_name))
health_professionals.insert(0, ('', '---------'))
self.fields['current_practitioner'].choices = health_professionals
self.fields['diagnose'].widget.attrs.update(
{'class': 'choice_display_diagnose'})
def clean(self):
cleaned_data = super(PatientAddForm, self).clean()
del self.errors['exclude_questionnaires']
# Check if 'other' in frequency fields is digit
if (('regular_control_frequency' in cleaned_data and
cleaned_data['regular_control_frequency'] not in (None, ''))):
choices = []
for choice in self.fields['regular_control_frequency'].choices:
choices.append(choice[0])
if cleaned_data['regular_control_frequency'] not in choices:
if not cleaned_data['regular_control_frequency'].isdigit():
self.errors['regular_control_frequency'] = ErrorList(
[_('Geef een getal op.')])
if (('blood_sample_frequency' in cleaned_data and
cleaned_data['blood_sample_frequency'] not in (None, ''))):
choices = []
for choice in self.fields['blood_sample_frequency'].choices:
choices.append(choice[0])
if cleaned_data['blood_sample_frequency'] not in choices:
if not cleaned_data['blood_sample_frequency'].isdigit():
self.errors['blood_sample_frequency'] = ErrorList(
[_('Geef een getal op.')])
return cleaned_data
class Meta(BaseProfileEditForm.Meta):
exclude = BaseProfileEditForm.Meta.exclude
fieldsets = (
(None, {'fields': ('BSN',
'local_hospital_number',
'hospital', 'title',
'first_name', 'initials', 'prefix',
'last_name', 'gender',
'date_of_birth')}),
(None, {'fields': ('mobile_number', 'mobile_number2',
'email', 'email2',
'diagnose',)}),
('diagnose', {'fields': ('exclude_questionnaires', )}),
(None, {'fields': ('current_practitioner',
'regular_control_frequency',
'blood_sample_frequency',
'always_clinic_visit')}),
)
| acesonl/remotecare | remotecare/apps/healthperson/patient/forms.py | Python | gpl-3.0 | 11,598 | ["VisIt"] | 52c228d50a0c219ab43f47558184e0bb693fc38d270246ceaa657a5f2a244e12 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Contain and organize bibliographic information.
"""
import string
import math
from ...lib.citation import Citation as lib_Citation
class Citation(object):
"""
Store information about a citation and all of its references.
"""
def __init__(self):
"""
Initialize members.
"""
self.__src_handle = None
self.__ref_list = []
def get_source_handle(self):
"""
Provide the handle to the source that this citation is for.
@return: Source Handle
@rtype: handle
"""
return self.__src_handle
def set_source_handle(self, handle):
"""
Set the handle for the source that this citation is for.
@param handle: Source Handle
@type handle: handle
"""
self.__src_handle = handle
def get_ref_list(self):
"""
List all the references to this citation.
@return: a list of references
@rtype: list of L{gen.lib.srcref} objects
"""
return self.__ref_list
def add_reference(self, source_ref):
"""
Add a reference to this citation. If a similar reference exists, don't
add another one.
@param source_ref: Source Reference
@type source_ref: L{gen.lib.citation}
@return: The key of the added reference among all the references.
@rtype: char
"""
letter_count = len(string.ascii_lowercase)
ref_count = len(self.__ref_list)
x_ref_count = ref_count
# Return "a" for ref_count = 0, otherwise log(0) does not work
if ref_count == 0:
self.__ref_list.append(("a", source_ref))
return "a"
last_letter = string.ascii_lowercase[ ref_count % letter_count ]
key = ""
        # Calculate the number of letters needed for the key.
number_of_letters = int(math.log(float(ref_count), float(letter_count)))+1
# Exclude index for number_of_letters-1
for n in range(1, number_of_letters-1):
ref_count -= pow(letter_count, n)
# Adjust number_of_letters for new index
number_of_letters = int(math.log(float(ref_count), float(letter_count))) +1
for n in range(1, number_of_letters):
x_ref_count -= pow(letter_count, n)
for letter in range(1, number_of_letters):
            index = x_ref_count // pow(letter_count, letter) % letter_count
key += string.ascii_lowercase[ index ]
key = key + last_letter
self.__ref_list.append((key, source_ref))
return key
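# Illustrative key sequence from add_reference(): the 1st reference in a
# citation gets key "a", the 2nd "b", ..., the 26th "z", the 27th "aa", the
# 28th "ab", and so on, extending keys like spreadsheet column labels.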
class Bibliography(object):
"""
Store and organize multiple citations into a bibliography.
"""
MODE_DATE = 2**0
MODE_PAGE = 2**1
MODE_CONF = 2**2
MODE_NOTE = 2**3
MODE_MEDIA = 2**4
MODE_ALL = MODE_DATE | MODE_PAGE | MODE_CONF | MODE_NOTE | MODE_MEDIA
def __init__(self, mode=MODE_ALL):
"""
A bibliography will store citations (sources) and references to those
citations (citations). Duplicate entries will not be added. To change
what is considered duplicate, you can tell the bibliography what source
ref information you are interested in by passing in the mode.
Possible modes include:
MODE_DATE
MODE_PAGE
MODE_CONF
MODE_NOTE
MODE_MEDIA
MODE_ALL
If you only care about pages, set "mode=MODE_PAGE".
If you only care about dates and pages, set "mode=MODE_DATE|MODE_PAGE".
If you care about everything, set "mode=MODE_ALL".
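        For example (an illustrative sketch), to consider two references
        equal unless their dates or pages differ:
        bib = Bibliography(mode=Bibliography.MODE_DATE | Bibliography.MODE_PAGE)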
"""
self.__citation_list = []
self.mode = mode
def add_reference(self, lib_citation):
"""
Add a reference to a source to this bibliography. If the source already
exists, don't add it again. If a similar reference exists, don't
add another one.
        @param lib_citation: Citation object
        @type lib_citation: L{gen.lib.Citation}
@return: A tuple containing the index of the source among all the
sources and the key of the reference among all the references. If
there is no reference information, the second element will be None.
@rtype: (int,char) or (int,None)
N.B. Within this file, the name 'citation' is used both for
gen.lib.Citation, and for _bibliography.Citation. It is not clear how
best to rename the concepts in this file to avoid the clash, so the
names have been retained. In this function, lib_citation is used for
gen.lib.Citation instances, and citation for _bibliography.Citation
instances. Elsewhere in this file, source_ref is used for
gen.lib.Citation instances.
"""
source_handle = lib_citation.get_reference_handle()
cindex = 0
rkey = ""
citation = None
citation_found = False
for citation in self.__citation_list:
if citation.get_source_handle() == source_handle:
citation_found = True
break
cindex += 1
if not citation_found:
citation = Citation()
citation.set_source_handle(source_handle)
cindex = len(self.__citation_list)
self.__citation_list.append(citation)
if self.__sref_has_info(lib_citation):
for key, ref in citation.get_ref_list():
if self.__srefs_are_equal(ref, lib_citation):
# if a reference like this already exists, don't add
# another one
return (cindex, key)
rkey = citation.add_reference(lib_citation)
return (cindex, rkey)
def get_citation_count(self):
"""
Report the number of citations in this bibliography.
@return: number of citations
@rtype: int
"""
return len(self.__citation_list)
def get_citation_list(self):
"""
Return a list containing all the citations in this bibliography.
@return: citation list
@rtype: list of L{Citation} objects
"""
return self.__citation_list
def __sref_has_info(self, source_ref):
"""
Determine if this source_ref has any useful information based on the
current mode.
"""
if ( self.mode & self.MODE_PAGE ) == self.MODE_PAGE:
if source_ref.get_page() != "":
return True
if ( self.mode & self.MODE_DATE ) == self.MODE_DATE:
date = source_ref.get_date_object()
if date is not None and not date.is_empty():
return True
if ( self.mode & self.MODE_CONF ) == self.MODE_CONF:
confidence = source_ref.get_confidence_level()
if confidence is not None and confidence != \
lib_Citation.CONF_NORMAL:
return True
if ( self.mode & self.MODE_NOTE ) == self.MODE_NOTE:
if len(source_ref.get_note_list()) != 0:
return True
if ( self.mode & self.MODE_MEDIA ) == self.MODE_MEDIA:
if len(source_ref.get_media_list()) != 0:
return True
# Can't find anything interesting.
return False
def __srefs_are_equal(self, source_ref1, source_ref2):
"""
Determine if two source references are equal based on the
current mode.
"""
# The criterion for equality (in mode==MODE_ALL) is changed for
# citations. Previously, it was based on is_equal from SecondaryObject,
# which does a 'cmp' on the serialised data. (Note that this might not
# have worked properly for Dates; see comments in Date.is_equal and
# EditCitation.data_has_changed). The comparison is now made as to
# whether the two gen.lib.Citations have the same handle (i.e. they are
# actually the same database objects). It is felt that this better
# reflects the intent of Citation objects, which can be merged if they
# are intended to represent the same citation.
if self.mode == self.MODE_ALL:
return source_ref1.handle == source_ref2.handle
if ( self.mode & self.MODE_PAGE ) == self.MODE_PAGE:
if source_ref1.get_page() != source_ref2.get_page():
return False
if ( self.mode & self.MODE_DATE ) == self.MODE_DATE:
date1 = source_ref1.get_date_object()
date2 = source_ref2.get_date_object()
if not date1.is_equal(date2):
return False
if ( self.mode & self.MODE_CONF ) == self.MODE_CONF:
conf1 = source_ref1.get_confidence_level()
conf2 = source_ref2.get_confidence_level()
if conf1 != conf2:
return False
if ( self.mode & self.MODE_NOTE ) == self.MODE_NOTE:
nl1 = source_ref1.get_note_list()
nl2 = source_ref2.get_note_list()
if len(nl1) != len(nl2):
return False
for notehandle in nl1:
if notehandle not in nl2:
return False
if ( self.mode & self.MODE_MEDIA ) == self.MODE_MEDIA:
nl1 = source_ref1.get_media_list()
nl2 = source_ref2.get_media_list()
if len(nl1) != len(nl2):
return False
for mediahandle in nl1:
if mediahandle not in nl2:
return False
# Can't find anything different. They must be equal.
return True
| Forage/Gramps | gramps/gen/plug/report/_bibliography.py | Python | gpl-2.0 | 10,547 | ["Brian"] | b80aed0f970e967f6c544c564f8434dbad00ec76f1d5fa3bf7e9b16779b62f20 |
|
# Encoding: utf-8
import sys
import os
import pymatgen.io.nwchem as nwchem
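# Usage sketch (assumed invocation): the single argument is either a directory
# whose subdirectories each hold a 'result.out', or a single output file:
#
#     python process_output.py calculations/
#     python process_output.py calculations/job1/result.out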
if os.path.isdir(sys.argv[1]):
dirname = sys.argv[1]
    dir_list = [os.path.join(dirname, d) for d in os.listdir(dirname)
                if os.path.isdir(os.path.join(dirname, d))]
for directory in dir_list:
print("Processing output in " + os.path.join(directory, 'result.out') +
"...")
output = nwchem.NwOutput(os.path.join(directory, 'result.out'))
try:
error = False
for data in output.data:
if data['has_error']:
error = True
if error:
print("File: " + os.path.join(directory, 'result.out') +
" contains errors!")
elif output.data[-1]['task_time'] == 0:
print('No timing information found in ' +
os.path.join(directory, 'result.out') + ".")
else:
output.to_file(os.path.join(directory, 'data.json'))
except NameError:
print("No data found in file. ")
except IndexError:
print("Data is empty!")
else:
output_file = sys.argv[1]
dir_name = os.path.dirname(output_file)
print('Processing output in ' + output_file)
try:
output = nwchem.NwOutput(output_file)
    except Exception:
raise IOError('Could not find proper nwchem output file.')
output.to_file(os.path.join(dir_name, 'data.json'))
| mbercx/cage | cage/scripts/process_output.py | Python | mit | 1,468 | ["NWChem", "pymatgen"] | 31602607139caefec9d84d3eb108a217d4cce33359ef861a2f369ccc4bd668dc |
|
#!/usr/bin/env python3
# @package fill_missing
# \brief This script solves the Laplace equation as a method of filling holes in map-plane data.
#
# \details The script is an implementation of the SOR method with Chebyshev
# acceleration for the Laplace equation, as described in 'Numerical Recipes in
# Fortran: the art of scientific computing' by William H. Press et al -- 2nd
# edition.
#
# Note also that this script can be used both from the command line and as a
# Python module -- by adding 'from fill_missing import laplace' to your
# program.
# Uses an approximation to Laplace's equation
# \f[ \nabla^2 u = 0 \f]
# to smoothly replace missing values in two-dimensional NetCDF variables with the average of the ``nearby'' non-missing values.
# Here is a hypothetical example, filling the missing values in the variables \c topg and \c usurf, using a convergence tolerance of \f$10^{-4}\f$ and the initial guess of \f$100\f$, on data in the NetCDF file \c data.nc :
# \code
# fill_missing.py -f data.nc -v topg,usurf --eps=1.0e-4 \
# -i 100.0 -o data_smoothed.nc
# \endcode
# Options \c -i and \c -e specify the initial guess and the convergence tolerance for \e all the specified variables, so using these options only makes sense if all the variables have the same units. Moreover, making a good initial guess can noticeably reduce the time needed to fill in the holes. Generally variables should be filled one at a time.
#
# Each of the requested variables must have missing values specified
# according to CF Metadata conventions, namely one of the following:
# \c valid_range or both of \c valid_min and
# \c valid_max (if the values are in a specific range); one of
# \c valid_min (\c valid_max) if values are greater (less)
# than some value, or \c _FillValue. Also \c _FillValue is
# interpreted as \c valid_max if it is positive, and as
# \c valid_min otherwise, and the \c missing_value attribute is deprecated
# by the NetCDF User's Guide, but is supported for backward compatibility. For more information see
# <a href="http://www.unidata.ucar.edu/software/netcdf/guide_10.html#SEC76">NetCDF User's Guide: Attributes</a>.
# Run \verbatim fill_missing.py --help \endverbatim for the list of available
# command-line options.
# CK, 08/12/2008
from numpy import *
# Computes \f$\rho_{Jacobi}\f$, see formula (19.5.24), page 858.
def rho_jacobi(dimensions):
(J, L) = dimensions
return (cos(pi / J) + cos(pi / L)) / 2
# This makes the stencil wrap around the grid. It is unclear if this should be
# done, but it allows using a 4-point stencil for all points, even if they
# are on the edge of the grid (otherwise we need to use three points on the
# sides and two in the corners).
#
# Is and Js are arrays with row- and column-indices, M and N are the grid
# dimensions.
def fix_indices(Is, Js, dimensions):
(M, N) = dimensions
Is[Is == M] = 0
Is[Is == -1] = M - 1
Js[Js == N] = 0
Js[Js == -1] = N - 1
return (Is, Js)
# \brief laplace solves the Laplace equation
# \details laplace solves the Laplace equation using the SOR method with Chebyshev
# acceleration as described in 'Numerical Recipes in Fortran: the art of
# scientific computing' by William H. Press et al -- 2nd edition, section
# 19.5.
#
# data is a 2-d array (computation grid)
#
# mask is a boolean array; setting mask to 'data == 0', for example, results
# in only modifying points where 'data' is zero, all the other points
# are left as is. Intended use: if in an array the value of -9999.0
# signifies a missing value, then setting mask to 'data == -9999.0'
# fills in all the missing values.
#
# eps1 is the first stopping criterion: the iterations stop if the norm of
# residual becomes less than eps1*initial_norm, where 'initial_norm' is
# the initial norm of residual. Setting eps1 to zero or a negative
# number disables this stopping criterion.
#
# eps2 is the second stopping criterion: the iterations stop if the absolute
# value of the maximal change in value between successive iterations is
# less than eps2. Setting eps2 to zero or a negative number disables
# this stopping criterion.
#
# initial_guess is the initial guess used for all the values in the domain;
# the default is 'mean', i.e. use the mean of all the present values as
# the initial guess for missing values. initial_guess has to be 'mean'
# or a number.
#
# max_iter is the maximum number of iterations allowed. The default is 10000.
def laplace(data, mask, eps1, eps2, initial_guess='mean', max_iter=10000):
dimensions = data.shape
rjac = rho_jacobi(dimensions)
i, j = indices(dimensions)
# This splits the grid into 'odd' and 'even' parts, according to the
# checkerboard pattern:
odd = (i % 2 == 1) ^ (j % 2 == 0)
even = (i % 2 == 0) ^ (j % 2 == 0)
# odd and even parts _in_ the domain:
odd_part = list(zip(i[mask & odd], j[mask & odd]))
even_part = list(zip(i[mask & even], j[mask & even]))
# relative indices of the stencil points:
k = array([0, 1, 0, -1])
l = array([-1, 0, 1, 0])
parts = [odd_part, even_part]
try:
initial_guess = float(initial_guess)
except:
if initial_guess == 'mean':
present = array(ones_like(mask) - mask, dtype=bool)
initial_guess = mean(data[present])
else:
print("""ERROR: initial_guess of '%s' is not supported (it should be a number or 'mean').
Note: your data was not modified.""" % initial_guess)
return
data[mask] = initial_guess
print("Using the initial guess of %10f." % initial_guess)
# compute the initial norm of residual
initial_norm = 0.0
for m in [0, 1]:
for i, j in parts[m]:
Is, Js = fix_indices(i + k, j + l, dimensions)
xi = sum(data[Is, Js]) - 4 * data[i, j]
initial_norm += abs(xi)
print("Initial norm of residual =", initial_norm)
print("Criterion is (change < %f) OR (res norm < %f (initial norm))." % (eps2, eps1))
omega = 1.0
# The main loop:
for n in arange(max_iter):
anorm = 0.0
change = 0.0
for m in [0, 1]:
for i, j in parts[m]:
# stencil points:
Is, Js = fix_indices(i + k, j + l, dimensions)
residual = sum(data[Is, Js]) - 4 * data[i, j]
delta = omega * 0.25 * residual
data[i, j] += delta
# record the maximal change and the residual norm:
anorm += abs(residual)
if abs(delta) > change:
change = abs(delta)
# Chebyshev acceleration (see formula 19.5.30):
if n == 1 and m == 1:
omega = 1.0 / (1.0 - 0.5 * rjac ** 2)
else:
omega = 1.0 / (1.0 - 0.25 * rjac ** 2 * omega)
print("max change = %10f, residual norm = %10f" % (change, anorm))
if (anorm < eps1 * initial_norm) or (change < eps2):
print("Exiting with change=%f, anorm=%f after %d iteration(s)." % (change,
anorm, n + 1))
return
print("Exceeded the maximum number of iterations.")
return
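# A minimal usage sketch (hypothetical field in which -9999.0 marks missing
# values; eps1=-1 disables the residual-norm criterion, as in __main__ below):
#
#     field = full((8, 8), -9999.0)
#     field[0, :] = 1.0    # known values along one edge
#     laplace(field, field == -9999.0, eps1=-1, eps2=1.0e-4)
#
# After the call, the masked entries hold the smooth Laplace fill.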
if __name__ == "__main__":
from optparse import OptionParser
from sys import argv, exit
from shutil import copy, move
from tempfile import mkstemp
from os import close
from time import time, asctime
try:
from netCDF4 import Dataset as NC
except:
print("netCDF4 is not installed!")
        exit(1)
parser = OptionParser()
parser.usage = "%prog [options]"
parser.description = "Fills missing values in variables selected using -v in the file given by -f."
parser.add_option("-f", "--file", dest="input_filename",
help="input file")
parser.add_option("-v", "--vars", dest="variables",
help="comma-separated list of variables to process")
parser.add_option("-o", "--out_file", dest="output_filename",
help="output file")
parser.add_option("-e", "--eps", dest="eps",
help="convergence tolerance",
default="1.0")
parser.add_option("-i", "--initial_guess", dest="initial_guess",
help="initial guess to use; applies to all selected variables",
default="mean")
(options, args) = parser.parse_args()
    if not options.input_filename:
print("""Please specify the input file name
(using the -f or --file command line option).""")
exit(-1)
input_filename = options.input_filename
    if not options.variables:
print("""Please specify the list of variables to process
(using the -v or --variables command line option).""")
exit(-1)
variables = (options.variables).split(',')
    if not options.output_filename:
print("""Please specify the output file name
(using the -o or --out_file command line option).""")
exit(-1)
output_filename = options.output_filename
eps = float(options.eps)
# Done processing command-line options.
print("Creating the temporary file...")
try:
(handle, tmp_filename) = mkstemp()
close(handle) # mkstemp returns a file handle (which we don't need)
copy(input_filename, tmp_filename)
except IOError:
print("ERROR: Can't create %s, Exiting..." % tmp_filename)
try:
nc = NC(tmp_filename, 'a')
except Exception as message:
print(message)
print("Note: %s was not modified." % output_filename)
exit(-1)
# add history global attribute (after checking if present)
historysep = ' '
historystr = asctime() + ': ' + historysep.join(argv) + '\n'
if 'history' in nc.ncattrs():
nc.history = historystr + nc.history # prepend to history string
else:
nc.history = historystr
t_zero = time()
for name in variables:
print("Processing %s..." % name)
try:
var = nc.variables[name]
attributes = ["valid_range", "valid_min", "valid_max",
"_FillValue", "missing_value"]
adict = {}
print("Reading attributes...")
for attribute in attributes:
print("* %15s -- " % attribute, end=' ')
if attribute in var.ncattrs():
adict[attribute] = getattr(var, attribute)
print("found")
else:
print("not found")
if (var.ndim == 3):
nt = var.shape[0]
for t in range(0, nt):
print("\nInterpolating time step %i of %i\n" % (t, nt))
data = asarray(squeeze(var[t, :, :].data))
if "valid_range" in adict:
range = adict["valid_range"]
mask = ((data >= range[0]) & (data <= range[1]))
print("Using the valid_range attribute; range = ", range)
elif "valid_min" in adict and "valid_max" in adict:
valid_min = adict["valid_min"]
valid_max = adict["valid_max"]
mask = ((data < valid_min) | (data > valid_max))
print("""Using valid_min and valid_max attributes.
valid_min = %10f, valid_max = %10f.""" % (valid_min, valid_max))
elif "valid_min" in adict:
valid_min = adict["valid_min"]
mask = data < valid_min
print("Using the valid_min attribute; valid_min = %10f" % valid_min)
elif "valid_max" in adict:
valid_max = adict["valid_max"]
mask = data > valid_max
print("Using the valid_max attribute; valid_max = %10f" % valid_max)
elif "_FillValue" in adict:
fill_value = adict["_FillValue"]
if fill_value <= 0:
mask = data <= fill_value + 2 * finfo(float).eps
else:
mask = data >= fill_value - 2 * finfo(float).eps
print("Using the _FillValue attribute; _FillValue = %10f" % fill_value)
elif "missing_value" in adict:
missing = adict["missing_value"]
mask = abs(data - missing) < 2 * finfo(float).eps
print("""Using the missing_value attribute; missing_value = %10f
Warning: this attribute is deprecated by the NUG.""" % missing)
else:
print("No missing values found. Skipping this variable...")
continue
count = int(sum(mask))
if count == 0:
print("No missing values found. Skipping this variable...")
continue
print("Filling in %5d missing values..." % count)
t0 = time()
laplace(data, mask, -1, eps, initial_guess=options.initial_guess)
var[t, :, :] = data
# now REMOVE missing_value and _FillValue attributes
try:
delattr(var, '_FillValue')
except Exception:
pass
try:
delattr(var, 'missing_value')
except Exception:
pass
print("This took %5f seconds." % (time() - t0))
elif (var.ndim == 2):
data = asarray(squeeze(var[:]))
if "valid_range" in adict:
range = adict["valid_range"]
mask = ((data >= range[0]) & (data <= range[1]))
print("Using the valid_range attribute; range = ", range)
elif "valid_min" in adict and "valid_max" in adict:
valid_min = adict["valid_min"]
valid_max = adict["valid_max"]
mask = ((data < valid_min) | (data > valid_max))
print("""Using valid_min and valid_max attributes.
valid_min = %10f, valid_max = %10f.""" % (valid_min, valid_max))
elif "valid_min" in adict:
valid_min = adict["valid_min"]
mask = data < valid_min
print("Using the valid_min attribute; valid_min = %10f" % valid_min)
elif "valid_max" in adict:
valid_max = adict["valid_max"]
mask = data > valid_max
print("Using the valid_max attribute; valid_max = %10f" % valid_max)
elif "_FillValue" in adict:
fill_value = adict["_FillValue"]
if fill_value <= 0:
mask = data <= fill_value + 2 * finfo(float).eps
else:
mask = data >= fill_value - 2 * finfo(float).eps
print("Using the _FillValue attribute; _FillValue = %10f" % fill_value)
elif "missing_value" in adict:
missing = adict["missing_value"]
mask = abs(data - missing) < 2 * finfo(float).eps
print("""Using the missing_value attribute; missing_value = %10f
Warning: this attribute is deprecated by the NUG.""" % missing)
else:
print("No missing values found. Skipping this variable...")
continue
count = int(sum(mask))
if count == 0:
print("No missing values found. Skipping this variable...")
continue
print("Filling in %5d missing values..." % count)
t0 = time()
laplace(data, mask, -1, eps, initial_guess=options.initial_guess)
var[:] = data
# now REMOVE missing_value and _FillValue attributes
try:
delattr(var, '_FillValue')
except Exception:
pass
try:
delattr(var, 'missing_value')
except Exception:
pass
print("This took %5f seconds." % (time() - t0))
else:
print("Skipping %s: only 2D and 3D variables are supported." % name)
except Exception as message:
print("ERROR:", message)
print("Note: %s was not modified." % output_filename)
exit(-1)
print("Processing all the variables took %5f seconds." % (time() - t_zero))
nc.close()
try:
move(tmp_filename, output_filename)
except Exception:
print("Error moving %s to %s. Exiting..." % (tmp_filename,
output_filename))
exit(-1)
|
pism/pism
|
util/fill_missing.py
|
Python
|
gpl-3.0
| 17,216
|
[
"NetCDF"
] |
28e24f49d6e0c33a668d77b2c89eb7069c4d9fcddf08ddca52f7c66b2cf97a34
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2016-10-13 04:13:10
# @Last modified by: Brian Cherinka
# @Last Modified time: 2016-11-29 17:13:34
from __future__ import print_function, division, absolute_import
|
bretthandrews/marvin
|
python/marvin/core/__init__.py
|
Python
|
bsd-3-clause
| 297
|
[
"Brian"
] |
3851d3fd117b28f3940b6dabfe0a16b40015daeeb902d934f3558e664a019bd8
|
# $Id$
#
# Copyright (C) 2007-2008 by Greg Landrum
# All rights reserved
#
from rdkit import RDLogger
logger = RDLogger.logger()
from rdkit import Chem,Geometry
import numpy
from rdkit.Numerics import Alignment
from rdkit.Chem.Subshape import SubshapeObjects
class SubshapeAlignment(object):
transform=None
triangleSSD=None
targetTri=None
queryTri=None
alignedConfId=-1
dirMatch=0.0
shapeDist=0.0
def _getAllTriangles(pts,orderedTraversal=False):
for i in range(len(pts)):
if orderedTraversal:
jStart=i+1
else:
jStart=0
for j in range(jStart,len(pts)):
if j==i:
continue
if orderedTraversal:
kStart=j+1
else:
kStart=0
for k in range(kStart,len(pts)):
if k==i or k==j:
continue
yield (i,j,k)
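# For example, list(_getAllTriangles(range(4),orderedTraversal=True)) yields
# (0,1,2), (0,1,3), (0,2,3) and (1,2,3) -- each triangle exactly once.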
class SubshapeDistanceMetric(object):
TANIMOTO=0
PROTRUDE=1
# returns the distance between two shapes according to the provided metric
def GetShapeShapeDistance(s1,s2,distMetric):
if distMetric==SubshapeDistanceMetric.PROTRUDE:
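# ProtrudeDistance is asymmetric, so measure how far the smaller shape
# protrudes from the larger one: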
#print s1.grid.GetOccupancyVect().GetTotalVal(),s2.grid.GetOccupancyVect().GetTotalVal()
if s1.grid.GetOccupancyVect().GetTotalVal()<s2.grid.GetOccupancyVect().GetTotalVal():
d = Geometry.ProtrudeDistance(s1.grid,s2.grid)
#print d
else:
d = Geometry.ProtrudeDistance(s2.grid,s1.grid)
else:
d = Geometry.TanimotoDistance(s1.grid,s2.grid)
return d
# clusters a set of alignments and returns the cluster centroid
def ClusterAlignments(mol,alignments,builder,
neighborTol=0.1,
distMetric=SubshapeDistanceMetric.PROTRUDE,
tempConfId=1001):
from rdkit.ML.Cluster import Butina
dists = []
for i in range(len(alignments)):
TransformMol(mol,alignments[i].transform,newConfId=tempConfId)
shapeI=builder.GenerateSubshapeShape(mol,tempConfId,addSkeleton=False)
for j in range(i):
TransformMol(mol,alignments[j].transform,newConfId=tempConfId+1)
shapeJ=builder.GenerateSubshapeShape(mol,tempConfId+1,addSkeleton=False)
d = GetShapeShapeDistance(shapeI,shapeJ,distMetric)
dists.append(d)
mol.RemoveConformer(tempConfId+1)
mol.RemoveConformer(tempConfId)
clusts=Butina.ClusterData(dists,len(alignments),neighborTol,isDistData=True)
res = [alignments[x[0]] for x in clusts]
return res
def TransformMol(mol,tform,confId=-1,newConfId=100):
""" Applies the transformation to a molecule and sets it up with
a single conformer
"""
newConf = Chem.Conformer()
newConf.SetId(0)
refConf = mol.GetConformer(confId)
for i in range(refConf.GetNumAtoms()):
pos = list(refConf.GetAtomPosition(i))
pos.append(1.0)
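# appending 1.0 makes the position homogeneous, so the 4x4 transform applies
# rotation and translation in a single matrix product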
newPos = numpy.dot(tform,numpy.array(pos))
newConf.SetAtomPosition(i,list(newPos)[:3])
newConf.SetId(newConfId)
mol.RemoveConformer(newConfId)
mol.AddConformer(newConf,assignId=False)
class SubshapeAligner(object):
triangleRMSTol=1.0
distMetric=SubshapeDistanceMetric.PROTRUDE
shapeDistTol=0.2
numFeatThresh=3
dirThresh=2.6
edgeTol=6.0
#coarseGridToleranceMult=1.5
#medGridToleranceMult=1.25
coarseGridToleranceMult=1.0
medGridToleranceMult=1.0
def GetTriangleMatches(self,target,query):
""" this is a generator function returning the possible triangle
matches between the two shapes
"""
ssdTol = (self.triangleRMSTol**2)*9
res = []
tgtPts = target.skelPts
queryPts = query.skelPts
tgtLs = {}
for i in range(len(tgtPts)):
for j in range(i+1,len(tgtPts)):
l2 = (tgtPts[i].location-tgtPts[j].location).LengthSq()
tgtLs[(i,j)]=l2
queryLs = {}
for i in range(len(queryPts)):
for j in range(i+1,len(queryPts)):
l2 = (queryPts[i].location-queryPts[j].location).LengthSq()
queryLs[(i,j)]=l2
compatEdges={}
tol2 = self.edgeTol*self.edgeTol
for tk,tv in tgtLs.iteritems():
for qk,qv in queryLs.iteritems():
if abs(tv-qv)<tol2:
compatEdges[(tk,qk)]=1
seqNo=0
for tgtTri in _getAllTriangles(tgtPts,orderedTraversal=True):
tgtLocs=[tgtPts[x].location for x in tgtTri]
for queryTri in _getAllTriangles(queryPts,orderedTraversal=False):
if compatEdges.has_key(((tgtTri[0],tgtTri[1]),(queryTri[0],queryTri[1]))) and \
compatEdges.has_key(((tgtTri[0],tgtTri[2]),(queryTri[0],queryTri[2]))) and \
compatEdges.has_key(((tgtTri[1],tgtTri[2]),(queryTri[1],queryTri[2]))):
queryLocs=[queryPts[x].location for x in queryTri]
ssd,tf = Alignment.GetAlignmentTransform(tgtLocs,queryLocs)
if ssd<=ssdTol:
alg = SubshapeAlignment()
alg.transform=tf
alg.triangleSSD=ssd
alg.targetTri=tgtTri
alg.queryTri=queryTri
alg._seqNo=seqNo
seqNo+=1
yield alg
def _checkMatchFeatures(self,targetPts,queryPts,alignment):
nMatched=0
for i in range(3):
tgtFeats = targetPts[alignment.targetTri[i]].molFeatures
qFeats = queryPts[alignment.queryTri[i]].molFeatures
if not tgtFeats and not qFeats:
nMatched+=1
else:
for j,jFeat in enumerate(tgtFeats):
if jFeat in qFeats:
nMatched+=1
break
if nMatched>=self.numFeatThresh:
break
return nMatched>=self.numFeatThresh
def PruneMatchesUsingFeatures(self,target,query,alignments,pruneStats=None):
i = 0
targetPts = target.skelPts
queryPts = query.skelPts
while i<len(alignments):
alg = alignments[i]
if not self._checkMatchFeatures(targetPts,queryPts,alg):
if pruneStats is not None:
pruneStats['features']=pruneStats.get('features',0)+1
del alignments[i]
else:
i+=1
def _checkMatchDirections(self,targetPts,queryPts,alignment):
dot = 0.0
for i in range(3):
tgtPt = targetPts[alignment.targetTri[i]]
queryPt = queryPts[alignment.queryTri[i]]
qv = queryPt.shapeDirs[0]
tv = tgtPt.shapeDirs[0]
rotV =[0.0]*3
rotV[0] = alignment.transform[0,0]*qv[0]+alignment.transform[0,1]*qv[1]+alignment.transform[0,2]*qv[2]
rotV[1] = alignment.transform[1,0]*qv[0]+alignment.transform[1,1]*qv[1]+alignment.transform[1,2]*qv[2]
rotV[2] = alignment.transform[2,0]*qv[0]+alignment.transform[2,1]*qv[1]+alignment.transform[2,2]*qv[2]
dot += abs(rotV[0]*tv[0]+rotV[1]*tv[1]+rotV[2]*tv[2])
if dot>=self.dirThresh:
# already above the threshold, no need to continue
break
alignment.dirMatch=dot
return dot>=self.dirThresh
def PruneMatchesUsingDirection(self,target,query,alignments,pruneStats=None):
i = 0
tgtPts = target.skelPts
queryPts = query.skelPts
while i<len(alignments):
if not self._checkMatchDirections(tgtPts,queryPts,alignments[i]):
if pruneStats is not None:
pruneStats['direction']=pruneStats.get('direction',0)+1
del alignments[i]
else:
i+=1
def _addCoarseAndMediumGrids(self,mol,tgt,confId,builder):
oSpace=builder.gridSpacing
if mol:
builder.gridSpacing = oSpace*1.5
tgt.medGrid = builder.GenerateSubshapeShape(mol,confId,addSkeleton=False)
builder.gridSpacing = oSpace*2
tgt.coarseGrid = builder.GenerateSubshapeShape(mol,confId,addSkeleton=False)
builder.gridSpacing = oSpace
else:
tgt.medGrid = builder.SampleSubshape(tgt,oSpace*1.5)
tgt.coarseGrid = builder.SampleSubshape(tgt,oSpace*2.0)
def _checkMatchShape(self,targetMol,target,queryMol,query,alignment,builder,
targetConf,queryConf,pruneStats=None,tConfId=1001):
matchOk=True
TransformMol(queryMol,alignment.transform,confId=queryConf,newConfId=tConfId)
oSpace=builder.gridSpacing
builder.gridSpacing=oSpace*2
coarseGrid=builder.GenerateSubshapeShape(queryMol,tConfId,addSkeleton=False)
d = GetShapeShapeDistance(coarseGrid,target.coarseGrid,self.distMetric)
if d>self.shapeDistTol*self.coarseGridToleranceMult:
matchOk=False
if pruneStats is not None:
pruneStats['coarseGrid']=pruneStats.get('coarseGrid',0)+1
else:
builder.gridSpacing=oSpace*1.5
medGrid=builder.GenerateSubshapeShape(queryMol,tConfId,addSkeleton=False)
d = GetShapeShapeDistance(medGrid,target.medGrid,self.distMetric)
if d>self.shapeDistTol*self.medGridToleranceMult:
matchOk=False
if pruneStats is not None:
pruneStats['medGrid']=pruneStats.get('medGrid',0)+1
else:
builder.gridSpacing=oSpace
fineGrid=builder.GenerateSubshapeShape(queryMol,tConfId,addSkeleton=False)
d = GetShapeShapeDistance(fineGrid,target,self.distMetric)
#print ' ',d
if d>self.shapeDistTol:
matchOk=False
if pruneStats is not None:
pruneStats['fineGrid']=pruneStats.get('fineGrid',0)+1
alignment.shapeDist=d
queryMol.RemoveConformer(tConfId)
builder.gridSpacing=oSpace
return matchOk
def PruneMatchesUsingShape(self,targetMol,target,queryMol,query,builder,
alignments,tgtConf=-1,queryConf=-1,
pruneStats=None):
if not hasattr(target,'medGrid'):
self._addCoarseAndMediumGrids(targetMol,target,tgtConf,builder)
logger.info("Shape-based Pruning")
i=0
nOrig = len(alignments)
nDone=0
while i < len(alignments):
removeIt=False
alg = alignments[i]
nDone+=1
if not nDone%100:
nLeft = len(alignments)
logger.info(' processed %d of %d. %d alignments remain'%((nDone,
nOrig,
nLeft)))
if not self._checkMatchShape(targetMol,target,queryMol,query,alg,builder,
targetConf=tgtConf,queryConf=queryConf,
pruneStats=pruneStats):
del alignments[i]
else:
i+=1
def GetSubshapeAlignments(self,targetMol,target,queryMol,query,builder,
tgtConf=-1,queryConf=-1,pruneStats=None):
import time
if pruneStats is None:
pruneStats={}
logger.info("Generating triangle matches")
t1=time.time()
res = [x for x in self.GetTriangleMatches(target,query)]
t2=time.time()
logger.info("Got %d possible alignments in %.1f seconds"%(len(res),t2-t1))
pruneStats['gtm_time']=t2-t1
if builder.featFactory:
logger.info("Doing feature pruning")
t1 = time.time()
self.PruneMatchesUsingFeatures(target,query,res,pruneStats=pruneStats)
t2 = time.time()
pruneStats['feats_time']=t2-t1
logger.info("%d possible alignments remain. (%.1f seconds required)"%(len(res),t2-t1))
logger.info("Doing direction pruning")
t1 = time.time()
self.PruneMatchesUsingDirection(target,query,res,pruneStats=pruneStats)
t2 = time.time()
pruneStats['direction_time']=t2-t1
logger.info("%d possible alignments remain. (%.1f seconds required)"%(len(res),t2-t1))
t1 = time.time()
self.PruneMatchesUsingShape(targetMol,target,queryMol,query,builder,res,
tgtConf=tgtConf,queryConf=queryConf,
pruneStats=pruneStats)
t2 = time.time()
pruneStats['shape_time']=t2-t1
return res
def __call__(self,targetMol,target,queryMol,query,builder,
tgtConf=-1,queryConf=-1,pruneStats=None):
for alignment in self.GetTriangleMatches(target,query):
if builder.featFactory and \
not self._checkMatchFeatures(target.skelPts,query.skelPts,alignment):
if pruneStats is not None:
pruneStats['features']=pruneStats.get('features',0)+1
continue
if not self._checkMatchDirections(target.skelPts,query.skelPts,alignment):
if pruneStats is not None:
pruneStats['direction']=pruneStats.get('direction',0)+1
continue
if not hasattr(target,'medGrid'):
self._addCoarseAndMediumGrids(targetMol,target,tgtConf,builder)
if not self._checkMatchShape(targetMol,target,queryMol,query,alignment,builder,
targetConf=tgtConf,queryConf=queryConf,
pruneStats=pruneStats):
continue
# if we made it this far, it's a good alignment
yield alignment
if __name__=='__main__':
import cPickle
tgtMol,tgtShape = cPickle.load(file('target.pkl','rb'))
queryMol,queryShape = cPickle.load(file('query.pkl','rb'))
builder = cPickle.load(file('builder.pkl','rb'))
aligner = SubshapeAligner()
algs = aligner.GetSubshapeAlignments(tgtMol,tgtShape,queryMol,queryShape,builder)
print len(algs)
from rdkit.Chem.PyMol import MolViewer
v = MolViewer()
v.ShowMol(tgtMol,name='Target',showOnly=True)
v.ShowMol(queryMol,name='Query',showOnly=False)
SubshapeObjects.DisplaySubshape(v,tgtShape,'target_shape',color=(.8,.2,.2))
SubshapeObjects.DisplaySubshape(v,queryShape,'query_shape',color=(.2,.2,.8))
|
rdkit/rdkit-orig
|
rdkit/Chem/Subshape/SubshapeAligner.py
|
Python
|
bsd-3-clause
| 13,196
|
[
"PyMOL",
"RDKit"
] |
c38c1afce488930ea6e7cd9fef3f18dd754eaacd343ff7e077cdc6afa106ed32
|
##
# Copyright 2009-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing OpenFOAM, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Xavier Besseron (University of Luxembourg)
@author: Ward Poelmans (Ghent University)
@author: Balazs Hajgato (Free University Brussels (VUB))
"""
import glob
import os
import re
import shutil
import stat
import tempfile
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.cmakemake import setup_cmake_env
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import adjust_permissions, apply_regex_substitutions, mkdir
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd, run_cmd_qa
from easybuild.tools.systemtools import get_shared_lib_ext, get_cpu_architecture, AARCH64, POWER
class EB_OpenFOAM(EasyBlock):
"""Support for building and installing OpenFOAM."""
def __init__(self, *args, **kwargs):
"""Specify that OpenFOAM should be built in install dir."""
super(EB_OpenFOAM, self).__init__(*args, **kwargs)
self.build_in_installdir = True
self.wm_compiler = None
self.wm_mplib = None
self.openfoamdir = None
self.thrdpartydir = None
# version may start with 'v' for some variants of OpenFOAM
# we need to strip this off to avoid problems when comparing LooseVersion instances in Python 3
clean_version = self.version.strip('v+')
# take into account versions like '4.x',
# assume it's equivalent to a very recent minor version (.99)
if '.x' in clean_version:
clean_version = clean_version.replace('.x', '.99')
self.looseversion = LooseVersion(clean_version)
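# e.g. version 'v2012' yields LooseVersion('2012'), and '4.x' yields LooseVersion('4.99')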
if 'extend' in self.name.lower():
if self.looseversion >= LooseVersion('3.0'):
self.openfoamdir = 'foam-extend-%s' % self.version
else:
self.openfoamdir = 'OpenFOAM-%s-ext' % self.version
else:
self.openfoamdir = '-'.join([self.name, '-'.join(self.version.split('-')[:2])])
self.log.debug("openfoamdir: %s" % self.openfoamdir)
# Set build type to requested value
if self.toolchain.options['debug']:
self.build_type = 'Debug'
else:
self.build_type = 'Opt'
def extract_step(self):
"""Extract sources as expected by the OpenFOAM(-Extend) build scripts."""
super(EB_OpenFOAM, self).extract_step()
# make sure that the expected subdir is really there after extracting
# if not, the build scripts (e.g., the etc/bashrc being sourced) will likely fail
openfoam_installdir = os.path.join(self.installdir, self.openfoamdir)
if not os.path.exists(openfoam_installdir):
self.log.warning("Creating expected directory %s, and moving everything there" % openfoam_installdir)
try:
contents_installdir = os.listdir(self.installdir)
source = os.path.join(self.installdir, contents_installdir[0])
# it's one directory but has a wrong name
if len(contents_installdir) == 1 and os.path.isdir(source):
target = os.path.join(self.installdir, self.openfoamdir)
self.log.debug("Renaming %s to %s", source, target)
os.rename(source, target)
else:
mkdir(openfoam_installdir)
for fil in contents_installdir:
if fil != self.openfoamdir:
source = os.path.join(self.installdir, fil)
target = os.path.join(openfoam_installdir, fil)
self.log.debug("Moving %s to %s", source, target)
shutil.move(source, target)
os.chdir(openfoam_installdir)
except OSError as err:
raise EasyBuildError("Failed to move all files to %s: %s", openfoam_installdir, err)
def patch_step(self, beginpath=None):
"""Adjust start directory and start path for patching to correct directory."""
self.cfg['start_dir'] = os.path.join(self.installdir, self.openfoamdir)
super(EB_OpenFOAM, self).patch_step(beginpath=self.cfg['start_dir'])
def prepare_step(self, *args, **kwargs):
"""Prepare for OpenFOAM install procedure."""
super(EB_OpenFOAM, self).prepare_step(*args, **kwargs)
comp_fam = self.toolchain.comp_family()
if comp_fam == toolchain.GCC: # @UndefinedVariable
self.wm_compiler = 'Gcc'
elif comp_fam == toolchain.INTELCOMP: # @UndefinedVariable
self.wm_compiler = 'Icc'
else:
raise EasyBuildError("Unknown compiler family, don't know how to set WM_COMPILER")
# set to an MPI unknown by OpenFOAM, since we're handling the MPI settings ourselves (via mpicc, etc.)
# Note: this name must contain 'MPI' so the MPI version of the
# Pstream library is built (cf src/Pstream/Allwmake)
self.wm_mplib = "EASYBUILDMPI"
def configure_step(self):
"""Configure OpenFOAM build by setting appropriate environment variables."""
# compiler & compiler flags
comp_fam = self.toolchain.comp_family()
extra_flags = ''
if comp_fam == toolchain.GCC: # @UndefinedVariable
if LooseVersion(get_software_version('GCC')) >= LooseVersion('4.8'):
# make sure non-gold version of ld is used, since OpenFOAM requires it
# see http://www.openfoam.org/mantisbt/view.php?id=685
extra_flags = '-fuse-ld=bfd'
# older versions of OpenFOAM-Extend require -fpermissive
if 'extend' in self.name.lower() and self.looseversion < LooseVersion('2.0'):
extra_flags += ' -fpermissive'
if self.looseversion < LooseVersion('3.0'):
extra_flags += ' -fno-delete-null-pointer-checks'
elif comp_fam == toolchain.INTELCOMP: # @UndefinedVariable
# make sure -no-prec-div is used with Intel compilers
extra_flags = '-no-prec-div'
for env_var in ['CFLAGS', 'CXXFLAGS']:
env.setvar(env_var, "%s %s" % (os.environ.get(env_var, ''), extra_flags))
# patch out hardcoding of WM_* environment variables
# for example, replace 'export WM_COMPILER=Gcc' with ': ${WM_COMPILER:=Gcc}; export WM_COMPILER'
for script in [os.path.join(self.builddir, self.openfoamdir, x) for x in ['etc/bashrc', 'etc/cshrc']]:
self.log.debug("Patching out hardcoded $WM_* env vars in %s", script)
# disable any third party stuff, we use EB controlled builds
regex_subs = [(r"^(setenv|export) WM_THIRD_PARTY_USE_.*[ =].*$", r"# \g<0>")]
# this does not work for OpenFOAM Extend lower than 2.0
if 'extend' not in self.name.lower() or self.looseversion >= LooseVersion('2.0'):
key = "WM_PROJECT_VERSION"
regex_subs += [(r"^(setenv|export) %s=.*$" % key, r"export %s=%s #\g<0>" % (key, self.version))]
WM_env_var = ['WM_COMPILER', 'WM_COMPILE_OPTION', 'WM_MPLIB', 'WM_THIRD_PARTY_DIR']
# OpenFOAM >= 3.0.0 can use 64 bit integers
if 'extend' not in self.name.lower() and self.looseversion >= LooseVersion('3.0'):
WM_env_var.append('WM_LABEL_SIZE')
for env_var in WM_env_var:
regex_subs.append((r"^(setenv|export) (?P<var>%s)[ =](?P<val>.*)$" % env_var,
r": ${\g<var>:=\g<val>}; export \g<var>"))
apply_regex_substitutions(script, regex_subs)
# inject compiler variables into wmake/rules files
ldirs = glob.glob(os.path.join(self.builddir, self.openfoamdir, 'wmake', 'rules', 'linux*'))
if self.looseversion >= LooseVersion('1906'):
ldirs += glob.glob(os.path.join(self.builddir, self.openfoamdir, 'wmake', 'rules', 'General', '*'))
langs = ['c', 'c++']
# NOTE: we do not want to change the Debug rules files because
# that would change the cOPT/c++OPT values from their empty setting.
suffixes = ['', 'Opt']
wmake_rules_files = [os.path.join(ldir, lang + suff) for ldir in ldirs for lang in langs for suff in suffixes]
mpicc = os.environ['MPICC']
mpicxx = os.environ['MPICXX']
cc_seq = os.environ.get('CC_SEQ', os.environ['CC'])
cxx_seq = os.environ.get('CXX_SEQ', os.environ['CXX'])
if self.toolchain.mpi_family() == toolchain.OPENMPI:
# no -cc/-cxx flags supported in OpenMPI compiler wrappers
c_comp_cmd = 'OMPI_CC="%s" %s' % (cc_seq, mpicc)
cxx_comp_cmd = 'OMPI_CXX="%s" %s' % (cxx_seq, mpicxx)
else:
# -cc/-cxx should work for all MPICH-based MPIs (including Intel MPI)
c_comp_cmd = '%s -cc="%s"' % (mpicc, cc_seq)
cxx_comp_cmd = '%s -cxx="%s"' % (mpicxx, cxx_seq)
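# e.g. (assuming GCC with OpenMPI) cc becomes 'OMPI_CC="gcc" mpicc', while an
# MPICH-based MPI would give 'mpicc -cc="gcc"' instead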
comp_vars = {
# specify MPI compiler wrappers and compiler commands + sequential compiler that should be used by them
'cc': c_comp_cmd,
'CC': cxx_comp_cmd,
'cOPT': os.environ['CFLAGS'],
'c++OPT': os.environ['CXXFLAGS'],
}
for wmake_rules_file in wmake_rules_files:
# the cOpt and c++Opt files don't exist in the General directories (which are included for recent versions)
if not os.path.isfile(wmake_rules_file):
continue
fullpath = os.path.join(self.builddir, self.openfoamdir, wmake_rules_file)
self.log.debug("Patching compiler variables in %s", fullpath)
regex_subs = []
for comp_var, newval in comp_vars.items():
regex_subs.append((r"^(%s\s*=\s*).*$" % re.escape(comp_var), r"\1%s" % newval))
apply_regex_substitutions(fullpath, regex_subs)
# enable verbose build for debug purposes
# starting with openfoam-extend 3.2, PS1 also needs to be set
env.setvar("FOAM_VERBOSE", '1')
# installation directory
env.setvar("FOAM_INST_DIR", self.installdir)
# third party directory
self.thrdpartydir = "ThirdParty-%s" % self.version
# only if third party stuff is actually installed
if os.path.exists(self.thrdpartydir):
os.symlink(os.path.join("..", self.thrdpartydir), self.thrdpartydir)
env.setvar("WM_THIRD_PARTY_DIR", os.path.join(self.installdir, self.thrdpartydir))
env.setvar("WM_COMPILER", self.wm_compiler)
env.setvar("WM_MPLIB", self.wm_mplib)
# Set Compile options according to build type
env.setvar("WM_COMPILE_OPTION", self.build_type)
# parallel build spec
env.setvar("WM_NCOMPPROCS", str(self.cfg['parallel']))
# OpenFOAM >= 3.0.0 can use 64 bit integers
if 'extend' not in self.name.lower() and self.looseversion >= LooseVersion('3.0'):
if self.toolchain.options['i8']:
env.setvar("WM_LABEL_SIZE", '64')
else:
env.setvar("WM_LABEL_SIZE", '32')
# make sure lib/include dirs for dependencies are found
openfoam_extend_v3 = 'extend' in self.name.lower() and self.looseversion >= LooseVersion('3.0')
if self.looseversion < LooseVersion("2") or openfoam_extend_v3:
self.log.debug("List of deps: %s" % self.cfg.dependencies())
for dep in self.cfg.dependencies():
dep_name = dep['name'].upper()
dep_root = get_software_root(dep['name'])
env.setvar("%s_SYSTEM" % dep_name, "1")
dep_vars = {
"%s_DIR": "%s",
"%s_BIN_DIR": "%s/bin",
"%s_LIB_DIR": "%s/lib",
"%s_INCLUDE_DIR": "%s/include",
}
for var, val in dep_vars.items():
env.setvar(var % dep_name, val % dep_root)
else:
for depend in ['SCOTCH', 'METIS', 'CGAL', 'Paraview']:
dependloc = get_software_root(depend)
if dependloc:
if depend == 'CGAL' and get_software_root('Boost'):
env.setvar("CGAL_ROOT", dependloc)
env.setvar("BOOST_ROOT", get_software_root('Boost'))
else:
env.setvar("%s_ROOT" % depend.upper(), dependloc)
def build_step(self):
"""Build OpenFOAM using make after sourcing script to set environment."""
# Some parts of OpenFOAM uses CMake to build
# make sure the basic environment is correct
setup_cmake_env(self.toolchain)
precmd = "source %s" % os.path.join(self.builddir, self.openfoamdir, "etc", "bashrc")
if 'extend' not in self.name.lower() and self.looseversion >= LooseVersion('4.0'):
if self.looseversion >= LooseVersion('2006'):
cleancmd = "cd $WM_PROJECT_DIR && wclean -platform -all && cd -"
else:
cleancmd = "cd $WM_PROJECT_DIR && wcleanPlatform -all && cd -"
else:
cleancmd = "wcleanAll"
# make directly in install directory
cmd_tmpl = "%(precmd)s && %(cleancmd)s && %(prebuildopts)s %(makecmd)s" % {
'precmd': precmd,
'cleancmd': cleancmd,
'prebuildopts': self.cfg['prebuildopts'],
'makecmd': os.path.join(self.builddir, self.openfoamdir, '%s'),
}
if 'extend' in self.name.lower() and self.looseversion >= LooseVersion('3.0'):
qa = {
"Proceed without compiling ParaView [Y/n]": 'Y',
"Proceed without compiling cudaSolvers? [Y/n]": 'Y',
}
noqa = [
".* -o .*",
"checking .*",
"warning.*",
"configure: creating.*",
"%s .*" % os.environ['CC'],
"wmake .*",
"Making dependency list for source file.*",
r"\s*\^\s*", # warning indicator
"Cleaning .*",
]
run_cmd_qa(cmd_tmpl % 'Allwmake.firstInstall', qa, no_qa=noqa, log_all=True, simple=True, maxhits=500)
else:
cmd = 'Allwmake'
if self.looseversion > LooseVersion('1606'):
# use Allwmake -log option if possible since this can be useful during builds, but also afterwards
cmd += ' -log'
run_cmd(cmd_tmpl % cmd, log_all=True, simple=True, log_output=True)
def det_psubdir(self):
"""Determine the platform-specific installation directory for OpenFOAM."""
# OpenFOAM >= 3.0.0 can use 64 bit integers
# same goes for OpenFOAM-Extend >= 4.1
if 'extend' in self.name.lower():
set_int_size = self.looseversion >= LooseVersion('4.1')
else:
set_int_size = self.looseversion >= LooseVersion('3.0')
if set_int_size:
if self.toolchain.options['i8']:
int_size = 'Int64'
else:
int_size = 'Int32'
else:
int_size = ''
archpart = '64'
arch = get_cpu_architecture()
if arch == AARCH64:
# Variants have different abbreviations for ARM64...
if self.looseversion < LooseVersion("100"):
archpart = 'Arm64'
else:
archpart = 'ARM64'
elif arch == POWER:
archpart = 'PPC64le'
psubdir = "linux%s%sDP%s%s" % (archpart, self.wm_compiler, int_size, self.build_type)
return psubdir
def install_step(self):
"""Building was performed in install dir, so just fix permissions."""
# fix permissions of OpenFOAM dir
fullpath = os.path.join(self.installdir, self.openfoamdir)
adjust_permissions(fullpath, stat.S_IROTH, add=True, recursive=True, ignore_errors=True)
adjust_permissions(fullpath, stat.S_IXOTH, add=True, recursive=True, onlydirs=True, ignore_errors=True)
# fix permissions of ThirdParty dir and subdirs (also for 2.x)
# if the thirdparty tarball is installed
fullpath = os.path.join(self.installdir, self.thrdpartydir)
if os.path.exists(fullpath):
adjust_permissions(fullpath, stat.S_IROTH, add=True, recursive=True, ignore_errors=True)
adjust_permissions(fullpath, stat.S_IXOTH, add=True, recursive=True, onlydirs=True, ignore_errors=True)
# create symlinks in the lib directory to all libraries in the mpi subdirectory
# to make sure they take precedence over the libraries in the dummy subdirectory
shlib_ext = get_shared_lib_ext()
psubdir = self.det_psubdir()
openfoam_extend_v3 = 'extend' in self.name.lower() and self.looseversion >= LooseVersion('3.0')
if openfoam_extend_v3 or self.looseversion < LooseVersion("2"):
libdir = os.path.join(self.installdir, self.openfoamdir, "lib", psubdir)
else:
libdir = os.path.join(self.installdir, self.openfoamdir, "platforms", psubdir, "lib")
# OpenFOAM v2012 puts mpi into eb-mpi
if self.looseversion >= LooseVersion("2012"):
mpilibssubdir = "eb-mpi"
else:
mpilibssubdir = "mpi"
mpilibsdir = os.path.join(libdir, mpilibssubdir)
if os.path.exists(mpilibsdir):
for lib in glob.glob(os.path.join(mpilibsdir, "*.%s" % shlib_ext)):
libname = os.path.basename(lib)
dst = os.path.join(libdir, libname)
os.symlink(os.path.join(mpilibssubdir, libname), dst)
def sanity_check_step(self):
"""Custom sanity check for OpenFOAM"""
shlib_ext = get_shared_lib_ext()
psubdir = self.det_psubdir()
openfoam_extend_v3 = 'extend' in self.name.lower() and self.looseversion >= LooseVersion('3.0')
if openfoam_extend_v3 or self.looseversion < LooseVersion("2"):
toolsdir = os.path.join(self.openfoamdir, "applications", "bin", psubdir)
libsdir = os.path.join(self.openfoamdir, "lib", psubdir)
dirs = [toolsdir, libsdir]
else:
toolsdir = os.path.join(self.openfoamdir, "platforms", psubdir, "bin")
libsdir = os.path.join(self.openfoamdir, "platforms", psubdir, "lib")
dirs = [toolsdir, libsdir]
# some randomly selected binaries
# if one of these is missing, it's very likely something went wrong
bins = [os.path.join(self.openfoamdir, "bin", x) for x in ["paraFoam"]] + \
[os.path.join(toolsdir, "buoyantSimpleFoam")] + \
[os.path.join(toolsdir, "%sFoam" % x) for x in ["boundary", "engine"]] + \
[os.path.join(toolsdir, "surface%s" % x) for x in ["Add", "Find", "Smooth"]] + \
[os.path.join(toolsdir, x) for x in ['blockMesh', 'checkMesh', 'deformedGeom', 'engineSwirl',
'modifyMesh', 'refineMesh']]
# test setting up the OpenFOAM environment in bash shell
load_openfoam_env = "source $FOAM_BASH"
custom_commands = [load_openfoam_env]
# only check for the Boussinesq and sonic solvers with OpenFOAM < 7, since they were deprecated in later versions
if self.looseversion < LooseVersion('7'):
bins.extend([
os.path.join(toolsdir, "buoyantBoussinesqSimpleFoam"),
os.path.join(toolsdir, "sonicFoam")
])
# check for the Pstream and scotchDecomp libraries, there must be a dummy one and an mpi one
if 'extend' in self.name.lower():
libs = [os.path.join(libsdir, "libscotchDecomp.%s" % shlib_ext),
os.path.join(libsdir, "libmetisDecomp.%s" % shlib_ext)]
if self.looseversion < LooseVersion('3.2'):
# Pstream should have both a dummy and a mpi one
libs.extend([os.path.join(libsdir, x, "libPstream.%s" % shlib_ext) for x in ["dummy", "mpi"]])
libs.extend([os.path.join(libsdir, "mpi", "libparMetisDecomp.%s" % shlib_ext)])
else:
libs.extend([os.path.join(libsdir, "libparMetisDecomp.%s" % shlib_ext)])
else:
# OpenFOAM v2012 puts mpi into eb-mpi
if self.looseversion >= LooseVersion("2012"):
mpilibssubdir = "eb-mpi"
else:
mpilibssubdir = "mpi"
# there must be a dummy one and an mpi one for both
libs = [os.path.join(libsdir, x, "libPstream.%s" % shlib_ext) for x in ["dummy", mpilibssubdir]] + \
[os.path.join(libsdir, x, "libptscotchDecomp.%s" % shlib_ext) for x in ["dummy", mpilibssubdir]] +\
[os.path.join(libsdir, "libscotchDecomp.%s" % shlib_ext)] + \
[os.path.join(libsdir, "dummy", "libscotchDecomp.%s" % shlib_ext)]
if 'extend' not in self.name.lower() and self.looseversion >= LooseVersion("2.3.0"):
# surfaceSmooth is replaced by surfaceLambdaMuSmooth in OpenFOAM v2.3.0
bins.remove(os.path.join(toolsdir, "surfaceSmooth"))
bins.append(os.path.join(toolsdir, "surfaceLambdaMuSmooth"))
if 'extend' not in self.name.lower() and self.looseversion >= LooseVersion("2.4.0"):
# also check for foamMonitor for OpenFOAM versions other than OpenFOAM-Extend
bins.append(os.path.join(self.openfoamdir, 'bin', 'foamMonitor'))
# test foamMonitor; wrap `foamMonitor -h` to generate exit code 1 if any dependency is missing
# the command `foamMonitor -h` does not return correct exit codes on its own in all versions
test_foammonitor = "! foamMonitor -h 2>&1 | grep 'not installed'"
custom_commands.append(' && '.join([load_openfoam_env, test_foammonitor]))
custom_paths = {
'files': [os.path.join(self.openfoamdir, 'etc', x) for x in ["bashrc", "cshrc"]] + bins + libs,
'dirs': dirs,
}
# run motorBike tutorial case to ensure the installation is functional (if it's available);
# only for recent (>= v6.0) versions of openfoam.org variant
if self.looseversion >= LooseVersion('6') and self.looseversion < LooseVersion('100'):
openfoamdir_path = os.path.join(self.installdir, self.openfoamdir)
motorbike_path = os.path.join(openfoamdir_path, 'tutorials', 'incompressible', 'simpleFoam', 'motorBike')
if os.path.exists(motorbike_path):
test_dir = tempfile.mkdtemp()
if self.looseversion >= LooseVersion('9'):
geom_target_dir = 'geometry'
else:
geom_target_dir = 'triSurface'
cmds = [
"cp -a %s %s" % (motorbike_path, test_dir),
"cd %s" % os.path.join(test_dir, os.path.basename(motorbike_path)),
"source $FOAM_BASH",
". $WM_PROJECT_DIR/bin/tools/RunFunctions",
"cp $FOAM_TUTORIALS/resources/geometry/motorBike.obj.gz constant/%s/" % geom_target_dir,
"runApplication surfaceFeatures",
"runApplication blockMesh",
"runApplication decomposePar -copyZero",
"runParallel snappyHexMesh -overwrite",
"runParallel patchSummary",
"runParallel potentialFoam",
"runParallel simpleFoam",
"runApplication reconstructParMesh -constant",
"runApplication reconstructPar -latestTime",
"cd %s" % self.builddir,
"rm -r %s" % test_dir,
]
# all commands need to be run in a single shell command,
# because sourcing $FOAM_BASH sets up environment
custom_commands.append(' && '.join(cmds))
super(EB_OpenFOAM, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)
def make_module_extra(self, altroot=None, altversion=None):
"""Define extra environment variables required by OpenFOAM"""
txt = super(EB_OpenFOAM, self).make_module_extra()
env_vars = [
# Set WM_COMPILE_OPTION in the module file
# $FOAM_BASH will then pick it up correctly.
('WM_COMPILE_OPTION', self.build_type),
('WM_PROJECT_VERSION', self.version),
('FOAM_INST_DIR', self.installdir),
('WM_COMPILER', self.wm_compiler),
('WM_MPLIB', self.wm_mplib),
('FOAM_BASH', os.path.join(self.installdir, self.openfoamdir, 'etc', 'bashrc')),
('FOAM_CSH', os.path.join(self.installdir, self.openfoamdir, 'etc', 'cshrc')),
]
# OpenFOAM >= 3.0.0 can use 64 bit integers
if 'extend' not in self.name.lower() and self.looseversion >= LooseVersion('3.0'):
if self.toolchain.options['i8']:
env_vars += [('WM_LABEL_SIZE', '64')]
else:
env_vars += [('WM_LABEL_SIZE', '32')]
for (env_var, val) in env_vars:
# check whether value is defined for compatibility with --module-only
if val:
txt += self.module_generator.set_environment(env_var, val)
return txt
|
akesandgren/easybuild-easyblocks
|
easybuild/easyblocks/o/openfoam.py
|
Python
|
gpl-2.0
| 26,981
|
[
"ParaView"
] |
ed1a916fe4492a4b888f0e555550084b62cd806ca5f05ffd444b9d8bce2258ba
|
""" Contains unit tests of NetworkAgent module
"""
import unittest
import sys
import DIRAC.AccountingSystem.Agent.NetworkAgent as module
from mock.mock import MagicMock
__RCSID__ = "$Id$"
MQURI1 = 'mq.dirac.net::Topics::perfsonar.summary.packet-loss-rate'
MQURI2 = 'mq.dirac.net::Queues::perfsonar.summary.histogram-owdelay'
ROOT_PATH = '/Resources/Sites'
SITE1 = 'LCG.Dirac.net'
SITE2 = 'LCG.DiracToRemove.net'
SITE3 = 'VAC.DiracToAdd.org'
SITE1_HOST1 = 'perfsonar.diracold.net'
SITE1_HOST2 = 'perfsonar-to-disable.diracold.net'
SITE2_HOST1 = 'perfsonar.diractoremove.net'
SITE3_HOST1 = 'perfsonar.diractoadd.org'
INITIAL_CONFIG = \
{
'%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE1, SITE1_HOST1 ): 'True',
'%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE1, SITE1_HOST2 ): 'True',
'%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE2, SITE2_HOST1 ): 'True'
}
UPDATED_CONFIG = \
{
'%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE1, SITE1_HOST1 ): 'True',
'%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE1, SITE1_HOST2 ): 'False',
'%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE3, SITE3_HOST1 ): 'True'
}
class NetworkAgentSuccessTestCase( unittest.TestCase ):
""" Test class to check success scenarios.
"""
def setUp( self ):
# external dependencies
module.datetime = MagicMock()
# internal dependencies
module.S_ERROR = MagicMock()
module.S_OK = MagicMock()
module.gLogger = MagicMock()
module.AgentModule = MagicMock()
module.Network = MagicMock()
module.gConfig = MagicMock()
module.CSAPI = MagicMock()
module.createConsumer = MagicMock()
# prepare test object
module.NetworkAgent.__init__ = MagicMock( return_value = None )
module.NetworkAgent.am_getOption = MagicMock( return_value = 100 ) # buffer timeout
self.agent = module.NetworkAgent()
self.agent.initialize()
@classmethod
def tearDownClass(cls):
sys.modules.pop('DIRAC.AccountingSystem.Agent.NetworkAgent')
def test_updateNameDictionary( self ):
module.gConfig.getConfigurationTree.side_effect = [
{'OK': True, 'Value': INITIAL_CONFIG },
{'OK': True, 'Value': UPDATED_CONFIG },
]
# check if name dictionary is empty
self.assertFalse( self.agent.nameDictionary )
self.agent.updateNameDictionary()
self.assertEqual( self.agent.nameDictionary[SITE1_HOST1], SITE1 )
self.assertEqual( self.agent.nameDictionary[SITE1_HOST2], SITE1 )
self.assertEqual( self.agent.nameDictionary[SITE2_HOST1], SITE2 )
self.agent.updateNameDictionary()
self.assertEqual( self.agent.nameDictionary[SITE1_HOST1], SITE1 )
self.assertEqual( self.agent.nameDictionary[SITE3_HOST1], SITE3 )
# check if hosts were removed from the dictionary
self.assertRaises( KeyError, lambda: self.agent.nameDictionary[SITE1_HOST2] )
self.assertRaises( KeyError, lambda: self.agent.nameDictionary[SITE2_HOST1] )
def test_agentExecute( self ):
module.NetworkAgent.am_getOption.return_value = '%s, %s' % ( MQURI1, MQURI2 )
module.gConfig.getConfigurationTree.return_value = {'OK': True, 'Value': INITIAL_CONFIG }
# first run
result = self.agent.execute()
self.assertTrue( result['OK'] )
# second run (simulate new messages)
self.agent.messagesCount += 10
result = self.agent.execute()
self.assertTrue( result['OK'] )
# third run (no new messages - restart consumers)
result = self.agent.execute()
self.assertTrue( result['OK'] )
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase( NetworkAgentSuccessTestCase )
testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
|
andresailer/DIRAC
|
AccountingSystem/Agent/test/Test_NetworkAgent.py
|
Python
|
gpl-3.0
| 3,827
|
[
"DIRAC"
] |
a77ed08c2909209e7e2a33216f19c0f77ba827a1247e80f6a124d069eed0fb47
|
"""Conversion functions for weather radar and rainfall data."""
from numpy import isfinite, log, ubyte
from scipy.ndimage import gaussian_filter
from skimage.exposure import equalize_hist, rescale_intensity
def dBZ_to_ubyte(I, dBZ_min=-10.0, dBZ_max=50.0, filter_stddev=3.0):
"""Convert a dBZ field into a 8-bit image, as required by Optflow. Optionally,
apply a Gaussian smoothing filter.
Parameters
----------
I : array-like
The dBZ field.
dBZ_min : float
Minimum dBZ. Values smaller than dBZ_min are set to dBZ_min. If None,
dBZ_min is computed from I.
dBZ_max : float
Maximum dBZ. Values greater than dBZ_max are set to dBZ_max. If None,
dBZ_max is computed from I.
filter_stddev : float
Standard deviation of the Gaussian filter (0=no filtering)
Returns
-------
out : ndarray(dtype=ubyte)
The processed dBZ field.
"""
I = I.copy()
MASK = isfinite(I)
if dBZ_min is None:
dBZ_min = min(I[MASK])
if dBZ_max is None:
dBZ_max = max(I[MASK])
I[~MASK] = dBZ_min
I[I < dBZ_min] = dBZ_min
I[I > dBZ_max] = dBZ_max
if filter_stddev > 0.0:
I = gaussian_filter(I, filter_stddev, mode="reflect")
I = ((I - dBZ_min) / (dBZ_max - dBZ_min)) * 255.0
return I.astype(ubyte)
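# Minimal usage sketch (synthetic data, not part of the original module):
#   >>> import numpy as np
#   >>> field = np.random.uniform(-15.0, 55.0, size=(256, 256))
#   >>> img = dBZ_to_ubyte(field)  # clipped to [-10, 50] dBZ, smoothed, scaled to 0..255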
def rainfall_to_ubyte(I, R_min=0.1, R_max=40.0, filter_stddev=3.0, logtrans=False):
"""Convert a rainfall intensity field into a 8-bit image, as required by
Optflow. Optionally, apply a Gaussian smoothing filter.
Parameters
----------
I : array-like
The input rainfall field.
R_min : float
Minimum rainfall intensity. Values smaller than R_min are set to R_min.
If None, R_min is computed from I.
R_max : float
Maximum rainfall intensity. Values greater than R_max are set to R_max.
If None, R_max is computed from I.
filter_stddev : float
Standard deviation of the Gaussian filter (0=no filtering)
logtrans : bool
If True, apply a log-transform to the input rainfall field. In this case,
R_min must be nonzero.
Returns
-------
out : ndarray(dtype=ubyte)
The processed rainfall field.
"""
I = I.copy()
MASK = isfinite(I)
if R_min is None:
R_min = min(I[MASK])
if R_max is None:
R_max = max(I[MASK])
I[~MASK] = R_min
I[I < R_min] = R_min
I[I > R_max] = R_max
if logtrans:
if R_min == 0.0:
raise ValueError("R_min must be nonzero if log-transform is used")
I = log(I)
R_min = log(R_min)
R_max = log(R_max)
# TESTING
#I = rescale_intensity(I, (R_min, R_max), (0.0, 1.0))
#I = equalize_hist(I)
#I = ((I - min(I)) / (max(I) - min(I))) * 255.0
MASK = I > R_min
# TODO: Make the threshold 128 configurable.
I[MASK] = 128.0 + ((I[MASK] - R_min) / (R_max - R_min)) * (255.0 - 128.0)
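# rainy pixels (> R_min) now occupy [128, 255]; everything else is zeroed
# below, which keeps the rain/no-rain boundary detectable after smoothing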
I[~MASK] = 0.0
I = I.astype(ubyte)
if filter_stddev > 0.0:
I = gaussian_filter(I, filter_stddev, mode="reflect")
return I
|
sataako/fmio-server
|
pyoptflow/utils.py
|
Python
|
mit
| 2,944
|
[
"Gaussian"
] |
5b1fd71e7a35eff3baafddcc7cba6b1cca81c57fa2fb437a6703d973e88b1fef
|
#!/usr/bin/env python3
import os
import tempfile
import re
import sys
import shutil
import glob
import sh
import obsscripts
# update rook to a newer version
PACKAGE = "rook"
SRCREPO = "rook/rook"
LATEST_OCTOPUS = "v1.2.7"
LATEST_NAUTILUS = "v1.2.7"
OBS = "https://api.opensuse.org"
IBS = "https://api.suse.de"
OOSC = sh.osc.bake(A=OBS)
IOSC = sh.osc.bake(A=IBS)
BRANCHBASE = obsscripts.obs_branchbase(OBS)
PROJECTS = {
"filesystems:ceph": {
"cmd": OOSC,
"version-tag": LATEST_OCTOPUS
},
"filesystems:ceph:octopus": {
"cmd": OOSC,
"version-tag": LATEST_OCTOPUS
},
"filesystems:ceph:nautilus": {
"cmd": OOSC,
"version-tag": LATEST_NAUTILUS
},
"filesystems:ceph:master:upstream": {
"cmd": OOSC,
"version-tag": LATEST_OCTOPUS
}
}
PROJECTS_IBS = {
"Devel:Storage:7.0": {
"cmd": IOSC,
"version-tag": LATEST_OCTOPUS
}
}
#PROJECTS = PROJECTS_IBS
#PROJECTS.update(PROJECTS_IBS)
def update_tarball(tgtversion):
print("Editing update-tarball.sh...")
txt = open("update-tarball.sh", "r").read()
f, filename = tempfile.mkstemp('.sh', text=True)
tf = os.fdopen(f, "w")
txt = re.sub('ROOK_REV="[^"]+"', 'ROOK_REV="{}"'.format(tgtversion), txt, count=1)
tf.write(txt)
tf.close()
shutil.copyfile(filename, "update-tarball.sh")
os.remove(filename)
def update_changelog(osc, tgtversion):
f, filename = tempfile.mkstemp('.txt', text=True)
tf = os.fdopen(f, "w")
fetch_changelog(osc, tgtversion, tf)
tf.close()
osc.vc("-F", filename)
os.remove(filename)
def update_changelog_with_message(osc, msg):
f, filename = tempfile.mkstemp('.txt', text=True)
tf = os.fdopen(f, "w")
tf.write("- {}\n".format(msg))
tf.close()
osc.vc("-F", filename)
os.remove(filename)
def fetch_changelog(osc, tgtversion, tofile):
"""
Pull changes, write them into tofile
"""
changes = obsscripts.fetch_github_tag(SRCREPO, tgtversion)
txt = changes["body"]
print("Raw changes:\n{}\n".format(txt))
txt = txt.replace("\r", "")
txt = re.sub(r', @\w+', '', txt)
tofile.write("- Update to {}:\n".format(tgtversion))
for line in txt.splitlines():
if line.startswith('- ') or line.startswith('* '):
tofile.write(" * {}\n".format(line[2:]))
def main():
nupdated = 0
if os.path.exists("wip"):
print("In-progress commits detected: wip/ exists. Please resolve manually.")
sys.exit(1)
override_commit = None
override_msg = None
if "--override" in sys.argv:
override_commit = sys.argv[sys.argv.index("--override") + 1]
if "-m" in sys.argv:
override_msg = sys.argv[sys.argv.index("-m") + 1]
for repo, proj in PROJECTS.items():
osc = proj["cmd"]
commit = override_commit or proj["version-tag"]
if "--fetch-changes" in sys.argv:
f, filename = tempfile.mkstemp('.txt', text=True)
tf = os.fdopen(f, "w")
fetch_changelog(osc, proj["version-tag"], tf)
tf.close()
print(open(filename).read())
os.remove(filename)
sys.exit(0)
try:
tarball = osc.api("-X", "GET", "/source/{}/{}/update-tarball.sh".format(repo, PACKAGE))
m = re.search('ROOK_REV="(.+)"', tarball.stdout.decode('utf-8'))
if m and m.group(1) == commit:
continue
except sh.ErrorReturnCode as err:
print("Error code {}, skipping {}/{}...".format(err.exit_code, repo, PACKAGE))
continue
print("Updating {}/{} to version {}...".format(repo, PACKAGE, commit))
curr = os.getcwd()
try:
wip = os.path.join(curr, "wip")
try:
os.mkdir(wip)
except os.error:
pass
os.chdir(wip)
osc("bco", repo, PACKAGE)
os.chdir(os.path.join(wip, BRANCHBASE.format(repo), PACKAGE))
if proj["version-tag"] == commit:
update_changelog(osc, proj["version-tag"])
else:
update_changelog_with_message(
osc, override_msg or "Update to commit {}".format(commit))
update_tarball(commit)
for toremove in glob.glob('./rook-*.xz'):
print("Deleting {}...".format(toremove))
os.remove(toremove)
print(sh.sh("./update-tarball.sh"))
print(osc.ar())
print(osc.commit("-m", "Update to version {}:".format(commit)))
nupdated = nupdated + 1
finally:
os.chdir(curr)
if nupdated > 0:
print("{} updated projects now in:\nwip".format(nupdated))
if __name__ == "__main__":
main()
|
krig/obs-scripts
|
update-rook.py
|
Python
|
mit
| 4,790
|
[
"Octopus"
] |
76d79e3cd75e903d84c760fa938c810a6db5dc6da091427f9787b143ff118c07
|
# Copyright (C) 2004, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import numpy
__doc__="Vector class, including rotation-related functions."
def m2rotaxis(m):
"""
Return angles, axis pair that corresponds to rotation matrix m.
"""
# Angle always between 0 and pi
# Sense of rotation is defined by axis orientation
t=0.5*(numpy.trace(m)-1)
t=max(-1, t)
t=min(1, t)
angle=numpy.arccos(t)
if angle<1e-15:
# Angle is 0
return 0.0, Vector(1,0,0)
elif angle<numpy.pi:
# Angle is smaller than pi
x=m[2,1]-m[1,2]
y=m[0,2]-m[2,0]
z=m[1,0]-m[0,1]
axis=Vector(x,y,z)
axis.normalize()
return angle, axis
else:
# Angle is pi - special case!
m00=m[0,0]
m11=m[1,1]
m22=m[2,2]
if m00>m11 and m00>m22:
x=numpy.sqrt(m00-m11-m22+0.5)
y=m[0,1]/(2*x)
z=m[0,2]/(2*x)
elif m11>m00 and m11>m22:
y=numpy.sqrt(m11-m00-m22+0.5)
x=m[0,1]/(2*y)
z=m[1,2]/(2*y)
else:
z=numpy.sqrt(m22-m00-m11+0.5)
x=m[0,2]/(2*z)
y=m[1,2]/(2*z)
axis=Vector(x,y,z)
axis.normalize()
return numpy.pi, axis
def vector_to_axis(line, point):
"""
Returns the vector between a point and
the closest point on a line (ie. the perpendicular
projection of the point on the line).
@type line: L{Vector}
@param line: vector defining a line
@type point: L{Vector}
@param point: vector defining the point
"""
line=line.normalized()
np=point.norm()
angle=line.angle(point)
return point-line**(np*numpy.cos(angle))
def rotaxis2m(theta, vector):
"""
Calculate a left multiplying rotation matrix that rotates
theta rad around vector.
Example:
>>> m=rotaxis(pi, Vector(1,0,0))
>>> rotated_vector=any_vector.left_multiply(m)
@type theta: float
@param theta: the rotation angle
@type vector: L{Vector}
@param vector: the rotation axis
@return: The rotation matrix, a 3x3 Numeric array.
"""
vector=vector.copy()
vector.normalize()
c=numpy.cos(theta)
s=numpy.sin(theta)
t=1-c
x,y,z=vector.get_array()
rot=numpy.zeros((3,3))
# 1st row
rot[0,0]=t*x*x+c
rot[0,1]=t*x*y-s*z
rot[0,2]=t*x*z+s*y
# 2nd row
rot[1,0]=t*x*y+s*z
rot[1,1]=t*y*y+c
rot[1,2]=t*y*z-s*x
# 3rd row
rot[2,0]=t*x*z-s*y
rot[2,1]=t*y*z+s*x
rot[2,2]=t*z*z+c
return rot
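# (The matrix assembled above is the Rodrigues rotation formula in matrix
# form: R = c*I + s*[v]_x + (1-c)*v*v^T for a unit axis v.)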
rotaxis=rotaxis2m
def refmat(p,q):
"""
Return a (left multiplying) matrix that mirrors p onto q.
Example:
>>> mirror=refmat(p,q)
>>> qq=p.left_multiply(mirror)
>>> print q, qq # q and qq should be the same
@type p,q: L{Vector}
@return: The mirror operation, a 3x3 Numeric array.
"""
p.normalize()
q.normalize()
if (p-q).norm()<1e-5:
return numpy.identity(3)
pq=p-q
pq.normalize()
b=pq.get_array()
b.shape=(3, 1)
i=numpy.identity(3)
ref=i-2*numpy.dot(b, numpy.transpose(b))
return ref
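# (This is a Householder reflection I - 2*b*b^T across the plane with unit
# normal b = (p-q)/|p-q|; since p and q are unit vectors, it maps p onto q.)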
def rotmat(p,q):
"""
Return a (left multiplying) matrix that rotates p onto q.
Example:
>>> r=rotmat(p,q)
>>> print q, p.left_multiply(r)
@param p: moving vector
@type p: L{Vector}
@param q: fixed vector
@type q: L{Vector}
@return: rotation matrix that rotates p onto q
@rtype: 3x3 Numeric array
"""
rot=numpy.dot(refmat(q, -p), refmat(p, -p))
return rot
def calc_angle(v1, v2, v3):
"""
Calculate the angle between 3 vectors
representing 3 connected points.
@param v1, v2, v3: the three points that define the angle
@type v1, v2, v3: L{Vector}
@return: angle
@rtype: float
"""
v1=v1-v2
v3=v3-v2
return v1.angle(v3)
def calc_dihedral(v1, v2, v3, v4):
"""
Calculate the dihedral angle between 4 vectors
representing 4 connected points. The angle is in
]-pi, pi].
@param v1, v2, v3, v4: the four points that define the dihedral angle
@type v1, v2, v3, v4: L{Vector}
"""
ab=v1-v2
cb=v3-v2
db=v4-v3
u=ab**cb
v=db**cb
w=u**v
angle=u.angle(v)
# Determine sign of angle
try:
if cb.angle(w)>0.001:
angle=-angle
except ZeroDivisionError:
# dihedral=pi
pass
return angle
class Vector:
"3D vector"
def __init__(self, x, y=None, z=None):
if y is None and z is None:
# Array, list, tuple...
if len(x)!=3:
raise ValueError("Vector: x is not a list/tuple/array of 3 numbers")
self._ar=numpy.array(x, 'd')
else:
# Three numbers
self._ar=numpy.array((x, y, z), 'd')
def __repr__(self):
x,y,z=self._ar
return "<Vector %.2f, %.2f, %.2f>" % (x,y,z)
def __neg__(self):
"Return Vector(-x, -y, -z)"
a=-self._ar
return Vector(a)
def __add__(self, other):
"Return Vector+other Vector or scalar"
if isinstance(other, Vector):
a=self._ar+other._ar
else:
a=self._ar+numpy.array(other)
return Vector(a)
def __sub__(self, other):
"Return Vector-other Vector or scalar"
if isinstance(other, Vector):
a=self._ar-other._ar
else:
a=self._ar-numpy.array(other)
return Vector(a)
def __mul__(self, other):
"Return Vector.Vector (dot product)"
return sum(self._ar*other._ar)
def __div__(self, x):
"Return Vector(coords/a)"
a=self._ar/numpy.array(x)
return Vector(a)
def __pow__(self, other):
"Return VectorxVector (cross product) or Vectorxscalar"
if isinstance(other, Vector):
a,b,c=self._ar
d,e,f=other._ar
c1=numpy.linalg.det(numpy.array(((b,c), (e,f))))
c2=-numpy.linalg.det(numpy.array(((a,c), (d,f))))
c3=numpy.linalg.det(numpy.array(((a,b), (d,e))))
return Vector(c1,c2,c3)
else:
a=self._ar*numpy.array(other)
return Vector(a)
def __getitem__(self, i):
return self._ar[i]
def __setitem__(self, i, value):
self._ar[i]=value
def norm(self):
"Return vector norm"
return numpy.sqrt(sum(self._ar*self._ar))
def normsq(self):
"Return square of vector norm"
return abs(sum(self._ar*self._ar))
def normalize(self):
"Normalize the Vector"
self._ar=self._ar/self.norm()
def normalized(self):
"Return a normalized copy of the Vector"
v=self.copy()
v.normalize()
return v
def angle(self, other):
"Return angle between two vectors"
n1=self.norm()
n2=other.norm()
c=(self*other)/(n1*n2)
# Take care of roundoff errors
c=min(c,1)
c=max(-1,c)
return numpy.arccos(c)
def get_array(self):
"Return (a copy of) the array of coordinates"
return numpy.array(self._ar)
def left_multiply(self, matrix):
"Return Vector=Matrix x Vector"
a=numpy.dot(matrix, self._ar)
return Vector(a)
def right_multiply(self, matrix):
"Return Vector=Vector x Matrix"
a=numpy.dot(self._ar, matrix)
return Vector(a)
def copy(self):
"Return a deep copy of the Vector"
return Vector(self._ar)
if __name__=="__main__":
from numpy.random import random
v1=Vector(0,0,1)
v2=Vector(0,0,0)
v3=Vector(0,1,0)
v4=Vector(1,1,0)
v4.normalize()
print v4
print calc_angle(v1, v2, v3)
dih=calc_dihedral(v1, v2, v3, v4)
# Test dihedral sign
assert(dih>0)
print "DIHEDRAL ", dih
ref=refmat(v1, v3)
rot=rotmat(v1, v3)
print v3
print v1.left_multiply(ref)
print v1.left_multiply(rot)
print v1.right_multiply(numpy.transpose(rot))
# -
print v1-v2
print v1-1
print v1+(1,2,3)
# +
print v1+v2
print v1+3
print v1-(1,2,3)
# *
print v1*v2
# /
print v1/2
print v1/(1,2,3)
# **
print v1**v2
print v1**2
print v1**(1,2,3)
# norm
print v1.norm()
# norm squared
print v1.normsq()
# setitem
v1[2]=10
print v1
# getitem
print v1[2]
print numpy.array(v1)
print "ROT"
angle=random()*numpy.pi
axis=Vector(random(3)-random(3))
axis.normalize()
m=rotaxis(angle, axis)
cangle, caxis=m2rotaxis(m)
print angle-cangle
print axis-caxis
print
|
NirBenTalLab/proorigami-cde-package
|
cde-root/usr/lib64/python2.4/site-packages/Bio/PDB/Vector.py
|
Python
|
mit
| 9,064
|
[
"Biopython"
] |
28a226018bbda3acf8bb7c00087336a9e63f8ccc6bf7635f632a8d9e5a15bbb5
|
#
# ast_higher_order_visitor.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from pynestml.visitors.ast_visitor import ASTVisitor
class ASTHigherOrderVisitor(ASTVisitor):
"""
This visitor is used to visit each node of the meta_model and perform an arbitrary operation on it.
"""
def __init__(self, visit_funcs=list(), endvisit_funcs=list()):
self.visit_funcs = list()
self.endvisit_funcs = list()
super(ASTHigherOrderVisitor, self).__init__()
# check if a list of funcs is handed over or not
if isinstance(visit_funcs, list):
self.visit_funcs.extend(visit_funcs)
elif callable(visit_funcs):
self.visit_funcs.append(visit_funcs)
# analogously for end visit funcs
if isinstance(endvisit_funcs, list):
self.endvisit_funcs.extend(endvisit_funcs)
elif callable(endvisit_funcs):
self.endvisit_funcs.append(endvisit_funcs)
def visit(self, node):
for fun in self.visit_funcs:
fun(node)
def endvisit(self, node):
for fun in self.endvisit_funcs:
fun(node)
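# Example (sketch): collecting the class name of every visited node with a
# plain function. This assumes a parsed meta-model node `ast` exposing the
# usual PyNestML `accept` method; the names used here are illustrative only.
#
# >>> names = []
# >>> def collect(node):
# ...     names.append(type(node).__name__)
# >>> visitor = ASTHigherOrderVisitor(visit_funcs=collect)
# >>> ast.accept(visitor)
# >>> names
# ['ASTNeuron', 'ASTBody', ...]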
|
kperun/nestml
|
pynestml/visitors/ast_higher_order_visitor.py
|
Python
|
gpl-2.0
| 1,771
|
[
"VisIt"
] |
b20df6781549a13942e8e08566b10a6c6848358303385089724d7a8cf88d668c
|
""" SiteStatus helper
Module that acts as a helper for knowing the status of a site.
It takes care of switching between the CS and the RSS.
The status is kept in the RSSCache object, which is a small wrapper on top of DictCache
"""
import errno
import math
from time import sleep
from datetime import datetime, timedelta
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.DIRACSingleton import DIRACSingleton
from DIRAC.Core.Utilities import DErrno
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.WorkloadManagementSystem.Client.WMSAdministratorClient import WMSAdministratorClient
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.ResourceStatusSystem.Utilities.RSSCacheNoThread import RSSCache
from DIRAC.ResourceStatusSystem.Utilities.RssConfiguration import RssConfiguration
class SiteStatus(metaclass=DIRACSingleton):
"""
RSS helper to interact with the 'Site' family on the DB. It provides the most
demanded functions and a cache to avoid hitting the server too often.
It provides four methods to interact with the site statuses:
* getSiteStatuses
* isUsableSite
* getUsableSites
* getSites
"""
def __init__(self):
"""
Constructor, initializes the rssClient.
"""
self.log = gLogger.getSubLogger(self.__class__.__name__)
self.rssConfig = RssConfiguration()
self.__opHelper = Operations()
self.rssFlag = ResourceStatus().rssFlag
self.rsClient = ResourceStatusClient()
cacheLifeTime = int(self.rssConfig.getConfigCache())
# RSSCache only affects the calls directed to the RSS; it is not used when using the CS.
self.rssCache = RSSCache(cacheLifeTime, self.__updateRssCache)
def __updateRssCache(self):
"""Method used to update the rssCache.
It will try 5 times to contact the RSS before giving up.
"""
meta = {"columns": ["Name", "Status", "VO"]}
for ti in range(5):
rawCache = self.rsClient.selectStatusElement("Site", "Status", meta=meta)
if rawCache["OK"]:
break
self.log.warn("Can't get resource's status", rawCache["Message"] + "; trial %d" % ti)
sleep(math.pow(ti, 2))
self.rsClient = ResourceStatusClient()
if not rawCache["OK"]:
return rawCache
return S_OK(getCacheDictFromRawData(rawCache["Value"]))
def getSiteStatuses(self, siteNames=None):
"""
Method that queries the database for status of the sites in a given list.
A single site name may also be provided as a string in "siteNames".
If the input is None, it is interpreted as * ( all ).
If match is positive, the output looks like::
{
'test1.test1.org': 'Active',
'test2.test2.org': 'Banned',
}
Examples::
>>> siteStatus.getSiteStatuses( ['test1.test1.uk', 'test2.test2.net', 'test3.test3.org'] )
S_OK( { 'test1.test1.uk': 'Active', 'test2.test2.net': 'Banned', 'test3.test3.org': 'Active' } )
>>> siteStatus.getSiteStatuses( 'NotExists')
S_ERROR( ... )
>>> siteStatus.getSiteStatuses( None )
S_OK( { 'test1.test1.org': 'Active',
'test2.test2.net': 'Banned',
... } )
:param siteNames: name(s) of the sites to be matched
:type siteNames: list, str
:return: S_OK() || S_ERROR()
"""
if self.rssFlag:
return self.__getRSSSiteStatus(siteNames)
else:
siteStatusDict = {}
wmsAdmin = WMSAdministratorClient()
if siteNames:
if isinstance(siteNames, str):
siteNames = [siteNames]
for siteName in siteNames:
result = wmsAdmin.getSiteMaskStatus(siteName)
if not result["OK"]:
return result
else:
siteStatusDict[siteName] = result["Value"]
else:
result = wmsAdmin.getSiteMaskStatus()
if not result["OK"]:
return result
else:
siteStatusDict = result["Value"]
return S_OK(siteStatusDict)
def __getRSSSiteStatus(self, siteName=None):
"""Gets from the cache or the RSS the Sites status. The cache is a
copy of the DB table. If it is not on the cache, most likely is not going
to be on the DB.
There is one exception: item just added to the CS, e.g. new Element.
The period between it is added to the DB and the changes are propagated
to the cache will be inconsistent, but not dangerous. Just wait <cacheLifeTime>
minutes.
:param siteName: name of the site
:type siteName: str
:return: dict
"""
cacheMatch = self.rssCache.match(siteName, "", "", "all") # sites have VO="all".
self.log.debug("__getRSSSiteStatus")
self.log.debug(cacheMatch)
return cacheMatch
def getUsableSites(self, siteNames=None):
"""
Returns, in a list, all sites that are usable, i.e. whose
status is either Active or Degraded.
examples
>>> siteStatus.getUsableSites( ['test1.test1.uk', 'test2.test2.net', 'test3.test3.org'] )
S_OK( ['test1.test1.uk', 'test3.test3.org'] )
>>> siteStatus.getUsableSites( None )
S_OK( ['test1.test1.uk', 'test3.test3.org', 'test4.test4.org', 'test5.test5.org', ...] )
>>> siteStatus.getUsableSites( 'NotExists' )
S_ERROR( ... )
:Parameters:
**siteNames** - `List` or `str`
name(s) of the sites to be matched
:return: S_OK() || S_ERROR()
"""
siteStatusDictRes = self.getSiteStatuses(siteNames)
if not siteStatusDictRes["OK"]:
return siteStatusDictRes
siteStatusList = [x[0] for x in siteStatusDictRes["Value"].items() if x[1] in ["Active", "Degraded"]]
return S_OK(siteStatusList)
def getSites(self, siteState="Active"):
"""
By default, it gets the currently active site list
examples
>>> siteStatus.getSites()
S_OK( ['test1.test1.uk', 'test3.test3.org'] )
>>> siteStatus.getSites( 'Active' )
S_OK( ['test1.test1.uk', 'test3.test3.org'] )
>>> siteStatus.getSites( 'Banned' )
S_OK( ['test0.test0.uk', ... ] )
>>> siteStatus.getSites( 'All' )
S_OK( ['test1.test1.uk', 'test3.test3.org', 'test4.test4.org', 'test5.test5.org'...] )
>>> siteStatus.getSites( None )
S_ERROR( ... )
:Parameters:
**siteState** - `String`
state of the sites to be matched
:return: S_OK() || S_ERROR()
"""
if not siteState:
return S_ERROR(DErrno.ERESUNK, "siteState parameter is empty")
siteStatusDictRes = self.getSiteStatuses()
if not siteStatusDictRes["OK"]:
return siteStatusDictRes
if siteState.capitalize() == "All":
# if no siteState is set return everything
siteList = list(siteStatusDictRes["Value"])
else:
# fix case sensitive string
siteState = siteState.capitalize()
allowedStateList = ["Active", "Banned", "Degraded", "Probing", "Error", "Unknown"]
if siteState not in allowedStateList:
return S_ERROR(errno.EINVAL, "Not a valid status, parameter rejected")
siteList = [x[0] for x in siteStatusDictRes["Value"].items() if x[1] == siteState]
return S_OK(siteList)
def setSiteStatus(self, site, status, comment="No comment"):
"""
Set the status of a site in the 'SiteStatus' table of RSS
examples
>>> siteStatus.banSite( 'site1.test.test' )
S_OK()
>>> siteStatus.banSite( None )
S_ERROR( ... )
:Parameters:
**site** - `String`
the site that is going to be banned
**comment** - `String`
reason for banning
:return: S_OK() || S_ERROR()
"""
if not status:
return S_ERROR(DErrno.ERESUNK, "status parameter is empty")
# fix case sensitive string
status = status.capitalize()
allowedStateList = ["Active", "Banned", "Degraded", "Probing", "Error", "Unknown"]
if status not in allowedStateList:
return S_ERROR(errno.EINVAL, "Not a valid status, parameter rejected")
if self.rssFlag:
result = getProxyInfo()
if result["OK"]:
tokenOwner = result["Value"]["username"]
else:
return S_ERROR("Unable to get user proxy info %s " % result["Message"])
tokenExpiration = datetime.utcnow() + timedelta(days=1)
self.rssCache.acquireLock()
try:
result = self.rsClient.modifyStatusElement(
"Site",
"Status",
status=status,
name=site,
tokenExpiration=tokenExpiration,
reason=comment,
tokenOwner=tokenOwner,
)
if result["OK"]:
self.rssCache.refreshCache()
else:
_msg = "Error updating status of site %s to %s" % (site, status)
gLogger.warn("RSS: %s" % _msg)
# Release lock, no matter what.
finally:
self.rssCache.releaseLock()
else:
if status in ["Active", "Degraded"]:
result = WMSAdministratorClient().allowSite()
else:
result = WMSAdministratorClient().banSite()
return result
def getCacheDictFromRawData(rawList):
"""
Formats the raw data list, which we know must contain tuples whose
first two elements are ( elementName, status ), into a dictionary of
the form { elementName : status }.
The resulting dictionary will be the new cache.
:Parameters:
**rawList** - `list`
list of tuples [( elementName, status, ... ), ...]
:return: dict of the form { elementName : status, ... }
"""
res = {}
for entry in rawList:
res[entry[0]] = entry[1]
return res
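# Example (sketch): only the first two fields of each row are used.
#
# >>> getCacheDictFromRawData([('LCG.CERN.cern', 'Active', 'all'),
# ...                          ('LCG.IN2P3.fr', 'Banned', 'all')])
# {'LCG.CERN.cern': 'Active', 'LCG.IN2P3.fr': 'Banned'}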
|
DIRACGrid/DIRAC
|
src/DIRAC/ResourceStatusSystem/Client/SiteStatus.py
|
Python
|
gpl-3.0
| 10,938
|
[
"DIRAC"
] |
99f06d7dc0e59b9402f4694885cd6996a17df589f18bb3d5fe7c64443f6aa909
|
# -*- coding: utf-8 -*-
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
import warnings
from collections.abc import Iterable
import ctypes
import numpy as np
from scipy._lib.doccer import (extend_notes_in_docstring,
replace_notes_in_docstring)
from scipy._lib._ccallback import LowLevelCallable
from scipy import optimize
from scipy import integrate
from scipy import interpolate
import scipy.special as sc
import scipy.special._ufuncs as scu
from scipy._lib._util import _lazyselect, _lazywhere
from . import _stats
from ._rvs_sampling import rvs_ratio_uniforms
from ._tukeylambda_stats import (tukeylambda_variance as _tlvar,
tukeylambda_kurtosis as _tlkurt)
from ._distn_infrastructure import (get_distribution_names, _kurtosis,
_ncx2_cdf, _ncx2_log_pdf, _ncx2_pdf,
rv_continuous, _skew, valarray,
_get_fixed_fit_value, _check_shape)
from ._ksstats import kolmogn, kolmognp, kolmogni
from ._constants import (_XMIN, _EULER, _ZETA3, _XMAX, _LOGXMAX,
_SQRT_2_OVER_PI, _LOG_SQRT_2_OVER_PI)
# In numpy 1.12 and above, np.power refuses to raise integers to negative
# powers, and `np.float_power` is a new replacement.
try:
float_power = np.float_power
except AttributeError:
float_power = np.power
def _remove_optimizer_parameters(kwds):
"""
Remove the optimizer-related keyword arguments 'loc', 'scale' and
'optimizer' from `kwds`. Then check that `kwds` is empty, and
raise `TypeError("Unknown arguments: %s." % kwds)` if it is not.
This function is used in the fit method of distributions that override
the default method and do not use the default optimization code.
`kwds` is modified in-place.
"""
kwds.pop('loc', None)
kwds.pop('scale', None)
kwds.pop('optimizer', None)
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
## Kolmogorov-Smirnov one-sided and two-sided test statistics
class ksone_gen(rv_continuous):
r"""Kolmogorov-Smirnov one-sided test statistic distribution.
This is the distribution of the one-sided Kolmogorov-Smirnov (KS)
statistics :math:`D_n^+` and :math:`D_n^-`
for a finite sample size ``n`` (the shape parameter).
%(before_notes)s
Notes
-----
:math:`D_n^+` and :math:`D_n^-` are given by
.. math::
D_n^+ &= \text{sup}_x (F_n(x) - F(x)),\\
D_n^- &= \text{sup}_x (F(x) - F_n(x)),\\
where :math:`F` is a continuous CDF and :math:`F_n` is an empirical CDF.
`ksone` describes the distribution under the null hypothesis of the KS test
that the empirical CDF corresponds to :math:`n` i.i.d. random variates
with CDF :math:`F`.
%(after_notes)s
See Also
--------
kstwobign, kstwo, kstest
References
----------
.. [1] Birnbaum, Z. W. and Tingey, F.H. "One-sided confidence contours
for probability distribution functions", The Annals of Mathematical
Statistics, 22(4), pp 592-596 (1951).
%(example)s
"""
def _pdf(self, x, n):
return -scu._smirnovp(n, x)
def _cdf(self, x, n):
return scu._smirnovc(n, x)
def _sf(self, x, n):
return sc.smirnov(n, x)
def _ppf(self, q, n):
return scu._smirnovci(n, q)
def _isf(self, q, n):
return sc.smirnovi(n, q)
ksone = ksone_gen(a=0.0, b=1.0, name='ksone')
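# Example (sketch): the one-sided critical value of D_n^+ at the 95% level
# for n = 100 samples (roughly 0.12).
#
# >>> from scipy.stats import ksone
# >>> ksone.ppf(0.95, 100)
# 0.12...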
class kstwo_gen(rv_continuous):
r"""Kolmogorov-Smirnov two-sided test statistic distribution.
This is the distribution of the two-sided Kolmogorov-Smirnov (KS)
statistic :math:`D_n` for a finite sample size ``n``
(the shape parameter).
%(before_notes)s
Notes
-----
:math:`D_n` is given by
.. math::
D_n &= \text{sup}_x |F_n(x) - F(x)|
where :math:`F` is a (continuous) CDF and :math:`F_n` is an empirical CDF.
`kstwo` describes the distribution under the null hypothesis of the KS test
that the empirical CDF corresponds to :math:`n` i.i.d. random variates
with CDF :math:`F`.
%(after_notes)s
See Also
--------
kstwobign, ksone, kstest
References
----------
.. [1] Simard, R., L'Ecuyer, P. "Computing the Two-Sided
Kolmogorov-Smirnov Distribution", Journal of Statistical Software,
Vol 39, 11, 1-18 (2011).
%(example)s
"""
def _get_support(self, n):
return (0.5/(n if not isinstance(n, Iterable) else np.asanyarray(n)),
1.0)
def _pdf(self, x, n):
return kolmognp(n, x)
def _cdf(self, x, n):
return kolmogn(n, x)
def _sf(self, x, n):
return kolmogn(n, x, cdf=False)
def _ppf(self, q, n):
return kolmogni(n, q, cdf=True)
def _isf(self, q, n):
return kolmogni(n, q, cdf=False)
# Use the pdf, (not the ppf) to compute moments
kstwo = kstwo_gen(momtype=0, a=0.0, b=1.0, name='kstwo')
class kstwobign_gen(rv_continuous):
r"""Limiting distribution of scaled Kolmogorov-Smirnov two-sided test statistic.
This is the asymptotic distribution of the two-sided Kolmogorov-Smirnov
statistic :math:`\sqrt{n} D_n` that measures the maximum absolute
distance of the theoretical (continuous) CDF from the empirical CDF.
(see `kstest`).
%(before_notes)s
Notes
-----
:math:`\sqrt{n} D_n` is given by
.. math::
D_n = \text{sup}_x |F_n(x) - F(x)|
where :math:`F` is a continuous CDF and :math:`F_n` is an empirical CDF.
`kstwobign` describes the asymptotic distribution (i.e. the limit of
:math:`\sqrt{n} D_n`) under the null hypothesis of the KS test that the
empirical CDF corresponds to i.i.d. random variates with CDF :math:`F`.
%(after_notes)s
See Also
--------
ksone, kstwo, kstest
References
----------
.. [1] Feller, W. "On the Kolmogorov-Smirnov Limit Theorems for Empirical
Distributions", Ann. Math. Statist. Vol 19, 177-189 (1948).
%(example)s
"""
def _pdf(self, x):
return -scu._kolmogp(x)
def _cdf(self, x):
return scu._kolmogc(x)
def _sf(self, x):
return sc.kolmogorov(x)
def _ppf(self, q):
return scu._kolmogci(q)
def _isf(self, q):
return sc.kolmogi(q)
kstwobign = kstwobign_gen(a=0.0, name='kstwobign')
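# Example (sketch): for large n, sqrt(n)*D_n is approximately `kstwobign`
# distributed, so the finite-n survival function of `kstwo` approaches the
# rescaled limit.
#
# >>> import numpy as np
# >>> from scipy.stats import kstwo, kstwobign
# >>> n, x = 1000, 0.04
# >>> kstwo.sf(x, n), kstwobign.sf(x * np.sqrt(n))  # close to each other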
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
_norm_pdf_C = np.sqrt(2*np.pi)
_norm_pdf_logC = np.log(_norm_pdf_C)
def _norm_pdf(x):
return np.exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
return sc.ndtr(x)
def _norm_logcdf(x):
return sc.log_ndtr(x)
def _norm_ppf(q):
return sc.ndtri(q)
def _norm_sf(x):
return _norm_cdf(-x)
def _norm_logsf(x):
return _norm_logcdf(-x)
def _norm_isf(q):
return -_norm_ppf(q)
class norm_gen(rv_continuous):
r"""A normal continuous random variable.
The location (``loc``) keyword specifies the mean.
The scale (``scale``) keyword specifies the standard deviation.
%(before_notes)s
Notes
-----
The probability density function for `norm` is:
.. math::
f(x) = \frac{\exp(-x^2/2)}{\sqrt{2\pi}}
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _rvs(self, size=None, random_state=None):
return random_state.standard_normal(size)
def _pdf(self, x):
# norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
return _norm_pdf(x)
def _logpdf(self, x):
return _norm_logpdf(x)
def _cdf(self, x):
return _norm_cdf(x)
def _logcdf(self, x):
return _norm_logcdf(x)
def _sf(self, x):
return _norm_sf(x)
def _logsf(self, x):
return _norm_logsf(x)
def _ppf(self, q):
return _norm_ppf(q)
def _isf(self, q):
return _norm_isf(q)
def _stats(self):
return 0.0, 1.0, 0.0, 0.0
def _entropy(self):
return 0.5*(np.log(2*np.pi)+1)
@replace_notes_in_docstring(rv_continuous, notes="""\
This function uses explicit formulas for the maximum likelihood
estimation of the normal distribution parameters, so the
`optimizer` argument is ignored.\n\n""")
def fit(self, data, **kwds):
floc = kwds.pop('floc', None)
fscale = kwds.pop('fscale', None)
_remove_optimizer_parameters(kwds)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if not np.isfinite(data).all():
raise RuntimeError("The data contains non-finite values.")
if floc is None:
loc = data.mean()
else:
loc = floc
if fscale is None:
scale = np.sqrt(((data - loc)**2).mean())
else:
scale = fscale
return loc, scale
def _munp(self, n):
"""
@returns Moments of standard normal distribution for integer n >= 0
See eq. 16 of https://arxiv.org/abs/1209.4340v2
"""
if n % 2 == 0:
return sc.factorial2(n - 1)
else:
return 0.
norm = norm_gen(name='norm')
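# Example (sketch): `norm.fit` uses the closed-form MLE above, i.e. the
# sample mean and the (ddof=0) standard deviation.
#
# >>> import numpy as np
# >>> from scipy.stats import norm
# >>> data = np.array([1.2, 2.3, 3.1, 4.8])
# >>> loc, scale = norm.fit(data)
# >>> np.allclose((loc, scale), (data.mean(), data.std(ddof=0)))
# True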
class alpha_gen(rv_continuous):
r"""An alpha continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `alpha` ([1]_, [2]_) is:
.. math::
f(x, a) = \frac{1}{x^2 \Phi(a) \sqrt{2\pi}} *
\exp(-\frac{1}{2} (a-1/x)^2)
where :math:`\Phi` is the normal CDF, :math:`x > 0`, and :math:`a > 0`.
`alpha` takes ``a`` as a shape parameter.
%(after_notes)s
References
----------
.. [1] Johnson, Kotz, and Balakrishnan, "Continuous Univariate
Distributions, Volume 1", Second Edition, John Wiley and Sons,
p. 173 (1994).
.. [2] Anthony A. Salvia, "Reliability applications of the Alpha
Distribution", IEEE Transactions on Reliability, Vol. R-34,
No. 3, pp. 251-252 (1985).
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, a):
# alpha.pdf(x, a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2)
return 1.0/(x**2)/_norm_cdf(a)*_norm_pdf(a-1.0/x)
def _logpdf(self, x, a):
return -2*np.log(x) + _norm_logpdf(a-1.0/x) - np.log(_norm_cdf(a))
def _cdf(self, x, a):
return _norm_cdf(a-1.0/x) / _norm_cdf(a)
def _ppf(self, q, a):
return 1.0/np.asarray(a-sc.ndtri(q*_norm_cdf(a)))
def _stats(self, a):
return [np.inf]*2 + [np.nan]*2
alpha = alpha_gen(a=0.0, name='alpha')
class anglit_gen(rv_continuous):
r"""An anglit continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `anglit` is:
.. math::
f(x) = \sin(2x + \pi/2) = \cos(2x)
for :math:`-\pi/4 \le x \le \pi/4`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# anglit.pdf(x) = sin(2*x + \pi/2) = cos(2*x)
return np.cos(2*x)
def _cdf(self, x):
return np.sin(x+np.pi/4)**2.0
def _ppf(self, q):
return np.arcsin(np.sqrt(q))-np.pi/4
def _stats(self):
return 0.0, np.pi*np.pi/16-0.5, 0.0, -2*(np.pi**4 - 96)/(np.pi*np.pi-8)**2
def _entropy(self):
return 1-np.log(2)
anglit = anglit_gen(a=-np.pi/4, b=np.pi/4, name='anglit')
class arcsine_gen(rv_continuous):
r"""An arcsine continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `arcsine` is:
.. math::
f(x) = \frac{1}{\pi \sqrt{x (1-x)}}
for :math:`0 < x < 1`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
return 1.0/np.pi/np.sqrt(x*(1-x))
def _cdf(self, x):
return 2.0/np.pi*np.arcsin(np.sqrt(x))
def _ppf(self, q):
return np.sin(np.pi/2.0*q)**2.0
def _stats(self):
mu = 0.5
mu2 = 1.0/8
g1 = 0
g2 = -3.0/2.0
return mu, mu2, g1, g2
def _entropy(self):
return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
class FitDataError(ValueError):
# This exception is raised by, for example, beta_gen.fit when both floc
# and fscale are fixed and there are values in the data not in the open
# interval (floc, floc+fscale).
def __init__(self, distr, lower, upper):
self.args = (
"Invalid values in `data`. Maximum likelihood "
"estimation with {distr!r} requires that {lower!r} < x "
"< {upper!r} for each x in `data`.".format(
distr=distr, lower=lower, upper=upper),
)
class FitSolverError(RuntimeError):
# This exception is raised by, for example, beta_gen.fit when
# optimize.fsolve returns with ier != 1.
def __init__(self, mesg):
emsg = "Solver for the MLE equations failed to converge: "
emsg += mesg.replace('\n', '')
self.args = (emsg,)
def _beta_mle_a(a, b, n, s1):
# The zeros of this function give the MLE for `a`, with
# `b`, `n` and `s1` given. `s1` is the sum of the logs of
# the data. `n` is the number of data points.
psiab = sc.psi(a + b)
func = s1 - n * (-psiab + sc.psi(a))
return func
def _beta_mle_ab(theta, n, s1, s2):
# Zeros of this function are critical points of
# the maximum likelihood function. Solving this system
# for theta (which contains a and b) gives the MLE for a and b
# given `n`, `s1` and `s2`. `s1` is the sum of the logs of the data,
# and `s2` is the sum of the logs of 1 - data. `n` is the number
# of data points.
a, b = theta
psiab = sc.psi(a + b)
func = [s1 - n * (-psiab + sc.psi(a)),
s2 - n * (-psiab + sc.psi(b))]
return func
class beta_gen(rv_continuous):
r"""A beta continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `beta` is:
.. math::
f(x, a, b) = \frac{\Gamma(a+b) x^{a-1} (1-x)^{b-1}}
{\Gamma(a) \Gamma(b)}
for :math:`0 <= x <= 1`, :math:`a > 0`, :math:`b > 0`, where
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
`beta` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, a, b, size=None, random_state=None):
return random_state.beta(a, b, size)
def _pdf(self, x, a, b):
# gamma(a+b) * x**(a-1) * (1-x)**(b-1)
# beta.pdf(x, a, b) = ------------------------------------
# gamma(a)*gamma(b)
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
lPx = sc.xlog1py(b - 1.0, -x) + sc.xlogy(a - 1.0, x)
lPx -= sc.betaln(a, b)
return lPx
def _cdf(self, x, a, b):
return sc.btdtr(a, b, x)
def _ppf(self, q, a, b):
return sc.btdtri(a, b, q)
def _stats(self, a, b):
mn = a*1.0 / (a + b)
var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0
g1 = 2.0*(b-a)*np.sqrt((1.0+a+b)/(a*b)) / (2+a+b)
g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b))
g2 /= a*b*(a+b+2)*(a+b+3)
return mn, var, g1, g2
def _fitstart(self, data):
g1 = _skew(data)
g2 = _kurtosis(data)
def func(x):
a, b = x
sk = 2*(b-a)*np.sqrt(a + b + 1) / (a + b + 2) / np.sqrt(a*b)
ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
ku /= a*b*(a+b+2)*(a+b+3)
ku *= 6
return [sk-g1, ku-g2]
a, b = optimize.fsolve(func, (1.0, 1.0))
return super(beta_gen, self)._fitstart(data, args=(a, b))
@extend_notes_in_docstring(rv_continuous, notes="""\
In the special case where both `floc` and `fscale` are given, a
`ValueError` is raised if any value `x` in `data` does not satisfy
`floc < x < floc + fscale`.\n\n""")
def fit(self, data, *args, **kwds):
# Override rv_continuous.fit, so we can more efficiently handle the
# case where floc and fscale are given.
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is None or fscale is None:
# do general fit
return super(beta_gen, self).fit(data, *args, **kwds)
# We already got these from kwds, so just pop them.
kwds.pop('floc', None)
kwds.pop('fscale', None)
f0 = _get_fixed_fit_value(kwds, ['f0', 'fa', 'fix_a'])
f1 = _get_fixed_fit_value(kwds, ['f1', 'fb', 'fix_b'])
_remove_optimizer_parameters(kwds)
if f0 is not None and f1 is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Special case: loc and scale are constrained, so we are fitting
# just the shape parameters. This can be done much more efficiently
# than the method used in `rv_continuous.fit`. (See the subsection
# "Two unknown parameters" in the section "Maximum likelihood" of
# the Wikipedia article on the Beta distribution for the formulas.)
if not np.isfinite(data).all():
raise RuntimeError("The data contains non-finite values.")
# Normalize the data to the interval [0, 1].
data = (np.ravel(data) - floc) / fscale
if np.any(data <= 0) or np.any(data >= 1):
raise FitDataError("beta", lower=floc, upper=floc + fscale)
xbar = data.mean()
if f0 is not None or f1 is not None:
# One of the shape parameters is fixed.
if f0 is not None:
# The shape parameter a is fixed, so swap the parameters
# and flip the data. We always solve for `a`. The result
# will be swapped back before returning.
b = f0
data = 1 - data
xbar = 1 - xbar
else:
b = f1
# Initial guess for a. Use the formula for the mean of the beta
# distribution, E[x] = a / (a + b), to generate a reasonable
# starting point based on the mean of the data and the given
# value of b.
a = b * xbar / (1 - xbar)
# Compute the MLE for `a` by solving _beta_mle_a.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_a, a,
args=(b, len(data), np.log(data).sum()),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a = theta[0]
if f0 is not None:
# The shape parameter a was fixed, so swap back the
# parameters.
a, b = b, a
else:
# Neither of the shape parameters is fixed.
# s1 and s2 are used in the extra arguments passed to _beta_mle_ab
# by optimize.fsolve.
s1 = np.log(data).sum()
s2 = sc.log1p(-data).sum()
# Use the "method of moments" to estimate the initial
# guess for a and b.
fac = xbar * (1 - xbar) / data.var(ddof=0) - 1
a = xbar * fac
b = (1 - xbar) * fac
# Compute the MLE for a and b by solving _beta_mle_ab.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_ab, [a, b],
args=(len(data), s1, s2),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a, b = theta
return a, b, floc, fscale
beta = beta_gen(a=0.0, b=1.0, name='beta')
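# Example (sketch): when both `floc` and `fscale` are fixed, the fast
# shape-only path above is taken; for a large enough sample the estimated
# shapes should land near the true (a, b).
#
# >>> from scipy.stats import beta
# >>> data = beta.rvs(2.0, 5.0, size=10000, random_state=1234)
# >>> beta.fit(data, floc=0, fscale=1)  # -> (a_hat, b_hat, 0, 1)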
class betaprime_gen(rv_continuous):
r"""A beta prime continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `betaprime` is:
.. math::
f(x, a, b) = \frac{x^{a-1} (1+x)^{-a-b}}{\beta(a, b)}
for :math:`x >= 0`, :math:`a > 0`, :math:`b > 0`, where
:math:`\beta(a, b)` is the beta function (see `scipy.special.beta`).
`betaprime` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, a, b, size=None, random_state=None):
u1 = gamma.rvs(a, size=size, random_state=random_state)
u2 = gamma.rvs(b, size=size, random_state=random_state)
return u1 / u2
def _pdf(self, x, a, b):
# betaprime.pdf(x, a, b) = x**(a-1) * (1+x)**(-a-b) / beta(a, b)
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
return sc.xlogy(a - 1.0, x) - sc.xlog1py(a + b, x) - sc.betaln(a, b)
def _cdf(self, x, a, b):
return sc.betainc(a, b, x/(1.+x))
def _munp(self, n, a, b):
if n == 1.0:
return np.where(b > 1,
a/(b-1.0),
np.inf)
elif n == 2.0:
return np.where(b > 2,
a*(a+1.0)/((b-2.0)*(b-1.0)),
np.inf)
elif n == 3.0:
return np.where(b > 3,
a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
np.inf)
elif n == 4.0:
return np.where(b > 4,
(a*(a + 1.0)*(a + 2.0)*(a + 3.0) /
((b - 4.0)*(b - 3.0)*(b - 2.0)*(b - 1.0))),
np.inf)
else:
raise NotImplementedError
betaprime = betaprime_gen(a=0.0, name='betaprime')
class bradford_gen(rv_continuous):
r"""A Bradford continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `bradford` is:
.. math::
f(x, c) = \frac{c}{\log(1+c) (1+cx)}
for :math:`0 <= x <= 1` and :math:`c > 0`.
`bradford` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# bradford.pdf(x, c) = c / (log(1+c) * (1+c*x))
return c / (c*x + 1.0) / sc.log1p(c)
def _cdf(self, x, c):
return sc.log1p(c*x) / sc.log1p(c)
def _ppf(self, q, c):
return sc.expm1(q * sc.log1p(c)) / c
def _stats(self, c, moments='mv'):
k = np.log(1.0+c)
mu = (c-k)/(c*k)
mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
g1 = None
g2 = None
if 's' in moments:
g1 = np.sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
g1 /= np.sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
if 'k' in moments:
g2 = (c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) +
6*c*k*k*(3*k-14) + 12*k**3)
g2 /= 3*c*(c*(k-2)+2*k)**2
return mu, mu2, g1, g2
def _entropy(self, c):
k = np.log(1+c)
return k/2.0 - np.log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford')
class burr_gen(rv_continuous):
r"""A Burr (Type III) continuous random variable.
%(before_notes)s
See Also
--------
fisk : a special case of either `burr` or `burr12` with ``d=1``
burr12 : Burr Type XII distribution
mielke : Mielke Beta-Kappa / Dagum distribution
Notes
-----
The probability density function for `burr` is:
.. math::
f(x, c, d) = c d x^{-c - 1} / (1 + x^{-c})^{d + 1}
for :math:`x >= 0` and :math:`c, d > 0`.
`burr` takes :math:`c` and :math:`d` as shape parameters.
This is the PDF corresponding to the third CDF given in Burr's list;
specifically, it is equation (11) in Burr's paper [1]_. The distribution
is also commonly referred to as the Dagum distribution [2]_. If the
parameter :math:`c < 1` then the mean of the distribution does not
exist and if :math:`c < 2` the variance does not exist [2]_.
The PDF is finite at the left endpoint :math:`x = 0` if :math:`c * d >= 1`.
%(after_notes)s
References
----------
.. [1] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
.. [2] https://en.wikipedia.org/wiki/Dagum_distribution
.. [3] Kleiber, Christian. "A guide to the Dagum distributions."
Modeling Income Distributions and Lorenz Curves pp 97-117 (2008).
%(example)s
"""
# Do not set _support_mask to rv_continuous._open_support_mask
# Whether the left-hand endpoint is suitable for pdf evaluation is dependent
# on the values of c and d: if c*d >= 1, the pdf is finite, otherwise infinite.
def _pdf(self, x, c, d):
# burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1)
output = _lazywhere(x == 0, [x, c, d],
lambda x_, c_, d_: c_ * d_ * (x_**(c_*d_-1)) / (1 + x_**c_),
f2 = lambda x_, c_, d_: (c_ * d_ * (x_ ** (-c_ - 1.0)) /
((1 + x_ ** (-c_)) ** (d_ + 1.0))))
if output.ndim == 0:
return output[()]
return output
def _logpdf(self, x, c, d):
output = _lazywhere(
x == 0, [x, c, d],
lambda x_, c_, d_: (np.log(c_) + np.log(d_) + sc.xlogy(c_*d_ - 1, x_)
- (d_+1) * sc.log1p(x_**(c_))),
f2 = lambda x_, c_, d_: (np.log(c_) + np.log(d_)
+ sc.xlogy(-c_ - 1, x_)
- sc.xlog1py(d_+1, x_**(-c_))))
if output.ndim == 0:
return output[()]
return output
def _cdf(self, x, c, d):
return (1 + x**(-c))**(-d)
def _logcdf(self, x, c, d):
return sc.log1p(x**(-c)) * (-d)
def _sf(self, x, c, d):
return np.exp(self._logsf(x, c, d))
def _logsf(self, x, c, d):
return np.log1p(- (1 + x**(-c))**(-d))
def _ppf(self, q, c, d):
return (q**(-1.0/d) - 1)**(-1.0/c)
def _stats(self, c, d):
nc = np.arange(1, 5).reshape(4,1) / c
# ek is the kth raw moment; e1 is the mean, e2 - e1**2 the variance, etc.
e1, e2, e3, e4 = sc.beta(d + nc, 1. - nc) * d
mu = np.where(c > 1.0, e1, np.nan)
mu2_if_c = e2 - mu**2
mu2 = np.where(c > 2.0, mu2_if_c, np.nan)
g1 = _lazywhere(
c > 3.0,
(c, e1, e2, e3, mu2_if_c),
lambda c, e1, e2, e3, mu2_if_c: (e3 - 3*e2*e1 + 2*e1**3) / np.sqrt((mu2_if_c)**3),
fillvalue=np.nan)
g2 = _lazywhere(
c > 4.0,
(c, e1, e2, e3, e4, mu2_if_c),
lambda c, e1, e2, e3, e4, mu2_if_c: (
((e4 - 4*e3*e1 + 6*e2*e1**2 - 3*e1**4) / mu2_if_c**2) - 3),
fillvalue=np.nan)
return mu, mu2, g1, g2
def _munp(self, n, c, d):
def __munp(n, c, d):
nc = 1. * n / c
return d * sc.beta(1.0 - nc, d + nc)
n, c, d = np.asarray(n), np.asarray(c), np.asarray(d)
return _lazywhere((c > n) & (n == n) & (d == d), (c, d, n),
lambda c, d, n: __munp(n, c, d),
np.nan)
burr = burr_gen(a=0.0, name='burr')
class burr12_gen(rv_continuous):
r"""A Burr (Type XII) continuous random variable.
%(before_notes)s
See Also
--------
fisk : a special case of either `burr` or `burr12` with ``d=1``
burr : Burr Type III distribution
Notes
-----
The probability density function for `burr12` is:
.. math::
f(x, c, d) = c d x^{c-1} / (1 + x^c)^{d + 1}
for :math:`x >= 0` and :math:`c, d > 0`.
`burr12` takes ``c`` and ``d`` as shape parameters for :math:`c`
and :math:`d`.
This is the PDF corresponding to the twelfth CDF given in Burr's list;
specifically, it is equation (20) in Burr's paper [1]_.
%(after_notes)s
The Burr type 12 distribution is also sometimes referred to as
the Singh-Maddala distribution from NIST [2]_.
References
----------
.. [1] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
.. [2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/b12pdf.htm
.. [3] "Burr distribution",
https://en.wikipedia.org/wiki/Burr_distribution
%(example)s
"""
def _pdf(self, x, c, d):
# burr12.pdf(x, c, d) = c * d * x**(c-1) * (1+x**(c))**(-d-1)
return np.exp(self._logpdf(x, c, d))
def _logpdf(self, x, c, d):
return np.log(c) + np.log(d) + sc.xlogy(c - 1, x) + sc.xlog1py(-d-1, x**c)
def _cdf(self, x, c, d):
return -sc.expm1(self._logsf(x, c, d))
def _logcdf(self, x, c, d):
return sc.log1p(-(1 + x**c)**(-d))
def _sf(self, x, c, d):
return np.exp(self._logsf(x, c, d))
def _logsf(self, x, c, d):
return sc.xlog1py(-d, x**c)
def _ppf(self, q, c, d):
# The following is an implementation of
# ((1 - q)**(-1.0/d) - 1)**(1.0/c)
# that does a better job handling small values of q.
return sc.expm1(-1/d * sc.log1p(-q))**(1/c)
def _munp(self, n, c, d):
nc = 1. * n / c
return d * sc.beta(1.0 + nc, d - nc)
burr12 = burr12_gen(a=0.0, name='burr12')
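# Example (sketch): the expm1/log1p form of `_ppf` above keeps precision for
# tiny q, where the naive ((1 - q)**(-1.0/d) - 1)**(1.0/c) would suffer
# catastrophic cancellation.
#
# >>> from scipy.stats import burr12
# >>> q, c, d = 1e-12, 2.0, 3.0
# >>> burr12.ppf(q, c, d)  # ~ (q/d)**(1/c) = 5.7735e-07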
class fisk_gen(burr_gen):
r"""A Fisk continuous random variable.
The Fisk distribution is also known as the log-logistic distribution.
%(before_notes)s
Notes
-----
The probability density function for `fisk` is:
.. math::
f(x, c) = c x^{-c-1} (1 + x^{-c})^{-2}
for :math:`x >= 0` and :math:`c > 0`.
`fisk` takes ``c`` as a shape parameter for :math:`c`.
`fisk` is a special case of `burr` or `burr12` with ``d=1``.
%(after_notes)s
See Also
--------
burr
%(example)s
"""
def _pdf(self, x, c):
# fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2)
return burr._pdf(x, c, 1.0)
def _cdf(self, x, c):
return burr._cdf(x, c, 1.0)
def _sf(self, x, c):
return burr._sf(x, c, 1.0)
def _logpdf(self, x, c):
# fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2)
return burr._logpdf(x, c, 1.0)
def _logcdf(self, x, c):
return burr._logcdf(x, c, 1.0)
def _logsf(self, x, c):
return burr._logsf(x, c, 1.0)
def _ppf(self, x, c):
return burr._ppf(x, c, 1.0)
def _munp(self, n, c):
return burr._munp(n, c, 1.0)
def _stats(self, c):
return burr._stats(c, 1.0)
def _entropy(self, c):
return 2 - np.log(c)
fisk = fisk_gen(a=0.0, name='fisk')
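# Example (sketch): `fisk` simply delegates to `burr` with d = 1.
#
# >>> import numpy as np
# >>> from scipy.stats import burr, fisk
# >>> x, c = 1.5, 2.0
# >>> np.isclose(fisk.pdf(x, c), burr.pdf(x, c, 1.0))
# True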
# median = loc
class cauchy_gen(rv_continuous):
r"""A Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `cauchy` is
.. math::
f(x) = \frac{1}{\pi (1 + x^2)}
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# cauchy.pdf(x) = 1 / (pi * (1 + x**2))
return 1.0/np.pi/(1.0+x*x)
def _cdf(self, x):
return 0.5 + 1.0/np.pi*np.arctan(x)
def _ppf(self, q):
return np.tan(np.pi*q-np.pi/2.0)
def _sf(self, x):
return 0.5 - 1.0/np.pi*np.arctan(x)
def _isf(self, q):
return np.tan(np.pi/2.0-np.pi*q)
def _stats(self):
return np.nan, np.nan, np.nan, np.nan
def _entropy(self):
return np.log(4*np.pi)
def _fitstart(self, data, args=None):
# Initialize ML guesses using quartiles instead of moments.
p25, p50, p75 = np.percentile(data, [25, 50, 75])
return p50, (p75 - p25)/2
cauchy = cauchy_gen(name='cauchy')
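# Example (sketch): `_fitstart` above seeds the Cauchy MLE with the sample
# median (location) and half the interquartile range (scale), since the
# moments do not exist.
#
# >>> from scipy.stats import cauchy
# >>> data = cauchy.rvs(loc=3.0, scale=0.5, size=1000, random_state=42)
# >>> cauchy.fit(data)  # -> estimates near (3.0, 0.5)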
class chi_gen(rv_continuous):
r"""A chi continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi` is:
.. math::
f(x, k) = \frac{1}{2^{k/2-1} \Gamma \left( k/2 \right)}
x^{k-1} \exp \left( -x^2/2 \right)
for :math:`x >= 0` and :math:`k > 0` (degrees of freedom, denoted ``df``
in the implementation). :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
Special cases of `chi` are:
- ``chi(1, loc, scale)`` is equivalent to `halfnorm`
- ``chi(2, 0, scale)`` is equivalent to `rayleigh`
- ``chi(3, 0, scale)`` is equivalent to `maxwell`
`chi` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df, size=None, random_state=None):
return np.sqrt(chi2.rvs(df, size=size, random_state=random_state))
def _pdf(self, x, df):
# x**(df-1) * exp(-x**2/2)
# chi.pdf(x, df) = -------------------------
# 2**(df/2-1) * gamma(df/2)
return np.exp(self._logpdf(x, df))
def _logpdf(self, x, df):
l = np.log(2) - .5*np.log(2)*df - sc.gammaln(.5*df)
return l + sc.xlogy(df - 1., x) - .5*x**2
def _cdf(self, x, df):
return sc.gammainc(.5*df, .5*x**2)
def _ppf(self, q, df):
return np.sqrt(2*sc.gammaincinv(.5*df, q))
def _stats(self, df):
mu = np.sqrt(2)*sc.gamma(df/2.0+0.5)/sc.gamma(df/2.0)
mu2 = df - mu*mu
g1 = (2*mu**3.0 + mu*(1-2*df))/np.asarray(np.power(mu2, 1.5))
g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
g2 /= np.asarray(mu2**2.0)
return mu, mu2, g1, g2
chi = chi_gen(a=0.0, name='chi')
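# Example (sketch): one of the special cases listed in the docstring,
# chi(df=3) is the `maxwell` distribution.
#
# >>> import numpy as np
# >>> from scipy.stats import chi, maxwell
# >>> np.isclose(chi.pdf(1.0, 3), maxwell.pdf(1.0))
# True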
## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2)
class chi2_gen(rv_continuous):
r"""A chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi2` is:
.. math::
f(x, k) = \frac{1}{2^{k/2} \Gamma \left( k/2 \right)}
x^{k/2-1} \exp \left( -x/2 \right)
for :math:`x > 0` and :math:`k > 0` (degrees of freedom, denoted ``df``
in the implementation).
`chi2` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df, size=None, random_state=None):
return random_state.chisquare(df, size)
def _pdf(self, x, df):
# chi2.pdf(x, df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
return np.exp(self._logpdf(x, df))
def _logpdf(self, x, df):
return sc.xlogy(df/2.-1, x) - x/2. - sc.gammaln(df/2.) - (np.log(2)*df)/2.
def _cdf(self, x, df):
return sc.chdtr(df, x)
def _sf(self, x, df):
return sc.chdtrc(df, x)
def _isf(self, p, df):
return sc.chdtri(df, p)
def _ppf(self, p, df):
return 2*sc.gammaincinv(df/2, p)
def _stats(self, df):
mu = df
mu2 = 2*df
g1 = 2*np.sqrt(2.0/df)
g2 = 12.0/df
return mu, mu2, g1, g2
chi2 = chi2_gen(a=0.0, name='chi2')
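# Example (sketch): per the comment above, chi2(df) coincides with a gamma
# distribution with shape df/2 and scale 2.
#
# >>> import numpy as np
# >>> from scipy.stats import chi2, gamma
# >>> np.isclose(chi2.pdf(3.0, df=4), gamma.pdf(3.0, a=2, scale=2))
# True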
class cosine_gen(rv_continuous):
r"""A cosine continuous random variable.
%(before_notes)s
Notes
-----
The cosine distribution is an approximation to the normal distribution.
The probability density function for `cosine` is:
.. math::
f(x) = \frac{1}{2\pi} (1+\cos(x))
for :math:`-\pi \le x \le \pi`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
return 1.0/2/np.pi*(1+np.cos(x))
def _cdf(self, x):
return 1.0/2/np.pi*(np.pi + x + np.sin(x))
def _stats(self):
return 0.0, np.pi*np.pi/3.0-2.0, 0.0, -6.0*(np.pi**4-90)/(5.0*(np.pi*np.pi-6)**2)
def _entropy(self):
return np.log(4*np.pi)-1.0
cosine = cosine_gen(a=-np.pi, b=np.pi, name='cosine')
class dgamma_gen(rv_continuous):
r"""A double gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dgamma` is:
.. math::
f(x, a) = \frac{1}{2\Gamma(a)} |x|^{a-1} \exp(-|x|)
for a real number :math:`x` and :math:`a > 0`. :math:`\Gamma` is the
gamma function (`scipy.special.gamma`).
`dgamma` takes ``a`` as a shape parameter for :math:`a`.
%(after_notes)s
%(example)s
"""
def _rvs(self, a, size=None, random_state=None):
u = random_state.uniform(size=size)
gm = gamma.rvs(a, size=size, random_state=random_state)
return gm * np.where(u >= 0.5, 1, -1)
def _pdf(self, x, a):
# dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))
ax = abs(x)
return 1.0/(2*sc.gamma(a))*ax**(a-1.0) * np.exp(-ax)
def _logpdf(self, x, a):
ax = abs(x)
return sc.xlogy(a - 1.0, ax) - ax - np.log(2) - sc.gammaln(a)
def _cdf(self, x, a):
fac = 0.5*sc.gammainc(a, abs(x))
return np.where(x > 0, 0.5 + fac, 0.5 - fac)
def _sf(self, x, a):
fac = 0.5*sc.gammainc(a, abs(x))
return np.where(x > 0, 0.5-fac, 0.5+fac)
def _ppf(self, q, a):
fac = sc.gammainccinv(a, 1-abs(2*q-1))
return np.where(q > 0.5, fac, -fac)
def _stats(self, a):
mu2 = a*(a+1.0)
return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma')
class dweibull_gen(rv_continuous):
r"""A double Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dweibull` is given by
.. math::
f(x, c) = c / 2 |x|^{c-1} \exp(-|x|^c)
for a real number :math:`x` and :math:`c > 0`.
`dweibull` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _rvs(self, c, size=None, random_state=None):
u = random_state.uniform(size=size)
w = weibull_min.rvs(c, size=size, random_state=random_state)
return w * (np.where(u >= 0.5, 1, -1))
def _pdf(self, x, c):
# dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)
ax = abs(x)
Px = c / 2.0 * ax**(c-1.0) * np.exp(-ax**c)
return Px
def _logpdf(self, x, c):
ax = abs(x)
return np.log(c) - np.log(2.0) + sc.xlogy(c - 1.0, ax) - ax**c
def _cdf(self, x, c):
Cx1 = 0.5 * np.exp(-abs(x)**c)
return np.where(x > 0, 1 - Cx1, Cx1)
def _ppf(self, q, c):
fac = 2. * np.where(q <= 0.5, q, 1. - q)
fac = np.power(-np.log(fac), 1.0 / c)
return np.where(q > 0.5, fac, -fac)
def _munp(self, n, c):
return (1 - (n % 2)) * sc.gamma(1.0 + 1.0 * n / c)
# since we know that all odd moments are zeros, return them at once.
# returning Nones from _stats makes the public stats call _munp
# so overall we're saving one or two gamma function evaluations here.
def _stats(self, c):
return 0, None, 0, None
dweibull = dweibull_gen(name='dweibull')
## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale)
class expon_gen(rv_continuous):
r"""An exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `expon` is:
.. math::
f(x) = \exp(-x)
for :math:`x \ge 0`.
%(after_notes)s
A common parameterization for `expon` is in terms of the rate parameter
``lambda``, such that ``pdf = lambda * exp(-lambda * x)``. This
parameterization corresponds to using ``scale = 1 / lambda``.
%(example)s
"""
def _rvs(self, size=None, random_state=None):
return random_state.standard_exponential(size)
def _pdf(self, x):
# expon.pdf(x) = exp(-x)
return np.exp(-x)
def _logpdf(self, x):
return -x
def _cdf(self, x):
return -sc.expm1(-x)
def _ppf(self, q):
return -sc.log1p(-q)
def _sf(self, x):
return np.exp(-x)
def _logsf(self, x):
return -x
def _isf(self, q):
return -np.log(q)
def _stats(self):
return 1.0, 1.0, 2.0, 6.0
def _entropy(self):
return 1.0
@replace_notes_in_docstring(rv_continuous, notes="""\
This function uses explicit formulas for the maximum likelihood
estimation of the exponential distribution parameters, so the
`optimizer`, `loc` and `scale` keyword arguments are ignored.\n\n""")
def fit(self, data, *args, **kwds):
if len(args) > 0:
raise TypeError("Too many arguments.")
floc = kwds.pop('floc', None)
fscale = kwds.pop('fscale', None)
_remove_optimizer_parameters(kwds)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if not np.isfinite(data).all():
raise RuntimeError("The data contains non-finite values.")
data_min = data.min()
if floc is None:
# ML estimate of the location is the minimum of the data.
loc = data_min
else:
loc = floc
if data_min < loc:
# There are values that are less than the specified loc.
raise FitDataError("expon", lower=floc, upper=np.inf)
if fscale is None:
# ML estimate of the scale is the shifted mean.
scale = data.mean() - loc
else:
scale = fscale
# We expect the return values to be floating point, so ensure it
# by explicitly converting to float.
return float(loc), float(scale)
expon = expon_gen(a=0.0, name='expon')
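# Example (sketch): the rate parameterization mentioned in the notes,
# lambda * exp(-lambda * x), corresponds to scale = 1/lambda.
#
# >>> import numpy as np
# >>> from scipy.stats import expon
# >>> lam, x = 2.0, 1.0
# >>> np.isclose(expon.pdf(x, scale=1/lam), lam * np.exp(-lam * x))
# True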
## Exponentially Modified Normal (exponential distribution
## convolved with a Normal).
## This is called an exponentially modified gaussian on wikipedia
class exponnorm_gen(rv_continuous):
r"""An exponentially modified Normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponnorm` is:
.. math::
f(x, K) = \frac{1}{2K} \exp\left(\frac{1}{2 K^2} - x / K \right)
\text{erfc}\left(-\frac{x - 1/K}{\sqrt{2}}\right)
where :math:`x` is a real number and :math:`K > 0`.
It can be thought of as the sum of a standard normal random variable
and an independent exponentially distributed random variable with rate
``1/K``.
%(after_notes)s
An alternative parameterization of this distribution (for example, in
`Wikipedia <https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution>`_)
involves three parameters, :math:`\mu`, :math:`\lambda` and
:math:`\sigma`.
In the present parameterization this corresponds to having ``loc`` and
``scale`` equal to :math:`\mu` and :math:`\sigma`, respectively, and
shape parameter :math:`K = 1/(\sigma\lambda)`.
.. versionadded:: 0.16.0
%(example)s
"""
def _rvs(self, K, size=None, random_state=None):
expval = random_state.standard_exponential(size) * K
gval = random_state.standard_normal(size)
return expval + gval
def _pdf(self, x, K):
# exponnorm.pdf(x, K) =
# 1/(2*K) * exp(1/(2*K**2) - x/K) * erfc(-(x - 1/K) / sqrt(2))
invK = 1.0 / K
exparg = 0.5 * invK**2 - invK * x
# Avoid overflows; setting np.exp(exparg) to the max float works
# all right here
expval = _lazywhere(exparg < _LOGXMAX, (exparg,), np.exp, _XMAX)
return 0.5 * invK * (expval * sc.erfc(-(x - invK) / np.sqrt(2)))
def _logpdf(self, x, K):
invK = 1.0 / K
exparg = 0.5 * invK**2 - invK * x
return exparg + np.log(0.5 * invK * sc.erfc(-(x - invK) / np.sqrt(2)))
def _cdf(self, x, K):
invK = 1.0 / K
expval = invK * (0.5 * invK - x)
return _norm_cdf(x) - np.exp(expval) * _norm_cdf(x - invK)
def _sf(self, x, K):
invK = 1.0 / K
expval = invK * (0.5 * invK - x)
return _norm_cdf(-x) + np.exp(expval) * _norm_cdf(x - invK)
def _stats(self, K):
K2 = K * K
opK2 = 1.0 + K2
skw = 2 * K**3 * opK2**(-1.5)
krt = 6.0 * K2 * K2 * opK2**(-2)
return K, opK2, skw, krt
exponnorm = exponnorm_gen(name='exponnorm')
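# Example (sketch): mapping the (mu, sigma, lambda) parameterization from
# the notes onto this one: loc = mu, scale = sigma, K = 1/(sigma*lambda).
# The mean is then mu + 1/lambda.
#
# >>> from scipy.stats import exponnorm
# >>> mu, sigma, lam = 1.0, 2.0, 0.5
# >>> exponnorm.mean(1.0 / (sigma * lam), loc=mu, scale=sigma)  # mu + 1/lam
# 3.0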
class exponweib_gen(rv_continuous):
r"""An exponentiated Weibull continuous random variable.
%(before_notes)s
See Also
--------
weibull_min, numpy.random.RandomState.weibull
Notes
-----
The probability density function for `exponweib` is:
.. math::
f(x, a, c) = a c [1-\exp(-x^c)]^{a-1} \exp(-x^c) x^{c-1}
and its cumulative distribution function is:
.. math::
F(x, a, c) = [1-\exp(-x^c)]^a
for :math:`x > 0`, :math:`a > 0`, :math:`c > 0`.
`exponweib` takes :math:`a` and :math:`c` as shape parameters:
* :math:`a` is the exponentiation parameter,
with the special case :math:`a=1` corresponding to the
(non-exponentiated) Weibull distribution `weibull_min`.
* :math:`c` is the shape parameter of the non-exponentiated Weibull law.
%(after_notes)s
References
----------
https://en.wikipedia.org/wiki/Exponentiated_Weibull_distribution
%(example)s
"""
def _pdf(self, x, a, c):
# exponweib.pdf(x, a, c) =
# a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1)
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
negxc = -x**c
exm1c = -sc.expm1(negxc)
logp = (np.log(a) + np.log(c) + sc.xlogy(a - 1.0, exm1c) +
negxc + sc.xlogy(c - 1.0, x))
return logp
def _cdf(self, x, a, c):
exm1c = -sc.expm1(-x**c)
return exm1c**a
def _ppf(self, q, a, c):
return (-sc.log1p(-q**(1.0/a)))**np.asarray(1.0/c)
exponweib = exponweib_gen(a=0.0, name='exponweib')
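# Example (sketch): with a = 1 the exponentiation is trivial and `exponweib`
# reduces to `weibull_min`, as noted in the docstring.
#
# >>> import numpy as np
# >>> from scipy.stats import exponweib, weibull_min
# >>> np.isclose(exponweib.pdf(1.2, 1.0, 2.5), weibull_min.pdf(1.2, 2.5))
# True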
class exponpow_gen(rv_continuous):
r"""An exponential power continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponpow` is:
.. math::
f(x, b) = b x^{b-1} \exp(1 + x^b - \exp(x^b))
for :math:`x \ge 0`, :math:`b > 0`. Note that this is a different
distribution from the exponential power distribution that is also known
under the names "generalized normal" or "generalized Gaussian".
`exponpow` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
References
----------
http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Exponentialpower.pdf
%(example)s
"""
def _pdf(self, x, b):
# exponpow.pdf(x, b) = b * x**(b-1) * exp(1 + x**b - exp(x**b))
return np.exp(self._logpdf(x, b))
def _logpdf(self, x, b):
xb = x**b
f = 1 + np.log(b) + sc.xlogy(b - 1.0, x) + xb - np.exp(xb)
return f
def _cdf(self, x, b):
return -sc.expm1(-sc.expm1(x**b))
def _sf(self, x, b):
return np.exp(-sc.expm1(x**b))
def _isf(self, x, b):
return (sc.log1p(-np.log(x)))**(1./b)
def _ppf(self, q, b):
return pow(sc.log1p(-sc.log1p(-q)), 1.0/b)
exponpow = exponpow_gen(a=0.0, name='exponpow')
class fatiguelife_gen(rv_continuous):
r"""A fatigue-life (Birnbaum-Saunders) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `fatiguelife` is:
.. math::
f(x, c) = \frac{x+1}{2c\sqrt{2\pi x^3}} \exp(-\frac{(x-1)^2}{2x c^2})
for :math:`x >= 0` and :math:`c > 0`.
`fatiguelife` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
.. [1] "Birnbaum-Saunders distribution",
https://en.wikipedia.org/wiki/Birnbaum-Saunders_distribution
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, c, size=None, random_state=None):
z = random_state.standard_normal(size)
x = 0.5*c*z
x2 = x*x
t = 1.0 + 2*x2 + 2*x*np.sqrt(1 + x2)
return t
def _pdf(self, x, c):
# fatiguelife.pdf(x, c) =
# (x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return (np.log(x+1) - (x-1)**2 / (2.0*x*c**2) - np.log(2*c) -
0.5*(np.log(2*np.pi) + 3*np.log(x)))
def _cdf(self, x, c):
return _norm_cdf(1.0 / c * (np.sqrt(x) - 1.0/np.sqrt(x)))
def _ppf(self, q, c):
tmp = c*sc.ndtri(q)
return 0.25 * (tmp + np.sqrt(tmp**2 + 4))**2
def _stats(self, c):
# NB: the formula for kurtosis in wikipedia seems to have an error:
# it's 40, not 41. At least it disagrees with the one from Wolfram
# Alpha. And the latter one, below, passes the tests, while the wiki
# one doesn't. So far I haven't had the guts to actually check the
# coefficients from the expressions for the raw moments.
c2 = c*c
mu = c2 / 2.0 + 1.0
den = 5.0 * c2 + 4.0
mu2 = c2*den / 4.0
g1 = 4 * c * (11*c2 + 6.0) / np.power(den, 1.5)
g2 = 6 * c2 * (93*c2 + 40.0) / den**2.0
return mu, mu2, g1, g2
fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife')
class foldcauchy_gen(rv_continuous):
r"""A folded Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldcauchy` is:
.. math::
f(x, c) = \frac{1}{\pi (1+(x-c)^2)} + \frac{1}{\pi (1+(x+c)^2)}
for :math:`x \ge 0`.
`foldcauchy` takes ``c`` as a shape parameter for :math:`c`.
%(example)s
"""
def _rvs(self, c, size=None, random_state=None):
return abs(cauchy.rvs(loc=c, size=size,
random_state=random_state))
def _pdf(self, x, c):
# foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))
return 1.0/np.pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))
def _cdf(self, x, c):
return 1.0/np.pi*(np.arctan(x-c) + np.arctan(x+c))
def _stats(self, c):
return np.inf, np.inf, np.nan, np.nan
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy')
class f_gen(rv_continuous):
r"""An F continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `f` is:
.. math::
f(x, df_1, df_2) = \frac{df_2^{df_2/2} df_1^{df_1/2} x^{df_1 / 2-1}}
{(df_2+df_1 x)^{(df_1+df_2)/2}
B(df_1/2, df_2/2)}
for :math:`x > 0`.
`f` takes ``dfn`` and ``dfd`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, dfn, dfd, size=None, random_state=None):
return random_state.f(dfn, dfd, size)
def _pdf(self, x, dfn, dfd):
# df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
# F.pdf(x, df1, df2) = --------------------------------------------
# (df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)
return np.exp(self._logpdf(x, dfn, dfd))
def _logpdf(self, x, dfn, dfd):
n = 1.0 * dfn
m = 1.0 * dfd
lPx = m/2 * np.log(m) + n/2 * np.log(n) + sc.xlogy(n/2 - 1, x)
lPx -= ((n+m)/2) * np.log(m + n*x) + sc.betaln(n/2, m/2)
return lPx
def _cdf(self, x, dfn, dfd):
return sc.fdtr(dfn, dfd, x)
def _sf(self, x, dfn, dfd):
return sc.fdtrc(dfn, dfd, x)
def _ppf(self, q, dfn, dfd):
return sc.fdtri(dfn, dfd, q)
def _stats(self, dfn, dfd):
v1, v2 = 1. * dfn, 1. * dfd
v2_2, v2_4, v2_6, v2_8 = v2 - 2., v2 - 4., v2 - 6., v2 - 8.
mu = _lazywhere(
v2 > 2, (v2, v2_2),
lambda v2, v2_2: v2 / v2_2,
np.inf)
mu2 = _lazywhere(
v2 > 4, (v1, v2, v2_2, v2_4),
lambda v1, v2, v2_2, v2_4:
2 * v2 * v2 * (v1 + v2_2) / (v1 * v2_2**2 * v2_4),
np.inf)
g1 = _lazywhere(
v2 > 6, (v1, v2_2, v2_4, v2_6),
lambda v1, v2_2, v2_4, v2_6:
(2 * v1 + v2_2) / v2_6 * np.sqrt(v2_4 / (v1 * (v1 + v2_2))),
np.nan)
g1 *= np.sqrt(8.)
g2 = _lazywhere(
v2 > 8, (g1, v2_6, v2_8),
lambda g1, v2_6, v2_8: (8 + g1 * g1 * v2_6) / v2_8,
np.nan)
g2 *= 3. / 2.
return mu, mu2, g1, g2
f = f_gen(a=0.0, name='f')
## Folded Normal
## abs(Z) where Z is normal with mu=L and std=S, so that c=abs(L)/S
##
## note: the regress docs have the scale parameter correct, but the first
## parameter given is a shape parameter, A = c * scale
## Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous):
r"""A folded normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldnorm` is:
.. math::
f(x, c) = \sqrt{2/\pi} cosh(c x) \exp(-\frac{x^2+c^2}{2})
for :math:`c \ge 0`.
`foldnorm` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return c >= 0
def _rvs(self, c, size=None, random_state=None):
return abs(random_state.standard_normal(size) + c)
def _pdf(self, x, c):
# foldnormal.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
return _norm_pdf(x + c) + _norm_pdf(x-c)
def _cdf(self, x, c):
return _norm_cdf(x-c) + _norm_cdf(x+c) - 1.0
def _stats(self, c):
# Regina C. Elandt, Technometrics 3, 551 (1961)
# https://www.jstor.org/stable/1266561
#
c2 = c*c
expfac = np.exp(-0.5*c2) / np.sqrt(2.*np.pi)
mu = 2.*expfac + c * sc.erf(c/np.sqrt(2))
mu2 = c2 + 1 - mu*mu
g1 = 2. * (mu*mu*mu - c2*mu - expfac)
g1 /= np.power(mu2, 1.5)
g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu
g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2
g2 = g2 / mu2**2.0 - 3.
return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0, name='foldnorm')
class weibull_min_gen(rv_continuous):
r"""Weibull minimum continuous random variable.
The Weibull Minimum Extreme Value distribution, from extreme value theory
(Fisher-Gnedenko theorem), is also often simply called the Weibull
distribution. It arises as the limiting distribution of the rescaled
minimum of iid random variables.
%(before_notes)s
See Also
--------
weibull_max, numpy.random.RandomState.weibull, exponweib
Notes
-----
The probability density function for `weibull_min` is:
.. math::
f(x, c) = c x^{c-1} \exp(-x^c)
for :math:`x > 0`, :math:`c > 0`.
`weibull_min` takes ``c`` as a shape parameter for :math:`c`
(named :math:`k` in the Wikipedia article and :math:`a` in
``numpy.random.weibull``). Special shape values are :math:`c=1` and
:math:`c=2`, where the Weibull distribution reduces to the `expon` and
`rayleigh` distributions respectively.
%(after_notes)s
References
----------
https://en.wikipedia.org/wiki/Weibull_distribution
https://en.wikipedia.org/wiki/Fisher-Tippett-Gnedenko_theorem
%(example)s
"""
def _pdf(self, x, c):
# frechet_r.pdf(x, c) = c * x**(c-1) * exp(-x**c)
return c*pow(x, c-1)*np.exp(-pow(x, c))
def _logpdf(self, x, c):
return np.log(c) + sc.xlogy(c - 1, x) - pow(x, c)
def _cdf(self, x, c):
return -sc.expm1(-pow(x, c))
def _sf(self, x, c):
return np.exp(-pow(x, c))
def _logsf(self, x, c):
return -pow(x, c)
def _ppf(self, q, c):
return pow(-sc.log1p(-q), 1.0/c)
def _munp(self, n, c):
return sc.gamma(1.0+n*1.0/c)
def _entropy(self, c):
return -_EULER / c - np.log(c) + _EULER + 1
weibull_min = weibull_min_gen(a=0.0, name='weibull_min')
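# Example (sketch): the special shape values from the docstring. c = 1 gives
# `expon`; c = 2 gives `rayleigh` (with the scales related by sqrt(2)).
#
# >>> import numpy as np
# >>> from scipy.stats import weibull_min, expon, rayleigh
# >>> x = 1.3
# >>> np.isclose(weibull_min.pdf(x, 1), expon.pdf(x))
# True
# >>> np.isclose(weibull_min.pdf(x, 2, scale=np.sqrt(2)), rayleigh.pdf(x))
# True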
class weibull_max_gen(rv_continuous):
r"""Weibull maximum continuous random variable.
The Weibull Maximum Extreme Value distribution, from extreme value theory
(Fisher-Gnedenko theorem), is the limiting distribution of the rescaled
maximum of iid random variables. It is the distribution of -X
if X is distributed according to `weibull_min`.
%(before_notes)s
See Also
--------
weibull_min
Notes
-----
The probability density function for `weibull_max` is:
.. math::
f(x, c) = c (-x)^{c-1} \exp(-(-x)^c)
for :math:`x < 0`, :math:`c > 0`.
`weibull_max` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
https://en.wikipedia.org/wiki/Weibull_distribution
https://en.wikipedia.org/wiki/Fisher-Tippett-Gnedenko_theorem
%(example)s
"""
def _pdf(self, x, c):
# frechet_l.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c)
return c*pow(-x, c-1)*np.exp(-pow(-x, c))
def _logpdf(self, x, c):
return np.log(c) + sc.xlogy(c-1, -x) - pow(-x, c)
def _cdf(self, x, c):
return np.exp(-pow(-x, c))
def _logcdf(self, x, c):
return -pow(-x, c)
def _sf(self, x, c):
return -sc.expm1(-pow(-x, c))
def _ppf(self, q, c):
return -pow(-np.log(q), 1.0/c)
def _munp(self, n, c):
val = sc.gamma(1.0+n*1.0/c)
if int(n) % 2:
sgn = -1
else:
sgn = 1
return sgn * val
def _entropy(self, c):
return -_EULER / c - np.log(c) + _EULER + 1
weibull_max = weibull_max_gen(b=0.0, name='weibull_max')
# Public methods to be deprecated in frechet_r and frechet_l:
# ['__call__', 'cdf', 'entropy', 'expect', 'fit', 'fit_loc_scale', 'freeze',
# 'interval', 'isf', 'logcdf', 'logpdf', 'logsf', 'mean', 'median', 'moment',
# 'nnlf', 'pdf', 'ppf', 'rvs', 'sf', 'stats', 'std', 'var']
_frechet_r_deprec_msg = """\
The distribution `frechet_r` is a synonym for `weibull_min`; this historical
usage is deprecated because of possible confusion with the (quite different)
Frechet distribution. To preserve the existing behavior of the program, use
`scipy.stats.weibull_min`. For the Frechet distribution (i.e. the Type II
extreme value distribution), use `scipy.stats.invweibull`."""
class frechet_r_gen(weibull_min_gen):
"""A Frechet right (or Weibull minimum) continuous random variable.
%(before_notes)s
See Also
--------
weibull_min : The same distribution as `frechet_r`.
Notes
-----
%(after_notes)s
%(example)s
"""
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def __call__(self, *args, **kwargs):
return weibull_min_gen.__call__(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def cdf(self, *args, **kwargs):
return weibull_min_gen.cdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def entropy(self, *args, **kwargs):
return weibull_min_gen.entropy(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def expect(self, *args, **kwargs):
return weibull_min_gen.expect(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def fit(self, *args, **kwargs):
return weibull_min_gen.fit(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def fit_loc_scale(self, *args, **kwargs):
return weibull_min_gen.fit_loc_scale(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def freeze(self, *args, **kwargs):
return weibull_min_gen.freeze(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def interval(self, *args, **kwargs):
return weibull_min_gen.interval(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def isf(self, *args, **kwargs):
return weibull_min_gen.isf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def logcdf(self, *args, **kwargs):
return weibull_min_gen.logcdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def logpdf(self, *args, **kwargs):
return weibull_min_gen.logpdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def logsf(self, *args, **kwargs):
return weibull_min_gen.logsf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def mean(self, *args, **kwargs):
return weibull_min_gen.mean(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def median(self, *args, **kwargs):
return weibull_min_gen.median(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def moment(self, *args, **kwargs):
return weibull_min_gen.moment(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def nnlf(self, *args, **kwargs):
return weibull_min_gen.nnlf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def pdf(self, *args, **kwargs):
return weibull_min_gen.pdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def ppf(self, *args, **kwargs):
return weibull_min_gen.ppf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def rvs(self, *args, **kwargs):
return weibull_min_gen.rvs(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def sf(self, *args, **kwargs):
return weibull_min_gen.sf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def stats(self, *args, **kwargs):
return weibull_min_gen.stats(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def std(self, *args, **kwargs):
return weibull_min_gen.std(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def var(self, *args, **kwargs):
return weibull_min_gen.var(self, *args, **kwargs)
frechet_r = frechet_r_gen(a=0.0, name='frechet_r')
_frechet_l_deprec_msg = """\
The distribution `frechet_l` is a synonym for `weibull_max`; this historical
usage is deprecated because of possible confusion with the (quite different)
Frechet distribution. To preserve the existing behavior of the program, use
`scipy.stats.weibull_max`. For the Frechet distribution (i.e. the Type II
extreme value distribution), use `scipy.stats.invweibull`."""
class frechet_l_gen(weibull_max_gen):
"""A Frechet left (or Weibull maximum) continuous random variable.
%(before_notes)s
See Also
--------
weibull_max : The same distribution as `frechet_l`.
Notes
-----
%(after_notes)s
%(example)s
"""
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def __call__(self, *args, **kwargs):
return weibull_max_gen.__call__(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def cdf(self, *args, **kwargs):
return weibull_max_gen.cdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def entropy(self, *args, **kwargs):
return weibull_max_gen.entropy(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def expect(self, *args, **kwargs):
return weibull_max_gen.expect(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def fit(self, *args, **kwargs):
return weibull_max_gen.fit(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def fit_loc_scale(self, *args, **kwargs):
return weibull_max_gen.fit_loc_scale(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def freeze(self, *args, **kwargs):
return weibull_max_gen.freeze(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def interval(self, *args, **kwargs):
return weibull_max_gen.interval(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def isf(self, *args, **kwargs):
return weibull_max_gen.isf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def logcdf(self, *args, **kwargs):
return weibull_max_gen.logcdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def logpdf(self, *args, **kwargs):
return weibull_max_gen.logpdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def logsf(self, *args, **kwargs):
return weibull_max_gen.logsf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def mean(self, *args, **kwargs):
return weibull_max_gen.mean(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def median(self, *args, **kwargs):
return weibull_max_gen.median(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def moment(self, *args, **kwargs):
return weibull_max_gen.moment(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def nnlf(self, *args, **kwargs):
return weibull_max_gen.nnlf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def pdf(self, *args, **kwargs):
return weibull_max_gen.pdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def ppf(self, *args, **kwargs):
return weibull_max_gen.ppf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def rvs(self, *args, **kwargs):
return weibull_max_gen.rvs(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def sf(self, *args, **kwargs):
return weibull_max_gen.sf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def stats(self, *args, **kwargs):
return weibull_max_gen.stats(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def std(self, *args, **kwargs):
return weibull_max_gen.std(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def var(self, *args, **kwargs):
return weibull_max_gen.var(self, *args, **kwargs)
frechet_l = frechet_l_gen(b=0.0, name='frechet_l')
class genlogistic_gen(rv_continuous):
r"""A generalized logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genlogistic` is:
.. math::
f(x, c) = c \frac{\exp(-x)}
{(1 + \exp(-x))^{c+1}}
for real :math:`x` and :math:`c > 0`.
`genlogistic` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return np.log(c) - x - (c+1.0)*sc.log1p(np.exp(-x))
def _cdf(self, x, c):
Cx = (1+np.exp(-x))**(-c)
return Cx
def _ppf(self, q, c):
vals = -np.log(pow(q, -1.0/c)-1)
return vals
def _stats(self, c):
mu = _EULER + sc.psi(c)
mu2 = np.pi*np.pi/6.0 + sc.zeta(2, c)
g1 = -2*sc.zeta(3, c) + 2*_ZETA3
g1 /= np.power(mu2, 1.5)
g2 = np.pi**4/15.0 + 6*sc.zeta(4, c)
g2 /= mu2**2.0
return mu, mu2, g1, g2
genlogistic = genlogistic_gen(name='genlogistic')
class genpareto_gen(rv_continuous):
r"""A generalized Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genpareto` is:
.. math::
f(x, c) = (1 + c x)^{-1 - 1/c}
defined for :math:`x \ge 0` if :math:`c \ge 0`, and for
:math:`0 \le x \le -1/c` if :math:`c < 0`.
`genpareto` takes ``c`` as a shape parameter for :math:`c`.
For :math:`c=0`, `genpareto` reduces to the exponential
distribution, `expon`:
.. math::
f(x, 0) = \exp(-x)
For :math:`c=-1`, `genpareto` is uniform on ``[0, 1]``:
.. math::
f(x, -1) = 1
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return np.isfinite(c)
def _get_support(self, c):
c = np.asarray(c)
b = _lazywhere(c < 0, (c,),
lambda c: -1. / c,
np.inf)
# the lower bound is always self.a; broadcast it to the shape of c
a = np.full(np.shape(c), fill_value=self.a)
return a, b
def _pdf(self, x, c):
# genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
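# The condition (x == x) is False only for nan inputs; for those, and
# for c == 0, the fill value -x is used (-x is the expon logpdf, and
# nan simply propagates).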
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.xlog1py(c + 1., c*x) / c,
-x)
def _cdf(self, x, c):
return -sc.inv_boxcox1p(-x, -c)
def _sf(self, x, c):
return sc.inv_boxcox(-x, -c)
def _logsf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.log1p(c*x) / c,
-x)
def _ppf(self, q, c):
return -sc.boxcox1p(-q, -c)
def _isf(self, q, c):
return -sc.boxcox(q, -c)
def _stats(self, c, moments='mv'):
if 'm' not in moments:
m = None
else:
m = _lazywhere(c < 1, (c,),
lambda xi: 1/(1 - xi),
np.inf)
if 'v' not in moments:
v = None
else:
v = _lazywhere(c < 1/2, (c,),
lambda xi: 1 / (1 - xi)**2 / (1 - 2*xi),
np.nan)
if 's' not in moments:
s = None
else:
s = _lazywhere(c < 1/3, (c,),
lambda xi: 2 * (1 + xi) * np.sqrt(1 - 2*xi) /
(1 - 3*xi),
np.nan)
if 'k' not in moments:
k = None
else:
k = _lazywhere(c < 1/4, (c,),
lambda xi: 3 * (1 - 2*xi) * (2*xi**2 + xi + 3) /
(1 - 3*xi) / (1 - 4*xi) - 3,
np.nan)
return m, v, s, k
def _munp(self, n, c):
def __munp(n, c):
val = 0.0
k = np.arange(0, n + 1)
for ki, cnk in zip(k, sc.comb(n, k)):
val = val + cnk * (-1) ** ki / (1.0 - c * ki)
return np.where(c * n < 1, val * (-1.0 / c) ** n, np.inf)
return _lazywhere(c != 0, (c,),
lambda c: __munp(n, c),
sc.gamma(n + 1))
def _entropy(self, c):
return 1. + c
genpareto = genpareto_gen(a=0.0, name='genpareto')
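# Editor's note: an illustrative sketch (not part of the library) of the
# c=0 special case stated in the docstring, where `genpareto` reduces to
# `expon`:
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x = np.linspace(0.0, 5.0, 6)
#     >>> np.allclose(stats.genpareto.cdf(x, c=0), stats.expon.cdf(x))
#     True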
class genexpon_gen(rv_continuous):
r"""A generalized exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genexpon` is:
.. math::
f(x, a, b, c) = (a + b (1 - \exp(-c x)))
\exp(-a x - b x + \frac{b}{c} (1-\exp(-c x)))
for :math:`x \ge 0`, :math:`a, b, c > 0`.
`genexpon` takes :math:`a`, :math:`b` and :math:`c` as shape parameters.
%(after_notes)s
References
----------
H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential
Distribution", Journal of the American Statistical Association, 1993.
N. Balakrishnan, "The Exponential Distribution: Theory, Methods and
Applications", Asit P. Basu.
%(example)s
"""
def _pdf(self, x, a, b, c):
# genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \
# exp(-a*x - b*x + b/c * (1-exp(-c*x)))
return (a + b*(-sc.expm1(-c*x)))*np.exp((-a-b)*x +
b*(-sc.expm1(-c*x))/c)
def _cdf(self, x, a, b, c):
return -sc.expm1((-a-b)*x + b*(-sc.expm1(-c*x))/c)
def _logpdf(self, x, a, b, c):
return np.log(a+b*(-sc.expm1(-c*x))) + (-a-b)*x+b*(-sc.expm1(-c*x))/c
genexpon = genexpon_gen(a=0.0, name='genexpon')
class genextreme_gen(rv_continuous):
r"""A generalized extreme value continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r
Notes
-----
For :math:`c=0`, `genextreme` is equal to `gumbel_r`.
The probability density function for `genextreme` is:
.. math::
f(x, c) = \begin{cases}
\exp(-\exp(-x)) \exp(-x) &\text{for } c = 0\\
\exp(-(1-c x)^{1/c}) (1-c x)^{1/c-1} &\text{for }
x \le 1/c, c > 0
\end{cases}
Note that several sources and software packages use the opposite
convention for the sign of the shape parameter :math:`c`.
`genextreme` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return np.where(abs(c) == np.inf, 0, 1)
def _get_support(self, c):
_b = np.where(c > 0, 1.0 / np.maximum(c, _XMIN), np.inf)
_a = np.where(c < 0, 1.0 / np.minimum(c, -_XMIN), -np.inf)
return _a, _b
def _loglogcdf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: sc.log1p(-c*x)/c, -x)
def _pdf(self, x, c):
# genextreme.pdf(x, c) =
# exp(-exp(-x))*exp(-x), for c==0
# exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1), for x \le 1/c, c > 0
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
cx = _lazywhere((x == x) & (c != 0), (x, c), lambda x, c: c*x, 0.0)
logex2 = sc.log1p(-cx)
logpex2 = self._loglogcdf(x, c)
pex2 = np.exp(logpex2)
# Handle special cases
np.putmask(logpex2, (c == 0) & (x == -np.inf), 0.0)
logpdf = np.where((cx == 1) | (cx == -np.inf),
-np.inf,
-pex2+logpex2-logex2)
np.putmask(logpdf, (c == 1) & (x == 1), 0.0)
return logpdf
def _logcdf(self, x, c):
return -np.exp(self._loglogcdf(x, c))
def _cdf(self, x, c):
return np.exp(self._logcdf(x, c))
def _sf(self, x, c):
return -sc.expm1(self._logcdf(x, c))
def _ppf(self, q, c):
x = -np.log(-np.log(q))
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.expm1(-c * x) / c, x)
def _isf(self, q, c):
x = -np.log(-sc.log1p(-q))
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.expm1(-c * x) / c, x)
def _stats(self, c):
g = lambda n: sc.gamma(n*c + 1)
g1 = g(1)
g2 = g(2)
g3 = g(3)
g4 = g(4)
g2mg12 = np.where(abs(c) < 1e-7, (c*np.pi)**2.0/6.0, g2-g1**2.0)
gam2k = np.where(abs(c) < 1e-7, np.pi**2.0/6.0,
sc.expm1(sc.gammaln(2.0*c+1.0)-2*sc.gammaln(c + 1.0))/c**2.0)
eps = 1e-14
gamk = np.where(abs(c) < eps, -_EULER, sc.expm1(sc.gammaln(c + 1))/c)
m = np.where(c < -1.0, np.nan, -gamk)
v = np.where(c < -0.5, np.nan, g1**2.0*gam2k)
# skewness
sk1 = _lazywhere(c >= -1./3,
(c, g1, g2, g3, g2mg12),
lambda c, g1, g2, g3, g2mg12:
np.sign(c)*(-g3 + (g2 + 2*g2mg12)*g1)/g2mg12**1.5,
fillvalue=np.nan)
sk = np.where(abs(c) <= eps**0.29, 12*np.sqrt(6)*_ZETA3/np.pi**3, sk1)
# kurtosis
ku1 = _lazywhere(c >= -1./4,
(g1, g2, g3, g4, g2mg12),
lambda g1, g2, g3, g4, g2mg12:
(g4 + (-4*g3 + 3*(g2 + g2mg12)*g1)*g1)/g2mg12**2,
fillvalue=np.nan)
ku = np.where(abs(c) <= (eps)**0.23, 12.0/5.0, ku1-3.0)
return m, v, sk, ku
def _fitstart(self, data):
# This is better than the default shape of (1,).
g = _skew(data)
if g < 0:
a = 0.5
else:
a = -0.5
return super(genextreme_gen, self)._fitstart(data, args=(a,))
def _munp(self, n, c):
k = np.arange(0, n+1)
vals = 1.0/c**n * np.sum(
sc.comb(n, k) * (-1)**k * sc.gamma(c*k + 1),
axis=0)
return np.where(c*n > -1, vals, np.inf)
def _entropy(self, c):
return _EULER*(1 - c) + 1
genextreme = genextreme_gen(name='genextreme')
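# Editor's note: an illustrative sketch (not part of the library) of the
# c=0 special case stated in the docstring, where `genextreme` coincides
# with `gumbel_r`:
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x = np.linspace(-2.0, 4.0, 7)
#     >>> np.allclose(stats.genextreme.pdf(x, 0), stats.gumbel_r.pdf(x))
#     True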
def _digammainv(y):
# Inverse of the digamma function (real positive arguments only).
# This function is used in the `fit` method of `gamma_gen`.
# The function uses either optimize.fsolve or optimize.newton
# to solve `sc.digamma(x) - y = 0`. There is probably room for
# improvement, but currently it works over a wide range of y:
# >>> y = 64*np.random.randn(1000000)
# >>> y.min(), y.max()
# (-311.43592651416662, 351.77388222276869)
# >>> x = [_digammainv(t) for t in y]
# >>> np.abs(sc.digamma(x) - y).max()
# 1.1368683772161603e-13
#
_em = 0.5772156649015328606065120
func = lambda x: sc.digamma(x) - y
if y > -0.125:
x0 = np.exp(y) + 0.5
if y < 10:
# Some experimentation shows that newton reliably converges
# much faster than fsolve in this y range. For larger y,
# newton sometimes fails to converge.
value = optimize.newton(func, x0, tol=1e-10)
return value
elif y > -3:
x0 = np.exp(y/2.332) + 0.08661
else:
x0 = 1.0 / (-y - _em)
value, info, ier, mesg = optimize.fsolve(func, x0, xtol=1e-11,
full_output=True)
if ier != 1:
raise RuntimeError("_digammainv: fsolve failed, y = %r" % y)
return value[0]
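# Editor's note: an illustrative round-trip check for the module-private
# helper above (a sketch, not part of the public API):
#
#     >>> import numpy as np
#     >>> from scipy import special
#     >>> x = _digammainv(0.5)
#     >>> np.isclose(special.digamma(x), 0.5)
#     True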
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
r"""A gamma continuous random variable.
%(before_notes)s
See Also
--------
erlang, expon
Notes
-----
The probability density function for `gamma` is:
.. math::
f(x, a) = \frac{x^{a-1} \exp(-x)}{\Gamma(a)}
for :math:`x \ge 0`, :math:`a > 0`. Here :math:`\Gamma(a)` refers to the
gamma function.
`gamma` takes ``a`` as a shape parameter for :math:`a`.
When :math:`a` is an integer, `gamma` reduces to the Erlang
distribution, and when :math:`a=1` to the exponential distribution.
%(after_notes)s
%(example)s
"""
def _rvs(self, a, size=None, random_state=None):
return random_state.standard_gamma(a, size)
def _pdf(self, x, a):
# gamma.pdf(x, a) = x**(a-1) * exp(-x) / gamma(a)
return np.exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return sc.xlogy(a-1.0, x) - x - sc.gammaln(a)
def _cdf(self, x, a):
return sc.gammainc(a, x)
def _sf(self, x, a):
return sc.gammaincc(a, x)
def _ppf(self, q, a):
return sc.gammaincinv(a, q)
def _stats(self, a):
return a, a, 2.0/np.sqrt(a), 6.0/a
def _entropy(self, a):
return sc.psi(a)*(1-a) + a + sc.gammaln(a)
def _fitstart(self, data):
# The skewness of the gamma distribution is `2 / np.sqrt(a)`,
# so skew**2 = 4/a. We invert that to estimate the shape `a`
# from the skewness
# of the data. The formula is regularized with 1e-8 in the
# denominator to allow for degenerate data where the skewness
# is close to 0.
a = 4 / (1e-8 + _skew(data)**2)
return super(gamma_gen, self)._fitstart(data, args=(a,))
@extend_notes_in_docstring(rv_continuous, notes="""\
When the location is fixed by using the argument `floc`, this
function uses explicit formulas or solves a simpler numerical
problem than the full ML optimization problem. So in that case,
the `optimizer`, `loc` and `scale` arguments are ignored.\n\n""")
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
if floc is None:
# loc is not fixed. Use the default fit method.
return super(gamma_gen, self).fit(data, *args, **kwds)
# We already have this value, so just pop it from kwds.
kwds.pop('floc', None)
f0 = _get_fixed_fit_value(kwds, ['f0', 'fa', 'fix_a'])
fscale = kwds.pop('fscale', None)
_remove_optimizer_parameters(kwds)
# Special case: loc is fixed.
if f0 is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Fixed location is handled by shifting the data.
data = np.asarray(data)
if not np.isfinite(data).all():
raise RuntimeError("The data contains non-finite values.")
if np.any(data <= floc):
raise FitDataError("gamma", lower=floc, upper=np.inf)
if floc != 0:
# Don't do the subtraction in-place, because `data` might be a
# view of the input array.
data = data - floc
xbar = data.mean()
# Three cases to handle:
# * shape and scale both free
# * shape fixed, scale free
# * shape free, scale fixed
if fscale is None:
# scale is free
if f0 is not None:
# shape is fixed
a = f0
else:
# shape and scale are both free.
# The MLE for the shape parameter `a` is the solution to:
# np.log(a) - sc.digamma(a) - np.log(xbar) +
# np.log(data).mean() = 0
s = np.log(xbar) - np.log(data).mean()
func = lambda a: np.log(a) - sc.digamma(a) - s
aest = (3-s + np.sqrt((s-3)**2 + 24*s)) / (12*s)
xa = aest*(1-0.4)
xb = aest*(1+0.4)
a = optimize.brentq(func, xa, xb, disp=0)
# The MLE for the scale parameter is just the data mean
# divided by the shape parameter.
scale = xbar / a
else:
# scale is fixed, shape is free
# The MLE for the shape parameter `a` is the solution to:
# sc.digamma(a) - np.log(data).mean() + np.log(fscale) = 0
c = np.log(data).mean() - np.log(fscale)
a = _digammainv(c)
scale = fscale
return a, floc, scale
gamma = gamma_gen(a=0.0, name='gamma')
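# Editor's note: an illustrative sketch (not part of the library) of the
# fixed-location fit path above; with floc=0 the shape is found with
# brentq and the scale in closed form:
#
#     >>> from scipy import stats
#     >>> data = stats.gamma.rvs(3.0, scale=2.0, size=1000, random_state=123)
#     >>> a, loc, scale = stats.gamma.fit(data, floc=0)
#     >>> loc == 0
#     True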
class erlang_gen(gamma_gen):
"""An Erlang continuous random variable.
%(before_notes)s
See Also
--------
gamma
Notes
-----
The Erlang distribution is a special case of the Gamma distribution, with
the shape parameter `a` an integer. Note that this restriction is not
enforced by `erlang`. It will, however, generate a warning the first time
a non-integer value is used for the shape parameter.
Refer to `gamma` for examples.
"""
def _argcheck(self, a):
allint = np.all(np.floor(a) == a)
if not allint:
# An Erlang distribution shouldn't really have a non-integer
# shape parameter, so warn the user.
warnings.warn(
'The shape parameter of the erlang distribution '
'has been given a non-integer value %r.' % (a,),
RuntimeWarning)
return a > 0
def _fitstart(self, data):
# Override gamma_gen._fitstart so that an integer initial value is
# used, clipped below at 1 so the shape stays valid. (Also regularize
# the division, to avoid issues when _skew(data) is 0 or close to 0.)
a = max(1, int(4.0 / (1e-8 + _skew(data)**2)))
return super(gamma_gen, self)._fitstart(data, args=(a,))
# Trivial override of the fit method, so we can monkey-patch its
# docstring.
def fit(self, data, *args, **kwds):
return super(erlang_gen, self).fit(data, *args, **kwds)
if fit.__doc__:
fit.__doc__ = (rv_continuous.fit.__doc__ +
"""
Notes
-----
The Erlang distribution is generally defined to have integer values
for the shape parameter. This is not enforced by the `erlang` class.
When fitting the distribution, it will generally return a non-integer
value for the shape parameter. By using the keyword argument
`f0=<integer>`, the fit method can be constrained to fit the data to
a specific integer shape parameter.
""")
erlang = erlang_gen(a=0.0, name='erlang')
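# Editor's note: an illustrative sketch (not part of the library) of the
# `f0` constraint described in the fit docstring above:
#
#     >>> from scipy import stats
#     >>> data = stats.erlang.rvs(3, scale=1.5, size=1000, random_state=0)
#     >>> a, loc, scale = stats.erlang.fit(data, f0=3)
#     >>> a == 3
#     True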
class gengamma_gen(rv_continuous):
r"""A generalized gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gengamma` is:
.. math::
f(x, a, c) = \frac{|c| x^{c a-1} \exp(-x^c)}{\Gamma(a)}
for :math:`x \ge 0`, :math:`a > 0`, and :math:`c \ne 0`.
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
`gengamma` takes :math:`a` and :math:`c` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, c):
return (a > 0) & (c != 0)
def _pdf(self, x, a, c):
# gengamma.pdf(x, a, c) = abs(c) * x**(c*a-1) * exp(-x**c) / gamma(a)
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
return np.log(abs(c)) + sc.xlogy(c*a - 1, x) - x**c - sc.gammaln(a)
def _cdf(self, x, a, c):
xc = x**c
val1 = sc.gammainc(a, xc)
val2 = sc.gammaincc(a, xc)
return np.where(c > 0, val1, val2)
def _sf(self, x, a, c):
xc = x**c
val1 = sc.gammainc(a, xc)
val2 = sc.gammaincc(a, xc)
return np.where(c > 0, val2, val1)
def _ppf(self, q, a, c):
val1 = sc.gammaincinv(a, q)
val2 = sc.gammainccinv(a, q)
return np.where(c > 0, val1, val2)**(1.0/c)
def _isf(self, q, a, c):
val1 = sc.gammaincinv(a, q)
val2 = sc.gammainccinv(a, q)
return np.where(c > 0, val2, val1)**(1.0/c)
def _munp(self, n, a, c):
# Pochhammer symbol: sc.poch(a, n) = gamma(a+n)/gamma(a)
return sc.poch(a, n*1.0/c)
def _entropy(self, a, c):
val = sc.psi(a)
return a*(1-val) + 1.0/c*val + sc.gammaln(a) - np.log(abs(c))
gengamma = gengamma_gen(a=0.0, name='gengamma')
class genhalflogistic_gen(rv_continuous):
r"""A generalized half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genhalflogistic` is:
.. math::
f(x, c) = \frac{2 (1 - c x)^{1/(c-1)}}{[1 + (1 - c x)^{1/c}]^2}
for :math:`0 \le x \le 1/c`, and :math:`c > 0`.
`genhalflogistic` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return c > 0
def _get_support(self, c):
return self.a, 1.0/c
def _pdf(self, x, c):
# genhalflogistic.pdf(x, c) =
# 2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
limit = 1.0/c
tmp = np.asarray(1-c*x)
tmp0 = tmp**(limit-1)
tmp2 = tmp0*tmp
return 2*tmp0 / (1+tmp2)**2
def _cdf(self, x, c):
limit = 1.0/c
tmp = np.asarray(1-c*x)
tmp2 = tmp**(limit)
return (1.0-tmp2) / (1+tmp2)
def _ppf(self, q, c):
return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
def _entropy(self, c):
return 2 - (2*c+1)*np.log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic')
class gompertz_gen(rv_continuous):
r"""A Gompertz (or truncated Gumbel) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gompertz` is:
.. math::
f(x, c) = c \exp(x) \exp(-c (e^x-1))
for :math:`x \ge 0`, :math:`c > 0`.
`gompertz` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1))
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return np.log(c) + x - c * sc.expm1(x)
def _cdf(self, x, c):
return -sc.expm1(-c * sc.expm1(x))
def _ppf(self, q, c):
return sc.log1p(-1.0 / c * sc.log1p(-q))
def _entropy(self, c):
return 1.0 - np.log(c) - np.exp(c)*sc.expn(1, c)
gompertz = gompertz_gen(a=0.0, name='gompertz')
class gumbel_r_gen(rv_continuous):
r"""A right-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_l, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_r` is:
.. math::
f(x) = \exp(-(x + e^{-x}))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# gumbel_r.pdf(x) = exp(-(x + exp(-x)))
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return -x - np.exp(-x)
def _cdf(self, x):
return np.exp(-np.exp(-x))
def _logcdf(self, x):
return -np.exp(-x)
def _ppf(self, q):
return -np.log(-np.log(q))
def _stats(self):
return _EULER, np.pi*np.pi/6.0, 12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
def _entropy(self):
# https://en.wikipedia.org/wiki/Gumbel_distribution
return _EULER + 1.
gumbel_r = gumbel_r_gen(name='gumbel_r')
class gumbel_l_gen(rv_continuous):
r"""A left-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_l` is:
.. math::
f(x) = \exp(x - e^x)
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# gumbel_l.pdf(x) = exp(x - exp(x))
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return x - np.exp(x)
def _cdf(self, x):
return -sc.expm1(-np.exp(x))
def _ppf(self, q):
return np.log(-sc.log1p(-q))
def _logsf(self, x):
return -np.exp(x)
def _sf(self, x):
return np.exp(-np.exp(x))
def _isf(self, x):
return np.log(-np.log(x))
def _stats(self):
return -_EULER, np.pi*np.pi/6.0, \
-12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return _EULER + 1.
gumbel_l = gumbel_l_gen(name='gumbel_l')
class halfcauchy_gen(rv_continuous):
r"""A Half-Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfcauchy` is:
.. math::
f(x) = \frac{2}{\pi (1 + x^2)}
for :math:`x \ge 0`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# halfcauchy.pdf(x) = 2 / (pi * (1 + x**2))
return 2.0/np.pi/(1.0+x*x)
def _logpdf(self, x):
return np.log(2.0/np.pi) - sc.log1p(x*x)
def _cdf(self, x):
return 2.0/np.pi*np.arctan(x)
def _ppf(self, q):
return np.tan(np.pi/2*q)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
def _entropy(self):
return np.log(2*np.pi)
halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy')
class halflogistic_gen(rv_continuous):
r"""A half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halflogistic` is:
.. math::
f(x) = \frac{ 2 e^{-x} }{ (1+e^{-x})^2 }
= \frac{1}{2} \text{sech}(x/2)^2
for :math:`x \ge 0`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2
# = 1/2 * sech(x/2)**2
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return np.log(2) - x - 2. * sc.log1p(np.exp(-x))
def _cdf(self, x):
return np.tanh(x/2.0)
def _ppf(self, q):
return 2*np.arctanh(q)
def _munp(self, n):
if n == 1:
return 2*np.log(2)
if n == 2:
return np.pi*np.pi/3.0
if n == 3:
return 9*_ZETA3
if n == 4:
return 7*np.pi**4 / 15.0
return 2*(1-pow(2.0, 1-n))*sc.gamma(n+1)*sc.zeta(n, 1)
def _entropy(self):
return 2-np.log(2)
halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
class halfnorm_gen(rv_continuous):
r"""A half-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfnorm` is:
.. math::
f(x) = \sqrt{2/\pi} \exp(-x^2 / 2)
for :math:`x >= 0`.
`halfnorm` is a special case of `chi` with ``df=1``.
%(after_notes)s
%(example)s
"""
def _rvs(self, size=None, random_state=None):
return abs(random_state.standard_normal(size=size))
def _pdf(self, x):
# halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)
return np.sqrt(2.0/np.pi)*np.exp(-x*x/2.0)
def _logpdf(self, x):
return 0.5 * np.log(2.0/np.pi) - x*x/2.0
def _cdf(self, x):
return _norm_cdf(x)*2-1.0
def _ppf(self, q):
return sc.ndtri((1+q)/2.0)
def _stats(self):
return (np.sqrt(2.0/np.pi),
1-2.0/np.pi,
np.sqrt(2)*(4-np.pi)/(np.pi-2)**1.5,
8*(np.pi-3)/(np.pi-2)**2)
def _entropy(self):
return 0.5*np.log(np.pi/2.0)+0.5
halfnorm = halfnorm_gen(a=0.0, name='halfnorm')
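# Editor's note: an illustrative sketch (not part of the library) of the
# special case noted in the docstring, `halfnorm` == `chi` with df=1:
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x = np.linspace(0.0, 3.0, 7)
#     >>> np.allclose(stats.halfnorm.pdf(x), stats.chi.pdf(x, 1))
#     True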
class hypsecant_gen(rv_continuous):
r"""A hyperbolic secant continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `hypsecant` is:
.. math::
f(x) = \frac{1}{\pi} \text{sech}(x)
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# hypsecant.pdf(x) = 1/pi * sech(x)
return 1.0/(np.pi*np.cosh(x))
def _cdf(self, x):
return 2.0/np.pi*np.arctan(np.exp(x))
def _ppf(self, q):
return np.log(np.tan(np.pi*q/2.0))
def _stats(self):
return 0, np.pi*np.pi/4, 0, 2
def _entropy(self):
return np.log(2*np.pi)
hypsecant = hypsecant_gen(name='hypsecant')
class gausshyper_gen(rv_continuous):
r"""A Gauss hypergeometric continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gausshyper` is:
.. math::
f(x, a, b, c, z) = C x^{a-1} (1-x)^{b-1} (1+zx)^{-c}
for :math:`0 \le x \le 1`, :math:`a > 0`, :math:`b > 0`, and
:math:`C = \frac{1}{B(a, b) F[2, 1](c, a; a+b; -z)}`.
:math:`F[2, 1]` is the Gauss hypergeometric function
`scipy.special.hyp2f1`.
`gausshyper` takes :math:`a`, :math:`b`, :math:`c` and :math:`z` as shape
parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b, c, z):
return (a > 0) & (b > 0) & (c == c) & (z == z)
def _pdf(self, x, a, b, c, z):
# gausshyper.pdf(x, a, b, c, z) =
# C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
Cinv = sc.gamma(a)*sc.gamma(b)/sc.gamma(a+b)*sc.hyp2f1(c, a, a+b, -z)
return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
def _munp(self, n, a, b, c, z):
fac = sc.beta(n+a, b) / sc.beta(a, b)
num = sc.hyp2f1(c, a+n, a+b+n, -z)
den = sc.hyp2f1(c, a, a+b, -z)
return fac*num / den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper')
class invgamma_gen(rv_continuous):
r"""An inverted gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgamma` is:
.. math::
f(x, a) = \frac{x^{-a-1}}{\Gamma(a)} \exp(-\frac{1}{x})
for :math:`x >= 0`, :math:`a > 0`. :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
`invgamma` takes ``a`` as a shape parameter for :math:`a`.
`invgamma` is a special case of `gengamma` with ``c=-1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, a):
# invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)
return np.exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return -(a+1) * np.log(x) - sc.gammaln(a) - 1.0/x
def _cdf(self, x, a):
return sc.gammaincc(a, 1.0 / x)
def _ppf(self, q, a):
return 1.0 / sc.gammainccinv(a, q)
def _sf(self, x, a):
return sc.gammainc(a, 1.0 / x)
def _isf(self, q, a):
return 1.0 / sc.gammaincinv(a, q)
def _stats(self, a, moments='mvsk'):
m1 = _lazywhere(a > 1, (a,), lambda x: 1. / (x - 1.), np.inf)
m2 = _lazywhere(a > 2, (a,), lambda x: 1. / (x - 1.)**2 / (x - 2.),
np.inf)
g1, g2 = None, None
if 's' in moments:
g1 = _lazywhere(
a > 3, (a,),
lambda x: 4. * np.sqrt(x - 2.) / (x - 3.), np.nan)
if 'k' in moments:
g2 = _lazywhere(
a > 4, (a,),
lambda x: 6. * (5. * x - 11.) / (x - 3.) / (x - 4.), np.nan)
return m1, m2, g1, g2
def _entropy(self, a):
return a - (a+1.0) * sc.psi(a) + sc.gammaln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma')
# scale is gamma from DATAPLOT and B from Regress
class invgauss_gen(rv_continuous):
r"""An inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgauss` is:
.. math::
f(x, \mu) = \frac{1}{\sqrt{2 \pi x^3}}
\exp(-\frac{(x-\mu)^2}{2 x \mu^2})
for :math:`x >= 0` and :math:`\mu > 0`.
`invgauss` takes ``mu`` as a shape parameter for :math:`\mu`.
%(after_notes)s
When :math:`\mu` is too small, evaluating the cumulative distribution
function will be inaccurate due to ``cdf(mu -> 0) = inf * 0``.
NaNs are returned for :math:`\mu \le 0.0028`.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, mu, size=None, random_state=None):
return random_state.wald(mu, 1.0, size=size)
def _pdf(self, x, mu):
# invgauss.pdf(x, mu) =
# 1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
return 1.0/np.sqrt(2*np.pi*x**3.0)*np.exp(-1.0/(2*x)*((x-mu)/mu)**2)
def _logpdf(self, x, mu):
return -0.5*np.log(2*np.pi) - 1.5*np.log(x) - ((x-mu)/mu)**2/(2*x)
def _cdf(self, x, mu):
fac = np.sqrt(1.0/x)
# Numerical accuracy for small `mu` is bad. See #869.
C1 = _norm_cdf(fac*(x-mu)/mu)
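# the two exp(1.0/mu) factors below multiply to exp(2.0/mu), matching
# the standard inverse Gaussian cdf Phi(...) + exp(2/mu)*Phi(...)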
C1 += np.exp(1.0/mu) * _norm_cdf(-fac*(x+mu)/mu) * np.exp(1.0/mu)
return C1
def _stats(self, mu):
return mu, mu**3.0, 3*np.sqrt(mu), 15*mu
invgauss = invgauss_gen(a=0.0, name='invgauss')
class geninvgauss_gen(rv_continuous):
r"""A Generalized Inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `geninvgauss` is:
.. math::
f(x, p, b) = x^{p-1} \exp(-b (x + 1/x) / 2) / (2 K_p(b))
where `x > 0`, and the parameters `p, b` satisfy `b > 0` ([1]_).
:math:`K_p` is the modified Bessel function of second kind of order `p`
(`scipy.special.kv`).
%(after_notes)s
The inverse Gaussian distribution `stats.invgauss(mu)` is a special case of
`geninvgauss` with `p = -1/2`, `b = 1 / mu` and `scale = mu`.
Generating random variates is challenging for this distribution. The
implementation is based on [2]_.
References
----------
.. [1] O. Barndorff-Nielsen, P. Blaesild, C. Halgreen, "First hitting time
models for the generalized inverse gaussian distribution",
Stochastic Processes and their Applications 7, pp. 49--54, 1978.
.. [2] W. Hoermann and J. Leydold, "Generating generalized inverse Gaussian
random variates", Statistics and Computing, 24(4), p. 547--557, 2014.
%(example)s
"""
def _argcheck(self, p, b):
return (p == p) & (b > 0)
def _logpdf(self, x, p, b):
# kve instead of kv works better for large values of b
# warn if kve produces infinite values and replace by nan
# otherwise c = -inf and the results are often incorrect
@np.vectorize
def logpdf_single(x, p, b):
return _stats.geninvgauss_logpdf(x, p, b)
z = logpdf_single(x, p, b)
if np.isnan(z).any():
msg = ("Infinite values encountered in scipy.special.kve(p, b). "
"Values replaced by NaN to avoid incorrect results.")
warnings.warn(msg, RuntimeWarning)
return z
def _pdf(self, x, p, b):
# relying on logpdf avoids overflow of x**(p-1) for large x and p
return np.exp(self._logpdf(x, p, b))
def _cdf(self, x, *args):
_a, _b = self._get_support(*args)
@np.vectorize
def _cdf_single(x, *args):
p, b = args
user_data = np.array([p, b], float).ctypes.data_as(ctypes.c_void_p)
llc = LowLevelCallable.from_cython(_stats, '_geninvgauss_pdf', user_data)
return integrate.quad(llc, _a, x)[0]
return _cdf_single(x, *args)
def _logquasipdf(self, x, p, b):
# log of the quasi-density (w/o normalizing constant) used in _rvs
return _lazywhere(x > 0, (x, p, b),
lambda x, p, b: (p - 1)*np.log(x) - b*(x + 1/x)/2,
-np.inf)
def _rvs(self, p, b, size=None, random_state=None):
# if p and b are scalar, use _rvs_scalar, otherwise need to create
# output by iterating over parameters
if np.isscalar(p) and np.isscalar(b):
out = self._rvs_scalar(p, b, size, random_state)
elif p.size == 1 and b.size == 1:
out = self._rvs_scalar(p.item(), b.item(), size, random_state)
else:
# When this method is called, size will be a (possibly empty)
# tuple of integers. It will not be None; if `size=None` is passed
# to `rvs()`, size will be the empty tuple ().
p, b = np.broadcast_arrays(p, b)
# p and b now have the same shape.
# `shp` is the shape of the blocks of random variates that are
# generated for each combination of parameters associated with
# broadcasting p and b.
# bc is a tuple the same length as size. The values
# in bc are bools. If bc[j] is True, it means that the
# entire axis is filled in for a given combination of the
# broadcast arguments.
shp, bc = _check_shape(p.shape, size)
# `numsamples` is the total number of variates to be generated
# for each combination of the input arguments.
numsamples = int(np.prod(shp))
# `out` is the array to be returned. It is filled in in the
# loop below.
out = np.empty(size)
it = np.nditer([p, b],
flags=['multi_index'],
op_flags=[['readonly'], ['readonly']])
while not it.finished:
# Convert the iterator's multi_index into an index into the
# `out` array where the call to _rvs_scalar() will be stored.
# Where bc is True, we use a full slice; otherwise we use the
# index value from it.multi_index. len(it.multi_index) might
# be less than len(bc), and in that case we want to align these
# two sequences to the right, so the loop variable j runs from
# -len(size) to 0. This doesn't cause an IndexError, as
# bc[j] will be True in those cases where it.multi_index[j]
# would cause an IndexError.
idx = tuple((it.multi_index[j] if not bc[j] else slice(None))
for j in range(-len(size), 0))
out[idx] = self._rvs_scalar(it[0], it[1], numsamples, random_state).reshape(shp)
it.iternext()
if size == ():
out = out[()]
return out
def _rvs_scalar(self, p, b, numsamples, random_state):
# following [2], the quasi-pdf is used instead of the pdf for the
# generation of rvs
invert_res = False
if not numsamples:
numsamples = 1
if p < 0:
# note: if X is geninvgauss(p, b), then 1/X is geninvgauss(-p, b)
p = -p
invert_res = True
m = self._mode(p, b)
# determine method to be used following [2]
ratio_unif = True
if p >= 1 or b > 1:
# ratio of uniforms with mode shift below
mode_shift = True
elif b >= min(0.5, 2 * np.sqrt(1 - p) / 3):
# ratio of uniforms without mode shift below
mode_shift = False
else:
# new algorithm in [2]
ratio_unif = False
# prepare sampling of rvs
size1d = tuple(np.atleast_1d(numsamples))
N = np.prod(size1d) # number of rvs needed, reshape upon return
x = np.zeros(N)
simulated = 0
if ratio_unif:
# use ratio of uniforms method
if mode_shift:
a2 = -2 * (p + 1) / b - m
a1 = 2 * m * (p - 1) / b - 1
# find roots of x**3 + a2*x**2 + a1*x + m (Cardano's formula)
p1 = a1 - a2**2 / 3
q1 = 2 * a2**3 / 27 - a2 * a1 / 3 + m
phi = np.arccos(-q1 * np.sqrt(-27 / p1**3) / 2)
s1 = -np.sqrt(-4 * p1 / 3)
root1 = s1 * np.cos(phi / 3 + np.pi / 3) - a2 / 3
root2 = -s1 * np.cos(phi / 3) - a2 / 3
# root3 = s1 * np.cos(phi / 3 - np.pi / 3) - a2 / 3
# if g is the quasipdf, rescale: g(x) / g(m) which we can write
# as exp(log(g(x)) - log(g(m))). This is important
# since for large values of p and b, g cannot be evaluated.
# denote the rescaled quasipdf by h
lm = self._logquasipdf(m, p, b)
d1 = self._logquasipdf(root1, p, b) - lm
d2 = self._logquasipdf(root2, p, b) - lm
# compute the bounding rectangle w.r.t. h. Note that
# np.exp(0.5*d1) = np.sqrt(g(root1)/g(m)) = np.sqrt(h(root1))
vmin = (root1 - m) * np.exp(0.5 * d1)
vmax = (root2 - m) * np.exp(0.5 * d2)
umax = 1 # umax = sqrt(h(m)) = 1
logqpdf = lambda x: self._logquasipdf(x, p, b) - lm
c = m
else:
# ratio of uniforms without mode shift
# compute np.sqrt(quasipdf(m))
umax = np.exp(0.5*self._logquasipdf(m, p, b))
xplus = ((1 + p) + np.sqrt((1 + p)**2 + b**2))/b
vmin = 0
# compute xplus * np.sqrt(quasipdf(xplus))
vmax = xplus * np.exp(0.5 * self._logquasipdf(xplus, p, b))
c = 0
logqpdf = lambda x: self._logquasipdf(x, p, b)
if vmin >= vmax:
raise ValueError("vmin must be smaller than vmax.")
if umax <= 0:
raise ValueError("umax must be positive.")
i = 1
while simulated < N:
k = N - simulated
# simulate uniform rvs on [0, umax] and [vmin, vmax]
u = umax * random_state.uniform(size=k)
v = random_state.uniform(size=k)
v = vmin + (vmax - vmin) * v
rvs = v / u + c
# rewrite acceptance condition u**2 <= pdf(rvs) by taking logs
accept = (2*np.log(u) <= logqpdf(rvs))
num_accept = np.sum(accept)
if num_accept > 0:
x[simulated:(simulated + num_accept)] = rvs[accept]
simulated += num_accept
if (simulated == 0) and (i*N >= 50000):
msg = ("Not a single random variate could be generated "
"in {} attempts. Sampling does not appear to "
"work for the provided parameters.".format(i*N))
raise RuntimeError(msg)
i += 1
else:
# use new algorithm in [2]
x0 = b / (1 - p)
xs = np.max((x0, 2 / b))
k1 = np.exp(self._logquasipdf(m, p, b))
A1 = k1 * x0
if x0 < 2 / b:
k2 = np.exp(-b)
if p > 0:
A2 = k2 * ((2 / b)**p - x0**p) / p
else:
A2 = k2 * np.log(2 / b**2)
else:
k2, A2 = 0, 0
k3 = xs**(p - 1)
A3 = 2 * k3 * np.exp(-xs * b / 2) / b
A = A1 + A2 + A3
# [2]: rejection constant is < 2.73; so expected runtime is finite
while simulated < N:
k = N - simulated
h, rvs = np.zeros(k), np.zeros(k)
# simulate uniform rvs on [x1, x2] and [0, y2]
u = random_state.uniform(size=k)
v = A * random_state.uniform(size=k)
cond1 = v <= A1
cond2 = np.logical_not(cond1) & (v <= A1 + A2)
cond3 = np.logical_not(cond1 | cond2)
# subdomain (0, x0)
rvs[cond1] = x0 * v[cond1] / A1
h[cond1] = k1
# subdomain (x0, 2 / b)
if p > 0:
rvs[cond2] = (x0**p + (v[cond2] - A1) * p / k2)**(1 / p)
else:
rvs[cond2] = b * np.exp((v[cond2] - A1) * np.exp(b))
h[cond2] = k2 * rvs[cond2]**(p - 1)
# subdomain (xs, infinity)
z = np.exp(-xs * b / 2) - b * (v[cond3] - A1 - A2) / (2 * k3)
rvs[cond3] = -2 / b * np.log(z)
h[cond3] = k3 * np.exp(-rvs[cond3] * b / 2)
# apply rejection method
accept = (np.log(u * h) <= self._logquasipdf(rvs, p, b))
num_accept = sum(accept)
if num_accept > 0:
x[simulated:(simulated + num_accept)] = rvs[accept]
simulated += num_accept
rvs = np.reshape(x, size1d)
if invert_res:
rvs = 1 / rvs
return rvs
def _mode(self, p, b):
# distinguish cases to avoid catastrophic cancellation (see [2])
if p < 1:
return b / (np.sqrt((p - 1)**2 + b**2) + 1 - p)
else:
return (np.sqrt((1 - p)**2 + b**2) - (1 - p)) / b
def _munp(self, n, p, b):
num = sc.kve(p + n, b)
denom = sc.kve(p, b)
inf_vals = np.isinf(num) | np.isinf(denom)
if inf_vals.any():
msg = ("Infinite values encountered in the moment calculation "
"involving scipy.special.kve. Values replaced by NaN to "
"avoid incorrect results.")
warnings.warn(msg, RuntimeWarning)
m = np.full_like(num, np.nan, dtype=np.double)
m[~inf_vals] = num[~inf_vals] / denom[~inf_vals]
else:
m = num / denom
return m
geninvgauss = geninvgauss_gen(a=0.0, name="geninvgauss")
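# Editor's note: an illustrative sketch (not part of the library) of the
# special case noted in the docstring, invgauss(mu) ==
# geninvgauss(p=-1/2, b=1/mu, scale=mu):
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> mu = 2.0
#     >>> x = np.linspace(0.5, 5.0, 5)
#     >>> np.allclose(stats.geninvgauss.pdf(x, p=-0.5, b=1/mu, scale=mu),
#     ...             stats.invgauss.pdf(x, mu))
#     True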
class norminvgauss_gen(rv_continuous):
r"""A Normal Inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `norminvgauss` is:
.. math::
f(x, a, b) = \frac{a \, K_1(a \sqrt{1 + x^2})}{\pi \sqrt{1 + x^2}} \,
\exp(\sqrt{a^2 - b^2} + b x)
where :math:`x` is a real number, the parameter :math:`a` is the tail
heaviness and :math:`b` is the asymmetry parameter satisfying
:math:`a > 0` and :math:`|b| <= a`.
:math:`K_1` is the modified Bessel function of second kind
(`scipy.special.k1`).
%(after_notes)s
A normal inverse Gaussian random variable `Y` with parameters `a` and `b`
can be expressed as a normal mean-variance mixture:
`Y = b * V + sqrt(V) * X` where `X` is `norm(0,1)` and `V` is
`invgauss(mu=1/sqrt(a**2 - b**2))`. This representation is used
to generate random variates.
References
----------
O. Barndorff-Nielsen, "Hyperbolic Distributions and Distributions on
Hyperbolae", Scandinavian Journal of Statistics, Vol. 5(3),
pp. 151-157, 1978.
O. Barndorff-Nielsen, "Normal Inverse Gaussian Distributions and Stochastic
Volatility Modelling", Scandinavian Journal of Statistics, Vol. 24,
pp. 1-13, 1997.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _argcheck(self, a, b):
return (a > 0) & (np.absolute(b) < a)
def _pdf(self, x, a, b):
gamma = np.sqrt(a**2 - b**2)
fac1 = a / np.pi * np.exp(gamma)
sq = np.hypot(1, x) # reduce overflows
return fac1 * sc.k1e(a * sq) * np.exp(b*x - a*sq) / sq
def _rvs(self, a, b, size=None, random_state=None):
# note: Y = b * V + sqrt(V) * X is norminvgauss(a, b) if X is standard
# normal and V is invgauss(mu=1/sqrt(a**2 - b**2))
gamma = np.sqrt(a**2 - b**2)
ig = invgauss.rvs(mu=1/gamma, size=size, random_state=random_state)
return b * ig + np.sqrt(ig) * norm.rvs(size=size, random_state=random_state)
def _stats(self, a, b):
gamma = np.sqrt(a**2 - b**2)
mean = b / gamma
variance = a**2 / gamma**3
skewness = 3.0 * b / (a * np.sqrt(gamma))
kurtosis = 3.0 * (1 + 4 * b**2 / a**2) / gamma
return mean, variance, skewness, kurtosis
norminvgauss = norminvgauss_gen(name="norminvgauss")
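# Editor's note: an illustrative sketch (not part of the library) of the
# mean-variance mixture representation used by _rvs above; the sample
# mean of Y should approach the norminvgauss mean b/gamma:
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> a, b = 2.0, 1.0
#     >>> gam = np.sqrt(a**2 - b**2)
#     >>> rng = np.random.RandomState(12345)
#     >>> V = stats.invgauss.rvs(mu=1/gam, size=100000, random_state=rng)
#     >>> Y = b*V + np.sqrt(V)*stats.norm.rvs(size=100000, random_state=rng)
#     >>> np.isclose(Y.mean(), b/gam, atol=0.05)
#     True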
class invweibull_gen(rv_continuous):
u"""An inverted Weibull continuous random variable.
This distribution is also known as the Fréchet distribution or the
type II extreme value distribution.
%(before_notes)s
Notes
-----
The probability density function for `invweibull` is:
.. math::
f(x, c) = c x^{-c-1} \\exp(-x^{-c})
for :math:`x > 0`, :math:`c > 0`.
`invweibull` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
F.R.S. de Gusmao, E.M.M Ortega and G.M. Cordeiro, "The generalized inverse
Weibull distribution", Stat. Papers, vol. 52, pp. 591-619, 2011.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c):
# invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
xc1 = np.power(x, -c - 1.0)
xc2 = np.power(x, -c)
xc2 = np.exp(-xc2)
return c * xc1 * xc2
def _cdf(self, x, c):
xc1 = np.power(x, -c)
return np.exp(-xc1)
def _ppf(self, q, c):
return np.power(-np.log(q), -1.0/c)
def _munp(self, n, c):
return sc.gamma(1 - n / c)
def _entropy(self, c):
return 1+_EULER + _EULER / c - np.log(c)
invweibull = invweibull_gen(a=0, name='invweibull')
class johnsonsb_gen(rv_continuous):
r"""A Johnson SB continuous random variable.
%(before_notes)s
See Also
--------
johnsonsu
Notes
-----
The probability density function for `johnsonsb` is:
.. math::
f(x, a, b) = \frac{b}{x(1-x)} \phi(a + b \log \frac{x}{1-x} )
for :math:`0 <= x <= 1` and :math:`a, b > 0`, and :math:`\phi` is the normal
pdf.
`johnsonsb` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _pdf(self, x, a, b):
# johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))
trm = _norm_pdf(a + b*np.log(x/(1.0-x)))
return b*1.0/(x*(1-x))*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b*np.log(x/(1.0-x)))
def _ppf(self, q, a, b):
return 1.0 / (1 + np.exp(-1.0 / b * (_norm_ppf(q) - a)))
johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb')
class johnsonsu_gen(rv_continuous):
r"""A Johnson SU continuous random variable.
%(before_notes)s
See Also
--------
johnsonsb
Notes
-----
The probability density function for `johnsonsu` is:
.. math::
f(x, a, b) = \frac{b}{\sqrt{x^2 + 1}}
\phi(a + b \log(x + \sqrt{x^2 + 1}))
for all real :math:`x` and :math:`a, b > 0`; :math:`\phi` is the normal pdf.
`johnsonsu` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _pdf(self, x, a, b):
# johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) *
# phi(a + b * log(x + sqrt(x**2 + 1)))
x2 = x*x
trm = _norm_pdf(a + b * np.log(x + np.sqrt(x2+1)))
return b*1.0/np.sqrt(x2+1.0)*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b * np.log(x + np.sqrt(x*x + 1)))
def _ppf(self, q, a, b):
return np.sinh((_norm_ppf(q) - a) / b)
johnsonsu = johnsonsu_gen(name='johnsonsu')
class laplace_gen(rv_continuous):
r"""A Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `laplace` is
.. math::
f(x) = \frac{1}{2} \exp(-|x|)
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _rvs(self, size=None, random_state=None):
return random_state.laplace(0, 1, size=size)
def _pdf(self, x):
# laplace.pdf(x) = 1/2 * exp(-abs(x))
return 0.5*np.exp(-abs(x))
def _cdf(self, x):
return np.where(x > 0, 1.0-0.5*np.exp(-x), 0.5*np.exp(x))
def _ppf(self, q):
return np.where(q > 0.5, -np.log(2*(1-q)), np.log(2*q))
def _stats(self):
return 0, 2, 0, 3
def _entropy(self):
return np.log(2)+1
@replace_notes_in_docstring(rv_continuous, notes="""\
This function uses explicit formulas for the maximum likelihood
estimation of the Laplace distribution parameters, so the keyword
arguments `loc`, `scale`, and `optimizer` are ignored.\n\n""")
def fit(self, data, *args, **kwds):
floc = kwds.pop('floc', None)
fscale = kwds.pop('fscale', None)
_check_fit_input_parameters(data, args,
kwds, fixed_param=(floc, fscale))
# MLE for the laplace distribution
if floc is None:
loc = np.median(data)
else:
loc = floc
if fscale is None:
scale = (np.sum(np.abs(data - loc))) / len(data)
else:
scale = fscale
# Source: Statistical Distributions, 3rd Edition. Evans, Hastings,
# and Peacock (2000), Page 124
return loc, scale
laplace = laplace_gen(name='laplace')
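# Editor's note: an illustrative sketch (not part of the library) of the
# closed-form MLE above: loc is the sample median and scale is the mean
# absolute deviation about it:
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> data = stats.laplace.rvs(loc=2.0, scale=0.5, size=1000, random_state=0)
#     >>> loc, scale = stats.laplace.fit(data)
#     >>> np.isclose(loc, np.median(data))
#     True
#     >>> np.isclose(scale, np.mean(np.abs(data - loc)))
#     True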
def _check_fit_input_parameters(data, args, kwds, fixed_param):
if len(args) > 0:
raise TypeError("Too many arguments.")
_remove_optimizer_parameters(kwds)
if None not in fixed_param:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise RuntimeError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if not np.isfinite(data).all():
raise RuntimeError("The data contains non-finite values.")
class levy_gen(rv_continuous):
r"""A Levy continuous random variable.
%(before_notes)s
See Also
--------
levy_stable, levy_l
Notes
-----
The probability density function for `levy` is:
.. math::
f(x) = \frac{1}{\sqrt{2\pi x^3}} \exp\left(-\frac{1}{2x}\right)
for :math:`x >= 0`.
This is the same as the Levy-stable distribution with :math:`a=1/2` and
:math:`b=1`.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x):
# levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
return 1 / np.sqrt(2*np.pi*x) / x * np.exp(-1/(2*x))
def _cdf(self, x):
# Equivalent to 2*norm.sf(np.sqrt(1/x))
return sc.erfc(np.sqrt(0.5 / x))
def _ppf(self, q):
# Equivalent to 1.0/(norm.isf(q/2)**2) or 0.5/(erfcinv(q)**2)
val = -sc.ndtri(q/2)
return 1.0 / (val * val)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
levy = levy_gen(a=0.0, name="levy")
class levy_l_gen(rv_continuous):
r"""A left-skewed Levy continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_stable
Notes
-----
The probability density function for `levy_l` is:
.. math::
f(x) = \frac{1}{|x| \sqrt{2\pi |x|}} \exp{ \left(-\frac{1}{2|x|} \right)}
for :math:`x <= 0`.
This is the same as the Levy-stable distribution with :math:`a=1/2` and
:math:`b=-1`.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x):
# levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
ax = abs(x)
return 1/np.sqrt(2*np.pi*ax)/ax*np.exp(-1/(2*ax))
def _cdf(self, x):
ax = abs(x)
return 2 * _norm_cdf(1 / np.sqrt(ax)) - 1
def _ppf(self, q):
val = _norm_ppf((q + 1.0) / 2)
return -1.0 / (val * val)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
levy_l = levy_l_gen(b=0.0, name="levy_l")
class levy_stable_gen(rv_continuous):
r"""A Levy-stable continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_l
Notes
-----
The distribution for `levy_stable` has characteristic function:
.. math::
\varphi(t, \alpha, \beta, c, \mu) =
e^{it\mu -|ct|^{\alpha}(1-i\beta \operatorname{sign}(t)\Phi(\alpha, t))}
where:
.. math::
\Phi = \begin{cases}
\tan \left({\frac {\pi \alpha }{2}}\right)&\alpha \neq 1\\
-{\frac {2}{\pi }}\log |t|&\alpha =1
\end{cases}
The probability density function for `levy_stable` is:
.. math::
f(x) = \frac{1}{2\pi}\int_{-\infty}^\infty \varphi(t)e^{-ixt}\,dt
where :math:`-\infty < t < \infty`. This integral does not have a known closed form.
For pdf evaluation we use either the Zolotarev :math:`S_0` parameterization with integration,
direct integration of the standard parameterization of the characteristic function, or an FFT
of the characteristic function. If ``levy_stable.pdf_fft_min_points_threshold`` (which
defaults to None) is set and the number of points is greater than that threshold, the FFT
method is used; otherwise one of the other methods is used.
The default method is 'best', which uses Zolotarev's method except when alpha = 1 and
beta != 0, in which case it falls back to integration of the characteristic function. The
default method can be changed by setting ``levy_stable.pdf_default_method`` to either
'zolotarev', 'quadrature' or 'best'.
To increase accuracy of FFT calculation one can specify ``levy_stable.pdf_fft_grid_spacing``
(defaults to 0.001) and ``pdf_fft_n_points_two_power`` (defaults to a value that covers the
input range * 4). Setting ``pdf_fft_n_points_two_power`` to 16 should be sufficiently accurate
in most cases at the expense of CPU time.
For cdf evaluation we use the Zolotarev :math:`S_0` parameterization with integration, or
the integral of a spline interpolated through the pdf FFT. The settings affecting the FFT
calculation are the same as for the pdf calculation. Setting the threshold to ``None``
(default) disables the FFT. For cdf calculations the Zolotarev method is superior in
accuracy, so the FFT is disabled by default.
Fitting uses the quantile estimation method of [MC]_. MLE estimation of the parameters in
the fit method uses this quantile estimate as its starting point. Note that MLE does not
always converge when the FFT is used for pdf calculations, so it is best to leave
``pdf_fft_min_points_threshold`` unset.
.. warning::
For pdf calculations the Zolotarev implementation is unstable for values where alpha = 1
and beta != 0. In this case the quadrature method is recommended. FFT calculation is also
considered experimental.
For cdf calculations the FFT calculation is considered experimental. Use Zolotarev's method
instead (default).
%(after_notes)s
References
----------
.. [MC] McCulloch, J., 1986. Simple consistent estimators of stable distribution parameters.
Communications in Statistics - Simulation and Computation 15, 1109-1136.
.. [MS] Mittnik, S.T. Rachev, T. Doganoglu, D. Chenyao, 1999. Maximum likelihood estimation
of stable Paretian models, Mathematical and Computer Modelling, Volume 29, Issue 10,
1999, Pages 275-293.
.. [BS] Borak, S., Hardle, W., Rafal, W. 2005. Stable distributions, Economic Risk.
%(example)s
"""
def _rvs(self, alpha, beta, size=None, random_state=None):
def alpha1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
return (2/np.pi*(np.pi/2 + bTH)*tanTH -
beta*np.log((np.pi/2*W*cosTH)/(np.pi/2 + bTH)))
def beta0func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
return (W/(cosTH/np.tan(aTH) + np.sin(TH)) *
((np.cos(aTH) + np.sin(aTH)*tanTH)/W)**(1.0/alpha))
def otherwise(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
# alpha is not 1 and beta is not 0
val0 = beta*np.tan(np.pi*alpha/2)
th0 = np.arctan(val0)/alpha
val3 = W/(cosTH/np.tan(alpha*(th0 + TH)) + np.sin(TH))
res3 = val3*((np.cos(aTH) + np.sin(aTH)*tanTH -
val0*(np.sin(aTH) - np.cos(aTH)*tanTH))/W)**(1.0/alpha)
return res3
def alphanot1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
res = _lazywhere(beta == 0,
(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
beta0func, f2=otherwise)
return res
alpha = np.broadcast_to(alpha, size)
beta = np.broadcast_to(beta, size)
TH = uniform.rvs(loc=-np.pi/2.0, scale=np.pi, size=size,
random_state=random_state)
W = expon.rvs(size=size, random_state=random_state)
aTH = alpha*TH
bTH = beta*TH
cosTH = np.cos(TH)
tanTH = np.tan(TH)
res = _lazywhere(alpha == 1,
(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
alpha1func, f2=alphanot1func)
return res
def _argcheck(self, alpha, beta):
return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
@staticmethod
def _cf(t, alpha, beta):
Phi = lambda alpha, t: np.tan(np.pi*alpha/2) if alpha != 1 else -2.0*np.log(np.abs(t))/np.pi
return np.exp(-(np.abs(t)**alpha)*(1-1j*beta*np.sign(t)*Phi(alpha, t)))
@staticmethod
def _pdf_from_cf_with_fft(cf, h=0.01, q=9):
"""Calculates pdf from cf using fft. Using region around 0 with N=2**q points
separated by distance h. As suggested by [MS].
"""
N = 2**q
n = np.arange(1,N+1)
density = ((-1)**(n-1-N/2))*np.fft.fft(((-1)**(n-1))*cf(2*np.pi*(n-1-N/2)/h/N))/h/N
x = (n-1-N/2)*h
return (x, density)
@staticmethod
def _pdf_single_value_best(x, alpha, beta):
if alpha != 1. or (alpha == 1. and beta == 0.):
return levy_stable_gen._pdf_single_value_zolotarev(x, alpha, beta)
else:
return levy_stable_gen._pdf_single_value_cf_integrate(x, alpha, beta)
@staticmethod
def _pdf_single_value_cf_integrate(x, alpha, beta):
cf = lambda t: levy_stable_gen._cf(t, alpha, beta)
return integrate.quad(lambda t: np.real(np.exp(-1j*t*x)*cf(t)), -np.inf, np.inf, limit=1000)[0]/np.pi/2
@staticmethod
def _pdf_single_value_zolotarev(x, alpha, beta):
"""Calculate pdf using Zolotarev's methods as detailed in [BS].
"""
zeta = -beta*np.tan(np.pi*alpha/2.)
if alpha != 1:
x0 = x + zeta # convert to S_0 parameterization
xi = np.arctan(-zeta)/alpha
def V(theta):
return np.cos(alpha*xi)**(1/(alpha-1)) * \
(np.cos(theta)/np.sin(alpha*(xi+theta)))**(alpha/(alpha-1)) * \
(np.cos(alpha*xi+(alpha-1)*theta)/np.cos(theta))
if x0 > zeta:
def g(theta):
return V(theta)*np.real(np.complex(x0-zeta)**(alpha/(alpha-1)))
def f(theta):
return g(theta) * np.exp(-g(theta))
# spare calculating integral on null set
# use isclose as macos has fp differences
if np.isclose(-xi, np.pi/2, rtol=1e-014, atol=1e-014):
return 0.
with np.errstate(all="ignore"):
intg_max = optimize.minimize_scalar(lambda theta: -f(theta), bounds=[-xi, np.pi/2])
intg_kwargs = {}
# windows quadpack less forgiving with points out of bounds
if intg_max.success and not np.isnan(intg_max.fun)\
and intg_max.x > -xi and intg_max.x < np.pi/2:
intg_kwargs["points"] = [intg_max.x]
intg = integrate.quad(f, -xi, np.pi/2, **intg_kwargs)[0]
return alpha * intg / np.pi / np.abs(alpha-1) / (x0-zeta)
elif x0 == zeta:
return sc.gamma(1+1/alpha)*np.cos(xi)/np.pi/((1+zeta**2)**(1/alpha/2))
else:
return levy_stable_gen._pdf_single_value_zolotarev(-x, alpha, -beta)
else:
# since location zero, no need to reposition x for S_0 parameterization
xi = np.pi/2
if beta != 0:
warnings.warn('Density calculation unstable for alpha=1 and beta!=0.' +
' Use quadrature method instead.', RuntimeWarning)
def V(theta):
expr_1 = np.pi/2+beta*theta
return 2. * expr_1 * np.exp(expr_1*np.tan(theta)/beta) / np.cos(theta) / np.pi
def g(theta):
return np.exp(-np.pi * x / 2. / beta) * V(theta)
def f(theta):
return g(theta) * np.exp(-g(theta))
with np.errstate(all="ignore"):
intg_max = optimize.minimize_scalar(lambda theta: -f(theta), bounds=[-np.pi/2, np.pi/2])
intg = integrate.fixed_quad(f, -np.pi/2, intg_max.x)[0] + integrate.fixed_quad(f, intg_max.x, np.pi/2)[0]
return intg / np.abs(beta) / 2.
else:
return 1/(1+x**2)/np.pi
@staticmethod
def _cdf_single_value_zolotarev(x, alpha, beta):
"""Calculate cdf using Zolotarev's methods as detailed in [BS].
"""
zeta = -beta*np.tan(np.pi*alpha/2.)
if alpha != 1:
x0 = x + zeta # convert to S_0 parameterization
xi = np.arctan(-zeta)/alpha
def V(theta):
return np.cos(alpha*xi)**(1/(alpha-1)) * \
(np.cos(theta)/np.sin(alpha*(xi+theta)))**(alpha/(alpha-1)) * \
(np.cos(alpha*xi+(alpha-1)*theta)/np.cos(theta))
if x0 > zeta:
c_1 = 1 if alpha > 1 else .5 - xi/np.pi
def f(theta):
return np.exp(-V(theta)*np.real(np.complex(x0-zeta)**(alpha/(alpha-1))))
with np.errstate(all="ignore"):
# spare calculating integral on null set
# use isclose as macos has fp differences
if np.isclose(-xi, np.pi/2, rtol=1e-014, atol=1e-014):
intg = 0
else:
intg = integrate.quad(f, -xi, np.pi/2)[0]
return c_1 + np.sign(1-alpha) * intg / np.pi
elif x0 == zeta:
return .5 - xi/np.pi
else:
return 1 - levy_stable_gen._cdf_single_value_zolotarev(-x, alpha, -beta)
else:
# since location zero, no need to reposition x for S_0 parameterization
xi = np.pi/2
if beta > 0:
def V(theta):
expr_1 = np.pi/2+beta*theta
return 2. * expr_1 * np.exp(expr_1*np.tan(theta)/beta) / np.cos(theta) / np.pi
with np.errstate(all="ignore"):
expr_1 = np.exp(-np.pi*x/beta/2.)
int_1 = integrate.quad(lambda theta: np.exp(-expr_1 * V(theta)), -np.pi/2, np.pi/2)[0]
return int_1 / np.pi
elif beta == 0:
return .5 + np.arctan(x)/np.pi
else:
return 1 - levy_stable_gen._cdf_single_value_zolotarev(-x, 1, -beta)
def _pdf(self, x, alpha, beta):
x = np.asarray(x).reshape(1, -1)[0,:]
x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
data_in = np.dstack((x, alpha, beta))[0]
data_out = np.empty(shape=(len(data_in),1))
pdf_default_method_name = getattr(self, 'pdf_default_method', 'best')
if pdf_default_method_name == 'best':
pdf_single_value_method = levy_stable_gen._pdf_single_value_best
elif pdf_default_method_name == 'zolotarev':
pdf_single_value_method = levy_stable_gen._pdf_single_value_zolotarev
else:
pdf_single_value_method = levy_stable_gen._pdf_single_value_cf_integrate
fft_min_points_threshold = getattr(self, 'pdf_fft_min_points_threshold', None)
fft_grid_spacing = getattr(self, 'pdf_fft_grid_spacing', 0.001)
fft_n_points_two_power = getattr(self, 'pdf_fft_n_points_two_power', None)
# group data in unique arrays of alpha, beta pairs
uniq_param_pairs = np.vstack(list({tuple(row) for row in
data_in[:, 1:]}))
for pair in uniq_param_pairs:
data_mask = np.all(data_in[:,1:] == pair, axis=-1)
data_subset = data_in[data_mask]
if fft_min_points_threshold is None or len(data_subset) < fft_min_points_threshold:
data_out[data_mask] = np.array([pdf_single_value_method(_x, _alpha, _beta)
for _x, _alpha, _beta in data_subset]).reshape(len(data_subset), 1)
else:
                warnings.warn('Density calculations experimental for FFT method.' +
                              ' Use combination of Zolotarev and quadrature methods instead.', RuntimeWarning)
_alpha, _beta = pair
_x = data_subset[:,(0,)]
# need enough points to "cover" _x for interpolation
h = fft_grid_spacing
q = np.ceil(np.log(2*np.max(np.abs(_x))/h)/np.log(2)) + 2 if fft_n_points_two_power is None else int(fft_n_points_two_power)
density_x, density = levy_stable_gen._pdf_from_cf_with_fft(lambda t: levy_stable_gen._cf(t, _alpha, _beta), h=h, q=q)
f = interpolate.interp1d(density_x, np.real(density))
data_out[data_mask] = f(_x)
return data_out.T[0]
def _cdf(self, x, alpha, beta):
x = np.asarray(x).reshape(1, -1)[0,:]
x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
data_in = np.dstack((x, alpha, beta))[0]
data_out = np.empty(shape=(len(data_in),1))
fft_min_points_threshold = getattr(self, 'pdf_fft_min_points_threshold', None)
fft_grid_spacing = getattr(self, 'pdf_fft_grid_spacing', 0.001)
fft_n_points_two_power = getattr(self, 'pdf_fft_n_points_two_power', None)
# group data in unique arrays of alpha, beta pairs
uniq_param_pairs = np.vstack(
list({tuple(row) for row in data_in[:,1:]}))
for pair in uniq_param_pairs:
data_mask = np.all(data_in[:,1:] == pair, axis=-1)
data_subset = data_in[data_mask]
if fft_min_points_threshold is None or len(data_subset) < fft_min_points_threshold:
data_out[data_mask] = np.array([levy_stable._cdf_single_value_zolotarev(_x, _alpha, _beta)
for _x, _alpha, _beta in data_subset]).reshape(len(data_subset), 1)
else:
                warnings.warn('FFT method is considered experimental for '
                              'cumulative distribution function '
                              'evaluations. Use Zolotarev\'s method instead.',
                              RuntimeWarning)
_alpha, _beta = pair
_x = data_subset[:,(0,)]
# need enough points to "cover" _x for interpolation
h = fft_grid_spacing
q = 16 if fft_n_points_two_power is None else int(fft_n_points_two_power)
density_x, density = levy_stable_gen._pdf_from_cf_with_fft(lambda t: levy_stable_gen._cf(t, _alpha, _beta), h=h, q=q)
f = interpolate.InterpolatedUnivariateSpline(density_x, np.real(density))
data_out[data_mask] = np.array([f.integral(self.a, x_1) for x_1 in _x]).reshape(data_out[data_mask].shape)
return data_out.T[0]
def _fitstart(self, data):
        # We follow McCulloch 1986 method - Simple Consistent Estimators
# of Stable Distribution Parameters
# Table III and IV
nu_alpha_range = [2.439, 2.5, 2.6, 2.7, 2.8, 3, 3.2, 3.5, 4, 5, 6, 8, 10, 15, 25]
nu_beta_range = [0, 0.1, 0.2, 0.3, 0.5, 0.7, 1]
# table III - alpha = psi_1(nu_alpha, nu_beta)
alpha_table = [
[2.000, 2.000, 2.000, 2.000, 2.000, 2.000, 2.000],
[1.916, 1.924, 1.924, 1.924, 1.924, 1.924, 1.924],
[1.808, 1.813, 1.829, 1.829, 1.829, 1.829, 1.829],
[1.729, 1.730, 1.737, 1.745, 1.745, 1.745, 1.745],
[1.664, 1.663, 1.663, 1.668, 1.676, 1.676, 1.676],
[1.563, 1.560, 1.553, 1.548, 1.547, 1.547, 1.547],
[1.484, 1.480, 1.471, 1.460, 1.448, 1.438, 1.438],
[1.391, 1.386, 1.378, 1.364, 1.337, 1.318, 1.318],
[1.279, 1.273, 1.266, 1.250, 1.210, 1.184, 1.150],
[1.128, 1.121, 1.114, 1.101, 1.067, 1.027, 0.973],
[1.029, 1.021, 1.014, 1.004, 0.974, 0.935, 0.874],
[0.896, 0.892, 0.884, 0.883, 0.855, 0.823, 0.769],
[0.818, 0.812, 0.806, 0.801, 0.780, 0.756, 0.691],
[0.698, 0.695, 0.692, 0.689, 0.676, 0.656, 0.597],
[0.593, 0.590, 0.588, 0.586, 0.579, 0.563, 0.513]]
# table IV - beta = psi_2(nu_alpha, nu_beta)
beta_table = [
[0, 2.160, 1.000, 1.000, 1.000, 1.000, 1.000],
[0, 1.592, 3.390, 1.000, 1.000, 1.000, 1.000],
[0, 0.759, 1.800, 1.000, 1.000, 1.000, 1.000],
[0, 0.482, 1.048, 1.694, 1.000, 1.000, 1.000],
[0, 0.360, 0.760, 1.232, 2.229, 1.000, 1.000],
[0, 0.253, 0.518, 0.823, 1.575, 1.000, 1.000],
[0, 0.203, 0.410, 0.632, 1.244, 1.906, 1.000],
[0, 0.165, 0.332, 0.499, 0.943, 1.560, 1.000],
[0, 0.136, 0.271, 0.404, 0.689, 1.230, 2.195],
[0, 0.109, 0.216, 0.323, 0.539, 0.827, 1.917],
[0, 0.096, 0.190, 0.284, 0.472, 0.693, 1.759],
[0, 0.082, 0.163, 0.243, 0.412, 0.601, 1.596],
[0, 0.074, 0.147, 0.220, 0.377, 0.546, 1.482],
[0, 0.064, 0.128, 0.191, 0.330, 0.478, 1.362],
[0, 0.056, 0.112, 0.167, 0.285, 0.428, 1.274]]
# Table V and VII
alpha_range = [2, 1.9, 1.8, 1.7, 1.6, 1.5, 1.4, 1.3, 1.2, 1.1, 1, 0.9, 0.8, 0.7, 0.6, 0.5]
beta_range = [0, 0.25, 0.5, 0.75, 1]
# Table V - nu_c = psi_3(alpha, beta)
nu_c_table = [
[1.908, 1.908, 1.908, 1.908, 1.908],
[1.914, 1.915, 1.916, 1.918, 1.921],
[1.921, 1.922, 1.927, 1.936, 1.947],
[1.927, 1.930, 1.943, 1.961, 1.987],
[1.933, 1.940, 1.962, 1.997, 2.043],
[1.939, 1.952, 1.988, 2.045, 2.116],
[1.946, 1.967, 2.022, 2.106, 2.211],
[1.955, 1.984, 2.067, 2.188, 2.333],
[1.965, 2.007, 2.125, 2.294, 2.491],
[1.980, 2.040, 2.205, 2.435, 2.696],
[2.000, 2.085, 2.311, 2.624, 2.973],
[2.040, 2.149, 2.461, 2.886, 3.356],
[2.098, 2.244, 2.676, 3.265, 3.912],
[2.189, 2.392, 3.004, 3.844, 4.775],
[2.337, 2.634, 3.542, 4.808, 6.247],
[2.588, 3.073, 4.534, 6.636, 9.144]]
# Table VII - nu_zeta = psi_5(alpha, beta)
nu_zeta_table = [
[0, 0.000, 0.000, 0.000, 0.000],
[0, -0.017, -0.032, -0.049, -0.064],
[0, -0.030, -0.061, -0.092, -0.123],
[0, -0.043, -0.088, -0.132, -0.179],
[0, -0.056, -0.111, -0.170, -0.232],
[0, -0.066, -0.134, -0.206, -0.283],
[0, -0.075, -0.154, -0.241, -0.335],
[0, -0.084, -0.173, -0.276, -0.390],
[0, -0.090, -0.192, -0.310, -0.447],
[0, -0.095, -0.208, -0.346, -0.508],
[0, -0.098, -0.223, -0.380, -0.576],
[0, -0.099, -0.237, -0.424, -0.652],
[0, -0.096, -0.250, -0.469, -0.742],
[0, -0.089, -0.262, -0.520, -0.853],
[0, -0.078, -0.272, -0.581, -0.997],
[0, -0.061, -0.279, -0.659, -1.198]]
psi_1 = interpolate.interp2d(nu_beta_range, nu_alpha_range, alpha_table, kind='linear')
psi_2 = interpolate.interp2d(nu_beta_range, nu_alpha_range, beta_table, kind='linear')
psi_2_1 = lambda nu_beta, nu_alpha: psi_2(nu_beta, nu_alpha) if nu_beta > 0 else -psi_2(-nu_beta, nu_alpha)
phi_3 = interpolate.interp2d(beta_range, alpha_range, nu_c_table, kind='linear')
phi_3_1 = lambda beta, alpha: phi_3(beta, alpha) if beta > 0 else phi_3(-beta, alpha)
phi_5 = interpolate.interp2d(beta_range, alpha_range, nu_zeta_table, kind='linear')
phi_5_1 = lambda beta, alpha: phi_5(beta, alpha) if beta > 0 else -phi_5(-beta, alpha)
# quantiles
p05 = np.percentile(data, 5)
p50 = np.percentile(data, 50)
p95 = np.percentile(data, 95)
p25 = np.percentile(data, 25)
p75 = np.percentile(data, 75)
nu_alpha = (p95 - p05)/(p75 - p25)
nu_beta = (p95 + p05 - 2*p50)/(p95 - p05)
if nu_alpha >= 2.439:
alpha = np.clip(psi_1(nu_beta, nu_alpha)[0], np.finfo(float).eps, 2.)
beta = np.clip(psi_2_1(nu_beta, nu_alpha)[0], -1., 1.)
else:
alpha = 2.0
beta = np.sign(nu_beta)
c = (p75 - p25) / phi_3_1(beta, alpha)[0]
zeta = p50 + c*phi_5_1(beta, alpha)[0]
delta = np.clip(zeta-beta*c*np.tan(np.pi*alpha/2.) if alpha == 1. else zeta, np.finfo(float).eps, np.inf)
return (alpha, beta, delta, c)
def _stats(self, alpha, beta):
mu = 0 if alpha > 1 else np.nan
mu2 = 2 if alpha == 2 else np.inf
g1 = 0. if alpha == 2. else np.NaN
g2 = 0. if alpha == 2. else np.NaN
return mu, mu2, g1, g2
levy_stable = levy_stable_gen(name='levy_stable')
class logistic_gen(rv_continuous):
r"""A logistic (or Sech-squared) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `logistic` is:
.. math::
f(x) = \frac{\exp(-x)}
{(1+\exp(-x))^2}
`logistic` is a special case of `genlogistic` with ``c=1``.
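    A quick numerical check of that relation (evaluation grid chosen arbitrarily):
    >>> import numpy as np
    >>> from scipy.stats import logistic, genlogistic
    >>> x = np.linspace(-3, 3, 7)
    >>> np.allclose(logistic.pdf(x), genlogistic.pdf(x, c=1))
    True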
%(after_notes)s
%(example)s
"""
def _rvs(self, size=None, random_state=None):
return random_state.logistic(size=size)
def _pdf(self, x):
# logistic.pdf(x) = exp(-x) / (1+exp(-x))**2
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return -x - 2. * sc.log1p(np.exp(-x))
def _cdf(self, x):
return sc.expit(x)
def _ppf(self, q):
return sc.logit(q)
def _sf(self, x):
return sc.expit(-x)
def _isf(self, q):
return -sc.logit(q)
def _stats(self):
return 0, np.pi*np.pi/3.0, 0, 6.0/5.0
def _entropy(self):
# https://en.wikipedia.org/wiki/Logistic_distribution
return 2.0
logistic = logistic_gen(name='logistic')
class loggamma_gen(rv_continuous):
r"""A log gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loggamma` is:
.. math::
f(x, c) = \frac{\exp(c x - \exp(x))}
{\Gamma(c)}
for all :math:`x, c > 0`. Here, :math:`\Gamma` is the
gamma function (`scipy.special.gamma`).
`loggamma` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _rvs(self, c, size=None, random_state=None):
return np.log(random_state.gamma(c, size=size))
def _pdf(self, x, c):
# loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c)
return np.exp(c*x-np.exp(x)-sc.gammaln(c))
def _cdf(self, x, c):
return sc.gammainc(c, np.exp(x))
def _ppf(self, q, c):
return np.log(sc.gammaincinv(c, q))
def _stats(self, c):
# See, for example, "A Statistical Study of Log-Gamma Distribution", by
# Ping Shing Chan (thesis, McMaster University, 1993).
mean = sc.digamma(c)
var = sc.polygamma(1, c)
skewness = sc.polygamma(2, c) / np.power(var, 1.5)
excess_kurtosis = sc.polygamma(3, c) / (var*var)
return mean, var, skewness, excess_kurtosis
loggamma = loggamma_gen(name='loggamma')
class loglaplace_gen(rv_continuous):
r"""A log-Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loglaplace` is:
.. math::
f(x, c) = \begin{cases}\frac{c}{2} x^{ c-1} &\text{for } 0 < x < 1\\
\frac{c}{2} x^{-c-1} &\text{for } x \ge 1
\end{cases}
for :math:`c > 0`.
`loglaplace` takes ``c`` as a shape parameter for :math:`c`.
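    One consequence of this density is that the median is 1 for every ``c``; for instance,
    with an arbitrarily chosen ``c``:
    >>> from scipy.stats import loglaplace
    >>> float(loglaplace.ppf(0.5, c=2.5))
    1.0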
%(after_notes)s
References
----------
T.J. Kozubowski and K. Podgorski, "A log-Laplace growth rate model",
The Mathematical Scientist, vol. 28, pp. 49-60, 2003.
%(example)s
"""
def _pdf(self, x, c):
# loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1
# = c / 2 * x**(-c-1), for x >= 1
cd2 = c/2.0
c = np.where(x < 1, c, -c)
return cd2*x**(c-1)
def _cdf(self, x, c):
return np.where(x < 1, 0.5*x**c, 1-0.5*x**(-c))
def _ppf(self, q, c):
return np.where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))
def _munp(self, n, c):
return c**2 / (c**2 - n**2)
def _entropy(self, c):
return np.log(2.0/c) + 1.0
loglaplace = loglaplace_gen(a=0.0, name='loglaplace')
def _lognorm_logpdf(x, s):
return _lazywhere(x != 0, (x, s),
lambda x, s: -np.log(x)**2 / (2*s**2) - np.log(s*x*np.sqrt(2*np.pi)),
-np.inf)
class lognorm_gen(rv_continuous):
r"""A lognormal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `lognorm` is:
.. math::
f(x, s) = \frac{1}{s x \sqrt{2\pi}}
\exp\left(-\frac{\log^2(x)}{2s^2}\right)
for :math:`x > 0`, :math:`s > 0`.
`lognorm` takes ``s`` as a shape parameter for :math:`s`.
%(after_notes)s
A common parametrization for a lognormal random variable ``Y`` is in
terms of the mean, ``mu``, and standard deviation, ``sigma``, of the
unique normally distributed random variable ``X`` such that exp(X) = Y.
This parametrization corresponds to setting ``s = sigma`` and ``scale =
exp(mu)``.
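    For example, assuming log-space parameters ``mu = 1.5`` and ``sigma = 0.75`` (values
    chosen arbitrarily), the corresponding distribution would be constructed as:
    >>> import numpy as np
    >>> from scipy.stats import lognorm
    >>> mu, sigma = 1.5, 0.75
    >>> frozen = lognorm(s=sigma, scale=np.exp(mu))
    >>> np.allclose(frozen.median(), np.exp(mu))
    True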
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, s, size=None, random_state=None):
return np.exp(s * random_state.standard_normal(size))
def _pdf(self, x, s):
# lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
return np.exp(self._logpdf(x, s))
def _logpdf(self, x, s):
return _lognorm_logpdf(x, s)
def _cdf(self, x, s):
return _norm_cdf(np.log(x) / s)
def _logcdf(self, x, s):
return _norm_logcdf(np.log(x) / s)
def _ppf(self, q, s):
return np.exp(s * _norm_ppf(q))
def _sf(self, x, s):
return _norm_sf(np.log(x) / s)
def _logsf(self, x, s):
return _norm_logsf(np.log(x) / s)
def _stats(self, s):
p = np.exp(s*s)
mu = np.sqrt(p)
mu2 = p*(p-1)
g1 = np.sqrt((p-1))*(2+p)
g2 = np.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self, s):
return 0.5 * (1 + np.log(2*np.pi) + 2 * np.log(s))
@extend_notes_in_docstring(rv_continuous, notes="""\
When the location parameter is fixed by using the `floc` argument,
this function uses explicit formulas for the maximum likelihood
estimation of the log-normal shape and scale parameters, so the
`optimizer`, `loc` and `scale` keyword arguments are ignored.\n\n""")
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
if floc is None:
# loc is not fixed. Use the default fit method.
return super(lognorm_gen, self).fit(data, *args, **kwds)
f0 = (kwds.get('f0', None) or kwds.get('fs', None) or
kwds.get('fix_s', None))
fscale = kwds.get('fscale', None)
if len(args) > 1:
raise TypeError("Too many input arguments.")
for name in ['f0', 'fs', 'fix_s', 'floc', 'fscale', 'loc', 'scale',
'optimizer']:
kwds.pop(name, None)
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
# Special case: loc is fixed. Use the maximum likelihood formulas
# instead of the numerical solver.
if f0 is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if not np.isfinite(data).all():
raise RuntimeError("The data contains non-finite values.")
floc = float(floc)
if floc != 0:
# Shifting the data by floc. Don't do the subtraction in-place,
# because `data` might be a view of the input array.
data = data - floc
if np.any(data <= 0):
raise FitDataError("lognorm", lower=floc, upper=np.inf)
lndata = np.log(data)
# Three cases to handle:
# * shape and scale both free
# * shape fixed, scale free
# * shape free, scale fixed
if fscale is None:
# scale is free.
scale = np.exp(lndata.mean())
if f0 is None:
# shape is free.
shape = lndata.std()
else:
# shape is fixed.
shape = float(f0)
else:
# scale is fixed, shape is free
scale = float(fscale)
shape = np.sqrt(((lndata - np.log(scale))**2).mean())
return shape, floc, scale
lognorm = lognorm_gen(a=0.0, name='lognorm')
class gilbrat_gen(rv_continuous):
r"""A Gilbrat continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gilbrat` is:
.. math::
f(x) = \frac{1}{x \sqrt{2\pi}} \exp(-\frac{1}{2} (\log(x))^2)
`gilbrat` is a special case of `lognorm` with ``s=1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, size=None, random_state=None):
return np.exp(random_state.standard_normal(size))
def _pdf(self, x):
# gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return _lognorm_logpdf(x, 1.0)
def _cdf(self, x):
return _norm_cdf(np.log(x))
def _ppf(self, q):
return np.exp(_norm_ppf(q))
def _stats(self):
p = np.e
mu = np.sqrt(p)
mu2 = p * (p - 1)
g1 = np.sqrt((p - 1)) * (2 + p)
g2 = np.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self):
return 0.5 * np.log(2 * np.pi) + 0.5
gilbrat = gilbrat_gen(a=0.0, name='gilbrat')
class maxwell_gen(rv_continuous):
r"""A Maxwell continuous random variable.
%(before_notes)s
Notes
-----
A special case of a `chi` distribution, with ``df=3``, ``loc=0.0``,
and given ``scale = a``, where ``a`` is the parameter used in the
Mathworld description [1]_.
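    For instance, the two parameterizations agree pointwise; here ``a`` and the evaluation
    grid are chosen arbitrarily:
    >>> import numpy as np
    >>> from scipy.stats import maxwell, chi
    >>> a = 1.7
    >>> x = np.linspace(0.1, 5.0, 9)
    >>> np.allclose(maxwell.pdf(x, scale=a), chi.pdf(x, 3, scale=a))
    True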
The probability density function for `maxwell` is:
.. math::
f(x) = \sqrt{2/\pi}x^2 \exp(-x^2/2)
for :math:`x >= 0`.
%(after_notes)s
References
----------
.. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
%(example)s
"""
def _rvs(self, size=None, random_state=None):
return chi.rvs(3.0, size=size, random_state=random_state)
def _pdf(self, x):
# maxwell.pdf(x) = sqrt(2/pi)x**2 * exp(-x**2/2)
return _SQRT_2_OVER_PI*x*x*np.exp(-x*x/2.0)
def _logpdf(self, x):
return _LOG_SQRT_2_OVER_PI + 2*np.log(x) - 0.5*x*x
def _cdf(self, x):
return sc.gammainc(1.5, x*x/2.0)
def _ppf(self, q):
return np.sqrt(2*sc.gammaincinv(1.5, q))
def _stats(self):
val = 3*np.pi-8
return (2*np.sqrt(2.0/np.pi),
3-8/np.pi,
np.sqrt(2)*(32-10*np.pi)/val**1.5,
(-12*np.pi*np.pi + 160*np.pi - 384) / val**2.0)
def _entropy(self):
return _EULER + 0.5*np.log(2*np.pi)-0.5
maxwell = maxwell_gen(a=0.0, name='maxwell')
class mielke_gen(rv_continuous):
r"""A Mielke Beta-Kappa / Dagum continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `mielke` is:
.. math::
f(x, k, s) = \frac{k x^{k-1}}{(1+x^s)^{1+k/s}}
for :math:`x > 0` and :math:`k, s > 0`. The distribution is sometimes
called Dagum distribution ([2]_). It was already defined in [3]_, called
a Burr Type III distribution (`burr` with parameters ``c=s`` and
``d=k/s``).
`mielke` takes ``k`` and ``s`` as shape parameters.
%(after_notes)s
References
----------
.. [1] Mielke, P.W., 1973 "Another Family of Distributions for Describing
and Analyzing Precipitation Data." J. Appl. Meteor., 12, 275-280
.. [2] Dagum, C., 1977 "A new model for personal income distribution."
Economie Appliquee, 33, 327-367.
.. [3] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
%(example)s
"""
def _argcheck(self, k, s):
return (k > 0) & (s > 0)
def _pdf(self, x, k, s):
return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s)
def _logpdf(self, x, k, s):
return np.log(k) + np.log(x)*(k-1.0) - np.log1p(x**s)*(1.0+k*1.0/s)
def _cdf(self, x, k, s):
return x**k / (1.0+x**s)**(k*1.0/s)
def _ppf(self, q, k, s):
qsk = pow(q, s*1.0/k)
return pow(qsk/(1.0-qsk), 1.0/s)
def _munp(self, n, k, s):
def nth_moment(n, k, s):
# n-th moment is defined for -k < n < s
return sc.gamma((k+n)/s)*sc.gamma(1-n/s)/sc.gamma(k/s)
return _lazywhere(n < s, (n, k, s), nth_moment, np.inf)
mielke = mielke_gen(a=0.0, name='mielke')
class kappa4_gen(rv_continuous):
r"""Kappa 4 parameter distribution.
%(before_notes)s
Notes
-----
The probability density function for kappa4 is:
.. math::
f(x, h, k) = (1 - k x)^{1/k - 1} (1 - h (1 - k x)^{1/k})^{1/h-1}
if :math:`h` and :math:`k` are not equal to 0.
If :math:`h` or :math:`k` are zero then the pdf can be simplified:
h = 0 and k != 0::
kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
exp(-(1.0 - k*x)**(1.0/k))
h != 0 and k = 0::
kappa4.pdf(x, h, k) = exp(-x)*(1.0 - h*exp(-x))**(1.0/h - 1.0)
h = 0 and k = 0::
kappa4.pdf(x, h, k) = exp(-x)*exp(-exp(-x))
kappa4 takes :math:`h` and :math:`k` as shape parameters.
The kappa4 distribution returns other distributions when certain
:math:`h` and :math:`k` values are used.
+------+-------------+----------------+------------------+
| h | k=0.0 | k=1.0 | -inf<=k<=inf |
+======+=============+================+==================+
| -1.0 | Logistic | | Generalized |
| | | | Logistic(1) |
| | | | |
| | logistic(x) | | |
+------+-------------+----------------+------------------+
| 0.0 | Gumbel | Reverse | Generalized |
| | | Exponential(2) | Extreme Value |
| | | | |
| | gumbel_r(x) | | genextreme(x, k) |
+------+-------------+----------------+------------------+
| 1.0 | Exponential | Uniform | Generalized |
| | | | Pareto |
| | | | |
| | expon(x) | uniform(x) | genpareto(x, -k) |
+------+-------------+----------------+------------------+
(1) There are at least five generalized logistic distributions.
Four are described here:
https://en.wikipedia.org/wiki/Generalized_logistic_distribution
The "fifth" one is the one kappa4 should match which currently
isn't implemented in scipy:
https://en.wikipedia.org/wiki/Talk:Generalized_logistic_distribution
https://www.mathwave.com/help/easyfit/html/analyses/distributions/gen_logistic.html
(2) This distribution is currently not in scipy.
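    As a small check of one row of this table, ``h = 1`` and ``k = 0`` reduces to the
    exponential distribution (evaluation points chosen arbitrarily):
    >>> import numpy as np
    >>> from scipy.stats import kappa4, expon
    >>> x = np.linspace(0.1, 5.0, 20)
    >>> np.allclose(kappa4.pdf(x, 1.0, 0.0), expon.pdf(x))
    True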
References
----------
J.C. Finney, "Optimization of a Skewed Logistic Distribution With Respect
to the Kolmogorov-Smirnov Test", A Dissertation Submitted to the Graduate
Faculty of the Louisiana State University and Agricultural and Mechanical
College, (August, 2004),
https://digitalcommons.lsu.edu/gradschool_dissertations/3672
J.R.M. Hosking, "The four-parameter kappa distribution". IBM J. Res.
    Develop. 38 (3), 251-258 (1994).
B. Kumphon, A. Kaew-Man, P. Seenoi, "A Rainfall Distribution for the Lampao
Site in the Chi River Basin, Thailand", Journal of Water Resource and
Protection, vol. 4, 866-869, (2012).
https://doi.org/10.4236/jwarp.2012.410101
C. Winchester, "On Estimation of the Four-Parameter Kappa Distribution", A
Thesis Submitted to Dalhousie University, Halifax, Nova Scotia, (March
2000).
http://www.nlc-bnc.ca/obj/s4/f2/dsk2/ftp01/MQ57336.pdf
%(after_notes)s
%(example)s
"""
def _argcheck(self, h, k):
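        # h == h is True for everything except nan, so any real h and k are
        # accepted here; the admissible support is handled separately in
        # _get_support.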
return h == h
def _get_support(self, h, k):
condlist = [np.logical_and(h > 0, k > 0),
np.logical_and(h > 0, k == 0),
np.logical_and(h > 0, k < 0),
np.logical_and(h <= 0, k > 0),
np.logical_and(h <= 0, k == 0),
np.logical_and(h <= 0, k < 0)]
def f0(h, k):
return (1.0 - float_power(h, -k))/k
def f1(h, k):
return np.log(h)
def f3(h, k):
a = np.empty(np.shape(h))
a[:] = -np.inf
return a
def f5(h, k):
return 1.0/k
_a = _lazyselect(condlist,
[f0, f1, f0, f3, f3, f5],
[h, k],
default=np.nan)
def f0(h, k):
return 1.0/k
def f1(h, k):
a = np.empty(np.shape(h))
a[:] = np.inf
return a
_b = _lazyselect(condlist,
[f0, f1, f1, f0, f1, f1],
[h, k],
default=np.nan)
return _a, _b
def _pdf(self, x, h, k):
# kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
# (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1)
return np.exp(self._logpdf(x, h, k))
def _logpdf(self, x, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(x, h, k):
'''pdf = (1.0 - k*x)**(1.0/k - 1.0)*(
1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1.0)
logpdf = ...
'''
return (sc.xlog1py(1.0/k - 1.0, -k*x) +
sc.xlog1py(1.0/h - 1.0, -h*(1.0 - k*x)**(1.0/k)))
def f1(x, h, k):
'''pdf = (1.0 - k*x)**(1.0/k - 1.0)*np.exp(-(
1.0 - k*x)**(1.0/k))
logpdf = ...
'''
return sc.xlog1py(1.0/k - 1.0, -k*x) - (1.0 - k*x)**(1.0/k)
def f2(x, h, k):
'''pdf = np.exp(-x)*(1.0 - h*np.exp(-x))**(1.0/h - 1.0)
logpdf = ...
'''
return -x + sc.xlog1py(1.0/h - 1.0, -h*np.exp(-x))
def f3(x, h, k):
'''pdf = np.exp(-x-np.exp(-x))
logpdf = ...
'''
return -x - np.exp(-x)
return _lazyselect(condlist,
[f0, f1, f2, f3],
[x, h, k],
default=np.nan)
def _cdf(self, x, h, k):
return np.exp(self._logcdf(x, h, k))
def _logcdf(self, x, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(x, h, k):
'''cdf = (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h)
logcdf = ...
'''
return (1.0/h)*sc.log1p(-h*(1.0 - k*x)**(1.0/k))
def f1(x, h, k):
'''cdf = np.exp(-(1.0 - k*x)**(1.0/k))
logcdf = ...
'''
return -(1.0 - k*x)**(1.0/k)
def f2(x, h, k):
'''cdf = (1.0 - h*np.exp(-x))**(1.0/h)
logcdf = ...
'''
return (1.0/h)*sc.log1p(-h*np.exp(-x))
def f3(x, h, k):
'''cdf = np.exp(-np.exp(-x))
logcdf = ...
'''
return -np.exp(-x)
return _lazyselect(condlist,
[f0, f1, f2, f3],
[x, h, k],
default=np.nan)
def _ppf(self, q, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(q, h, k):
return 1.0/k*(1.0 - ((1.0 - (q**h))/h)**k)
def f1(q, h, k):
return 1.0/k*(1.0 - (-np.log(q))**k)
def f2(q, h, k):
'''ppf = -np.log((1.0 - (q**h))/h)
'''
return -sc.log1p(-(q**h)) + np.log(h)
def f3(q, h, k):
return -np.log(-np.log(q))
return _lazyselect(condlist,
[f0, f1, f2, f3],
[q, h, k],
default=np.nan)
def _stats(self, h, k):
if h >= 0 and k >= 0:
maxr = 5
elif h < 0 and k >= 0:
maxr = int(-1.0/h*k)
elif k < 0:
maxr = int(-1.0/k)
else:
maxr = 5
outputs = [None if r < maxr else np.nan for r in range(1, 5)]
return outputs[:]
kappa4 = kappa4_gen(name='kappa4')
class kappa3_gen(rv_continuous):
r"""Kappa 3 parameter distribution.
%(before_notes)s
Notes
-----
The probability density function for `kappa3` is:
.. math::
f(x, a) = a (a + x^a)^{-(a + 1)/a}
for :math:`x > 0` and :math:`a > 0`.
`kappa3` takes ``a`` as a shape parameter for :math:`a`.
References
----------
P.W. Mielke and E.S. Johnson, "Three-Parameter Kappa Distribution Maximum
    Likelihood and Likelihood Ratio Tests", Monthly Weather Review, 101,
    701-707, (September, 1973),
https://doi.org/10.1175/1520-0493(1973)101<0701:TKDMLE>2.3.CO;2
B. Kumphon, "Maximum Entropy and Maximum Likelihood Estimation for the
Three-Parameter Kappa Distribution", Open Journal of Statistics, vol 2,
415-419 (2012), https://doi.org/10.4236/ojs.2012.24050
%(after_notes)s
%(example)s
"""
def _argcheck(self, a):
return a > 0
def _pdf(self, x, a):
# kappa3.pdf(x, a) = a*(a + x**a)**(-(a + 1)/a), for x > 0
return a*(a + x**a)**(-1.0/a-1)
def _cdf(self, x, a):
return x*(a + x**a)**(-1.0/a)
def _ppf(self, q, a):
return (a/(q**-a - 1.0))**(1.0/a)
def _stats(self, a):
outputs = [None if i < a else np.nan for i in range(1, 5)]
return outputs[:]
kappa3 = kappa3_gen(a=0.0, name='kappa3')
class moyal_gen(rv_continuous):
r"""A Moyal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `moyal` is:
.. math::
f(x) = \exp(-(x + \exp(-x))/2) / \sqrt{2\pi}
for a real number :math:`x`.
%(after_notes)s
This distribution has utility in high-energy physics and radiation
detection. It describes the energy loss of a charged relativistic
particle due to ionization of the medium [1]_. It also provides an
    approximation for the Landau distribution. For an in-depth description
see [2]_. For additional description, see [3]_.
References
----------
.. [1] J.E. Moyal, "XXX. Theory of ionization fluctuations",
The London, Edinburgh, and Dublin Philosophical Magazine
and Journal of Science, vol 46, 263-280, (1955).
:doi:`10.1080/14786440308521076` (gated)
.. [2] G. Cordeiro et al., "The beta Moyal: a useful skew distribution",
International Journal of Research and Reviews in Applied Sciences,
vol 10, 171-192, (2012).
http://www.arpapress.com/Volumes/Vol10Issue2/IJRRAS_10_2_02.pdf
.. [3] C. Walck, "Handbook on Statistical Distributions for
           Experimentalists; Internal Report SUF-PFY/96-01", Chapter 26,
University of Stockholm: Stockholm, Sweden, (2007).
http://www.stat.rice.edu/~dobelman/textfiles/DistributionsHandbook.pdf
.. versionadded:: 1.1.0
%(example)s
"""
def _rvs(self, size=None, random_state=None):
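        # If X is chi2-distributed with one degree of freedom (equivalently,
        # gamma with a=0.5 and scale=2), then -log(X) follows the standard
        # Moyal distribution, which is the construction used here.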
u1 = gamma.rvs(a = 0.5, scale = 2, size=size, random_state=random_state)
return -np.log(u1)
def _pdf(self, x):
return np.exp(-0.5 * (x + np.exp(-x))) / np.sqrt(2*np.pi)
def _cdf(self, x):
return sc.erfc(np.exp(-0.5 * x) / np.sqrt(2))
def _sf(self, x):
return sc.erf(np.exp(-0.5 * x) / np.sqrt(2))
def _ppf(self, x):
return -np.log(2 * sc.erfcinv(x)**2)
def _stats(self):
mu = np.log(2) + np.euler_gamma
mu2 = np.pi**2 / 2
g1 = 28 * np.sqrt(2) * sc.zeta(3) / np.pi**3
g2 = 4.
return mu, mu2, g1, g2
def _munp(self, n):
if n == 1.0:
return np.log(2) + np.euler_gamma
elif n == 2.0:
return np.pi**2 / 2 + (np.log(2) + np.euler_gamma)**2
elif n == 3.0:
tmp1 = 1.5 * np.pi**2 * (np.log(2)+np.euler_gamma)
tmp2 = (np.log(2)+np.euler_gamma)**3
tmp3 = 14 * sc.zeta(3)
return tmp1 + tmp2 + tmp3
elif n == 4.0:
tmp1 = 4 * 14 * sc.zeta(3) * (np.log(2) + np.euler_gamma)
tmp2 = 3 * np.pi**2 * (np.log(2) + np.euler_gamma)**2
tmp3 = (np.log(2) + np.euler_gamma)**4
tmp4 = 7 * np.pi**4 / 4
return tmp1 + tmp2 + tmp3 + tmp4
else:
# return generic for higher moments
# return rv_continuous._mom1_sc(self, n, b)
return self._mom1_sc(n)
moyal = moyal_gen(name="moyal")
class nakagami_gen(rv_continuous):
r"""A Nakagami continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nakagami` is:
.. math::
f(x, \nu) = \frac{2 \nu^\nu}{\Gamma(\nu)} x^{2\nu-1} \exp(-\nu x^2)
for :math:`x >= 0`, :math:`\nu > 0`.
`nakagami` takes ``nu`` as a shape parameter for :math:`\nu`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, nu):
# nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
# x**(2*nu-1) * exp(-nu*x**2)
return 2*nu**nu/sc.gamma(nu)*(x**(2*nu-1.0))*np.exp(-nu*x*x)
def _cdf(self, x, nu):
return sc.gammainc(nu, nu*x*x)
def _ppf(self, q, nu):
return np.sqrt(1.0/nu*sc.gammaincinv(nu, q))
def _stats(self, nu):
mu = sc.gamma(nu+0.5)/sc.gamma(nu)/np.sqrt(nu)
mu2 = 1.0-mu*mu
g1 = mu * (1 - 4*nu*mu2) / 2.0 / nu / np.power(mu2, 1.5)
g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
g2 /= nu*mu2**2.0
return mu, mu2, g1, g2
nakagami = nakagami_gen(a=0.0, name="nakagami")
class ncx2_gen(rv_continuous):
r"""A non-central chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncx2` is:
.. math::
f(x, k, \lambda) = \frac{1}{2} \exp(-(\lambda+x)/2)
(x/\lambda)^{(k-2)/4} I_{(k-2)/2}(\sqrt{\lambda x})
for :math:`x >= 0` and :math:`k, \lambda > 0`. :math:`k` specifies the
degrees of freedom (denoted ``df`` in the implementation) and
:math:`\lambda` is the non-centrality parameter (denoted ``nc`` in the
implementation). :math:`I_\nu` denotes the modified Bessel function of
first order of degree :math:`\nu` (`scipy.special.iv`).
`ncx2` takes ``df`` and ``nc`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, df, nc):
return (df > 0) & (nc >= 0)
def _rvs(self, df, nc, size=None, random_state=None):
return random_state.noncentral_chisquare(df, nc, size)
def _logpdf(self, x, df, nc):
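        # Use the (central) chi2 log-pdf wherever the non-centrality
        # parameter is zero; _ncx2_log_pdf is only evaluated where nc != 0.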
cond = np.ones_like(x, dtype=bool) & (nc != 0)
return _lazywhere(cond, (x, df, nc), f=_ncx2_log_pdf, f2=chi2.logpdf)
def _pdf(self, x, df, nc):
# ncx2.pdf(x, df, nc) = exp(-(nc+x)/2) * 1/2 * (x/nc)**((df-2)/4)
# * I[(df-2)/2](sqrt(nc*x))
cond = np.ones_like(x, dtype=bool) & (nc != 0)
return _lazywhere(cond, (x, df, nc), f=_ncx2_pdf, f2=chi2.pdf)
def _cdf(self, x, df, nc):
cond = np.ones_like(x, dtype=bool) & (nc != 0)
return _lazywhere(cond, (x, df, nc), f=_ncx2_cdf, f2=chi2.cdf)
def _ppf(self, q, df, nc):
cond = np.ones_like(q, dtype=bool) & (nc != 0)
return _lazywhere(cond, (q, df, nc), f=sc.chndtrix, f2=chi2.ppf)
def _stats(self, df, nc):
val = df + 2.0*nc
return (df + nc,
2*val,
np.sqrt(8)*(val+nc)/val**1.5,
12.0*(val+2*nc)/val**2.0)
ncx2 = ncx2_gen(a=0.0, name='ncx2')
class ncf_gen(rv_continuous):
r"""A non-central F distribution continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncf` is:
.. math::
f(x, n_1, n_2, \lambda) =
\exp\left(\frac{\lambda}{2} +
\lambda n_1 \frac{x}{2(n_1 x + n_2)}
\right)
n_1^{n_1/2} n_2^{n_2/2} x^{n_1/2 - 1} \\
(n_2 + n_1 x)^{-(n_1 + n_2)/2}
\gamma(n_1/2) \gamma(1 + n_2/2) \\
\frac{L^{\frac{n_1}{2}-1}_{n_2/2}
\left(-\lambda n_1 \frac{x}{2(n_1 x + n_2)}\right)}
{B(n_1/2, n_2/2)
\gamma\left(\frac{n_1 + n_2}{2}\right)}
for :math:`n_1, n_2 > 0`, :math:`\lambda\geq 0`. Here :math:`n_1` is the
degrees of freedom in the numerator, :math:`n_2` the degrees of freedom in
the denominator, :math:`\lambda` the non-centrality parameter,
:math:`\gamma` is the logarithm of the Gamma function, :math:`L_n^k` is a
generalized Laguerre polynomial and :math:`B` is the beta function.
`ncf` takes ``df1``, ``df2`` and ``nc`` as shape parameters. If ``nc=0``,
the distribution becomes equivalent to the Fisher distribution.
%(after_notes)s
See Also
--------
scipy.stats.f : Fisher distribution
%(example)s
"""
def _argcheck(self, df1, df2, nc):
return (df1 > 0) & (df2 > 0) & (nc >= 0)
def _rvs(self, dfn, dfd, nc, size=None, random_state=None):
return random_state.noncentral_f(dfn, dfd, nc, size)
def _pdf_skip(self, x, dfn, dfd, nc):
# ncf.pdf(x, df1, df2, nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2))) *
# df1**(df1/2) * df2**(df2/2) * x**(df1/2-1) *
# (df2+df1*x)**(-(df1+df2)/2) *
# gamma(df1/2)*gamma(1+df2/2) *
# L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2))) /
# (B(v1/2, v2/2) * gamma((v1+v2)/2))
n1, n2 = dfn, dfd
term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + sc.gammaln(n1/2.)+sc.gammaln(1+n2/2.)
term -= sc.gammaln((n1+n2)/2.0)
Px = np.exp(term)
Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
Px *= (n2+n1*x)**(-(n1+n2)/2)
Px *= sc.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)), n2/2, n1/2-1)
Px /= sc.beta(n1/2, n2/2)
# This function does not have a return. Drop it for now, the generic
# function seems to work OK.
def _cdf(self, x, dfn, dfd, nc):
return sc.ncfdtr(dfn, dfd, nc, x)
def _ppf(self, q, dfn, dfd, nc):
return sc.ncfdtri(dfn, dfd, nc, q)
def _munp(self, n, dfn, dfd, nc):
val = (dfn * 1.0/dfd)**n
term = sc.gammaln(n+0.5*dfn) + sc.gammaln(0.5*dfd-n) - sc.gammaln(dfd*0.5)
val *= np.exp(-nc / 2.0+term)
val *= sc.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
return val
def _stats(self, dfn, dfd, nc):
# Note: the rv_continuous class ensures that dfn > 0 when this function
# is called, so we don't have to check for division by zero with dfn
# in the following.
mu_num = dfd * (dfn + nc)
mu_den = dfn * (dfd - 2)
mu = np.full_like(mu_num, dtype=np.float64, fill_value=np.inf)
np.true_divide(mu_num, mu_den, where=dfd > 2, out=mu)
mu2_num = 2*((dfn + nc)**2 + (dfn + 2*nc)*(dfd - 2))*(dfd/dfn)**2
mu2_den = (dfd - 2)**2 * (dfd - 4)
mu2 = np.full_like(mu2_num, dtype=np.float64, fill_value=np.inf)
np.true_divide(mu2_num, mu2_den, where=dfd > 4, out=mu2)
return mu, mu2, None, None
ncf = ncf_gen(a=0.0, name='ncf')
class t_gen(rv_continuous):
r"""A Student's t continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `t` is:
.. math::
f(x, \nu) = \frac{\Gamma((\nu+1)/2)}
{\sqrt{\pi \nu} \Gamma(\nu/2)}
(1+x^2/\nu)^{-(\nu+1)/2}
where :math:`x` is a real number and the degrees of freedom parameter
:math:`\nu` (denoted ``df`` in the implementation) satisfies
:math:`\nu > 0`. :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
%(after_notes)s
%(example)s
"""
def _argcheck(self, df):
return df > 0
def _rvs(self, df, size=None, random_state=None):
return random_state.standard_t(df, size=size)
def _pdf(self, x, df):
# gamma((df+1)/2)
# t.pdf(x, df) = ---------------------------------------------------
# sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2)
r = np.asarray(df*1.0)
Px = np.exp(sc.gammaln((r+1)/2)-sc.gammaln(r/2))
Px /= np.sqrt(r*np.pi)*(1+(x**2)/r)**((r+1)/2)
return Px
def _logpdf(self, x, df):
r = df*1.0
lPx = sc.gammaln((r+1)/2)-sc.gammaln(r/2)
lPx -= 0.5*np.log(r*np.pi) + (r+1)/2*np.log(1+(x**2)/r)
return lPx
def _cdf(self, x, df):
return sc.stdtr(df, x)
def _sf(self, x, df):
return sc.stdtr(df, -x)
def _ppf(self, q, df):
return sc.stdtrit(df, q)
def _isf(self, q, df):
return -sc.stdtrit(df, q)
def _stats(self, df):
mu = np.where(df > 1, 0.0, np.inf)
mu2 = _lazywhere(df > 2, (df,),
lambda df: df / (df-2.0),
np.inf)
mu2 = np.where(df <= 1, np.nan, mu2)
g1 = np.where(df > 3, 0.0, np.nan)
g2 = _lazywhere(df > 4, (df,),
lambda df: 6.0 / (df-4.0),
np.inf)
g2 = np.where(df <= 2, np.nan, g2)
return mu, mu2, g1, g2
t = t_gen(name='t')
class nct_gen(rv_continuous):
r"""A non-central Student's t continuous random variable.
%(before_notes)s
Notes
-----
If :math:`Y` is a standard normal random variable and :math:`V` is
an independent chi-square random variable (`chi2`) with :math:`k` degrees
of freedom, then
.. math::
X = \frac{Y + c}{\sqrt{V/k}}
has a non-central Student's t distribution on the real line.
The degrees of freedom parameter :math:`k` (denoted ``df`` in the
implementation) satisfies :math:`k > 0` and the noncentrality parameter
:math:`c` (denoted ``nc`` in the implementation) is a real number.
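    A sampling sketch of this construction (sample size, seed and parameter values chosen
    arbitrarily):
    >>> import numpy as np
    >>> rng = np.random.default_rng(12345)
    >>> df, nc = 5.0, 1.2
    >>> y = rng.standard_normal(1000)
    >>> v = rng.chisquare(df, size=1000)
    >>> samples = (y + nc) / np.sqrt(v / df)  # approximately nct(df, nc) distributed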
%(after_notes)s
%(example)s
"""
def _argcheck(self, df, nc):
return (df > 0) & (nc == nc)
def _rvs(self, df, nc, size=None, random_state=None):
n = norm.rvs(loc=nc, size=size, random_state=random_state)
c2 = chi2.rvs(df, size=size, random_state=random_state)
return n * np.sqrt(df) / np.sqrt(c2)
def _pdf(self, x, df, nc):
n = df*1.0
nc = nc*1.0
x2 = x*x
ncx2 = nc*nc*x2
fac1 = n + x2
trm1 = n/2.*np.log(n) + sc.gammaln(n+1)
trm1 -= n*np.log(2)+nc*nc/2.+(n/2.)*np.log(fac1)+sc.gammaln(n/2.)
Px = np.exp(trm1)
valF = ncx2 / (2*fac1)
trm1 = np.sqrt(2)*nc*x*sc.hyp1f1(n/2+1, 1.5, valF)
trm1 /= np.asarray(fac1*sc.gamma((n+1)/2))
trm2 = sc.hyp1f1((n+1)/2, 0.5, valF)
trm2 /= np.asarray(np.sqrt(fac1)*sc.gamma(n/2+1))
Px *= trm1+trm2
return Px
def _cdf(self, x, df, nc):
return sc.nctdtr(df, nc, x)
def _ppf(self, q, df, nc):
return sc.nctdtrit(df, nc, q)
def _stats(self, df, nc, moments='mv'):
#
# See D. Hogben, R.S. Pinkham, and M.B. Wilk,
# 'The moments of the non-central t-distribution'
        # Biometrika 48, p. 465 (1961).
# e.g. https://www.jstor.org/stable/2332772 (gated)
#
mu, mu2, g1, g2 = None, None, None, None
gfac = sc.gamma(df/2.-0.5) / sc.gamma(df/2.)
c11 = np.sqrt(df/2.) * gfac
c20 = df / (df-2.)
c22 = c20 - c11*c11
mu = np.where(df > 1, nc*c11, np.inf)
mu2 = np.where(df > 2, c22*nc*nc + c20, np.inf)
if 's' in moments:
c33t = df * (7.-2.*df) / (df-2.) / (df-3.) + 2.*c11*c11
c31t = 3.*df / (df-2.) / (df-3.)
mu3 = (c33t*nc*nc + c31t) * c11*nc
g1 = np.where(df > 3, mu3 / np.power(mu2, 1.5), np.nan)
# kurtosis
if 'k' in moments:
c44 = df*df / (df-2.) / (df-4.)
c44 -= c11*c11 * 2.*df*(5.-df) / (df-2.) / (df-3.)
c44 -= 3.*c11**4
c42 = df / (df-4.) - c11*c11 * (df-1.) / (df-3.)
c42 *= 6.*df / (df-2.)
c40 = 3.*df*df / (df-2.) / (df-4.)
mu4 = c44 * nc**4 + c42*nc**2 + c40
g2 = np.where(df > 4, mu4/mu2**2 - 3., np.nan)
return mu, mu2, g1, g2
nct = nct_gen(name="nct")
class pareto_gen(rv_continuous):
r"""A Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pareto` is:
.. math::
f(x, b) = \frac{b}{x^{b+1}}
for :math:`x \ge 1`, :math:`b > 0`.
`pareto` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, b):
# pareto.pdf(x, b) = b / x**(b+1)
return b * x**(-b-1)
def _cdf(self, x, b):
return 1 - x**(-b)
def _ppf(self, q, b):
return pow(1-q, -1.0/b)
def _sf(self, x, b):
return x**(-b)
def _stats(self, b, moments='mv'):
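        # The k-th raw moment of the Pareto distribution is finite only for
        # b > k, so each statistic below is filled in only where its
        # existence condition on b holds (and is inf/nan otherwise).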
mu, mu2, g1, g2 = None, None, None, None
if 'm' in moments:
mask = b > 1
bt = np.extract(mask, b)
mu = valarray(np.shape(b), value=np.inf)
np.place(mu, mask, bt / (bt-1.0))
if 'v' in moments:
mask = b > 2
bt = np.extract(mask, b)
mu2 = valarray(np.shape(b), value=np.inf)
np.place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
if 's' in moments:
mask = b > 3
bt = np.extract(mask, b)
g1 = valarray(np.shape(b), value=np.nan)
vals = 2 * (bt + 1.0) * np.sqrt(bt - 2.0) / ((bt - 3.0) * np.sqrt(bt))
np.place(g1, mask, vals)
if 'k' in moments:
mask = b > 4
bt = np.extract(mask, b)
g2 = valarray(np.shape(b), value=np.nan)
vals = (6.0*np.polyval([1.0, 1.0, -6, -2], bt) /
np.polyval([1.0, -7.0, 12.0, 0.0], bt))
np.place(g2, mask, vals)
return mu, mu2, g1, g2
def _entropy(self, c):
return 1 + 1.0/c - np.log(c)
pareto = pareto_gen(a=1.0, name="pareto")
class lomax_gen(rv_continuous):
r"""A Lomax (Pareto of the second kind) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `lomax` is:
.. math::
f(x, c) = \frac{c}{(1+x)^{c+1}}
for :math:`x \ge 0`, :math:`c > 0`.
`lomax` takes ``c`` as a shape parameter for :math:`c`.
`lomax` is a special case of `pareto` with ``loc=-1.0``.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# lomax.pdf(x, c) = c / (1+x)**(c+1)
return c*1.0/(1.0+x)**(c+1.0)
def _logpdf(self, x, c):
return np.log(c) - (c+1)*sc.log1p(x)
def _cdf(self, x, c):
return -sc.expm1(-c*sc.log1p(x))
def _sf(self, x, c):
return np.exp(-c*sc.log1p(x))
def _logsf(self, x, c):
return -c*sc.log1p(x)
def _ppf(self, q, c):
return sc.expm1(-sc.log1p(-q)/c)
def _stats(self, c):
mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
return mu, mu2, g1, g2
def _entropy(self, c):
return 1+1.0/c-np.log(c)
lomax = lomax_gen(a=0.0, name="lomax")
class pearson3_gen(rv_continuous):
r"""A pearson type III continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pearson3` is:
.. math::
f(x, skew) = \frac{|\beta|}{\Gamma(\alpha)}
(\beta (x - \zeta))^{\alpha - 1}
\exp(-\beta (x - \zeta))
where:
.. math::
            \beta = \frac{2}{skew \cdot stddev}
            \alpha = (stddev \cdot \beta)^2
            \zeta = loc - \frac{\alpha}{\beta}
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
`pearson3` takes ``skew`` as a shape parameter for :math:`skew`.
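    As a numerical sketch of these relations (``skew``, ``stddev`` and ``loc`` chosen
    arbitrarily):
    >>> skew, stddev, loc = 0.5, 2.0, 10.0
    >>> beta = 2.0 / (skew * stddev)
    >>> alpha = (stddev * beta)**2
    >>> zeta = loc - alpha / beta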
%(after_notes)s
%(example)s
References
----------
R.W. Vogel and D.E. McMartin, "Probability Plot Goodness-of-Fit and
Skewness Estimation Procedures for the Pearson Type 3 Distribution", Water
Resources Research, Vol.27, 3149-3158 (1991).
L.R. Salvosa, "Tables of Pearson's Type III Function", Ann. Math. Statist.,
Vol.1, 191-198 (1930).
"Using Modern Computing Tools to Fit the Pearson Type III Distribution to
Aviation Loads Data", Office of Aviation Research (2003).
"""
def _preprocess(self, x, skew):
# The real 'loc' and 'scale' are handled in the calling pdf(...). The
# local variables 'loc' and 'scale' within pearson3._pdf are set to
# the defaults just to keep them as part of the equations for
# documentation.
loc = 0.0
scale = 1.0
# If skew is small, return _norm_pdf. The divide between pearson3
# and norm was found by brute force and is approximately a skew of
# 0.000016. No one, I hope, would actually use a skew value even
# close to this small.
norm2pearson_transition = 0.000016
ans, x, skew = np.broadcast_arrays([1.0], x, skew)
ans = ans.copy()
# mask is True where skew is small enough to use the normal approx.
mask = np.absolute(skew) < norm2pearson_transition
invmask = ~mask
beta = 2.0 / (skew[invmask] * scale)
alpha = (scale * beta)**2
zeta = loc - alpha / beta
transx = beta * (x[invmask] - zeta)
return ans, x, transx, mask, invmask, beta, alpha, zeta
def _argcheck(self, skew):
# The _argcheck function in rv_continuous only allows positive
# arguments. The skew argument for pearson3 can be zero (which I want
# to handle inside pearson3._pdf) or negative. So just return True
# for all skew args.
return np.ones(np.shape(skew), dtype=bool)
def _stats(self, skew):
_, _, _, _, _, beta, alpha, zeta = (
self._preprocess([1], skew))
m = zeta + alpha / beta
v = alpha / (beta**2)
s = 2.0 / (alpha**0.5) * np.sign(beta)
k = 6.0 / alpha
return m, v, s, k
def _pdf(self, x, skew):
# pearson3.pdf(x, skew) = abs(beta) / gamma(alpha) *
# (beta * (x - zeta))**(alpha - 1) * exp(-beta*(x - zeta))
# Do the calculation in _logpdf since helps to limit
# overflow/underflow problems
ans = np.exp(self._logpdf(x, skew))
if ans.ndim == 0:
if np.isnan(ans):
return 0.0
return ans
ans[np.isnan(ans)] = 0.0
return ans
def _logpdf(self, x, skew):
# PEARSON3 logpdf GAMMA logpdf
# np.log(abs(beta))
# + (alpha - 1)*np.log(beta*(x - zeta)) + (a - 1)*np.log(x)
# - beta*(x - zeta) - x
        #   - sc.gammaln(alpha)                      - sc.gammaln(a)
ans, x, transx, mask, invmask, beta, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = np.log(_norm_pdf(x[mask]))
ans[invmask] = np.log(abs(beta)) + gamma._logpdf(transx, alpha)
return ans
def _cdf(self, x, skew):
ans, x, transx, mask, invmask, _, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = _norm_cdf(x[mask])
ans[invmask] = gamma._cdf(transx, alpha)
return ans
def _rvs(self, skew, size=None, random_state=None):
skew = np.broadcast_to(skew, size)
ans, _, _, mask, invmask, beta, alpha, zeta = (
self._preprocess([0], skew))
nsmall = mask.sum()
nbig = mask.size - nsmall
ans[mask] = random_state.standard_normal(nsmall)
ans[invmask] = random_state.standard_gamma(alpha, nbig)/beta + zeta
if size == ():
ans = ans[0]
return ans
def _ppf(self, q, skew):
ans, q, _, mask, invmask, beta, alpha, zeta = (
self._preprocess(q, skew))
ans[mask] = _norm_ppf(q[mask])
ans[invmask] = sc.gammaincinv(alpha, q[invmask])/beta + zeta
return ans
pearson3 = pearson3_gen(name="pearson3")
class powerlaw_gen(rv_continuous):
r"""A power-function continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlaw` is:
.. math::
f(x, a) = a x^{a-1}
for :math:`0 \le x \le 1`, :math:`a > 0`.
`powerlaw` takes ``a`` as a shape parameter for :math:`a`.
%(after_notes)s
`powerlaw` is a special case of `beta` with ``b=1``.
%(example)s
"""
def _pdf(self, x, a):
# powerlaw.pdf(x, a) = a * x**(a-1)
return a*x**(a-1.0)
def _logpdf(self, x, a):
return np.log(a) + sc.xlogy(a - 1, x)
def _cdf(self, x, a):
return x**(a*1.0)
def _logcdf(self, x, a):
return a*np.log(x)
def _ppf(self, q, a):
return pow(q, 1.0/a)
def _stats(self, a):
return (a / (a + 1.0),
a / (a + 2.0) / (a + 1.0) ** 2,
-2.0 * ((a - 1.0) / (a + 3.0)) * np.sqrt((a + 2.0) / a),
6 * np.polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4)))
def _entropy(self, a):
return 1 - 1.0/a - np.log(a)
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw")
class powerlognorm_gen(rv_continuous):
r"""A power log-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlognorm` is:
.. math::
f(x, c, s) = \frac{c}{x s} \phi(\log(x)/s)
(\Phi(-\log(x)/s))^{c-1}
where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf,
and :math:`x > 0`, :math:`s, c > 0`.
`powerlognorm` takes :math:`c` and :math:`s` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c, s):
# powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) *
# (Phi(-log(x)/s))**(c-1),
return (c/(x*s) * _norm_pdf(np.log(x)/s) *
pow(_norm_cdf(-np.log(x)/s), c*1.0-1.0))
def _cdf(self, x, c, s):
return 1.0 - pow(_norm_cdf(-np.log(x)/s), c*1.0)
def _ppf(self, q, c, s):
return np.exp(-s * _norm_ppf(pow(1.0 - q, 1.0 / c)))
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm")
class powernorm_gen(rv_continuous):
r"""A power normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powernorm` is:
.. math::
f(x, c) = c \phi(x) (\Phi(-x))^{c-1}
where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf,
and :math:`x >= 0`, :math:`c > 0`.
`powernorm` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1)
return c*_norm_pdf(x) * (_norm_cdf(-x)**(c-1.0))
def _logpdf(self, x, c):
return np.log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)
def _cdf(self, x, c):
return 1.0-_norm_cdf(-x)**(c*1.0)
def _ppf(self, q, c):
return -_norm_ppf(pow(1.0 - q, 1.0 / c))
powernorm = powernorm_gen(name='powernorm')
class rdist_gen(rv_continuous):
r"""An R-distributed (symmetric beta) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rdist` is:
.. math::
f(x, c) = \frac{(1-x^2)^{c/2-1}}{B(1/2, c/2)}
for :math:`-1 \le x \le 1`, :math:`c > 0`. `rdist` is also called the
symmetric beta distribution: if B has a `beta` distribution with
    parameters (c/2, c/2), then X = 2*B - 1 follows an R-distribution with
parameter c.
`rdist` takes ``c`` as a shape parameter for :math:`c`.
This distribution includes the following distribution kernels as
special cases::
c = 2: uniform
c = 3: `semicircular`
c = 4: Epanechnikov (parabolic)
c = 6: quartic (biweight)
c = 8: triweight
%(after_notes)s
%(example)s
"""
# use relation to the beta distribution for pdf, cdf, etc
def _pdf(self, x, c):
return 0.5*beta._pdf((x + 1)/2, c/2, c/2)
def _logpdf(self, x, c):
return -np.log(2) + beta._logpdf((x + 1)/2, c/2, c/2)
def _cdf(self, x, c):
return beta._cdf((x + 1)/2, c/2, c/2)
def _ppf(self, q, c):
return 2*beta._ppf(q, c/2, c/2) - 1
def _rvs(self, c, size=None, random_state=None):
return 2 * random_state.beta(c/2, c/2, size) - 1
def _munp(self, n, c):
numerator = (1 - (n % 2)) * sc.beta((n + 1.0) / 2, c / 2.0)
return numerator / sc.beta(1. / 2, c / 2.)
rdist = rdist_gen(a=-1.0, b=1.0, name="rdist")
class rayleigh_gen(rv_continuous):
r"""A Rayleigh continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rayleigh` is:
.. math::
f(x) = x \exp(-x^2/2)
for :math:`x \ge 0`.
`rayleigh` is a special case of `chi` with ``df=2``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, size=None, random_state=None):
return chi.rvs(2, size=size, random_state=random_state)
def _pdf(self, r):
# rayleigh.pdf(r) = r * exp(-r**2/2)
return np.exp(self._logpdf(r))
def _logpdf(self, r):
return np.log(r) - 0.5 * r * r
def _cdf(self, r):
return -sc.expm1(-0.5 * r**2)
def _ppf(self, q):
return np.sqrt(-2 * sc.log1p(-q))
def _sf(self, r):
return np.exp(self._logsf(r))
def _logsf(self, r):
return -0.5 * r * r
def _isf(self, q):
return np.sqrt(-2 * np.log(q))
def _stats(self):
val = 4 - np.pi
return (np.sqrt(np.pi/2),
val/2,
2*(np.pi-3)*np.sqrt(np.pi)/val**1.5,
6*np.pi/val-16/val**2)
def _entropy(self):
return _EULER/2.0 + 1 - 0.5*np.log(2)
rayleigh = rayleigh_gen(a=0.0, name="rayleigh")
class reciprocal_gen(rv_continuous):
r"""A loguniform or reciprocal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for this class is:
.. math::
f(x, a, b) = \frac{1}{x \log(b/a)}
for :math:`a \le x \le b`, :math:`b > a > 0`. This class takes
:math:`a` and :math:`b` as shape parameters. %(after_notes)s
%(example)s
This doesn't show the equal probability of ``0.01``, ``0.1`` and
``1``. This is best when the x-axis is log-scaled:
>>> import numpy as np
>>> fig, ax = plt.subplots(1, 1)
>>> ax.hist(np.log10(r))
>>> ax.set_ylabel("Frequency")
>>> ax.set_xlabel("Value of random variable")
>>> ax.xaxis.set_major_locator(plt.FixedLocator([-2, -1, 0]))
>>> ticks = ["$10^{{ {} }}$".format(i) for i in [-2, -1, 0]]
>>> ax.set_xticklabels(ticks) # doctest: +SKIP
>>> plt.show()
This random variable will be log-uniform regardless of the base chosen for
``a`` and ``b``. Let's specify with base ``2`` instead:
>>> rvs = %(name)s(2**-2, 2**0).rvs(size=1000)
Values of ``1/4``, ``1/2`` and ``1`` are equally likely with this random
variable. Here's the histogram:
>>> fig, ax = plt.subplots(1, 1)
>>> ax.hist(np.log2(rvs))
>>> ax.set_ylabel("Frequency")
>>> ax.set_xlabel("Value of random variable")
>>> ax.xaxis.set_major_locator(plt.FixedLocator([-2, -1, 0]))
>>> ticks = ["$2^{{ {} }}$".format(i) for i in [-2, -1, 0]]
>>> ax.set_xticklabels(ticks) # doctest: +SKIP
>>> plt.show()
"""
def _argcheck(self, a, b):
return (a > 0) & (b > a)
def _get_support(self, a, b):
return a, b
def _pdf(self, x, a, b):
# reciprocal.pdf(x, a, b) = 1 / (x*log(b/a))
return 1.0 / (x * np.log(b * 1.0 / a))
def _logpdf(self, x, a, b):
return -np.log(x) - np.log(np.log(b * 1.0 / a))
def _cdf(self, x, a, b):
return (np.log(x)-np.log(a)) / np.log(b * 1.0 / a)
def _ppf(self, q, a, b):
return a*pow(b*1.0/a, q)
def _munp(self, n, a, b):
return 1.0/np.log(b*1.0/a) / n * (pow(b*1.0, n) - pow(a*1.0, n))
def _entropy(self, a, b):
return 0.5*np.log(a*b)+np.log(np.log(b*1.0/a))
loguniform = reciprocal_gen(name="loguniform")
reciprocal = reciprocal_gen(name="reciprocal")
class rice_gen(rv_continuous):
r"""A Rice continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rice` is:
.. math::
f(x, b) = x \exp(- \frac{x^2 + b^2}{2}) I_0(x b)
    for :math:`x \ge 0`, :math:`b > 0`. :math:`I_0` is the modified Bessel
function of order zero (`scipy.special.i0`).
`rice` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
The Rice distribution describes the length, :math:`r`, of a 2-D vector with
components :math:`(U+u, V+v)`, where :math:`U, V` are constant, :math:`u,
v` are independent Gaussian random variables with standard deviation
:math:`s`. Let :math:`R = \sqrt{U^2 + V^2}`. Then the pdf of :math:`r` is
``rice.pdf(x, R/s, scale=s)``.
%(example)s
"""
def _argcheck(self, b):
return b >= 0
def _rvs(self, b, size=None, random_state=None):
# https://en.wikipedia.org/wiki/Rice_distribution
t = b/np.sqrt(2) + random_state.standard_normal(size=(2,) + size)
return np.sqrt((t*t).sum(axis=0))
def _cdf(self, x, b):
return sc.chndtr(np.square(x), 2, np.square(b))
def _ppf(self, q, b):
return np.sqrt(sc.chndtrix(q, 2, np.square(b)))
def _pdf(self, x, b):
# rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
#
# We use (x**2 + b**2)/2 = ((x-b)**2)/2 + xb.
# The factor of np.exp(-xb) is then included in the i0e function
# in place of the modified Bessel function, i0, improving
# numerical stability for large values of xb.
return x * np.exp(-(x-b)*(x-b)/2.0) * sc.i0e(x*b)
def _munp(self, n, b):
nd2 = n/2.0
n1 = 1 + nd2
b2 = b*b/2.0
return (2.0**(nd2) * np.exp(-b2) * sc.gamma(n1) *
sc.hyp1f1(n1, 1, b2))
rice = rice_gen(a=0.0, name="rice")
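# Illustrative sketch (not part of scipy's public API): a simulation check of
# the vector-length interpretation described in the `rice` docstring above.
# The function name and all constants are hypothetical, chosen for the demo.
def _rice_vector_length_demo(n=100000, seed=1234):
    rng = np.random.RandomState(seed)
    U, V, s = 3.0, 4.0, 2.0
    # length of (U+u, V+v) with u, v ~ Normal(0, s**2)
    r = np.hypot(U + s*rng.standard_normal(n), V + s*rng.standard_normal(n))
    R = np.hypot(U, V)
    # The empirical mean should approach rice.mean(R/s, scale=s) as n grows.
    return r.mean(), rice.mean(R/s, scale=s)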
# FIXME: PPF does not work.
class recipinvgauss_gen(rv_continuous):
r"""A reciprocal inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `recipinvgauss` is:
.. math::
f(x, \mu) = \frac{1}{\sqrt{2\pi x}}
\exp\left(\frac{-(1-\mu x)^2}{2\mu^2x}\right)
for :math:`x \ge 0`.
`recipinvgauss` takes ``mu`` as a shape parameter for :math:`\mu`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, mu):
# recipinvgauss.pdf(x, mu) =
# 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
return 1.0/np.sqrt(2*np.pi*x)*np.exp(-(1-mu*x)**2.0 / (2*x*mu**2.0))
def _logpdf(self, x, mu):
return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*np.log(2*np.pi*x)
def _cdf(self, x, mu):
trm1 = 1.0/mu - x
trm2 = 1.0/mu + x
isqx = 1.0/np.sqrt(x)
return 1.0-_norm_cdf(isqx*trm1)-np.exp(2.0/mu)*_norm_cdf(-isqx*trm2)
def _rvs(self, mu, size=None, random_state=None):
return 1.0/random_state.wald(mu, 1.0, size=size)
recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss')
class semicircular_gen(rv_continuous):
r"""A semicircular continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `semicircular` is:
.. math::
f(x) = \frac{2}{\pi} \sqrt{1-x^2}
for :math:`-1 \le x \le 1`.
The distribution is a special case of `rdist` with `c = 3`.
%(after_notes)s
See Also
--------
rdist
References
----------
.. [1] "Wigner semicircle distribution",
https://en.wikipedia.org/wiki/Wigner_semicircle_distribution
%(example)s
"""
def _pdf(self, x):
return 2.0/np.pi*np.sqrt(1-x*x)
def _logpdf(self, x):
return np.log(2/np.pi) + 0.5*np.log1p(-x*x)
def _cdf(self, x):
return 0.5+1.0/np.pi*(x*np.sqrt(1-x*x) + np.arcsin(x))
def _ppf(self, q):
return rdist._ppf(q, 3)
def _rvs(self, size=None, random_state=None):
# generate values uniformly distributed on the area under the pdf
# (semi-circle) by randomly generating the radius and angle
r = np.sqrt(random_state.uniform(size=size))
a = np.cos(np.pi * random_state.uniform(size=size))
return r * a
def _stats(self):
return 0, 0.25, 0, -1.0
def _entropy(self):
return 0.64472988584940017414
semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular")
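# Illustrative sketch (not part of scipy's public API) of the radius/angle
# trick used in ``semicircular._rvs`` above: points drawn uniformly on the
# half-disc have a semicircular x-coordinate. The demo name is hypothetical.
def _semicircular_rvs_demo(n=100000, seed=0):
    rng = np.random.RandomState(seed)
    r = np.sqrt(rng.uniform(size=n))         # radius: sqrt(U) for uniform area
    a = np.cos(np.pi * rng.uniform(size=n))  # x-component of a uniform angle
    x = r * a
    # Empirical variance should be close to the exact value 0.25 (see _stats).
    return x.var()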
class skew_norm_gen(rv_continuous):
r"""A skew-normal random variable.
%(before_notes)s
Notes
-----
The pdf is::
skewnorm.pdf(x, a) = 2 * norm.pdf(x) * norm.cdf(a*x)
    `skewnorm` takes a real number :math:`a` as a skewness parameter.
When ``a = 0`` the distribution is identical to a normal distribution
(`norm`). `rvs` implements the method of [1]_.
%(after_notes)s
%(example)s
References
----------
.. [1] A. Azzalini and A. Capitanio (1999). Statistical applications of the
multivariate skew-normal distribution. J. Roy. Statist. Soc., B 61, 579-602.
https://arxiv.org/abs/0911.2093
"""
def _argcheck(self, a):
return np.isfinite(a)
def _pdf(self, x, a):
return 2.*_norm_pdf(x)*_norm_cdf(a*x)
def _cdf_single(self, x, *args):
_a, _b = self._get_support(*args)
if x <= 0:
cdf = integrate.quad(self._pdf, _a, x, args=args)[0]
else:
t1 = integrate.quad(self._pdf, _a, 0, args=args)[0]
t2 = integrate.quad(self._pdf, 0, x, args=args)[0]
cdf = t1 + t2
if cdf > 1:
# Presumably numerical noise, e.g. 1.0000000000000002
cdf = 1.0
return cdf
def _sf(self, x, a):
return self._cdf(-x, -a)
def _rvs(self, a, size=None, random_state=None):
u0 = random_state.normal(size=size)
v = random_state.normal(size=size)
d = a/np.sqrt(1 + a**2)
u1 = d*u0 + v*np.sqrt(1 - d**2)
return np.where(u0 >= 0, u1, -u1)
def _stats(self, a, moments='mvsk'):
output = [None, None, None, None]
const = np.sqrt(2/np.pi) * a/np.sqrt(1 + a**2)
if 'm' in moments:
output[0] = const
if 'v' in moments:
output[1] = 1 - const**2
if 's' in moments:
output[2] = ((4 - np.pi)/2) * (const/np.sqrt(1 - const**2))**3
if 'k' in moments:
output[3] = (2*(np.pi - 3)) * (const**4/(1 - const**2)**2)
return output
skewnorm = skew_norm_gen(name='skewnorm')
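# Illustrative sketch (not part of scipy's public API): with ``a = 0`` the
# skew-normal pdf above reduces to the standard normal, because
# norm.cdf(0*x) == 0.5. Uses the module-level `_norm_pdf` helper; the demo
# name is hypothetical.
def _skewnorm_reduces_to_norm_demo():
    x = np.linspace(-3.0, 3.0, 13)
    # Maximum pointwise difference; should be ~0.
    return np.max(np.abs(skewnorm.pdf(x, 0) - _norm_pdf(x)))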
class trapz_gen(rv_continuous):
r"""A trapezoidal continuous random variable.
%(before_notes)s
Notes
-----
The trapezoidal distribution can be represented with an up-sloping line
from ``loc`` to ``(loc + c*scale)``, then constant to ``(loc + d*scale)``
and then downsloping from ``(loc + d*scale)`` to ``(loc+scale)``. This
defines the trapezoid base from ``loc`` to ``(loc+scale)`` and the flat
top from ``c`` to ``d`` proportional to the position along the base
with ``0 <= c <= d <= 1``. When ``c=d``, this is equivalent to `triang`
with the same values for `loc`, `scale` and `c`.
The method of [1]_ is used for computing moments.
`trapz` takes :math:`c` and :math:`d` as shape parameters.
%(after_notes)s
    The standard form is in the range [0, 1], with the flat top running
    from ``c`` to ``d``.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
References
----------
.. [1] Kacker, R.N. and Lawrence, J.F. (2007). Trapezoidal and triangular
distributions for Type B evaluation of standard uncertainty.
Metrologia 44, 117–127. https://doi.org/10.1088/0026-1394/44/2/003
"""
def _argcheck(self, c, d):
return (c >= 0) & (c <= 1) & (d >= 0) & (d <= 1) & (d >= c)
def _pdf(self, x, c, d):
u = 2 / (d-c+1)
return _lazyselect([x < c,
(c <= x) & (x <= d),
x > d],
[lambda x, c, d, u: u * x / c,
lambda x, c, d, u: u,
lambda x, c, d, u: u * (1-x) / (1-d)],
(x, c, d, u))
def _cdf(self, x, c, d):
return _lazyselect([x < c,
(c <= x) & (x <= d),
x > d],
[lambda x, c, d: x**2 / c / (d-c+1),
lambda x, c, d: (c + 2 * (x-c)) / (d-c+1),
lambda x, c, d: 1-((1-x) ** 2
/ (d-c+1) / (1-d))],
(x, c, d))
def _ppf(self, q, c, d):
qc, qd = self._cdf(c, c, d), self._cdf(d, c, d)
condlist = [q < qc, q <= qd, q > qd]
choicelist = [np.sqrt(q * c * (1 + d - c)),
0.5 * q * (1 + d - c) + 0.5 * c,
1 - np.sqrt((1 - q) * (d - c + 1) * (1 - d))]
return np.select(condlist, choicelist)
def _munp(self, n, c, d):
# Using the parameterization from Kacker, 2007, with
# a=bottom left, c=top left, d=top right, b=bottom right, then
# E[X^n] = h/(n+1)/(n+2) [(b^{n+2}-d^{n+2})/(b-d)
# - ((c^{n+2} - a^{n+2})/(c-a)]
# with h = 2/((b-a) - (d-c)). The corresponding parameterization
# in scipy, has a'=loc, c'=loc+c*scale, d'=loc+d*scale, b'=loc+scale,
# which for standard form reduces to a'=0, b'=1, c'=c, d'=d.
# Substituting into E[X^n] gives the bd' term as (1 - d^{n+2})/(1 - d)
# and the ac' term as c^{n-1} for the standard form. The bd' term has
# numerical difficulties near d=1, so replace (1 - d^{n+2})/(1-d)
# with expm1((n+2)*log(d))/(d-1).
# Testing with n=18 for c=(1e-30,1-eps) shows that this is stable.
# We still require an explicit test for d=1 to prevent divide by zero,
# and now a test for d=0 to prevent log(0).
ab_term = c**(n+1)
dc_term = _lazyselect(
[d == 0.0, (0.0 < d) & (d < 1.0), d == 1.0],
[lambda d: 1.0,
lambda d: np.expm1((n+2) * np.log(d)) / (d-1.0),
lambda d: n+2],
[d])
val = 2.0 / (1.0+d-c) * (dc_term - ab_term) / ((n+1) * (n+2))
return val
def _entropy(self, c, d):
# Using the parameterization from Wikipedia (van Dorp, 2003)
# with a=bottom left, c=top left, d=top right, b=bottom right
# gives a'=loc, b'=loc+c*scale, c'=loc+d*scale, d'=loc+scale,
# which for loc=0, scale=1 is a'=0, b'=c, c'=d, d'=1.
# Substituting into the entropy formula from Wikipedia gives
# the following result.
return 0.5 * (1.0-d+c) / (1.0+d-c) + np.log(0.5 * (1.0+d-c))
trapz = trapz_gen(a=0.0, b=1.0, name="trapz")
class triang_gen(rv_continuous):
r"""A triangular continuous random variable.
%(before_notes)s
Notes
-----
The triangular distribution can be represented with an up-sloping line from
``loc`` to ``(loc + c*scale)`` and then downsloping for ``(loc + c*scale)``
to ``(loc + scale)``.
`triang` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
The standard form is in the range [0, 1] with c the mode.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
"""
def _rvs(self, c, size=None, random_state=None):
return random_state.triangular(0, c, 1, size)
def _argcheck(self, c):
return (c >= 0) & (c <= 1)
def _pdf(self, x, c):
# 0: edge case where c=0
# 1: generalised case for x < c, don't use x <= c, as it doesn't cope
# with c = 0.
# 2: generalised case for x >= c, but doesn't cope with c = 1
# 3: edge case where c=1
r = _lazyselect([c == 0,
x < c,
(x >= c) & (c != 1),
c == 1],
[lambda x, c: 2 - 2 * x,
lambda x, c: 2 * x / c,
lambda x, c: 2 * (1 - x) / (1 - c),
lambda x, c: 2 * x],
(x, c))
return r
def _cdf(self, x, c):
r = _lazyselect([c == 0,
x < c,
(x >= c) & (c != 1),
c == 1],
[lambda x, c: 2*x - x*x,
lambda x, c: x * x / c,
lambda x, c: (x*x - 2*x + c) / (c-1),
lambda x, c: x * x],
(x, c))
return r
def _ppf(self, q, c):
return np.where(q < c, np.sqrt(c * q), 1-np.sqrt((1-c) * (1-q)))
def _stats(self, c):
return ((c+1.0)/3.0,
(1.0-c+c*c)/18,
np.sqrt(2)*(2*c-1)*(c+1)*(c-2) / (5*np.power((1.0-c+c*c), 1.5)),
-3.0/5.0)
def _entropy(self, c):
return 0.5-np.log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang")
class truncexpon_gen(rv_continuous):
r"""A truncated exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `truncexpon` is:
.. math::
f(x, b) = \frac{\exp(-x)}{1 - \exp(-b)}
    for :math:`0 \le x \le b`.
`truncexpon` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, b):
return b > 0
def _get_support(self, b):
return self.a, b
def _pdf(self, x, b):
# truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b))
return np.exp(-x)/(-sc.expm1(-b))
def _logpdf(self, x, b):
return -x - np.log(-sc.expm1(-b))
def _cdf(self, x, b):
return sc.expm1(-x)/sc.expm1(-b)
def _ppf(self, q, b):
return -sc.log1p(q*sc.expm1(-b))
def _munp(self, n, b):
        # The closed-form formula below gives the wrong answer
        # (same issue as in continuous.pdf):
        # return sc.gamma(n+1) - sc.gammainc(1+n, b)
if n == 1:
return (1-(b+1)*np.exp(-b))/(-sc.expm1(-b))
elif n == 2:
return 2*(1-0.5*(b*b+2*b+2)*np.exp(-b))/(-sc.expm1(-b))
else:
# return generic for higher moments
# return rv_continuous._mom1_sc(self, n, b)
return self._mom1_sc(n, b)
def _entropy(self, b):
eB = np.exp(b)
return np.log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
truncexpon = truncexpon_gen(a=0.0, name='truncexpon')
TRUNCNORM_TAIL_X = 30
TRUNCNORM_MAX_BRENT_ITERS = 40
def _truncnorm_get_delta_scalar(a, b):
if (a > TRUNCNORM_TAIL_X) or (b < -TRUNCNORM_TAIL_X):
return 0
if a > 0:
delta = _norm_sf(a) - _norm_sf(b)
else:
delta = _norm_cdf(b) - _norm_cdf(a)
delta = max(delta, 0)
return delta
def _truncnorm_get_delta(a, b):
if np.isscalar(a) and np.isscalar(b):
return _truncnorm_get_delta_scalar(a, b)
a, b = np.atleast_1d(a), np.atleast_1d(b)
if a.size == 1 and b.size == 1:
return _truncnorm_get_delta_scalar(a.item(), b.item())
delta = np.zeros(np.shape(a))
condinner = (a <= TRUNCNORM_TAIL_X) & (b >= -TRUNCNORM_TAIL_X)
conda = (a > 0) & condinner
condb = (a <= 0) & condinner
if np.any(conda):
np.place(delta, conda, _norm_sf(a[conda]) - _norm_sf(b[conda]))
if np.any(condb):
np.place(delta, condb, _norm_cdf(b[condb]) - _norm_cdf(a[condb]))
delta[delta < 0] = 0
return delta
def _truncnorm_get_logdelta_scalar(a, b):
if (a <= TRUNCNORM_TAIL_X) and (b >= -TRUNCNORM_TAIL_X):
if a > 0:
delta = _norm_sf(a) - _norm_sf(b)
else:
delta = _norm_cdf(b) - _norm_cdf(a)
delta = max(delta, 0)
if delta > 0:
return np.log(delta)
if b < 0 or (np.abs(a) >= np.abs(b)):
nla, nlb = _norm_logcdf(a), _norm_logcdf(b)
logdelta = nlb + np.log1p(-np.exp(nla - nlb))
else:
sla, slb = _norm_logsf(a), _norm_logsf(b)
logdelta = sla + np.log1p(-np.exp(slb - sla))
return logdelta
def _truncnorm_logpdf_scalar(x, a, b):
with np.errstate(invalid='ignore'):
if np.isscalar(x):
if x < a:
return -np.inf
if x > b:
return -np.inf
shp = np.shape(x)
x = np.atleast_1d(x)
out = np.full_like(x, np.nan, dtype=np.double)
condlta, condgtb = (x < a), (x > b)
if np.any(condlta):
np.place(out, condlta, -np.inf)
if np.any(condgtb):
np.place(out, condgtb, -np.inf)
cond_inner = ~condlta & ~condgtb
if np.any(cond_inner):
_logdelta = _truncnorm_get_logdelta_scalar(a, b)
np.place(out, cond_inner, _norm_logpdf(x[cond_inner]) - _logdelta)
return (out[0] if (shp == ()) else out)
def _truncnorm_pdf_scalar(x, a, b):
with np.errstate(invalid='ignore'):
if np.isscalar(x):
if x < a:
return 0.0
if x > b:
return 0.0
shp = np.shape(x)
x = np.atleast_1d(x)
out = np.full_like(x, np.nan, dtype=np.double)
condlta, condgtb = (x < a), (x > b)
if np.any(condlta):
np.place(out, condlta, 0.0)
if np.any(condgtb):
np.place(out, condgtb, 0.0)
cond_inner = ~condlta & ~condgtb
if np.any(cond_inner):
delta = _truncnorm_get_delta_scalar(a, b)
if delta > 0:
np.place(out, cond_inner, _norm_pdf(x[cond_inner]) / delta)
else:
np.place(out, cond_inner,
np.exp(_truncnorm_logpdf_scalar(x[cond_inner], a, b)))
return (out[0] if (shp == ()) else out)
def _truncnorm_logcdf_scalar(x, a, b):
with np.errstate(invalid='ignore'):
if np.isscalar(x):
if x <= a:
return -np.inf
if x >= b:
return 0
shp = np.shape(x)
x = np.atleast_1d(x)
out = np.full_like(x, np.nan, dtype=np.double)
condlea, condgeb = (x <= a), (x >= b)
if np.any(condlea):
np.place(out, condlea, -np.inf)
if np.any(condgeb):
np.place(out, condgeb, 0.0)
cond_inner = ~condlea & ~condgeb
if np.any(cond_inner):
delta = _truncnorm_get_delta_scalar(a, b)
if delta > 0:
np.place(out, cond_inner,
np.log((_norm_cdf(x[cond_inner]) - _norm_cdf(a)) / delta))
else:
with np.errstate(divide='ignore'):
if a < 0:
nla, nlb = _norm_logcdf(a), _norm_logcdf(b)
tab = np.log1p(-np.exp(nla - nlb))
nlx = _norm_logcdf(x[cond_inner])
tax = np.log1p(-np.exp(nla - nlx))
np.place(out, cond_inner, nlx + tax - (nlb + tab))
else:
sla = _norm_logsf(a)
slb = _norm_logsf(b)
np.place(out, cond_inner,
np.log1p(-np.exp(_norm_logsf(x[cond_inner]) - sla))
- np.log1p(-np.exp(slb - sla)))
return (out[0] if (shp == ()) else out)
def _truncnorm_cdf_scalar(x, a, b):
with np.errstate(invalid='ignore'):
if np.isscalar(x):
            if x <= a:
                return 0.0
            if x >= b:
                return 1.0
shp = np.shape(x)
x = np.atleast_1d(x)
out = np.full_like(x, np.nan, dtype=np.double)
condlea, condgeb = (x <= a), (x >= b)
if np.any(condlea):
np.place(out, condlea, 0)
if np.any(condgeb):
np.place(out, condgeb, 1.0)
cond_inner = ~condlea & ~condgeb
if np.any(cond_inner):
delta = _truncnorm_get_delta_scalar(a, b)
if delta > 0:
np.place(out, cond_inner,
(_norm_cdf(x[cond_inner]) - _norm_cdf(a)) / delta)
else:
with np.errstate(divide='ignore'):
np.place(out, cond_inner,
np.exp(_truncnorm_logcdf_scalar(x[cond_inner], a, b)))
return (out[0] if (shp == ()) else out)
def _truncnorm_logsf_scalar(x, a, b):
with np.errstate(invalid='ignore'):
if np.isscalar(x):
if x <= a:
return 0.0
if x >= b:
return -np.inf
shp = np.shape(x)
x = np.atleast_1d(x)
out = np.full_like(x, np.nan, dtype=np.double)
condlea, condgeb = (x <= a), (x >= b)
if np.any(condlea):
np.place(out, condlea, 0)
if np.any(condgeb):
np.place(out, condgeb, -np.inf)
cond_inner = ~condlea & ~condgeb
if np.any(cond_inner):
delta = _truncnorm_get_delta_scalar(a, b)
if delta > 0:
np.place(out, cond_inner, np.log((_norm_sf(x[cond_inner]) - _norm_sf(b)) / delta))
else:
with np.errstate(divide='ignore'):
if b < 0:
nla, nlb = _norm_logcdf(a), _norm_logcdf(b)
np.place(out, cond_inner,
np.log1p(-np.exp(_norm_logcdf(x[cond_inner]) - nlb))
- np.log1p(-np.exp(nla - nlb)))
else:
sla, slb = _norm_logsf(a), _norm_logsf(b)
tab = np.log1p(-np.exp(slb - sla))
slx = _norm_logsf(x[cond_inner])
tax = np.log1p(-np.exp(slb - slx))
np.place(out, cond_inner, slx + tax - (sla + tab))
return (out[0] if (shp == ()) else out)
def _truncnorm_sf_scalar(x, a, b):
with np.errstate(invalid='ignore'):
if np.isscalar(x):
if x <= a:
return 1.0
if x >= b:
return 0.0
shp = np.shape(x)
x = np.atleast_1d(x)
out = np.full_like(x, np.nan, dtype=np.double)
condlea, condgeb = (x <= a), (x >= b)
if np.any(condlea):
np.place(out, condlea, 1.0)
if np.any(condgeb):
np.place(out, condgeb, 0.0)
cond_inner = ~condlea & ~condgeb
if np.any(cond_inner):
delta = _truncnorm_get_delta_scalar(a, b)
if delta > 0:
np.place(out, cond_inner, (_norm_sf(x[cond_inner]) - _norm_sf(b)) / delta)
else:
np.place(out, cond_inner, np.exp(_truncnorm_logsf_scalar(x[cond_inner], a, b)))
return (out[0] if (shp == ()) else out)
def _norm_logcdfprime(z):
# derivative of special.log_ndtr (See special/cephes/ndtr.c)
    # Differentiate formula for log Phi(z)
# log Phi(z) = -z^2/2 - log(-z) - log(2pi)/2 + log(1 + sum (-1)^n (2n-1)!! / z^(2n))
# Convergence of series is slow for |z| < 10, but can use d(log Phi(z))/dz = dPhi(z)/dz / Phi(z)
# Just take the first 10 terms because that is sufficient for use in _norm_ilogcdf
assert np.all(z <= -10)
lhs = -z - 1/z
denom_cons = 1/z**2
numerator = 1
pwr = 1.0
denom_total, numerator_total = 0, 0
sign = -1
for i in range(1, 11):
pwr *= denom_cons
numerator *= 2 * i - 1
term = sign * numerator * pwr
denom_total += term
numerator_total += term * (2 * i) / z
sign = -sign
return lhs - numerator_total / (1 + denom_total)
def _norm_ilogcdf(y):
"""Inverse function to _norm_logcdf==sc.log_ndtr."""
# Apply approximate Newton-Raphson
# Only use for very negative values of y.
# At minimum requires y <= -(log(2pi)+2^2)/2 ~= -2.9
# Much better convergence for y <= -10
z = -np.sqrt(-2 * (y + np.log(2*np.pi)/2))
for _ in range(4):
z = z - (_norm_logcdf(z) - y) / _norm_logcdfprime(z)
return z
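# Illustrative sketch (not part of scipy's public API): per the notes above,
# `_norm_ilogcdf` inverts `_norm_logcdf` for very negative arguments. The
# demo name is hypothetical.
def _norm_ilogcdf_roundtrip_demo():
    z = np.array([-12.0, -20.0, -35.0])
    y = _norm_logcdf(z)
    # Residuals of the round trip; should be ~0 for these arguments.
    return np.array([_norm_ilogcdf(yi) for yi in y]) - z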
def _truncnorm_ppf_scalar(q, a, b):
shp = np.shape(q)
q = np.atleast_1d(q)
out = np.zeros(np.shape(q))
condle0, condge1 = (q <= 0), (q >= 1)
if np.any(condle0):
out[condle0] = a
if np.any(condge1):
out[condge1] = b
delta = _truncnorm_get_delta_scalar(a, b)
cond_inner = ~condle0 & ~condge1
if np.any(cond_inner):
qinner = q[cond_inner]
if delta > 0:
if a > 0:
sa, sb = _norm_sf(a), _norm_sf(b)
np.place(out, cond_inner,
_norm_isf(qinner * sb + sa * (1.0 - qinner)))
else:
na, nb = _norm_cdf(a), _norm_cdf(b)
np.place(out, cond_inner, _norm_ppf(qinner * nb + na * (1.0 - qinner)))
elif np.isinf(b):
np.place(out, cond_inner,
-_norm_ilogcdf(np.log1p(-qinner) + _norm_logsf(a)))
elif np.isinf(a):
            np.place(out, cond_inner,
                     _norm_ilogcdf(np.log(qinner) + _norm_logcdf(b)))
else:
if b < 0:
                # Solve norm_logcdf(x) = norm_logcdf(a) + log1p(q * expm1(norm_logcdf(b) - norm_logcdf(a)))
# = nla + log1p(q * expm1(nlb - nla))
# = nlb + log(q) + log1p((1-q) * exp(nla - nlb)/q)
def _f_cdf(x, c):
return _norm_logcdf(x) - c
nla, nlb = _norm_logcdf(a), _norm_logcdf(b)
values = nlb + np.log(q[cond_inner])
C = np.exp(nla - nlb)
if C:
one_minus_q = (1 - q)[cond_inner]
values += np.log1p(one_minus_q * C / q[cond_inner])
                x = [optimize.zeros.brentq(_f_cdf, a, b, args=(c,),
                                           maxiter=TRUNCNORM_MAX_BRENT_ITERS)
                     for c in values]
np.place(out, cond_inner, x)
else:
                # Solve norm_logsf(x) = norm_logsf(b) + log1p((1-q) * expm1(norm_logsf(a) - norm_logsf(b)))
                #      = slb + log1p((1-q) * expm1(sla - slb))
                #      = sla + log(1-q) + log1p(q * np.exp(slb - sla)/(1-q))
def _f_sf(x, c):
return _norm_logsf(x) - c
sla, slb = _norm_logsf(a), _norm_logsf(b)
one_minus_q = (1-q)[cond_inner]
values = sla + np.log(one_minus_q)
C = np.exp(slb - sla)
if C:
values += np.log1p(q[cond_inner] * C / one_minus_q)
x = [optimize.zeros.brentq(_f_sf, a, b, args=(c,),
maxiter=TRUNCNORM_MAX_BRENT_ITERS) for c in values]
np.place(out, cond_inner, x)
out[out < a] = a
out[out > b] = b
return (out[0] if (shp == ()) else out)
class truncnorm_gen(rv_continuous):
r"""A truncated normal continuous random variable.
%(before_notes)s
Notes
-----
The standard form of this distribution is a standard normal truncated to
the range [a, b] --- notice that a and b are defined over the domain of the
standard normal. To convert clip values for a specific mean and standard
deviation, use::
a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
`truncnorm` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
return a < b
def _get_support(self, a, b):
return a, b
def _pdf(self, x, a, b):
if np.isscalar(a) and np.isscalar(b):
return _truncnorm_pdf_scalar(x, a, b)
a, b = np.atleast_1d(a), np.atleast_1d(b)
if a.size == 1 and b.size == 1:
return _truncnorm_pdf_scalar(x, a.item(), b.item())
it = np.nditer([x, a, b, None], [],
[['readonly'], ['readonly'], ['readonly'], ['writeonly','allocate']])
for (_x, _a, _b, _ld) in it:
_ld[...] = _truncnorm_pdf_scalar(_x, _a, _b)
return it.operands[3]
def _logpdf(self, x, a, b):
if np.isscalar(a) and np.isscalar(b):
return _truncnorm_logpdf_scalar(x, a, b)
a, b = np.atleast_1d(a), np.atleast_1d(b)
if a.size == 1 and b.size == 1:
return _truncnorm_logpdf_scalar(x, a.item(), b.item())
it = np.nditer([x, a, b, None], [],
[['readonly'], ['readonly'], ['readonly'], ['writeonly','allocate']])
for (_x, _a, _b, _ld) in it:
_ld[...] = _truncnorm_logpdf_scalar(_x, _a, _b)
return it.operands[3]
def _cdf(self, x, a, b):
if np.isscalar(a) and np.isscalar(b):
return _truncnorm_cdf_scalar(x, a, b)
a, b = np.atleast_1d(a), np.atleast_1d(b)
if a.size == 1 and b.size == 1:
return _truncnorm_cdf_scalar(x, a.item(), b.item())
out = None
it = np.nditer([x, a, b, out], [],
[['readonly'], ['readonly'], ['readonly'], ['writeonly', 'allocate']])
for (_x, _a, _b, _p) in it:
_p[...] = _truncnorm_cdf_scalar(_x, _a, _b)
return it.operands[3]
def _logcdf(self, x, a, b):
if np.isscalar(a) and np.isscalar(b):
return _truncnorm_logcdf_scalar(x, a, b)
a, b = np.atleast_1d(a), np.atleast_1d(b)
if a.size == 1 and b.size == 1:
return _truncnorm_logcdf_scalar(x, a.item(), b.item())
it = np.nditer([x, a, b, None], [],
[['readonly'], ['readonly'], ['readonly'], ['writeonly', 'allocate']])
for (_x, _a, _b, _p) in it:
_p[...] = _truncnorm_logcdf_scalar(_x, _a, _b)
return it.operands[3]
def _sf(self, x, a, b):
if np.isscalar(a) and np.isscalar(b):
return _truncnorm_sf_scalar(x, a, b)
a, b = np.atleast_1d(a), np.atleast_1d(b)
if a.size == 1 and b.size == 1:
return _truncnorm_sf_scalar(x, a.item(), b.item())
out = None
it = np.nditer([x, a, b, out], [],
[['readonly'], ['readonly'], ['readonly'], ['writeonly', 'allocate']])
for (_x, _a, _b, _p) in it:
_p[...] = _truncnorm_sf_scalar(_x, _a, _b)
return it.operands[3]
def _logsf(self, x, a, b):
if np.isscalar(a) and np.isscalar(b):
return _truncnorm_logsf_scalar(x, a, b)
a, b = np.atleast_1d(a), np.atleast_1d(b)
if a.size == 1 and b.size == 1:
return _truncnorm_logsf_scalar(x, a.item(), b.item())
out = None
it = np.nditer([x, a, b, out], [],
[['readonly'], ['readonly'], ['readonly'], ['writeonly', 'allocate']])
for (_x, _a, _b, _p) in it:
_p[...] = _truncnorm_logsf_scalar(_x, _a, _b)
return it.operands[3]
def _ppf(self, q, a, b):
if np.isscalar(a) and np.isscalar(b):
return _truncnorm_ppf_scalar(q, a, b)
a, b = np.atleast_1d(a), np.atleast_1d(b)
if a.size == 1 and b.size == 1:
return _truncnorm_ppf_scalar(q, a.item(), b.item())
out = None
it = np.nditer([q, a, b, out], [],
[['readonly'], ['readonly'], ['readonly'], ['writeonly', 'allocate']])
for (_q, _a, _b, _x) in it:
_x[...] = _truncnorm_ppf_scalar(_q, _a, _b)
return it.operands[3]
def _munp(self, n, a, b):
def n_th_moment(n, a, b):
"""
Returns n-th moment. Defined only if n >= 0.
Function cannot broadcast due to the loop over n
"""
pA, pB = self._pdf([a, b], a, b)
probs = [pA, -pB]
moments = [0, 1]
for k in range(1, n+1):
# a or b might be infinite, and the corresponding pdf value
# is 0 in that case, but nan is returned for the
# multiplication. However, as b->infinity, pdf(b)*b**k -> 0.
# So it is safe to use _lazywhere to avoid the nan.
vals = _lazywhere(probs, [probs, [a, b]],
lambda x, y: x * y**(k-1), fillvalue=0)
mk = np.sum(vals) + (k-1) * moments[-2]
moments.append(mk)
return moments[-1]
return _lazywhere((n >= 0) & (a == a) & (b == b), (n, a, b),
np.vectorize(n_th_moment, otypes=[np.float]), np.nan)
def _stats(self, a, b, moments='mv'):
pA, pB = self._pdf(np.array([a, b]), a, b)
m1 = pA - pB
mu = m1
# use _lazywhere to avoid nan (See detailed comment in _munp)
probs = [pA, -pB]
vals = _lazywhere(probs, [probs, [a, b]], lambda x, y: x*y,
fillvalue=0)
m2 = 1 + np.sum(vals)
vals = _lazywhere(probs, [probs, [a-mu, b-mu]], lambda x, y: x*y,
fillvalue=0)
# mu2 = m2 - mu**2, but not as numerically stable as:
# mu2 = (a-mu)*pA - (b-mu)*pB + 1
mu2 = 1 + np.sum(vals)
vals = _lazywhere(probs, [probs, [a, b]], lambda x, y: x*y**2,
fillvalue=0)
m3 = 2*m1 + np.sum(vals)
vals = _lazywhere(probs, [probs, [a, b]], lambda x, y: x*y**3,
fillvalue=0)
m4 = 3*m2 + np.sum(vals)
mu3 = m3 + m1 * (-3*m2 + 2*m1**2)
g1 = mu3 / np.power(mu2, 1.5)
mu4 = m4 + m1*(-4*m3 + 3*m1*(2*m2 - m1**2))
g2 = mu4 / mu2**2 - 3
return mu, mu2, g1, g2
def _rvs(self, a, b, size=None, random_state=None):
# if a and b are scalar, use _rvs_scalar, otherwise need to create
# output by iterating over parameters
if np.isscalar(a) and np.isscalar(b):
out = self._rvs_scalar(a, b, size, random_state=random_state)
elif a.size == 1 and b.size == 1:
out = self._rvs_scalar(a.item(), b.item(), size, random_state=random_state)
else:
# When this method is called, size will be a (possibly empty)
# tuple of integers. It will not be None; if `size=None` is passed
# to `rvs()`, size will be the empty tuple ().
a, b = np.broadcast_arrays(a, b)
# a and b now have the same shape.
# `shp` is the shape of the blocks of random variates that are
# generated for each combination of parameters associated with
# broadcasting a and b.
# bc is a tuple the same length as size. The values
# in bc are bools. If bc[j] is True, it means that
# entire axis is filled in for a given combination of the
# broadcast arguments.
shp, bc = _check_shape(a.shape, size)
# `numsamples` is the total number of variates to be generated
# for each combination of the input arguments.
numsamples = int(np.prod(shp))
# `out` is the array to be returned. It is filled in in the
# loop below.
out = np.empty(size)
it = np.nditer([a, b],
flags=['multi_index'],
op_flags=[['readonly'], ['readonly']])
while not it.finished:
# Convert the iterator's multi_index into an index into the
# `out` array where the call to _rvs_scalar() will be stored.
# Where bc is True, we use a full slice; otherwise we use the
# index value from it.multi_index. len(it.multi_index) might
# be less than len(bc), and in that case we want to align these
# two sequences to the right, so the loop variable j runs from
# -len(size) to 0. This doesn't cause an IndexError, as
# bc[j] will be True in those cases where it.multi_index[j]
# would cause an IndexError.
idx = tuple((it.multi_index[j] if not bc[j] else slice(None))
for j in range(-len(size), 0))
out[idx] = self._rvs_scalar(it[0], it[1], numsamples, random_state).reshape(shp)
it.iternext()
if size == ():
out = out[()]
return out
def _rvs_scalar(self, a, b, numsamples=None, random_state=None):
if not numsamples:
numsamples = 1
# prepare sampling of rvs
size1d = tuple(np.atleast_1d(numsamples))
N = np.prod(size1d) # number of rvs needed, reshape upon return
# Calculate some rvs
U = random_state.random_sample(N)
x = self._ppf(U, a, b)
rvs = np.reshape(x, size1d)
return rvs
truncnorm = truncnorm_gen(name='truncnorm', momtype=1)
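# Illustrative sketch (not part of scipy's public API) of the standardization
# described in the `truncnorm` docstring above: clip values on the data scale
# map to shape parameters on the standard-normal scale. All constants and the
# function name are hypothetical.
def _truncnorm_clip_demo(seed=0):
    my_mean, my_std = 5.0, 2.0
    myclip_a, myclip_b = 3.0, 9.0
    a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
    rvs = truncnorm.rvs(a, b, loc=my_mean, scale=my_std, size=1000,
                        random_state=seed)
    # Every draw should lie inside [myclip_a, myclip_b].
    return rvs.min(), rvs.max()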
# FIXME: RVS does not work.
class tukeylambda_gen(rv_continuous):
r"""A Tukey-Lamdba continuous random variable.
%(before_notes)s
Notes
-----
A flexible distribution, able to represent and interpolate between the
following distributions:
    - Cauchy (:math:`\lambda = -1`)
    - logistic (:math:`\lambda = 0`)
    - approx Normal (:math:`\lambda = 0.14`)
    - uniform from -1 to 1 (:math:`\lambda = 1`)
    `tukeylambda` takes a real number :math:`\lambda` (denoted ``lam``
in the implementation) as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lam):
return np.ones(np.shape(lam), dtype=bool)
def _pdf(self, x, lam):
Fx = np.asarray(sc.tklmbda(x, lam))
Px = Fx**(lam-1.0) + (np.asarray(1-Fx))**(lam-1.0)
Px = 1.0/np.asarray(Px)
return np.where((lam <= 0) | (abs(x) < 1.0/np.asarray(lam)), Px, 0.0)
def _cdf(self, x, lam):
return sc.tklmbda(x, lam)
def _ppf(self, q, lam):
return sc.boxcox(q, lam) - sc.boxcox1p(-q, lam)
def _stats(self, lam):
return 0, _tlvar(lam), 0, _tlkurt(lam)
def _entropy(self, lam):
def integ(p):
return np.log(pow(p, lam-1)+pow(1-p, lam-1))
return integrate.quad(integ, 0, 1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda')
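# Illustrative sketch (not part of scipy's public API): at ``lam = 0`` the
# Tukey-Lambda quantile function reduces to the logistic quantile
# log(q / (1 - q)), one of the special cases listed in the docstring above.
# The demo name is hypothetical.
def _tukeylambda_logistic_demo():
    q = np.linspace(0.05, 0.95, 19)
    # Maximum pointwise difference; should be ~0.
    return np.max(np.abs(tukeylambda.ppf(q, 0) - np.log(q / (1 - q))))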
class FitUniformFixedScaleDataError(FitDataError):
def __init__(self, ptp, fscale):
self.args = (
"Invalid values in `data`. Maximum likelihood estimation with "
"the uniform distribution and fixed scale requires that "
"data.ptp() <= fscale, but data.ptp() = %r and fscale = %r." %
(ptp, fscale),
)
class uniform_gen(rv_continuous):
r"""A uniform continuous random variable.
In the standard form, the distribution is uniform on ``[0, 1]``. Using
the parameters ``loc`` and ``scale``, one obtains the uniform distribution
on ``[loc, loc + scale]``.
%(before_notes)s
%(example)s
"""
def _rvs(self, size=None, random_state=None):
return random_state.uniform(0.0, 1.0, size)
    def _pdf(self, x):
        # pdf is 1 on the standard support; ``1.0*(x == x)`` broadcasts the
        # constant to the shape of x
        return 1.0*(x == x)
def _cdf(self, x):
return x
def _ppf(self, q):
return q
def _stats(self):
return 0.5, 1.0/12, 0, -1.2
def _entropy(self):
return 0.0
def fit(self, data, *args, **kwds):
"""
Maximum likelihood estimate for the location and scale parameters.
`uniform.fit` uses only the following parameters. Because exact
formulas are used, the parameters related to optimization that are
available in the `fit` method of other distributions are ignored
here. The only positional argument accepted is `data`.
Parameters
----------
data : array_like
Data to use in calculating the maximum likelihood estimate.
floc : float, optional
Hold the location parameter fixed to the specified value.
fscale : float, optional
Hold the scale parameter fixed to the specified value.
Returns
-------
loc, scale : float
Maximum likelihood estimates for the location and scale.
Notes
-----
An error is raised if `floc` is given and any values in `data` are
less than `floc`, or if `fscale` is given and `fscale` is less
than ``data.max() - data.min()``. An error is also raised if both
`floc` and `fscale` are given.
Examples
--------
>>> from scipy.stats import uniform
We'll fit the uniform distribution to `x`:
>>> x = np.array([2, 2.5, 3.1, 9.5, 13.0])
For a uniform distribution MLE, the location is the minimum of the
data, and the scale is the maximum minus the minimum.
>>> loc, scale = uniform.fit(x)
>>> loc
2.0
>>> scale
11.0
If we know the data comes from a uniform distribution where the support
starts at 0, we can use `floc=0`:
>>> loc, scale = uniform.fit(x, floc=0)
>>> loc
0.0
>>> scale
13.0
Alternatively, if we know the length of the support is 12, we can use
`fscale=12`:
>>> loc, scale = uniform.fit(x, fscale=12)
>>> loc
1.5
>>> scale
12.0
In that last example, the support interval is [1.5, 13.5]. This
solution is not unique. For example, the distribution with ``loc=2``
and ``scale=12`` has the same likelihood as the one above. When
`fscale` is given and it is larger than ``data.max() - data.min()``,
the parameters returned by the `fit` method center the support over
the interval ``[data.min(), data.max()]``.
"""
if len(args) > 0:
raise TypeError("Too many arguments.")
floc = kwds.pop('floc', None)
fscale = kwds.pop('fscale', None)
_remove_optimizer_parameters(kwds)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if not np.isfinite(data).all():
raise RuntimeError("The data contains non-finite values.")
# MLE for the uniform distribution
# --------------------------------
# The PDF is
#
# f(x, loc, scale) = {1/scale for loc <= x <= loc + scale
# {0 otherwise}
#
# The likelihood function is
# L(x, loc, scale) = (1/scale)**n
# where n is len(x), assuming loc <= x <= loc + scale for all x.
# The log-likelihood is
# l(x, loc, scale) = -n*log(scale)
# The log-likelihood is maximized by making scale as small as possible,
# while keeping loc <= x <= loc + scale. So if neither loc nor scale
# are fixed, the log-likelihood is maximized by choosing
# loc = x.min()
# scale = x.ptp()
# If loc is fixed, it must be less than or equal to x.min(), and then
# the scale is
# scale = x.max() - loc
# If scale is fixed, it must not be less than x.ptp(). If scale is
# greater than x.ptp(), the solution is not unique. Note that the
# likelihood does not depend on loc, except for the requirement that
# loc <= x <= loc + scale. All choices of loc for which
# x.max() - scale <= loc <= x.min()
# have the same log-likelihood. In this case, we choose loc such that
# the support is centered over the interval [data.min(), data.max()]:
        #     loc = x.min() - 0.5*(scale - x.ptp())
if fscale is None:
# scale is not fixed.
if floc is None:
# loc is not fixed, scale is not fixed.
loc = data.min()
scale = data.ptp()
else:
# loc is fixed, scale is not fixed.
loc = floc
scale = data.max() - loc
if data.min() < loc:
raise FitDataError("uniform", lower=loc, upper=loc + scale)
else:
# loc is not fixed, scale is fixed.
ptp = data.ptp()
if ptp > fscale:
raise FitUniformFixedScaleDataError(ptp=ptp, fscale=fscale)
# If ptp < fscale, the ML estimate is not unique; see the comments
# above. We choose the distribution for which the support is
# centered over the interval [data.min(), data.max()].
loc = data.min() - 0.5*(fscale - ptp)
scale = fscale
# We expect the return values to be floating point, so ensure it
# by explicitly converting to float.
return float(loc), float(scale)
uniform = uniform_gen(a=0.0, b=1.0, name='uniform')
class vonmises_gen(rv_continuous):
r"""A Von Mises continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `vonmises` and `vonmises_line` is:
.. math::
f(x, \kappa) = \frac{ \exp(\kappa \cos(x)) }{ 2 \pi I_0(\kappa) }
for :math:`-\pi \le x \le \pi`, :math:`\kappa > 0`. :math:`I_0` is the
modified Bessel function of order zero (`scipy.special.i0`).
`vonmises` is a circular distribution which does not restrict the
distribution to a fixed interval. Currently, there is no circular
distribution framework in scipy. The ``cdf`` is implemented such that
``cdf(x + 2*np.pi) == cdf(x) + 1``.
`vonmises_line` is the same distribution, defined on :math:`[-\pi, \pi]`
on the real line. This is a regular (i.e. non-circular) distribution.
`vonmises` and `vonmises_line` take ``kappa`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, kappa, size=None, random_state=None):
return random_state.vonmises(0.0, kappa, size=size)
def _pdf(self, x, kappa):
# vonmises.pdf(x, \kappa) = exp(\kappa * cos(x)) / (2*pi*I[0](\kappa))
return np.exp(kappa * np.cos(x)) / (2*np.pi*sc.i0(kappa))
def _cdf(self, x, kappa):
return _stats.von_mises_cdf(kappa, x)
def _stats_skip(self, kappa):
return 0, None, 0, None
def _entropy(self, kappa):
return (-kappa * sc.i1(kappa) / sc.i0(kappa) +
np.log(2 * np.pi * sc.i0(kappa)))
vonmises = vonmises_gen(name='vonmises')
vonmises_line = vonmises_gen(a=-np.pi, b=np.pi, name='vonmises_line')
class wald_gen(invgauss_gen):
r"""A Wald continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wald` is:
.. math::
f(x) = \frac{1}{\sqrt{2\pi x^3}} \exp(- \frac{ (x-1)^2 }{ 2x })
    for :math:`x \ge 0`.
`wald` is a special case of `invgauss` with ``mu=1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, size=None, random_state=None):
return random_state.wald(1.0, 1.0, size=size)
def _pdf(self, x):
# wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
return invgauss._pdf(x, 1.0)
def _logpdf(self, x):
return invgauss._logpdf(x, 1.0)
def _cdf(self, x):
return invgauss._cdf(x, 1.0)
def _stats(self):
return 1.0, 1.0, 3.0, 15.0
wald = wald_gen(a=0.0, name="wald")
class wrapcauchy_gen(rv_continuous):
r"""A wrapped Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wrapcauchy` is:
.. math::
f(x, c) = \frac{1-c^2}{2\pi (1+c^2 - 2c \cos(x))}
for :math:`0 \le x \le 2\pi`, :math:`0 < c < 1`.
`wrapcauchy` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return (c > 0) & (c < 1)
def _pdf(self, x, c):
# wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
return (1.0-c*c)/(2*np.pi*(1+c*c-2*c*np.cos(x)))
def _cdf(self, x, c):
output = np.zeros(x.shape, dtype=x.dtype)
val = (1.0+c)/(1.0-c)
c1 = x < np.pi
c2 = 1-c1
xp = np.extract(c1, x)
xn = np.extract(c2, x)
if np.any(xn):
valn = np.extract(c2, np.ones_like(x)*val)
xn = 2*np.pi - xn
yn = np.tan(xn/2.0)
on = 1.0-1.0/np.pi*np.arctan(valn*yn)
np.place(output, c2, on)
if np.any(xp):
valp = np.extract(c1, np.ones_like(x)*val)
yp = np.tan(xp/2.0)
op = 1.0/np.pi*np.arctan(valp*yp)
np.place(output, c1, op)
return output
def _ppf(self, q, c):
val = (1.0-c)/(1.0+c)
rcq = 2*np.arctan(val*np.tan(np.pi*q))
rcmq = 2*np.pi-2*np.arctan(val*np.tan(np.pi*(1-q)))
return np.where(q < 1.0/2, rcq, rcmq)
def _entropy(self, c):
return np.log(2*np.pi*(1-c*c))
wrapcauchy = wrapcauchy_gen(a=0.0, b=2*np.pi, name='wrapcauchy')
class gennorm_gen(rv_continuous):
r"""A generalized normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gennorm` is [1]_:
.. math::
f(x, \beta) = \frac{\beta}{2 \Gamma(1/\beta)} \exp(-|x|^\beta)
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
`gennorm` takes ``beta`` as a shape parameter for :math:`\beta`.
For :math:`\beta = 1`, it is identical to a Laplace distribution.
For :math:`\beta = 2`, it is identical to a normal distribution
(with ``scale=1/sqrt(2)``).
See Also
--------
laplace : Laplace distribution
norm : normal distribution
References
----------
.. [1] "Generalized normal distribution, Version 1",
https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
%(example)s
"""
def _pdf(self, x, beta):
return np.exp(self._logpdf(x, beta))
def _logpdf(self, x, beta):
return np.log(0.5*beta) - sc.gammaln(1.0/beta) - abs(x)**beta
def _cdf(self, x, beta):
c = 0.5 * np.sign(x)
# evaluating (.5 + c) first prevents numerical cancellation
return (0.5 + c) - c * sc.gammaincc(1.0/beta, abs(x)**beta)
def _ppf(self, x, beta):
c = np.sign(x - 0.5)
# evaluating (1. + c) first prevents numerical cancellation
return c * sc.gammainccinv(1.0/beta, (1.0 + c) - 2.0*c*x)**(1.0/beta)
def _sf(self, x, beta):
return self._cdf(-x, beta)
def _isf(self, x, beta):
return -self._ppf(x, beta)
def _stats(self, beta):
c1, c3, c5 = sc.gammaln([1.0/beta, 3.0/beta, 5.0/beta])
return 0., np.exp(c3 - c1), 0., np.exp(c5 + c1 - 2.0*c3) - 3.
def _entropy(self, beta):
return 1. / beta - np.log(.5 * beta) + sc.gammaln(1. / beta)
gennorm = gennorm_gen(name='gennorm')
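# Illustrative sketch (not part of scipy's public API) of the special cases
# noted in the `gennorm` docstring: beta=1 recovers the Laplace density and
# beta=2 a normal density with scale 1/sqrt(2). Assumes the `laplace` and
# `norm` instances defined earlier in this module; the demo name is
# hypothetical.
def _gennorm_special_cases_demo():
    x = np.linspace(-3.0, 3.0, 13)
    err_laplace = np.max(np.abs(gennorm.pdf(x, 1) - laplace.pdf(x)))
    err_norm = np.max(np.abs(gennorm.pdf(x, 2)
                             - norm.pdf(x, scale=1/np.sqrt(2))))
    # Both errors should be ~0.
    return err_laplace, err_norm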
class halfgennorm_gen(rv_continuous):
r"""The upper half of a generalized normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfgennorm` is:
.. math::
f(x, \beta) = \frac{\beta}{\Gamma(1/\beta)} \exp(-|x|^\beta)
for :math:`x > 0`. :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
`gennorm` takes ``beta`` as a shape parameter for :math:`\beta`.
For :math:`\beta = 1`, it is identical to an exponential distribution.
For :math:`\beta = 2`, it is identical to a half normal distribution
(with ``scale=1/sqrt(2)``).
See Also
--------
gennorm : generalized normal distribution
expon : exponential distribution
halfnorm : half normal distribution
References
----------
.. [1] "Generalized normal distribution, Version 1",
https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
%(example)s
"""
def _pdf(self, x, beta):
# beta
# halfgennorm.pdf(x, beta) = ------------- exp(-|x|**beta)
# gamma(1/beta)
return np.exp(self._logpdf(x, beta))
def _logpdf(self, x, beta):
return np.log(beta) - sc.gammaln(1.0/beta) - x**beta
def _cdf(self, x, beta):
return sc.gammainc(1.0/beta, x**beta)
def _ppf(self, x, beta):
return sc.gammaincinv(1.0/beta, x)**(1.0/beta)
def _sf(self, x, beta):
return sc.gammaincc(1.0/beta, x**beta)
def _isf(self, x, beta):
return sc.gammainccinv(1.0/beta, x)**(1.0/beta)
def _entropy(self, beta):
return 1.0/beta - np.log(beta) + sc.gammaln(1.0/beta)
halfgennorm = halfgennorm_gen(a=0, name='halfgennorm')
class crystalball_gen(rv_continuous):
r"""
Crystalball distribution
%(before_notes)s
Notes
-----
The probability density function for `crystalball` is:
.. math::
f(x, \beta, m) = \begin{cases}
N \exp(-x^2 / 2), &\text{for } x > -\beta\\
N A (B - x)^{-m} &\text{for } x \le -\beta
\end{cases}
    where :math:`A = (m / |\beta|)^m \exp(-\beta^2 / 2)`,
:math:`B = m/|\beta| - |\beta|` and :math:`N` is a normalisation constant.
`crystalball` takes :math:`\beta > 0` and :math:`m > 1` as shape
parameters. :math:`\beta` defines the point where the pdf changes
from a power-law to a Gaussian distribution. :math:`m` is the power
of the power-law tail.
References
----------
.. [1] "Crystal Ball Function",
https://en.wikipedia.org/wiki/Crystal_Ball_function
%(after_notes)s
.. versionadded:: 0.19.0
%(example)s
"""
def _pdf(self, x, beta, m):
"""
Return PDF of the crystalball function.
--
| exp(-x**2 / 2), for x > -beta
crystalball.pdf(x, beta, m) = N * |
| A * (B - x)**(-m), for x <= -beta
--
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
def rhs(x, beta, m):
return np.exp(-x**2 / 2)
def lhs(x, beta, m):
return ((m/beta)**m * np.exp(-beta**2 / 2.0) *
(m/beta - beta - x)**(-m))
return N * _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
def _logpdf(self, x, beta, m):
"""
Return the log of the PDF of the crystalball function.
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
def rhs(x, beta, m):
return -x**2/2
def lhs(x, beta, m):
return m*np.log(m/beta) - beta**2/2 - m*np.log(m/beta - beta - x)
return np.log(N) + _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
def _cdf(self, x, beta, m):
"""
Return CDF of the crystalball function
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
def rhs(x, beta, m):
return ((m/beta) * np.exp(-beta**2 / 2.0) / (m-1) +
_norm_pdf_C * (_norm_cdf(x) - _norm_cdf(-beta)))
def lhs(x, beta, m):
return ((m/beta)**m * np.exp(-beta**2 / 2.0) *
(m/beta - beta - x)**(-m+1) / (m-1))
return N * _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
def _ppf(self, p, beta, m):
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
pbeta = N * (m/beta) * np.exp(-beta**2/2) / (m - 1)
def ppf_less(p, beta, m):
eb2 = np.exp(-beta**2/2)
C = (m/beta) * eb2 / (m-1)
N = 1/(C + _norm_pdf_C * _norm_cdf(beta))
return (m/beta - beta -
((m - 1)*(m/beta)**(-m)/eb2*p/N)**(1/(1-m)))
def ppf_greater(p, beta, m):
eb2 = np.exp(-beta**2/2)
C = (m/beta) * eb2 / (m-1)
N = 1/(C + _norm_pdf_C * _norm_cdf(beta))
return _norm_ppf(_norm_cdf(-beta) + (1/_norm_pdf_C)*(p/N - C))
return _lazywhere(p < pbeta, (p, beta, m), f=ppf_less, f2=ppf_greater)
def _munp(self, n, beta, m):
"""
Returns the n-th non-central moment of the crystalball function.
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
def n_th_moment(n, beta, m):
"""
Returns n-th moment. Defined only if n+1 < m
Function cannot broadcast due to the loop over n
"""
A = (m/beta)**m * np.exp(-beta**2 / 2.0)
B = m/beta - beta
rhs = (2**((n-1)/2.0) * sc.gamma((n+1)/2) *
(1.0 + (-1)**n * sc.gammainc((n+1)/2, beta**2 / 2)))
lhs = np.zeros(rhs.shape)
for k in range(n + 1):
lhs += (sc.binom(n, k) * B**(n-k) * (-1)**k / (m - k - 1) *
(m/beta)**(-m + k + 1))
return A * lhs + rhs
return N * _lazywhere(n + 1 < m, (n, beta, m),
np.vectorize(n_th_moment, otypes=[np.float]),
np.inf)
def _argcheck(self, beta, m):
"""
Shape parameter bounds are m > 1 and beta > 0.
"""
return (m > 1) & (beta > 0)
crystalball = crystalball_gen(name='crystalball', longname="A Crystalball Function")
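# Illustrative sketch (not part of scipy's public API): the normalisation
# constant N used throughout `crystalball_gen` can be checked by integrating
# the pdf over the real line. The demo name and parameter values are
# hypothetical.
def _crystalball_normalization_demo(beta=2.0, m=3.0):
    total, _ = integrate.quad(crystalball.pdf, -np.inf, np.inf,
                              args=(beta, m))
    # total should be close to 1.
    return total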
def _argus_phi(chi):
"""
    Utility function for the argus distribution,
    used in the CDF and normalization constant of the ARGUS function.
"""
return _norm_cdf(chi) - chi * _norm_pdf(chi) - 0.5
class argus_gen(rv_continuous):
r"""
Argus distribution
%(before_notes)s
Notes
-----
The probability density function for `argus` is:
.. math::
f(x, \chi) = \frac{\chi^3}{\sqrt{2\pi} \Psi(\chi)} x \sqrt{1-x^2}
\exp(-\chi^2 (1 - x^2)/2)
for :math:`0 < x < 1` and :math:`\chi > 0`, where
.. math::
\Psi(\chi) = \Phi(\chi) - \chi \phi(\chi) - 1/2
with :math:`\Phi` and :math:`\phi` being the CDF and PDF of a standard
normal distribution, respectively.
    `argus` takes :math:`\chi` as a shape parameter.
References
----------
.. [1] "ARGUS distribution",
https://en.wikipedia.org/wiki/ARGUS_distribution
%(after_notes)s
.. versionadded:: 0.19.0
%(example)s
"""
def _pdf(self, x, chi):
y = 1.0 - x**2
A = chi**3 / (_norm_pdf_C * _argus_phi(chi))
return A * x * np.sqrt(y) * np.exp(-chi**2 * y / 2)
def _cdf(self, x, chi):
return 1.0 - self._sf(x, chi)
def _sf(self, x, chi):
return _argus_phi(chi * np.sqrt(1 - x**2)) / _argus_phi(chi)
def _rvs(self, chi, size=None, random_state=None):
chi = np.asarray(chi)
if chi.size == 1:
out = self._rvs_scalar(chi, numsamples=size,
random_state=random_state)
else:
shp, bc = _check_shape(chi.shape, size)
numsamples = int(np.prod(shp))
out = np.empty(size)
it = np.nditer([chi],
flags=['multi_index'],
op_flags=[['readonly']])
while not it.finished:
idx = tuple((it.multi_index[j] if not bc[j] else slice(None))
for j in range(-len(size), 0))
r = self._rvs_scalar(it[0], numsamples=numsamples,
random_state=random_state)
out[idx] = r.reshape(shp)
it.iternext()
if size == ():
out = out[()]
return out
def _rvs_scalar(self, chi, numsamples=None, random_state=None):
# if chi <= 2.611:
# use rejection method, see Devroye:
# Non-Uniform Random Variate Generation, 1986, section II.3.2.
# write: self.pdf = c * g(x) * h(x), where
# h is [0,1]-valued and g is a density
# g(x) = d1 * chi**2 * x * exp(-chi**2 * (1 - x**2) / 2), 0 <= x <= 1
# h(x) = sqrt(1 - x**2), 0 <= x <= 1
# Integrating g, we get:
# G(x) = d1 * exp(-chi**2 * (1 - x**2) / 2) - d2
# d1 and d2 are determined by G(0) = 0 and G(1) = 1
# d1 = 1 / (1 - exp(-0.5 * chi**2))
# d2 = 1 / (exp(0.5 * chi**2) - 1)
# => G(x) = (exp(chi**2 * x**2 /2) - 1) / (exp(chi**2 / 2) - 1)
# expected number of iterations is c with
# c = -np.expm1(-0.5 * chi**2) * chi / (_norm_pdf_C * _argus_phi(chi))
# note that G can be inverted easily, so we can sample
# rvs from this distribution
# G_inv(y) = sqrt(2 * log(1 + (exp(chi**2 / 2) - 1) * y) / chi**2)
# to avoid an overflow of exp(chi**2 / 2), it is convenient to write
# G_inv(y) = sqrt(1 + 2 * log(exp(-chi**2 / 2) * (1-y) + y) / chi**2)
#
# if chi > 2.611:
# use ratio of uniforms method applied to a transformed variable of X
# (X is ARGUS with parameter chi):
# Y = chi * sqrt(1 - X**2) has density proportional to
# u**2 * exp(-u**2 / 2) on [0, chi] (Maxwell distribution conditioned
# on [0, chi]). Apply ratio of uniforms to this density to generate
# samples of Y and convert back to X
#
# The expected number of iterations using the rejection method
# increases with increasing chi, whereas the expected number of
# iterations using the ratio of uniforms method decreases with
# increasing chi. The crossover occurs where
# chi*(1 - exp(-0.5*chi**2)) = 8*sqrt(2)*exp(-1.5) => chi ~ 2.611
# Switching algorithms at chi=2.611 means that the expected number of
# iterations is always below 2.2.
if chi <= 2.611:
# use rejection method
size1d = tuple(np.atleast_1d(numsamples))
N = int(np.prod(size1d))
x = np.zeros(N)
echi = np.exp(-chi**2 / 2)
simulated = 0
while simulated < N:
k = N - simulated
u = random_state.uniform(size=k)
v = random_state.uniform(size=k)
# acceptance condition: u <= h(G_inv(v)). This simplifies to
z = 2 * np.log(echi * (1 - v) + v) / chi**2
accept = (u**2 + z <= 0)
num_accept = np.sum(accept)
if num_accept > 0:
# rvs follow a distribution with density g: rvs = G_inv(v)
rvs = np.sqrt(1 + z[accept])
x[simulated:(simulated + num_accept)] = rvs
simulated += num_accept
return np.reshape(x, size1d)
else:
# use ratio of uniforms method
def f(x):
return np.where((x >= 0) & (x <= chi),
np.exp(2*np.log(x) - x**2/2), 0)
umax = np.sqrt(2) / np.exp(0.5)
vmax = 4 / np.exp(1)
z = rvs_ratio_uniforms(f, umax, 0, vmax, size=numsamples,
random_state=random_state)
return np.sqrt(1 - z*z / chi**2)
def _stats(self, chi):
chi2 = chi**2
phi = _argus_phi(chi)
m = np.sqrt(np.pi/8) * chi * np.exp(-chi2/4) * sc.iv(1, chi2/4) / phi
v = (1 - 3 / chi2 + chi * _norm_pdf(chi) / phi) - m**2
return m, v, None, None
argus = argus_gen(name='argus', longname="An Argus Function", a=0.0, b=1.0)
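# Illustrative sketch (not part of scipy's public API) of the inverse CDF
# G_inv used by the rejection sampler in ``argus._rvs_scalar`` above, written
# in the overflow-safe form from the comments. The demo name is hypothetical.
def _argus_g_inv_demo(chi=1.5, seed=0):
    rng = np.random.RandomState(seed)
    y = rng.uniform(size=5)
    echi = np.exp(-chi**2 / 2)
    # G_inv(y) = sqrt(1 + 2*log(exp(-chi**2/2)*(1 - y) + y) / chi**2)
    return np.sqrt(1 + 2 * np.log(echi * (1 - y) + y) / chi**2)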
class rv_histogram(rv_continuous):
"""
Generates a distribution given by a histogram.
This is useful to generate a template distribution from a binned
    data sample.
As a subclass of the `rv_continuous` class, `rv_histogram` inherits from it
a collection of generic methods (see `rv_continuous` for the full list),
and implements them based on the properties of the provided binned
    data sample.
Parameters
----------
histogram : tuple of array_like
Tuple containing two array_like objects
The first containing the content of n bins
The second containing the (n+1) bin boundaries
        In particular, the return value of np.histogram is accepted.
Notes
-----
There are no additional shape parameters except for the loc and scale.
The pdf is defined as a stepwise function from the provided histogram
The cdf is a linear interpolation of the pdf.
.. versionadded:: 0.19.0
Examples
--------
Create a scipy.stats distribution from a numpy histogram
>>> import scipy.stats
>>> import numpy as np
>>> data = scipy.stats.norm.rvs(size=100000, loc=0, scale=1.5, random_state=123)
>>> hist = np.histogram(data, bins=100)
>>> hist_dist = scipy.stats.rv_histogram(hist)
Behaves like an ordinary scipy rv_continuous distribution
>>> hist_dist.pdf(1.0)
0.20538577847618705
>>> hist_dist.cdf(2.0)
0.90818568543056499
PDF is zero above (below) the highest (lowest) bin of the histogram,
defined by the max (min) of the original dataset
>>> hist_dist.pdf(np.max(data))
0.0
>>> hist_dist.cdf(np.max(data))
1.0
>>> hist_dist.pdf(np.min(data))
7.7591907244498314e-05
>>> hist_dist.cdf(np.min(data))
0.0
PDF and CDF follow the histogram
>>> import matplotlib.pyplot as plt
>>> X = np.linspace(-5.0, 5.0, 100)
>>> plt.title("PDF from Template")
>>> plt.hist(data, density=True, bins=100)
>>> plt.plot(X, hist_dist.pdf(X), label='PDF')
>>> plt.plot(X, hist_dist.cdf(X), label='CDF')
>>> plt.show()
"""
_support_mask = rv_continuous._support_mask
def __init__(self, histogram, *args, **kwargs):
"""
Create a new distribution using the given histogram
Parameters
----------
histogram : tuple of array_like
Tuple containing two array_like objects
The first containing the content of n bins
The second containing the (n+1) bin boundaries
            In particular, the return value of np.histogram is accepted.
"""
self._histogram = histogram
if len(histogram) != 2:
raise ValueError("Expected length 2 for parameter histogram")
self._hpdf = np.asarray(histogram[0])
self._hbins = np.asarray(histogram[1])
if len(self._hpdf) + 1 != len(self._hbins):
raise ValueError("Number of elements in histogram content "
"and histogram boundaries do not match, "
"expected n and n+1.")
self._hbin_widths = self._hbins[1:] - self._hbins[:-1]
self._hpdf = self._hpdf / float(np.sum(self._hpdf * self._hbin_widths))
self._hcdf = np.cumsum(self._hpdf * self._hbin_widths)
self._hpdf = np.hstack([0.0, self._hpdf, 0.0])
self._hcdf = np.hstack([0.0, self._hcdf])
# Set support
kwargs['a'] = self.a = self._hbins[0]
kwargs['b'] = self.b = self._hbins[-1]
super(rv_histogram, self).__init__(*args, **kwargs)
def _pdf(self, x):
"""
PDF of the histogram
"""
return self._hpdf[np.searchsorted(self._hbins, x, side='right')]
def _cdf(self, x):
"""
CDF calculated from the histogram
"""
return np.interp(x, self._hbins, self._hcdf)
def _ppf(self, x):
"""
Percentile function calculated from the histogram
"""
return np.interp(x, self._hcdf, self._hbins)
def _munp(self, n):
"""Compute the n-th non-central moment."""
integrals = (self._hbins[1:]**(n+1) - self._hbins[:-1]**(n+1)) / (n+1)
return np.sum(self._hpdf[1:-1] * integrals)
def _entropy(self):
"""Compute entropy of distribution"""
res = _lazywhere(self._hpdf[1:-1] > 0.0,
(self._hpdf[1:-1],),
np.log,
0.0)
return -np.sum(self._hpdf[1:-1] * res * self._hbin_widths)
def _updated_ctor_param(self):
"""
Set the histogram as additional constructor argument
"""
dct = super(rv_histogram, self)._updated_ctor_param()
dct['histogram'] = self._histogram
return dct
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_continuous)
__all__ = _distn_names + _distn_gen_names + ['rv_histogram']
| pizzathief/scipy | scipy/stats/_continuous_distns.py | Python | bsd-3-clause | 268727 | ["CRYSTAL", "Gaussian"] | 29e4d563b2215c8bdb8c6e520fbe799f9803dfb8fb7afadc8a0e6eed282c0028 |
import os, sys, re, inspect, types, errno, pprint, subprocess, io, shutil, time, copy, unittest
import path_tool
path_tool.activate_module('FactorySystem')
path_tool.activate_module('argparse')
from ParseGetPot import ParseGetPot
from socket import gethostname
#from options import *
from util import *
from RunParallel import RunParallel
from CSVDiffer import CSVDiffer
from XMLDiffer import XMLDiffer
from Tester import Tester
from PetscJacobianTester import PetscJacobianTester
from InputParameters import InputParameters
from Factory import Factory
from Parser import Parser
from Warehouse import Warehouse
import argparse
from optparse import OptionParser, OptionGroup, Values
from timeit import default_timer as clock
class TestHarness:
@staticmethod
def buildAndRun(argv, app_name, moose_dir):
if '--store-timing' in argv:
harness = TestTimer(argv, app_name, moose_dir)
elif '--testharness-unittest' in argv:
harness = TestHarnessTester(argv, app_name, moose_dir)
else:
harness = TestHarness(argv, app_name, moose_dir)
harness.findAndRunTests()
sys.exit(harness.error_code)
def __init__(self, argv, app_name, moose_dir):
self.factory = Factory()
# Build a Warehouse to hold the MooseObjects
self.warehouse = Warehouse()
        # Get dependent applications and load dynamic tester plugins
# If applications have new testers, we expect to find them in <app_dir>/scripts/TestHarness/testers
dirs = [os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))]
sys.path.append(os.path.join(moose_dir, 'framework', 'scripts')) # For find_dep_apps.py
        # Use the find_dep_apps script to get the dependent applications for an app
import find_dep_apps
depend_app_dirs = find_dep_apps.findDepApps(app_name)
dirs.extend([os.path.join(my_dir, 'scripts', 'TestHarness') for my_dir in depend_app_dirs.split('\n')])
# Finally load the plugins!
self.factory.loadPlugins(dirs, 'testers', Tester)
self.test_table = []
self.num_passed = 0
self.num_failed = 0
self.num_skipped = 0
self.num_pending = 0
self.host_name = gethostname()
self.moose_dir = moose_dir
self.base_dir = os.getcwd()
self.run_tests_dir = os.path.abspath('.')
self.code = '2d2d6769726c2d6d6f6465'
self.error_code = 0x0
# Assume libmesh is a peer directory to MOOSE if not defined
        if "LIBMESH_DIR" in os.environ:
self.libmesh_dir = os.environ['LIBMESH_DIR']
else:
self.libmesh_dir = os.path.join(self.moose_dir, 'libmesh', 'installed')
self.file = None
# Parse arguments
self.parseCLArgs(argv)
self.checks = {}
self.checks['platform'] = getPlatforms()
self.checks['submodules'] = getInitializedSubmodules(self.run_tests_dir)
# The TestHarness doesn't strictly require the existence of libMesh in order to run. Here we allow the user
# to select whether they want to probe for libMesh configuration options.
if self.options.skip_config_checks:
self.checks['compiler'] = set(['ALL'])
self.checks['petsc_version'] = 'N/A'
self.checks['library_mode'] = set(['ALL'])
self.checks['mesh_mode'] = set(['ALL'])
self.checks['dtk'] = set(['ALL'])
self.checks['unique_ids'] = set(['ALL'])
self.checks['vtk'] = set(['ALL'])
self.checks['tecplot'] = set(['ALL'])
self.checks['dof_id_bytes'] = set(['ALL'])
self.checks['petsc_debug'] = set(['ALL'])
self.checks['curl'] = set(['ALL'])
self.checks['tbb'] = set(['ALL'])
self.checks['superlu'] = set(['ALL'])
self.checks['slepc'] = set(['ALL'])
self.checks['unique_id'] = set(['ALL'])
self.checks['cxx11'] = set(['ALL'])
self.checks['asio'] = set(['ALL'])
else:
self.checks['compiler'] = getCompilers(self.libmesh_dir)
self.checks['petsc_version'] = getPetscVersion(self.libmesh_dir)
self.checks['library_mode'] = getSharedOption(self.libmesh_dir)
self.checks['mesh_mode'] = getLibMeshConfigOption(self.libmesh_dir, 'mesh_mode')
self.checks['dtk'] = getLibMeshConfigOption(self.libmesh_dir, 'dtk')
self.checks['unique_ids'] = getLibMeshConfigOption(self.libmesh_dir, 'unique_ids')
self.checks['vtk'] = getLibMeshConfigOption(self.libmesh_dir, 'vtk')
self.checks['tecplot'] = getLibMeshConfigOption(self.libmesh_dir, 'tecplot')
self.checks['dof_id_bytes'] = getLibMeshConfigOption(self.libmesh_dir, 'dof_id_bytes')
self.checks['petsc_debug'] = getLibMeshConfigOption(self.libmesh_dir, 'petsc_debug')
self.checks['curl'] = getLibMeshConfigOption(self.libmesh_dir, 'curl')
self.checks['tbb'] = getLibMeshConfigOption(self.libmesh_dir, 'tbb')
self.checks['superlu'] = getLibMeshConfigOption(self.libmesh_dir, 'superlu')
self.checks['slepc'] = getLibMeshConfigOption(self.libmesh_dir, 'slepc')
self.checks['unique_id'] = getLibMeshConfigOption(self.libmesh_dir, 'unique_id')
self.checks['cxx11'] = getLibMeshConfigOption(self.libmesh_dir, 'cxx11')
self.checks['asio'] = getIfAsioExists(self.moose_dir)
# Override the MESH_MODE option if using the '--distributed-mesh'
# or (deprecated) '--parallel-mesh' option.
if (self.options.parallel_mesh == True or self.options.distributed_mesh == True) or \
(self.options.cli_args != None and \
(self.options.cli_args.find('--parallel-mesh') != -1 or self.options.cli_args.find('--distributed-mesh') != -1)):
option_set = set(['ALL', 'PARALLEL'])
self.checks['mesh_mode'] = option_set
method = set(['ALL', self.options.method.upper()])
self.checks['method'] = method
self.initialize(argv, app_name)
"""
Recursively walks the current tree looking for tests to run
Error codes:
0x0 - Success
0x0* - Parser error
0x1* - TestHarness error
"""
def findAndRunTests(self, find_only=False):
self.error_code = 0x0
self.preRun()
self.start_time = clock()
try:
# PBS STUFF
if self.options.pbs:
# Check to see if we are using the PBS Emulator.
# It's expensive, so it must remain outside of the os.walk for loop.
self.options.PBSEmulator = self.checkPBSEmulator()
if self.options.pbs and os.path.exists(self.options.pbs):
self.options.processingPBS = True
self.processPBSResults()
else:
self.options.processingPBS = False
self.base_dir = os.getcwd()
for dirpath, dirnames, filenames in os.walk(self.base_dir, followlinks=True):
# Prune submodule paths when searching for tests
if self.base_dir != dirpath and os.path.exists(os.path.join(dirpath, '.git')):
dirnames[:] = []
# walk into directories that aren't contrib directories
if "contrib" not in os.path.relpath(dirpath, os.getcwd()):
for file in filenames:
# set cluster_handle to be None initially (happens for each test)
self.options.cluster_handle = None
# See if there were other arguments (test names) passed on the command line
if file == self.options.input_file_name: #and self.test_match.search(file):
saved_cwd = os.getcwd()
sys.path.append(os.path.abspath(dirpath))
os.chdir(dirpath)
if self.prunePath(file):
continue
# Build a Parser to parse the objects
parser = Parser(self.factory, self.warehouse)
# Parse it
self.error_code = self.error_code | parser.parse(file)
# Retrieve the tests from the warehouse
testers = self.warehouse.getActiveObjects()
# Augment the Testers with additional information directly from the TestHarness
for tester in testers:
self.augmentParameters(file, tester)
# Short circuit this loop if we've only been asked to parse Testers
# Note: The warehouse will accumulate all testers in this mode
if find_only:
self.warehouse.markAllObjectsInactive()
continue
# Clear out the testers, we won't need them to stick around in the warehouse
self.warehouse.clear()
if self.options.enable_recover:
testers = self.appendRecoverableTests(testers)
# Handle PBS tests.cluster file
if self.options.pbs:
(tester, command) = self.createClusterLauncher(dirpath, testers)
if command is not None:
self.runner.run(tester, command)
else:
# Go through the Testers and run them
for tester in testers:
# Double the allotted time for tests when running with the valgrind option
tester.setValgrindMode(self.options.valgrind_mode)
# When running in valgrind mode, we end up with a ton of output for each failed
# test. Therefore, we limit the number of fails...
if self.options.valgrind_mode and self.num_failed > self.options.valgrind_max_fails:
(should_run, reason) = (False, 'Max Fails Exceeded')
elif self.num_failed > self.options.max_fails:
(should_run, reason) = (False, 'Max Fails Exceeded')
elif tester.parameters().isValid('error_code'):
(should_run, reason) = (False, 'skipped (Parser Error)')
else:
(should_run, reason) = tester.checkRunnableBase(self.options, self.checks)
if should_run:
command = tester.getCommand(self.options)
# This method spawns another process and allows this loop to continue looking for tests
# RunParallel will call self.testOutputAndFinish when the test has completed running
# This method will block when the maximum allowed parallel processes are running
self.runner.run(tester, command)
else: # This job is skipped - notify the runner
if reason != '':
if (self.options.report_skipped and reason.find('skipped') != -1) or reason.find('skipped') == -1:
self.handleTestResult(tester.parameters(), '', reason)
self.runner.jobSkipped(tester.parameters()['test_name'])
os.chdir(saved_cwd)
sys.path.pop()
except KeyboardInterrupt:
print '\nExiting due to keyboard interrupt...'
sys.exit(0)
self.runner.join()
# Wait for all tests to finish
if self.options.pbs and self.options.processingPBS == False:
print '\n< checking batch status >\n'
self.options.processingPBS = True
self.processPBSResults()
self.cleanup()
# Flags for the parser start at the low bit, flags for the TestHarness start at the high bit
if self.num_failed:
self.error_code = self.error_code | 0x80
return
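# Illustrative note (an assumption drawn from the docstring above, not from
# the original source): error codes compose bitwise. A parser error of 0x01
# OR'ed with the TestHarness failure flag 0x80 yields error_code == 0x81;
# cleanup() later masks with Parser.getErrorCodeMask() to detect the parser
# portion on its own.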
def createClusterLauncher(self, dirpath, testers):
self.options.test_serial_number = 0
command = None
tester = None
# Create the tests.cluster input file
# Loop through each tester and create a job
for tester in testers:
(should_run, reason) = tester.checkRunnableBase(self.options, self.checks)
if should_run:
if self.options.cluster_handle == None:
self.options.cluster_handle = open(dirpath + '/' + self.options.pbs + '.cluster', 'w')
self.options.cluster_handle.write('[Jobs]\n')
# This returns the command to run as well as builds the parameters of the test
# The resulting command once this loop has completed is sufficient to launch
# all previous jobs
command = tester.getCommand(self.options)
self.options.cluster_handle.write('[]\n')
self.options.test_serial_number += 1
else: # This job is skipped - notify the runner
if (reason != ''):
self.handleTestResult(tester.parameters(), '', reason)
self.runner.jobSkipped(tester.parameters()['test_name'])
# Close the tests.cluster file
if self.options.cluster_handle is not None:
self.options.cluster_handle.close()
self.options.cluster_handle = None
# Return the final tester/command (sufficient to run all tests)
return (tester, command)
def prunePath(self, filename):
test_dir = os.path.abspath(os.path.dirname(filename))
# Filter tests that we want to run
# Under the new format, we will filter based on directory not filename since it is fixed
prune = True
if len(self.tests) == 0:
prune = False # No filter
else:
for item in self.tests:
if test_dir.find(item) > -1:
prune = False
# Return the inverse of will_run to indicate that this path should be pruned
return prune
def augmentParameters(self, filename, tester):
params = tester.parameters()
# We are going to do some formatting of the path that is printed
# Case 1. If the test directory (normally matches the input_file_name) comes first,
# we will simply remove it from the path
# Case 2. If the test directory is somewhere in the middle then we should preserve
# the leading part of the path
test_dir = os.path.abspath(os.path.dirname(filename))
relative_path = test_dir.replace(self.run_tests_dir, '')
relative_path = relative_path.replace('/' + self.options.input_file_name + '/', ':')
relative_path = re.sub('^[/:]*', '', relative_path) # Trim slashes and colons
formatted_name = relative_path + '.' + tester.name()
params['test_name'] = formatted_name
params['test_dir'] = test_dir
params['relative_path'] = relative_path
params['executable'] = self.executable
params['hostname'] = self.host_name
params['moose_dir'] = self.moose_dir
params['base_dir'] = self.base_dir
if params.isValid('prereq'):
if type(params['prereq']) != list:
print "Option 'prereq' needs to be of type list in " + params['test_name']
sys.exit(1)
params['prereq'] = [relative_path.replace('/tests/', '') + '.' + item for item in params['prereq']]
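# Illustrative example of the formatting above (hypothetical paths): with
# run_tests_dir = '/home/user/app', input_file_name = 'tests' and a spec
# file in '/home/user/app/tests/kernels/simple', relative_path collapses to
# 'kernels/simple' and test_name becomes 'kernels/simple.<tester_name>'.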
# This method splits a list of tests into two pieces each: the first piece will run the test for
# approx. half the number of timesteps and will write out a restart file. The second test will
# then complete the run using the MOOSE recover option.
def appendRecoverableTests(self, testers):
new_tests = []
for part1 in testers:
if part1.parameters()['recover'] == True:
# Clone the test specs
part2 = copy.deepcopy(part1)
# Part 1:
part1_params = part1.parameters()
part1_params['test_name'] += '_part1'
part1_params['cli_args'].append('--half-transient Outputs/checkpoint=true')
part1_params['skip_checks'] = True
# Part 2:
part2_params = part2.parameters()
part2_params['prereq'].append(part1.parameters()['test_name'])
part2_params['delete_output_before_running'] = False
part2_params['cli_args'].append('--recover')
part2_params.addParam('caveats', ['recover'], "")
new_tests.append(part2)
testers.extend(new_tests)
return testers
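# Illustrative sketch (hypothetical test name): a recoverable tester
# 'kernels/simple.diffusion' is split into
#   kernels/simple.diffusion_part1  -- runs '--half-transient Outputs/checkpoint=true'
#   kernels/simple.diffusion        -- runs '--recover' with a prereq on the _part1 test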
## Finish the test by inspecting the raw output
def testOutputAndFinish(self, tester, retcode, output, start=0, end=0):
caveats = []
test = tester.specs # Need to refactor
if test.isValid('caveats'):
caveats = test['caveats']
if self.options.pbs and self.options.processingPBS == False:
(reason, output) = self.buildPBSBatch(output, tester)
elif self.options.dry_run:
reason = 'DRY_RUN'
output += '\n'.join(tester.processResultsCommand(self.moose_dir, self.options))
else:
(reason, output) = tester.processResults(self.moose_dir, retcode, self.options, output)
if self.options.scaling and test['scale_refine']:
caveats.append('scaled')
did_pass = True
if reason == '':
# It ran OK, but is this test set to be skipped on any platform, compiler, or other reason?
if self.options.extra_info:
checks = ['platform', 'compiler', 'petsc_version', 'mesh_mode', 'method', 'library_mode', 'dtk', 'unique_ids']
for check in checks:
if not 'ALL' in test[check]:
caveats.append(', '.join(test[check]))
if len(caveats):
result = '[' + ', '.join(caveats).upper() + '] OK'
elif self.options.pbs and self.options.processingPBS == False:
result = 'LAUNCHED'
else:
result = 'OK'
elif reason == 'DRY_RUN':
result = 'DRY_RUN'
else:
result = 'FAILED (%s)' % reason
did_pass = False
if self.options.pbs and self.options.processingPBS == False and did_pass == True:
# Handle the launch result, but do not add it to the results table (except if we learned that QSUB failed to launch for some reason)
self.handleTestResult(tester.specs, output, result, start, end, False)
return did_pass
else:
self.handleTestResult(tester.specs, output, result, start, end)
return did_pass
def getTiming(self, output):
# Return the reported active time, or an empty string if it is absent
m = re.search(r"Active time=(\S+)", output)
if m != None:
return m.group(1)
return ''
def getSolveTime(self, output):
# Return the reported solve time, or an empty string if it is absent
m = re.search(r"solve\(\).*", output)
if m != None:
return m.group().split()[5]
return ''
def checkExpectError(self, output, expect_error):
if re.search(expect_error, output, re.MULTILINE | re.DOTALL) == None:
#print "%" * 100, "\nExpect Error Pattern not found:\n", expect_error, "\n", "%" * 100, "\n"
return False
else:
return True
# PBS Defs
def checkPBSEmulator(self):
try:
qstat_process = subprocess.Popen(['qstat', '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
qstat_output = qstat_process.communicate()
except OSError:
# qstat binary is not available
print 'qstat not available. Perhaps you need to load the PBS module?'
sys.exit(1)
if len(qstat_output[1]):
# The PBS Emulator has no --version argument, and thus returns output to stderr
return True
else:
return False
def processPBSResults(self):
# If batch file exists, check the contents for pending tests.
if os.path.exists(self.options.pbs):
# Build a list of launched jobs
batch_file = open(self.options.pbs)
batch_list = [line.split(':') for line in batch_file.read().split('\n')]
batch_file.close()
del batch_list[-1:]
# Loop through launched jobs and match the TEST_NAME to determine the correct stdout (Output_Path)
for job in batch_list:
file = '/'.join(job[2].split('/')[:-2]) + '/' + job[3]
# Build a Warehouse to hold the MooseObjects
warehouse = Warehouse()
# Build a Parser to parse the objects
parser = Parser(self.factory, warehouse)
# Parse it
parser.parse(file)
# Retrieve the tests from the warehouse
testers = warehouse.getAllObjects()
for tester in testers:
self.augmentParameters(file, tester)
for tester in testers:
# Build the requested Tester object
if job[1] == tester.parameters()['test_name']:
# Create Test Type
# test = self.factory.create(tester.parameters()['type'], tester)
# Get job status via qstat
qstat = ['qstat', '-f', '-x', str(job[0])]
qstat_command = subprocess.Popen(qstat, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
qstat_stdout = qstat_command.communicate()[0]
if qstat_stdout != None:
output_value = re.search(r'job_state = (\w+)', qstat_stdout).group(1)
else:
return ('QSTAT NOT FOUND', '')
# Report the current status of JOB_ID
if output_value == 'F':
# F = Finished. Get the exit code reported by qstat
exit_code = int(re.search(r'Exit_status = (-?\d+)', qstat_stdout).group(1))
# Read the stdout file
if os.path.exists(job[2]):
output_file = open(job[2], 'r')
# Not sure I am doing this right: I have to change the TEST_DIR to match the temporary cluster_launcher TEST_DIR location, thus violating the tester.specs...
tester.parameters()['test_dir'] = '/'.join(job[2].split('/')[:-1])
outfile = output_file.read()
output_file.close()
self.testOutputAndFinish(tester, exit_code, outfile)
else:
# I ran into this scenario when the cluster went down, but launched/completed my job :)
self.handleTestResult(tester.specs, '', 'FAILED (NO STDOUT FILE)', 0, 0, True)
elif output_value == 'R':
# Job is currently running
self.handleTestResult(tester.specs, '', 'RUNNING', 0, 0, True)
elif output_value == 'E':
# Job is exiting
self.handleTestResult(tester.specs, '', 'EXITING', 0, 0, True)
elif output_value == 'Q':
# Job is currently queued
self.handleTestResult(tester.specs, '', 'QUEUED', 0, 0, True)
else:
return ('BATCH FILE NOT FOUND', '')
def buildPBSBatch(self, output, tester):
# Create/Update the batch file
if 'command not found' in output:
return ('QSUB NOT FOUND', '')
else:
# Get the Job information from the ClusterLauncher
results = re.findall(r'JOB_NAME: (\w+) JOB_ID:.* (\d+).*TEST_NAME: (\S+)', output)
if len(results) != 0:
file_name = self.options.pbs
job_list = open(os.path.abspath(os.path.join(tester.specs['executable'], os.pardir)) + '/' + file_name, 'a')
for result in results:
(test_dir, job_id, test_name) = result
qstat_command = subprocess.Popen(['qstat', '-f', '-x', str(job_id)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
qstat_stdout = qstat_command.communicate()[0]
# Get the Output_Path from qstat stdout
if qstat_stdout != None:
output_value = re.search(r'Output_Path(.*?)(^ +)', qstat_stdout, re.S | re.M).group(1)
output_value = output_value.split(':')[1].replace('\n', '').replace('\t', '').strip()
else:
job_list.close()
return ('QSTAT NOT FOUND', '')
# Write job_id, test['test_name'], and Output_Path to the batch file
job_list.write(str(job_id) + ':' + test_name + ':' + output_value + ':' + self.options.input_file_name + '\n')
# Return to TestHarness and inform we have launched the job
job_list.close()
return ('', 'LAUNCHED')
else:
return ('QSTAT INVALID RESULTS', output)
def cleanPBSBatch(self):
# Open the PBS batch file and assign it to a list
if os.path.exists(self.options.pbs_cleanup):
batch_file = open(self.options.pbs_cleanup, 'r')
batch_list = [line.split(':') for line in batch_file.read().split('\n')]
batch_file.close()
del batch_list[-1:]
else:
print 'PBS batch file not found:', self.options.pbs_cleanup
sys.exit(1)
# Loop through launched jobs and delete what's found.
for job in batch_list:
if os.path.exists(job[2]):
batch_dir = os.path.abspath(os.path.join(job[2], os.pardir)).split('/')
if os.path.exists('/'.join(batch_dir)):
shutil.rmtree('/'.join(batch_dir))
if os.path.exists('/'.join(batch_dir[:-1]) + '/' + self.options.pbs_cleanup + '.cluster'):
os.remove('/'.join(batch_dir[:-1]) + '/' + self.options.pbs_cleanup + '.cluster')
os.remove(self.options.pbs_cleanup)
# END PBS Defs
## Update global variables and print output based on the test result
# Containing OK means it passed, skipped means skipped, anything else means it failed
def handleTestResult(self, specs, output, result, start=0, end=0, add_to_table=True):
timing = ''
if self.options.timing:
timing = self.getTiming(output)
elif self.options.store_time:
timing = self.getSolveTime(output)
# Only add to the test_table if told to. We now have enough cases where we wish to print to the screen, but not
# in the 'Final Test Results' area.
if add_to_table:
self.test_table.append( (specs, output, result, timing, start, end) )
if result.find('OK') != -1 or result.find('DRY_RUN') != -1:
self.num_passed += 1
elif result.find('skipped') != -1:
self.num_skipped += 1
elif result.find('deleted') != -1:
self.num_skipped += 1
elif result.find('LAUNCHED') != -1 or result.find('RUNNING') != -1 or result.find('QUEUED') != -1 or result.find('EXITING') != -1:
self.num_pending += 1
else:
self.num_failed += 1
self.postRun(specs, timing)
if self.options.show_directory:
print printResult(specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options)
else:
print printResult(specs['test_name'], result, timing, start, end, self.options)
if self.options.verbose or ('FAILED' in result and not self.options.quiet):
output = output.replace('\r', '\n') # replace the carriage returns with newlines
lines = output.split('\n')
color = ''
if 'EXODIFF' in result or 'CSVDIFF' in result:
color = 'YELLOW'
elif 'FAILED' in result:
color = 'RED'
else:
color = 'GREEN'
test_name = colorText(specs['test_name'] + ": ", color, colored=self.options.colored, code=self.options.code)
output = test_name + ("\n" + test_name).join(lines)
print output
# Print result line again at the bottom of the output for failed tests
if self.options.show_directory:
print printResult(specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options), "(reprint)"
else:
print printResult(specs['test_name'], result, timing, start, end, self.options), "(reprint)"
if not 'skipped' in result:
if self.options.file:
if self.options.show_directory:
self.file.write(printResult( specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options, color=False) + '\n')
self.file.write(output)
else:
self.file.write(printResult( specs['test_name'], result, timing, start, end, self.options, color=False) + '\n')
self.file.write(output)
if self.options.sep_files or (self.options.fail_files and 'FAILED' in result) or (self.options.ok_files and result.find('OK') != -1):
fname = os.path.join(specs['test_dir'], specs['test_name'].split('/')[-1] + '.' + result[:6] + '.txt')
f = open(fname, 'w')
f.write(printResult( specs['test_name'], result, timing, start, end, self.options, color=False) + '\n')
f.write(output)
f.close()
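# Illustrative note: with --sep-files, a test named 'kernels/simple.diffusion'
# that ends in 'FAILED (EXODIFF)' writes its output to
# <test_dir>/simple.diffusion.FAILED.txt (the result string is truncated to
# its first six characters when building the file name).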
# Write the app_name to a file, if the tests passed
def writeState(self, app_name):
# If the BITTEN_STATUS_MOOSE environment variable is set, build a line-itemized list of applications which passed their tests
if os.environ.has_key("BITTEN_STATUS_MOOSE"):
result_file = open(os.path.join(self.moose_dir, 'test_results.log'), 'a')
result_file.write(os.path.split(app_name)[1].split('-')[0] + '\n')
result_file.close()
# Print final results, close open files, and exit with the correct error code
def cleanup(self):
# Print the results table again if a bunch of output was spewed to the screen between
# tests as they were running
if self.options.verbose or (self.num_failed != 0 and not self.options.quiet):
print '\n\nFinal Test Results:\n' + ('-' * (TERM_COLS-1))
for (test, output, result, timing, start, end) in sorted(self.test_table, key=lambda x: x[2], reverse=True):
if self.options.show_directory:
print printResult(test['relative_path'] + '/' + test['test_name'].split('/')[-1], result, timing, start, end, self.options)
else:
print printResult(test['test_name'], result, timing, start, end, self.options)
time = clock() - self.start_time
print '-' * (TERM_COLS-1)
print 'Ran %d tests in %.1f seconds' % (self.num_passed+self.num_failed, time)
if self.num_passed:
summary = '<g>%d passed</g>'
else:
summary = '<b>%d passed</b>'
summary += ', <b>%d skipped</b>'
if self.num_pending:
summary += ', <c>%d pending</c>'
else:
summary += ', <b>%d pending</b>'
if self.num_failed:
summary += ', <r>%d FAILED</r>'
else:
summary += ', <b>%d failed</b>'
# Mask off TestHarness error codes to report parser errors
if self.error_code & Parser.getErrorCodeMask():
summary += ', <r>FATAL PARSER ERROR</r>'
print colorText( summary % (self.num_passed, self.num_skipped, self.num_pending, self.num_failed), "", html = True, \
colored=self.options.colored, code=self.options.code )
if self.options.pbs:
print '\nYour PBS batch file:', self.options.pbs
if self.file:
self.file.close()
if self.num_failed == 0:
self.writeState(self.executable)
def initialize(self, argv, app_name):
# Initialize the parallel runner with how many tests to run in parallel
self.runner = RunParallel(self, self.options.jobs, self.options.load)
## Save executable-under-test name to self.executable
self.executable = os.getcwd() + '/' + app_name + '-' + self.options.method
# Save the output dir since the current working directory changes during tests
self.output_dir = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), self.options.output_dir)
# Create the output dir if they ask for it. It is easier to ask for forgiveness than permission
if self.options.output_dir:
try:
os.makedirs(self.output_dir)
except OSError, ex:
if ex.errno == errno.EEXIST: pass
else: raise
# Open the file to redirect output to and set the quiet option for file output
if self.options.file:
self.file = open(os.path.join(self.output_dir, self.options.file), 'w')
if self.options.file or self.options.fail_files or self.options.sep_files:
self.options.quiet = True
## Parse command line options and assign them to self.options
def parseCLArgs(self, argv):
parser = argparse.ArgumentParser(description='A tool used to test MOOSE based applications')
parser.add_argument('test_name', nargs=argparse.REMAINDER)
parser.add_argument('--opt', action='store_const', dest='method', const='opt', help='test the app_name-opt binary')
parser.add_argument('--dbg', action='store_const', dest='method', const='dbg', help='test the app_name-dbg binary')
parser.add_argument('--devel', action='store_const', dest='method', const='devel', help='test the app_name-devel binary')
parser.add_argument('--oprof', action='store_const', dest='method', const='oprof', help='test the app_name-oprof binary')
parser.add_argument('--pro', action='store_const', dest='method', const='pro', help='test the app_name-pro binary')
parser.add_argument('-j', '--jobs', nargs='?', metavar='int', action='store', type=int, dest='jobs', const=1, help='run test binaries in parallel')
parser.add_argument('-e', action='store_true', dest='extra_info', help='Display "extra" information including all caveats and deleted tests')
parser.add_argument('-c', '--no-color', action='store_false', dest='colored', help='Do not show colored output')
parser.add_argument('--heavy', action='store_true', dest='heavy_tests', help='Run tests marked with HEAVY : True')
parser.add_argument('--all-tests', action='store_true', dest='all_tests', help='Run normal tests and tests marked with HEAVY : True')
parser.add_argument('-g', '--group', action='store', type=str, dest='group', default='ALL', help='Run only tests in the named group')
parser.add_argument('--not_group', action='store', type=str, dest='not_group', help='Run only tests NOT in the named group')
# parser.add_argument('--dofs', action='store', dest='dofs', help='This option is for automatic scaling which is not currently implemented in MOOSE 2.0')
parser.add_argument('--dbfile', nargs='?', action='store', dest='dbFile', help='Location to timings data base file. If not set, assumes $HOME/timingDB/timing.sqlite')
parser.add_argument('-l', '--load-average', action='store', type=float, dest='load', default=64.0, help='Do not run additional tests if the load average is at least LOAD')
parser.add_argument('-t', '--timing', action='store_true', dest='timing', help='Report Timing information for passing tests')
parser.add_argument('-s', '--scale', action='store_true', dest='scaling', help='Scale problems that have SCALE_REFINE set')
parser.add_argument('-i', nargs=1, action='store', type=str, dest='input_file_name', default='tests', help='The default test specification file to look for (default="tests").')
parser.add_argument('--libmesh_dir', nargs=1, action='store', type=str, dest='libmesh_dir', help='Currently only needed for bitten code coverage')
parser.add_argument('--skip-config-checks', action='store_true', dest='skip_config_checks', help='Skip configuration checks (all tests will run regardless of restrictions)')
parser.add_argument('--parallel', '-p', nargs='?', action='store', type=int, dest='parallel', const=1, help='Number of processors to use when running mpiexec')
parser.add_argument('--n-threads', nargs=1, action='store', type=int, dest='nthreads', default=1, help='Number of threads to use when running mpiexec')
parser.add_argument('-d', action='store_true', dest='debug_harness', help='Turn on Test Harness debugging')
parser.add_argument('--recover', action='store_true', dest='enable_recover', help='Run a test in recover mode')
parser.add_argument('--valgrind', action='store_const', dest='valgrind_mode', const='NORMAL', help='Run normal valgrind tests')
parser.add_argument('--valgrind-heavy', action='store_const', dest='valgrind_mode', const='HEAVY', help='Run heavy valgrind tests')
parser.add_argument('--valgrind-max-fails', nargs=1, type=int, dest='valgrind_max_fails', default=5, help='The number of valgrind tests allowed to fail before any additional valgrind tests will run')
parser.add_argument('--max-fails', nargs=1, type=int, dest='max_fails', default=50, help='The number of tests allowed to fail before any additional tests will run')
parser.add_argument('--pbs', nargs='?', metavar='batch_file', dest='pbs', const='generate', help='Enable launching tests via PBS. If no batch file is specified one will be created for you')
parser.add_argument('--pbs-cleanup', nargs=1, metavar='batch_file', help='Clean up the directories/files created by PBS. You must supply the same batch_file used to launch PBS.')
parser.add_argument('--pbs-project', nargs=1, default='moose', help='Identify PBS job submission to specified project')
parser.add_argument('--re', action='store', type=str, dest='reg_exp', help='Run tests that match --re=regular_expression')
# Options that pass straight through to the executable
parser.add_argument('--parallel-mesh', action='store_true', dest='parallel_mesh', help='Deprecated, use --distributed-mesh instead')
parser.add_argument('--distributed-mesh', action='store_true', dest='distributed_mesh', help='Pass "--distributed-mesh" to executable')
parser.add_argument('--error', action='store_true', help='Run the tests with warnings as errors (Pass "--error" to executable)')
parser.add_argument('--error-unused', action='store_true', help='Run the tests with errors on unused parameters (Pass "--error-unused" to executable)')
# Option to use for passing unwrapped options to the executable
parser.add_argument('--cli-args', nargs='?', type=str, dest='cli_args', help='Append the following list of arguments to the command line (Encapsulate the command in quotes)')
parser.add_argument('--dry-run', action='store_true', dest='dry_run', help="Pass --dry-run to print commands to run, but don't actually run them")
outputgroup = parser.add_argument_group('Output Options', 'These options control the output of the test harness. The sep-files options write output to files named test_name.TEST_RESULT.txt. All file output will overwrite old files')
outputgroup.add_argument('-v', '--verbose', action='store_true', dest='verbose', help='show the output of every test')
outputgroup.add_argument('-q', '--quiet', action='store_true', dest='quiet', help='only show the result of every test, don\'t show test output even if it fails')
outputgroup.add_argument('--no-report', action='store_false', dest='report_skipped', help='do not report skipped tests')
outputgroup.add_argument('--show-directory', action='store_true', dest='show_directory', help='Print test directory path in out messages')
outputgroup.add_argument('-o', '--output-dir', nargs=1, metavar='directory', dest='output_dir', default='', help='Save all output files in the directory, and create it if necessary')
outputgroup.add_argument('-f', '--file', nargs=1, action='store', dest='file', help='Write verbose output of each test to FILE and quiet output to terminal')
outputgroup.add_argument('-x', '--sep-files', action='store_true', dest='sep_files', help='Write the output of each test to a separate file. Only quiet output to terminal. This is equivalent to \'--sep-files-fail --sep-files-ok\'')
outputgroup.add_argument('--sep-files-ok', action='store_true', dest='ok_files', help='Write the output of each passed test to a separate file')
outputgroup.add_argument('-a', '--sep-files-fail', action='store_true', dest='fail_files', help='Write the output of each FAILED test to a separate file. Only quiet output to terminal.')
outputgroup.add_argument("--store-timing", action="store_true", dest="store_time", help="Store timing in the SQL database: $HOME/timingDB/timing.sqlite A parent directory (timingDB) must exist.")
outputgroup.add_argument("--testharness-unittest", action="store_true", help="Run the TestHarness unittests that test the TestHarness.")
outputgroup.add_argument("--revision", nargs=1, action="store", type=str, dest="revision", help="The current revision being tested. Required when using --store-timing.")
outputgroup.add_argument("--yaml", action="store_true", dest="yaml", help="Dump the parameters for the testers in Yaml Format")
outputgroup.add_argument("--dump", action="store_true", dest="dump", help="Dump the parameters for the testers in GetPot Format")
code = True
if self.code.decode('hex') in argv:
del argv[argv.index(self.code.decode('hex'))]
code = False
self.options = parser.parse_args(argv[1:])
self.tests = self.options.test_name
self.options.code = code
# Convert all list based options of length one to scalars
for key, value in vars(self.options).items():
if type(value) == list and len(value) == 1:
setattr(self.options, key, value[0])
self.checkAndUpdateCLArgs()
## Called after options are parsed from the command line
# Exit if options don't make any sense, print warnings if they are merely weird
def checkAndUpdateCLArgs(self):
opts = self.options
if opts.output_dir and not (opts.file or opts.sep_files or opts.fail_files or opts.ok_files):
print 'WARNING: --output-dir is specified but no output files will be saved, use -f or a --sep-files option'
if opts.group == opts.not_group:
print 'ERROR: The group and not_group options cannot specify the same group'
sys.exit(1)
if opts.store_time and not (opts.revision):
print 'ERROR: --store-timing is specified but no revision'
sys.exit(1)
if opts.store_time:
# timing returns Active Time, while store_timing returns Solve Time.
# Thus we need to turn off timing.
opts.timing = False
opts.scaling = True
if opts.valgrind_mode and (opts.parallel > 1 or opts.nthreads > 1):
print 'ERROR: --parallel and/or --n-threads cannot be used with --valgrind'
sys.exit(1)
# Update any keys from the environment as necessary
if not self.options.method:
if os.environ.has_key('METHOD'):
self.options.method = os.environ['METHOD']
else:
self.options.method = 'opt'
if not self.options.valgrind_mode:
self.options.valgrind_mode = ''
# Update libmesh_dir to reflect arguments
if opts.libmesh_dir:
self.libmesh_dir = opts.libmesh_dir
# Generate a batch file if the PBS argument is supplied without a file
if opts.pbs == 'generate':
largest_serial_num = 0
for name in os.listdir('.'):
m = re.search('pbs_(\d{3})', name)
if m != None and int(m.group(1)) > largest_serial_num:
largest_serial_num = int(m.group(1))
opts.pbs = "pbs_" + str(largest_serial_num+1).zfill(3)
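# Illustrative example: if pbs_001 and pbs_002 already exist in the current
# directory, the generated batch file name becomes pbs_003.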
# When running heavy tests, we'll make sure we use --no-report
if opts.heavy_tests:
self.options.report_skipped = False
def postRun(self, specs, timing):
return
def preRun(self):
if self.options.yaml:
self.factory.printYaml("Tests")
sys.exit(0)
elif self.options.dump:
self.factory.printDump("Tests")
sys.exit(0)
if self.options.pbs_cleanup:
self.cleanPBSBatch()
sys.exit(0)
def getOptions(self):
return self.options
#################################################################################################################################
# The TestTimer TestHarness
# This class finds and stores timing for individual tests. It is activated with --store-timing
#################################################################################################################################
CREATE_TABLE = """create table timing
(
app_name text,
test_name text,
revision text,
date int,
seconds real,
scale int,
load real
);"""
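# Illustrative example (hypothetical values) of a row later inserted by
# TestTimer.postRun, in column order (app_name, test_name, revision, date,
# seconds, scale, load):
#   ('moose_test', 'diffusion', 'abc1234', 1467072000, 12.5, 2, 0.42)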
class TestTimer(TestHarness):
def __init__(self, argv, app_name, moose_dir):
TestHarness.__init__(self, argv, app_name, moose_dir)
try:
from sqlite3 import dbapi2 as sqlite
except ImportError:
print 'Error: --store-timing requires the sqlite3 python module.'
sys.exit(1)
self.app_name = app_name
self.db_file = self.options.dbFile
if not self.db_file:
home = os.environ['HOME']
self.db_file = os.path.join(home, 'timingDB/timing.sqlite')
if not os.path.exists(self.db_file):
print 'Warning: creating new database at default location: ' + str(self.db_file)
self.createDB(self.db_file)
else:
print 'Warning: Assuming database location ' + self.db_file
def createDB(self, fname):
from sqlite3 import dbapi2 as sqlite
print 'Creating empty database at ' + fname
con = sqlite.connect(fname)
cr = con.cursor()
cr.execute(CREATE_TABLE)
con.commit()
def preRun(self):
from sqlite3 import dbapi2 as sqlite
# Delete previous data if app_name and repo revision are found
con = sqlite.connect(self.db_file)
cr = con.cursor()
cr.execute('delete from timing where app_name = ? and revision = ?', (self.app_name, self.options.revision))
con.commit()
# After the run store the results in the database
def postRun(self, test, timing):
from sqlite3 import dbapi2 as sqlite
con = sqlite.connect(self.db_file)
cr = con.cursor()
timestamp = int(time.time())
load = os.getloadavg()[0]
# accumulate the test results
data = []
sum_time = 0
num = 0
parse_failed = False
# We're only interested in storing scaled data
if timing != None and test['scale_refine'] != 0:
sum_time += float(timing)
num += 1
data.append( (self.app_name, test['test_name'].split('/').pop(), self.options.revision, timestamp, timing, test['scale_refine'], load) )
# Insert the data into the database
cr.executemany('insert into timing values (?,?,?,?,?,?,?)', data)
con.commit()
class TestHarnessTester(object):
"""
Class for running TestHarness unit tests.
"""
def __init__(self, argv, *args):
self._argv = argv
def findAndRunTests(self):
"""
Execute the unittests for the TestHarness.
"""
location = os.path.join(os.path.dirname(__file__), 'unit_tests')
loader = unittest.TestLoader()
suite = loader.discover(location)
runner = unittest.TextTestRunner(verbosity=2)
self.error_code = int(not runner.run(suite).wasSuccessful())
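# Illustrative driver sketch (assumed wrapper script, not part of this file):
#   import sys
#   MOOSE_DIR = '/path/to/moose'  # hypothetical location
#   TestHarness.buildAndRun(sys.argv, 'moose_test', MOOSE_DIR)
# buildAndRun() dispatches to TestTimer for --store-timing, to
# TestHarnessTester for --testharness-unittest, and to a plain TestHarness
# otherwise, then exits with harness.error_code.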
| paulthulstrup/moose | python/TestHarness/TestHarness.py | Python | lgpl-2.1 | 45,149 | ["MOOSE", "VTK"] | e23771ce8018f4539e687a326d2f0bee3ffc01e1a9b3ed9094a84654919d7ee3 |
|
""" TaskQueueDB class is a front-end to the task queues db
"""
__RCSID__ = "ebed3a8 (2012-07-06 20:33:11 +0200) Adri Casajs <adria@ecm.ub.es>"
import types
import random
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.WorkloadManagementSystem.private.SharesCorrector import SharesCorrector
from DIRAC.WorkloadManagementSystem.private.Queues import maxCPUSegments
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities import List
from DIRAC.Core.Utilities.DictCache import DictCache
from DIRAC.Core.Base.DB import DB
from DIRAC.Core.Security import Properties, CS
DEFAULT_GROUP_SHARE = 1000
TQ_MIN_SHARE = 0.001
class TaskQueueDB( DB ):
def __init__( self ):
random.seed()
DB.__init__( self, 'TaskQueueDB', 'WorkloadManagement/TaskQueueDB' )
self.__multiValueDefFields = ( 'Sites', 'GridCEs', 'GridMiddlewares', 'BannedSites',
'Platforms', 'PilotTypes', 'SubmitPools', 'JobTypes', 'Tags' )
self.__multiValueMatchFields = ( 'GridCE', 'Site', 'GridMiddleware', 'Platform',
'PilotType', 'SubmitPool', 'JobType', 'Tag' )
self.__tagMatchFields = ( 'Tag', )
self.__bannedJobMatchFields = ( 'Site', )
self.__strictRequireMatchFields = ( 'SubmitPool', 'Platform', 'PilotType', 'Tag' )
self.__singleValueDefFields = ( 'OwnerDN', 'OwnerGroup', 'Setup', 'CPUTime' )
self.__mandatoryMatchFields = ( 'Setup', 'CPUTime' )
self.__priorityIgnoredFields = ( 'Sites', 'BannedSites' )
self.__maxJobsInTQ = 5000
self.__defaultCPUSegments = maxCPUSegments
self.__maxMatchRetry = 3
self.__jobPriorityBoundaries = ( 0.001, 10 )
self.__groupShares = {}
self.__deleteTQWithDelay = DictCache( self.__deleteTQIfEmpty )
self.__opsHelper = Operations()
self.__ensureInsertionIsSingle = False
self.__sharesCorrector = SharesCorrector( self.__opsHelper )
result = self.__initializeDB()
if not result[ 'OK' ]:
raise Exception( "Can't create tables: %s" % result[ 'Message' ] )
def enableAllTaskQueues( self ):
""" Enable all Task queues
"""
return self.updateFields( "tq_TaskQueues", updateDict = { "Enabled" :"1" } )
def findOrphanJobs( self ):
""" Find jobs that are not in any task queue
"""
result = self._query( "select JobID from tq_Jobs WHERE TQId not in (SELECT TQId from tq_TaskQueues)" )
if not result[ 'OK' ]:
return result
return S_OK( [ row[0] for row in result[ 'Value' ] ] )
def isSharesCorrectionEnabled( self ):
return self.__getCSOption( "EnableSharesCorrection", False )
def getSingleValueTQDefFields( self ):
return self.__singleValueDefFields
def getMultiValueTQDefFields( self ):
return self.__multiValueDefFields
def getMultiValueMatchFields( self ):
return self.__multiValueMatchFields
def __getCSOption( self, optionName, defValue ):
return self.__opsHelper.getValue( "JobScheduling/%s" % optionName, defValue )
def getPrivatePilots( self ):
return self.__getCSOption( "PrivatePilotTypes", [ 'private' ] )
def getValidPilotTypes( self ):
return self.__getCSOption( "AllPilotTypes", [ 'private' ] )
def __initializeDB( self ):
"""
Create the tables
"""
self.__tablesDesc = {}
self.__tablesDesc[ 'tq_TaskQueues' ] = { 'Fields' : { 'TQId' : 'INTEGER(10) UNSIGNED AUTO_INCREMENT NOT NULL',
'OwnerDN' : 'VARCHAR(255) NOT NULL',
'OwnerGroup' : 'VARCHAR(32) NOT NULL',
'Setup' : 'VARCHAR(32) NOT NULL',
'CPUTime' : 'BIGINT(20) UNSIGNED NOT NULL',
'Priority' : 'FLOAT NOT NULL',
'Enabled' : 'TINYINT(1) NOT NULL DEFAULT 0'
},
'PrimaryKey' : 'TQId',
'Indexes': { 'TQOwner': [ 'OwnerDN', 'OwnerGroup',
'Setup', 'CPUTime' ]
}
}
self.__tablesDesc[ 'tq_Jobs' ] = { 'Fields' : { 'TQId' : 'INTEGER(10) UNSIGNED NOT NULL',
'JobId' : 'INTEGER(11) UNSIGNED NOT NULL',
'Priority' : 'INTEGER UNSIGNED NOT NULL',
'RealPriority' : 'FLOAT NOT NULL'
},
'PrimaryKey' : 'JobId',
'Indexes': { 'TaskIndex': [ 'TQId' ] },
}
for multiField in self.__multiValueDefFields:
tableName = 'tq_TQTo%s' % multiField
self.__tablesDesc[ tableName ] = { 'Fields' : { 'TQId' : 'INTEGER UNSIGNED NOT NULL',
'Value' : 'VARCHAR(64) NOT NULL'
},
'Indexes': { 'TaskIndex': [ 'TQId' ], '%sIndex' % multiField: [ 'Value' ] },
}
result = self._createTables( self.__tablesDesc )
if result['OK'] and result['Value']:
self.log.info( "TaskQueueDB: created tables %s" % result['Value'] )
return result
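# Illustrative note: the loop above creates one auxiliary table per
# multi-value field, e.g. tq_TQToSites, tq_TQToGridCEs, ..., tq_TQToTags,
# each holding (TQId, Value) pairs indexed both by TQId and by Value.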
def getGroupsInTQs( self ):
cmdSQL = "SELECT DISTINCT( OwnerGroup ) FROM `tq_TaskQueues`"
result = self._query( cmdSQL )
if not result[ 'OK' ]:
return result
return S_OK( [ row[0] for row in result[ 'Value' ] ] )
def forceRecreationOfTables( self ):
result = self._createTables( self.__tablesDesc, force = True )
if result['OK'] and result['Value']:
self.log.info( "TaskQueueDB: created tables %s" % result['Value'] )
return result
def __strDict( self, dDict ):
lines = []
keyLength = 0
for key in dDict:
if len( key ) > keyLength:
keyLength = len( key )
for key in sorted( dDict ):
line = "%s: " % key
line = line.ljust( keyLength + 2 )
value = dDict[ key ]
if type( value ) in ( types.ListType, types.TupleType ):
line += ','.join( list( value ) )
else:
line += str( value )
lines.append( line )
return "{\n%s\n}" % "\n".join( lines )
def fitCPUTimeToSegments( self, cpuTime ):
"""
Fit the CPU time to the valid segments
"""
maxCPUSegments = self.__getCSOption( "taskQueueCPUTimeIntervals", self.__defaultCPUSegments )
try:
maxCPUSegments = [ int( seg ) for seg in maxCPUSegments ]
#Check segments in the CS
last = 0
for cpuS in maxCPUSegments:
if cpuS <= last:
maxCPUSegments = self.__defaultCPUSegments
break
last = cpuS
except (ValueError, TypeError):
maxCPUSegments = self.__defaultCPUSegments
#Map to a segment
for iP in range( len( maxCPUSegments ) ):
cpuSegment = maxCPUSegments[ iP ]
if cpuTime <= cpuSegment:
return cpuSegment
return maxCPUSegments[-1]
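# Illustrative example (hypothetical CS setting): with
# taskQueueCPUTimeIntervals = [ 360, 1800, 3600, 86400 ], a requested
# cpuTime of 500 maps up to 1800, while any value above 86400 is capped
# at the last segment, 86400.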
def _checkTaskQueueDefinition( self, tqDefDict ):
"""
Check a task queue definition dict is valid
"""
# Confine the LHCbPlatform legacy option here, use Platform everywhere else
# until the LHCbPlatform is no longer used in the TaskQueueDB
if 'LHCbPlatforms' in tqDefDict and not "Platforms" in tqDefDict:
tqDefDict['Platforms'] = tqDefDict['LHCbPlatforms']
if 'SystemConfigs' in tqDefDict and not "Platforms" in tqDefDict:
tqDefDict['Platforms'] = tqDefDict['SystemConfigs']
for field in self.__singleValueDefFields:
if field not in tqDefDict:
return S_ERROR( "Missing mandatory field '%s' in task queue definition" % field )
fieldValueType = type( tqDefDict[ field ] )
if field in [ "CPUTime" ]:
if fieldValueType not in ( types.IntType, types.LongType ):
return S_ERROR( "Mandatory field %s value type is not valid: %s" % ( field, fieldValueType ) )
else:
if fieldValueType not in ( types.StringType, types.UnicodeType ):
return S_ERROR( "Mandatory field %s value type is not valid: %s" % ( field, fieldValueType ) )
result = self._escapeString( tqDefDict[ field ] )
if not result[ 'OK' ]:
return result
tqDefDict[ field ] = result[ 'Value' ]
for field in self.__multiValueDefFields:
if field not in tqDefDict:
continue
fieldValueType = type( tqDefDict[ field ] )
if fieldValueType not in ( types.ListType, types.TupleType ):
return S_ERROR( "Multi value field %s value type is not valid: %s" % ( field, fieldValueType ) )
result = self._escapeValues( tqDefDict[ field ] )
if not result[ 'OK' ]:
return result
tqDefDict[ field ] = result[ 'Value' ]
#FIXME: This is not used
if 'PrivatePilots' in tqDefDict:
validPilotTypes = self.getValidPilotTypes()
for pilotType in tqDefDict[ 'PrivatePilots' ]:
if pilotType not in validPilotTypes:
return S_ERROR( "PilotType %s is invalid" % pilotType )
return S_OK( tqDefDict )
def _checkMatchDefinition( self, tqMatchDict ):
"""
Check a task queue match dict is valid
"""
def travelAndCheckType( value, validTypes, escapeValues = True ):
valueType = type( value )
if valueType in ( types.ListType, types.TupleType ):
for subValue in value:
subValueType = type( subValue )
if subValueType not in validTypes:
return S_ERROR( "List contained type %s is not valid -> %s" % ( subValueType, validTypes ) )
if escapeValues:
return self._escapeValues( value )
return S_OK( value )
else:
if valueType not in validTypes:
return S_ERROR( "Type %s is not valid -> %s" % ( valueType, validTypes ) )
if escapeValues:
return self._escapeString( value )
return S_OK( value )
# Confine the LHCbPlatform legacy option here, use Platform everywhere else
# until the LHCbPlatform is no longer used in the TaskQueueDB
if 'LHCbPlatform' in tqMatchDict and not "Platform" in tqMatchDict:
tqMatchDict['Platform'] = tqMatchDict['LHCbPlatform']
if 'SystemConfig' in tqMatchDict and not "Platform" in tqMatchDict:
tqMatchDict['Platform'] = tqMatchDict['SystemConfig']
for field in self.__singleValueDefFields:
if field not in tqMatchDict:
if field in self.__mandatoryMatchFields:
return S_ERROR( "Missing mandatory field '%s' in match request definition" % field )
continue
fieldValue = tqMatchDict[ field ]
if field in [ "CPUTime" ]:
result = travelAndCheckType( fieldValue, ( types.IntType, types.LongType ), escapeValues = False )
else:
result = travelAndCheckType( fieldValue, ( types.StringType, types.UnicodeType ) )
if not result[ 'OK' ]:
return S_ERROR( "Match definition field %s failed : %s" % ( field, result[ 'Message' ] ) )
tqMatchDict[ field ] = result[ 'Value' ]
#Check multivalue
for multiField in self.__multiValueMatchFields:
for field in ( multiField, "Banned%s" % multiField ):
if field in tqMatchDict:
fieldValue = tqMatchDict[ field ]
result = travelAndCheckType( fieldValue, ( types.StringType, types.UnicodeType ) )
if not result[ 'OK' ]:
return S_ERROR( "Match definition field %s failed : %s" % ( field, result[ 'Message' ] ) )
tqMatchDict[ field ] = result[ 'Value' ]
return S_OK( tqMatchDict )
def __createTaskQueue( self, tqDefDict, priority = 1, connObj = False ):
"""
Create a task queue
Returns S_OK( tqId ) / S_ERROR
"""
if not connObj:
result = self._getConnection()
if not result[ 'OK' ]:
return S_ERROR( "Can't create task queue: %s" % result[ 'Message' ] )
connObj = result[ 'Value' ]
tqDefDict[ 'CPUTime' ] = self.fitCPUTimeToSegments( tqDefDict[ 'CPUTime' ] )
sqlSingleFields = [ 'TQId', 'Priority' ]
sqlValues = [ "0", str( priority ) ]
for field in self.__singleValueDefFields:
sqlSingleFields.append( field )
sqlValues.append( tqDefDict[ field ] )
#Insert the TQ Disabled
sqlSingleFields.append( "Enabled" )
sqlValues.append( "0" )
cmd = "INSERT INTO tq_TaskQueues ( %s ) VALUES ( %s )" % ( ", ".join( sqlSingleFields ), ", ".join( [ str( v ) for v in sqlValues ] ) )
result = self._update( cmd, conn = connObj )
if not result[ 'OK' ]:
self.log.error( "Can't insert TQ in DB", result[ 'Value' ] )
return result
if 'lastRowId' in result:
tqId = result['lastRowId']
else:
result = self._query( "SELECT LAST_INSERT_ID()", conn = connObj )
if not result[ 'OK' ]:
self.cleanOrphanedTaskQueues( connObj = connObj )
return S_ERROR( "Can't determine task queue id after insertion" )
tqId = result[ 'Value' ][0][0]
for field in self.__multiValueDefFields:
if field not in tqDefDict:
continue
values = List.uniqueElements( [ value for value in tqDefDict[ field ] if value.strip() ] )
if not values:
continue
cmd = "INSERT INTO `tq_TQTo%s` ( TQId, Value ) VALUES " % field
cmd += ", ".join( [ "( %s, %s )" % ( tqId, str( value ) ) for value in values ] )
result = self._update( cmd, conn = connObj )
if not result[ 'OK' ]:
self.log.error( "Failed to insert %s condition" % field, result[ 'Message' ] )
self.cleanOrphanedTaskQueues( connObj = connObj )
return S_ERROR( "Can't insert values %s for field %s: %s" % ( str( values ), field, result[ 'Message' ] ) )
self.log.info( "Created TQ %s" % tqId )
return S_OK( tqId )
def cleanOrphanedTaskQueues( self, connObj = False ):
"""
Delete all empty task queues
"""
self.log.info( "Cleaning orphaned TQs" )
result = self._update( "DELETE FROM `tq_TaskQueues` WHERE Enabled >= 1 AND TQId not in ( SELECT DISTINCT TQId from `tq_Jobs` )", conn = connObj )
if not result[ 'OK' ]:
return result
for mvField in self.__multiValueDefFields:
result = self._update( "DELETE FROM `tq_TQTo%s` WHERE TQId not in ( SELECT DISTINCT TQId from `tq_TaskQueues` )" % mvField,
conn = connObj )
if not result[ 'OK' ]:
return result
return S_OK()
def __setTaskQueueEnabled( self, tqId, enabled = True, connObj = False ):
if enabled:
enabled = "+ 1"
else:
enabled = "- 1"
upSQL = "UPDATE `tq_TaskQueues` SET Enabled = Enabled %s WHERE TQId=%d" % ( enabled, tqId )
result = self._update( upSQL, conn = connObj )
if not result[ 'OK' ]:
self.log.error( "Error setting TQ state", "TQ %s State %s: %s" % ( tqId, enabled, result[ 'Message' ] ) )
return result
updated = result['Value'] > 0
if updated:
self.log.info( "Set enabled = %s for TQ %s" % ( enabled, tqId ) )
return S_OK( updated )
def __hackJobPriority( self, jobPriority ):
jobPriority = min( max( int( jobPriority ), self.__jobPriorityBoundaries[0] ), self.__jobPriorityBoundaries[1] )
if jobPriority == self.__jobPriorityBoundaries[0]:
return 10 ** ( -5 )
if jobPriority == self.__jobPriorityBoundaries[1]:
return 10 ** 6
return jobPriority
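# Illustrative mapping (derived from the boundaries above): int(jobPriority)
# is clamped to [0.001, 10]; a clamped 0.001 becomes 1e-5, a clamped 10
# becomes 1e6, and anything in between passes through unchanged, widening
# the spread used by the RAND() / RealPriority draw in matchAndGetJob.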
def insertJob( self, jobId, tqDefDict, jobPriority, skipTQDefCheck = False, numRetries = 10 ):
"""
Insert a job in a task queue
Returns S_OK( tqId ) / S_ERROR
"""
try:
long( jobId )
except ValueError:
return S_ERROR( "JobId is not a number!" )
retVal = self._getConnection()
if not retVal[ 'OK' ]:
return S_ERROR( "Can't insert job: %s" % retVal[ 'Message' ] )
connObj = retVal[ 'Value' ]
if not skipTQDefCheck:
tqDefDict = dict( tqDefDict )
retVal = self._checkTaskQueueDefinition( tqDefDict )
if not retVal[ 'OK' ]:
self.log.error( "TQ definition check failed", retVal[ 'Message' ] )
return retVal
tqDefDict = retVal[ 'Value' ]
tqDefDict[ 'CPUTime' ] = self.fitCPUTimeToSegments( tqDefDict[ 'CPUTime' ] )
self.log.info( "Inserting job %s with requirements: %s" % ( jobId, self.__strDict( tqDefDict ) ) )
retVal = self.__findAndDisableTaskQueue( tqDefDict, skipDefinitionCheck = True, connObj = connObj )
if not retVal[ 'OK' ]:
return retVal
tqInfo = retVal[ 'Value' ]
newTQ = False
if not tqInfo[ 'found' ]:
self.log.info( "Creating a TQ for job %s" % jobId )
retVal = self.__createTaskQueue( tqDefDict, 1, connObj = connObj )
if not retVal[ 'OK' ]:
return retVal
tqId = retVal[ 'Value' ]
newTQ = True
else:
tqId = tqInfo[ 'tqId' ]
self.log.info( "Found TQ %s for job %s requirements" % ( tqId, jobId ) )
try:
result = self.__insertJobInTaskQueue( jobId, tqId, int( jobPriority ), checkTQExists = False, connObj = connObj )
if not result[ 'OK' ]:
self.log.error( "Error inserting job in TQ", "Job %s TQ %s: %s" % ( jobId, tqId, result[ 'Message' ] ) )
return result
if newTQ:
self.recalculateTQSharesForEntity( tqDefDict[ 'OwnerDN' ], tqDefDict[ 'OwnerGroup' ], connObj = connObj )
finally:
self.__setTaskQueueEnabled( tqId, True )
return S_OK()
def __insertJobInTaskQueue( self, jobId, tqId, jobPriority, checkTQExists = True, connObj = False ):
"""
Insert a job in a given task queue
"""
self.log.info( "Inserting job %s in TQ %s with priority %s" % ( jobId, tqId, jobPriority ) )
if not connObj:
result = self._getConnection()
if not result[ 'OK' ]:
return S_ERROR( "Can't insert job: %s" % result[ 'Message' ] )
connObj = result[ 'Value' ]
if checkTQExists:
result = self._query( "SELECT tqId FROM `tq_TaskQueues` WHERE TQId = %s" % tqId, conn = connObj )
if not result[ 'OK' ]:
return S_ERROR( "Can't find task queue with id %s: %s" % ( tqId, result[ 'Message' ] ) )
if len( result[ 'Value' ] ) == 0:
return S_ERROR( "Can't find task queue with id %s" % tqId )
hackedPriority = self.__hackJobPriority( jobPriority )
result = self._update( "INSERT INTO tq_Jobs ( TQId, JobId, Priority, RealPriority ) VALUES ( %s, %s, %s, %f ) ON DUPLICATE KEY UPDATE TQId = %s, Priority = %s, RealPriority = %f" % ( tqId, jobId, jobPriority, hackedPriority, tqId, jobPriority, hackedPriority ), conn = connObj )
if not result[ 'OK' ]:
return result
return S_OK()
def __generateTQFindSQL( self, tqDefDict, skipDefinitionCheck = False, connObj = False ):
"""
Find a task queue that has exactly the same requirements
"""
if not skipDefinitionCheck:
tqDefDict = dict( tqDefDict )
result = self._checkTaskQueueDefinition( tqDefDict )
if not result[ 'OK' ]:
return result
tqDefDict = result[ 'Value' ]
sqlCondList = []
for field in self.__singleValueDefFields:
sqlCondList.append( "`tq_TaskQueues`.%s = %s" % ( field, tqDefDict[ field ] ) )
#MAGIC SUBQUERIES TO ENSURE STRICT MATCH
for field in self.__multiValueDefFields:
tableName = '`tq_TQTo%s`' % field
if field in tqDefDict and tqDefDict[ field ]:
firstQuery = "SELECT COUNT(%s.Value) FROM %s WHERE %s.TQId = `tq_TaskQueues`.TQId" % ( tableName, tableName, tableName )
grouping = "GROUP BY %s.TQId" % tableName
valuesList = List.uniqueElements( [ value.strip() for value in tqDefDict[ field ] if value.strip() ] )
numValues = len( valuesList )
secondQuery = "%s AND %s.Value in (%s)" % ( firstQuery, tableName,
",".join( [ "%s" % str( value ) for value in valuesList ] ) )
sqlCondList.append( "%s = (%s %s)" % ( numValues, firstQuery, grouping ) )
sqlCondList.append( "%s = (%s %s)" % ( numValues, secondQuery, grouping ) )
else:
sqlCondList.append( "`tq_TaskQueues`.TQId not in ( SELECT DISTINCT %s.TQId from %s )" % ( tableName, tableName ) )
#END MAGIC: That was easy ;)
return S_OK( " AND ".join( sqlCondList ) )
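# Illustrative expansion (hypothetical values): for Sites = ( 'LCG.CERN.ch',
# 'LCG.PIC.es' ) the paired conditions require a candidate TQ to have exactly
# 2 rows in tq_TQToSites and exactly 2 of them inside the requested list,
# i.e. the stored set equals the requested set; when the field is absent the
# NOT IN branch restricts the match to TQs with no rows at all for it.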
def __findAndDisableTaskQueue( self, tqDefDict, skipDefinitionCheck = False, retries = 10, connObj = False ):
""" Disable and find TQ
"""
for _ in range( retries ):
result = self.__findSmallestTaskQueue( tqDefDict, skipDefinitionCheck = skipDefinitionCheck, connObj = connObj )
if not result[ 'OK' ]:
return result
data = result[ 'Value' ]
if not data[ 'found' ]:
return result
if data[ 'enabled' ] < 1:
gLogger.notice( "TaskQueue {tqId} seems to be already disabled ({enabled})".format( **data ) )
result = self.__setTaskQueueEnabled( data[ 'tqId' ], False )
if result[ 'OK' ]:
return S_OK( data )
return S_ERROR( "Could not disable TQ" )
def __findSmallestTaskQueue( self, tqDefDict, skipDefinitionCheck = False, connObj = False ):
"""
Find a task queue that has exactly the same requirements
"""
result = self.__generateTQFindSQL( tqDefDict, skipDefinitionCheck = skipDefinitionCheck,
connObj = connObj )
if not result[ 'OK' ]:
return result
sqlCmd = "SELECT COUNT( `tq_Jobs`.JobID ), `tq_TaskQueues`.TQId, `tq_TaskQueues`.Enabled FROM `tq_TaskQueues`, `tq_Jobs`"
sqlCmd = "%s WHERE `tq_TaskQueues`.TQId = `tq_Jobs`.TQId AND %s GROUP BY `tq_Jobs`.TQId ORDER BY COUNT( `tq_Jobs`.JobID ) ASC" % ( sqlCmd, result[ 'Value' ] )
result = self._query( sqlCmd, conn = connObj )
if not result[ 'OK' ]:
return S_ERROR( "Can't find task queue: %s" % result[ 'Message' ] )
data = result[ 'Value' ]
if len( data ) == 0 or data[0][0] >= self.__maxJobsInTQ:
return S_OK( { 'found' : False } )
return S_OK( { 'found' : True, 'tqId' : data[0][1], 'enabled' : data[0][2], 'jobs' : data[0][0] } )
def matchAndGetJob( self, tqMatchDict, numJobsPerTry = 50, numQueuesPerTry = 10, negativeCond = {} ):
"""
Match a job
"""
#Make a copy to avoid modification of original if escaping needs to be done
tqMatchDict = dict( tqMatchDict )
self.log.info( "Starting match for requirements", self.__strDict( tqMatchDict ) )
retVal = self._checkMatchDefinition( tqMatchDict )
if not retVal[ 'OK' ]:
self.log.error( "TQ match request check failed", retVal[ 'Message' ] )
return retVal
retVal = self._getConnection()
if not retVal[ 'OK' ]:
return S_ERROR( "Can't connect to DB: %s" % retVal[ 'Message' ] )
connObj = retVal[ 'Value' ]
preJobSQL = "SELECT `tq_Jobs`.JobId, `tq_Jobs`.TQId FROM `tq_Jobs` WHERE `tq_Jobs`.TQId = %s AND `tq_Jobs`.Priority = %s"
prioSQL = "SELECT `tq_Jobs`.Priority FROM `tq_Jobs` WHERE `tq_Jobs`.TQId = %s ORDER BY RAND() / `tq_Jobs`.RealPriority ASC LIMIT 1"
postJobSQL = " ORDER BY `tq_Jobs`.JobId ASC LIMIT %s" % numJobsPerTry
for _ in range( self.__maxMatchRetry ):
if 'JobID' in tqMatchDict:
# A certain JobID is required by the resource, so all TQs are to be considered
retVal = self.matchAndGetTaskQueue( tqMatchDict, numQueuesToGet = 0, skipMatchDictDef = True, connObj = connObj )
preJobSQL = "%s AND `tq_Jobs`.JobId = %s " % ( preJobSQL, tqMatchDict['JobID'] )
else:
retVal = self.matchAndGetTaskQueue( tqMatchDict,
numQueuesToGet = numQueuesPerTry,
skipMatchDictDef = True,
negativeCond = negativeCond,
connObj = connObj )
if not retVal[ 'OK' ]:
return retVal
tqList = retVal[ 'Value' ]
if len( tqList ) == 0:
self.log.info( "No TQ matches requirements" )
return S_OK( { 'matchFound' : False, 'tqMatch' : tqMatchDict } )
for tqId, tqOwnerDN, tqOwnerGroup in tqList:
self.log.info( "Trying to extract jobs from TQ %s" % tqId )
retVal = self._query( prioSQL % tqId, conn = connObj )
if not retVal[ 'OK' ]:
return S_ERROR( "Can't retrieve winning priority for matching job: %s" % retVal[ 'Message' ] )
if len( retVal[ 'Value' ] ) == 0:
continue
prio = retVal[ 'Value' ][0][0]
retVal = self._query( "%s %s" % ( preJobSQL % ( tqId, prio ), postJobSQL ), conn = connObj )
if not retVal[ 'OK' ]:
return S_ERROR( "Can't begin transaction for matching job: %s" % retVal[ 'Message' ] )
jobTQList = [ ( row[0], row[1] ) for row in retVal[ 'Value' ] ]
if len( jobTQList ) == 0:
gLogger.info( "Task queue %s seems to be empty, triggering a cleaning" % tqId )
self.__deleteTQWithDelay.add( tqId, 300, ( tqId, tqOwnerDN, tqOwnerGroup ) )
while len( jobTQList ) > 0:
jobId, tqId = jobTQList.pop( random.randint( 0, len( jobTQList ) - 1 ) )
self.log.info( "Trying to extract job %s from TQ %s" % ( jobId, tqId ) )
retVal = self.deleteJob( jobId, connObj = connObj )
if not retVal[ 'OK' ]:
msgFix = "Could not take job"
msgVar = " %s out from the TQ %s: %s" % ( jobId, tqId, retVal[ 'Message' ] )
self.log.error( msgFix, msgVar )
return S_ERROR( msgFix + msgVar )
if retVal[ 'Value' ] == True :
self.log.info( "Extracted job %s with prio %s from TQ %s" % ( jobId, prio, tqId ) )
return S_OK( { 'matchFound' : True, 'jobId' : jobId, 'taskQueueId' : tqId, 'tqMatch' : tqMatchDict } )
self.log.info( "No jobs could be extracted from TQ %s" % tqId )
self.log.info( "Could not find a match after %s match retries" % self.__maxMatchRetry )
return S_ERROR( "Could not find a match after %s match retries" % self.__maxMatchRetry )
def matchAndGetTaskQueue( self, tqMatchDict, numQueuesToGet = 1, skipMatchDictDef = False,
negativeCond = {}, connObj = False ):
"""
Get a queue that matches the requirements
"""
#Make a copy to avoid modification of original if escaping needs to be done
tqMatchDict = dict( tqMatchDict )
if not skipMatchDictDef:
retVal = self._checkMatchDefinition( tqMatchDict )
if not retVal[ 'OK' ]:
return retVal
retVal = self.__generateTQMatchSQL( tqMatchDict, numQueuesToGet = numQueuesToGet, negativeCond = negativeCond )
if not retVal[ 'OK' ]:
return retVal
matchSQL = retVal[ 'Value' ]
retVal = self._query( matchSQL, conn = connObj )
if not retVal[ 'OK' ]:
return retVal
return S_OK( [ ( row[0], row[1], row[2] ) for row in retVal[ 'Value' ] ] )
def __generateSQLSubCond( self, sqlString, value, boolOp = 'OR' ):
if type( value ) not in ( types.ListType, types.TupleType ):
return sqlString % str( value ).strip()
sqlORList = []
for v in value:
sqlORList.append( sqlString % str( v ).strip() )
return "( %s )" % ( " %s " % boolOp ).join( sqlORList )
def __generateNotSQL( self, tableDict, negativeCond ):
""" Generate negative conditions
Can be a list of dicts or a dict:
- list of dicts will be OR of conditional dicts
- dicts will be normal conditional dict ( key1 in ( v1, v2, ... ) AND key2 in ( v3, v4, ... ) )
"""
condType = type( negativeCond )
if condType in ( types.ListType, types.TupleType ):
sqlCond = []
for cD in negativeCond:
sqlCond.append( self.__generateNotDictSQL( tableDict, cD ) )
return " ( %s )" % " OR ".join( sqlCond )
elif condType == types.DictType:
return self.__generateNotDictSQL( tableDict, negativeCond )
raise RuntimeError( "negativeCond has to be either a list or a dict and it's %s" % condType )
def __generateNotDictSQL( self, tableDict, negativeCond ):
""" Generate the negative sql condition from a standard condition dict
not ( cond1 and cond2 ) = ( not cond1 or not cond 2 )
For instance: { 'Site': 'S1', 'JobType': [ 'T1', 'T2' ] }
( not 'S1' in Sites or ( not 'T1' in JobType and not 'T2' in JobType ) )
S2 T1 -> not False or ( not True and not False ) -> True or ... -> True -> Eligible
S1 T3 -> not True or ( not False and not False ) -> False or (True and True ) -> True -> Eligible
S1 T1 -> not True or ( not True and not False ) -> False or ( False and True ) -> False -> Nop
"""
condList = []
for field in negativeCond:
if field in self.__multiValueMatchFields:
fullTableN = '`tq_TQTo%ss`' % field
valList = negativeCond[ field ]
if type( valList ) not in ( types.TupleType, types.ListType ):
valList = ( valList, )
subList = []
for value in valList:
value = self._escapeString( value )[ 'Value' ]
sql = "%s NOT IN ( SELECT %s.Value FROM %s WHERE %s.TQId = tq.TQId )" % ( value,
fullTableN, fullTableN, fullTableN )
subList.append( sql )
condList.append( "( %s )" % " AND ".join( subList ) )
elif field in self.__singleValueDefFields:
for value in negativeCond[field]:
value = self._escapeString( value )[ 'Value' ]
sql = "%s != tq.%s " % ( value, field )
condList.append( sql )
return "( %s )" % " OR ".join( condList )
def __generateTablesName( self, sqlTables, field ):
fullTableName = 'tq_TQTo%ss' % field
if fullTableName not in sqlTables:
tableN = field.lower()
sqlTables[ fullTableName ] = tableN
return tableN, "`%s`" % fullTableName,
return sqlTables[ fullTableName ], "`%s`" % fullTableName
def __generateTQMatchSQL( self, tqMatchDict, numQueuesToGet = 1, negativeCond = {} ):
"""
Generate the SQL needed to match a task queue
"""
#Only enabled TQs
sqlCondList = []
sqlTables = { "tq_TaskQueues" : "tq" }
#If OwnerDN and OwnerGroup are defined only use those combinations that make sense
if 'OwnerDN' in tqMatchDict and 'OwnerGroup' in tqMatchDict:
groups = tqMatchDict[ 'OwnerGroup' ]
if type( groups ) not in ( types.ListType, types.TupleType ):
groups = [ groups ]
dns = tqMatchDict[ 'OwnerDN' ]
if type( dns ) not in ( types.ListType, types.TupleType ):
dns = [ dns ]
ownerConds = []
for group in groups:
if Properties.JOB_SHARING in CS.getPropertiesForGroup( group.replace( '"', "" ) ):
ownerConds.append( "tq.OwnerGroup = %s" % group )
else:
for dn in dns:
ownerConds.append( "( tq.OwnerDN = %s AND tq.OwnerGroup = %s )" % ( dn, group ) )
sqlCondList.append( " OR ".join( ownerConds ) )
else:
#If not both are defined, just add the ones that are defined
for field in ( 'OwnerGroup', 'OwnerDN' ):
if field in tqMatchDict:
sqlCondList.append( self.__generateSQLSubCond( "tq.%s = %%s" % field,
tqMatchDict[ field ] ) )
#Type of single value conditions
for field in ( 'CPUTime', 'Setup' ):
if field in tqMatchDict:
if field in ( 'CPUTime', ):
sqlCondList.append( self.__generateSQLSubCond( "tq.%s <= %%s" % field, tqMatchDict[ field ] ) )
else:
sqlCondList.append( self.__generateSQLSubCond( "tq.%s = %%s" % field, tqMatchDict[ field ] ) )
#Match multi value fields
for field in self.__multiValueMatchFields:
#It has to be %ss , with an 's' at the end, because the column names
# are plural while the match options are singular
if field in tqMatchDict and tqMatchDict[ field ]:
_, fullTableN = self.__generateTablesName( sqlTables, field )
sqlMultiCondList = []
# if field != 'GridCE' or 'Site' in tqMatchDict:
# Jobs for masked sites can still be matched if they specified a GridCE.
# Site is removed from tqMatchDict when the Site is masked. In that case we
# want the GridCE to match explicitly, so the COUNT cannot be 0 and this
# condition is skipped
sqlMultiCondList.append( "( SELECT COUNT(%s.Value) FROM %s WHERE %s.TQId = tq.TQId ) = 0" % ( fullTableN, fullTableN, fullTableN ) )
if field in self.__tagMatchFields:
if tqMatchDict[field] != '"Any"':
csql = self.__generateTagSQLSubCond( fullTableN, tqMatchDict[field] )
else:
csql = self.__generateSQLSubCond( "%%s IN ( SELECT %s.Value FROM %s WHERE %s.TQId = tq.TQId )" % ( fullTableN, fullTableN, fullTableN ), tqMatchDict[ field ] )
sqlMultiCondList.append( csql )
sqlCondList.append( "( %s )" % " OR ".join( sqlMultiCondList ) )
#In case of Site, check it's not in job banned sites
if field in self.__bannedJobMatchFields:
fullTableN = '`tq_TQToBanned%ss`' % field
csql = self.__generateSQLSubCond( "%%s not in ( SELECT %s.Value FROM %s WHERE %s.TQId = tq.TQId )" % ( fullTableN,
fullTableN, fullTableN ), tqMatchDict[ field ], boolOp = 'OR' )
sqlCondList.append( csql )
#Resource banning
bannedField = "Banned%s" % field
if bannedField in tqMatchDict and tqMatchDict[ bannedField ]:
fullTableN = '`tq_TQTo%ss`' % field
csql = self.__generateSQLSubCond( "%%s not in ( SELECT %s.Value FROM %s WHERE %s.TQId = tq.TQId )" % ( fullTableN,
fullTableN, fullTableN ), tqMatchDict[ bannedField ], boolOp = 'OR' )
sqlCondList.append( csql )
#For certain fields the requirement is strict: if the field is not in tqMatchDict, the job cannot require it
for field in self.__strictRequireMatchFields:
if field in tqMatchDict:
continue
fullTableN = '`tq_TQTo%ss`' % field
sqlCondList.append( "( SELECT COUNT(%s.Value) FROM %s WHERE %s.TQId = tq.TQId ) = 0" % ( fullTableN, fullTableN, fullTableN ) )
# Add extra conditions
if negativeCond:
sqlCondList.append( self.__generateNotSQL( sqlTables, negativeCond ) )
#Generate the final query string
tqSqlCmd = "SELECT tq.TQId, tq.OwnerDN, tq.OwnerGroup FROM `tq_TaskQueues` tq WHERE %s" % ( " AND ".join( sqlCondList ) )
#Apply priorities
tqSqlCmd = "%s ORDER BY RAND() / tq.Priority ASC" % tqSqlCmd
#Do we want a limit?
if numQueuesToGet:
tqSqlCmd = "%s LIMIT %s" % ( tqSqlCmd, numQueuesToGet )
return S_OK( tqSqlCmd )
def __generateTagSQLSubCond( self, tableName, tagMatchList ):
""" Generate SQL condition where ALL the specified multiValue requirements must be
present in the matching resource list
"""
sql1 = "SELECT COUNT(%s.Value) FROM %s WHERE %s.TQId=tq.TQId" % ( tableName, tableName, tableName )
if type( tagMatchList ) in [types.ListType, types.TupleType]:
sql2 = sql1 + " AND %s.Value in ( %s )" % ( tableName, ','.join( [ "%s" % v for v in tagMatchList] ) )
else:
sql2 = sql1 + " AND %s.Value=%s" % ( tableName, tagMatchList )
sql = '( '+sql1+' ) = ('+sql2+' )'
return sql
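#Illustrative expansion (not in the original source): for
#tagMatchList = [ '"GPU"', '"MultiCore"' ] the generated condition is
#  ( SELECT COUNT(T.Value) FROM T WHERE T.TQId=tq.TQId )
#    = ( SELECT COUNT(T.Value) FROM T WHERE T.TQId=tq.TQId AND T.Value in ( "GPU","MultiCore" ) )
#with T the tag table, i.e. every tag the task queue requires must be
#present in the resource's tag list.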
def deleteJob( self, jobId, connObj = False ):
"""
Delete a job from the task queues
Return S_OK( True/False ) / S_ERROR
"""
if not connObj:
retVal = self._getConnection()
if not retVal[ 'OK' ]:
return S_ERROR( "Can't delete job: %s" % retVal[ 'Message' ] )
connObj = retVal[ 'Value' ]
retVal = self._query( "SELECT t.TQId, t.OwnerDN, t.OwnerGroup FROM `tq_TaskQueues` t, `tq_Jobs` j WHERE j.JobId = %s AND t.TQId = j.TQId" % jobId, conn = connObj )
if not retVal[ 'OK' ]:
return S_ERROR( "Could not get job from task queue %s: %s" % ( jobId, retVal[ 'Message' ] ) )
data = retVal[ 'Value' ]
if not data:
return S_OK( False )
tqId, tqOwnerDN, tqOwnerGroup = data[0]
self.log.info( "Deleting job %s" % jobId )
retVal = self._update( "DELETE FROM `tq_Jobs` WHERE JobId = %s" % jobId, conn = connObj )
if not retVal[ 'OK' ]:
return S_ERROR( "Could not delete job from task queue %s: %s" % ( jobId, retVal[ 'Message' ] ) )
if retVal['Value'] == 0:
#No job deleted
return S_OK( False )
#Always return S_OK() because job has already been taken out from the TQ
self.__deleteTQWithDelay.add( tqId, 300, ( tqId, tqOwnerDN, tqOwnerGroup ) )
return S_OK( True )
def getTaskQueueForJob( self, jobId, connObj = False ):
"""
Return TaskQueue for a given Job
Return S_OK( [TaskQueueID] ) / S_ERROR
"""
if not connObj:
retVal = self._getConnection()
if not retVal[ 'OK' ]:
return S_ERROR( "Can't get TQ for job: %s" % retVal[ 'Message' ] )
connObj = retVal[ 'Value' ]
retVal = self._query( 'SELECT TQId FROM `tq_Jobs` WHERE JobId = %s ' % jobId, conn = connObj )
if not retVal[ 'OK' ]:
return retVal
if not retVal['Value']:
return S_ERROR( 'Not in TaskQueues' )
return S_OK( retVal['Value'][0][0] )
def getTaskQueueForJobs( self, jobIDs, connObj = False ):
"""
Return TaskQueues for a given list of Jobs
"""
if not connObj:
retVal = self._getConnection()
if not retVal[ 'OK' ]:
return S_ERROR( "Can't get TQs for a job list: %s" % retVal[ 'Message' ] )
connObj = retVal[ 'Value' ]
jobString = ','.join( [ str( x ) for x in jobIDs ] )
retVal = self._query( 'SELECT JobId,TQId FROM `tq_Jobs` WHERE JobId in (%s) ' % jobString, conn = connObj )
if not retVal[ 'OK' ]:
return retVal
if not retVal['Value']:
return S_ERROR( 'Not in TaskQueues' )
resultDict = {}
for jobID, TQID in retVal['Value']:
resultDict[int( jobID )] = int( TQID )
return S_OK( resultDict )
def __getOwnerForTaskQueue( self, tqId, connObj = False ):
retVal = self._query( "SELECT OwnerDN, OwnerGroup from `tq_TaskQueues` WHERE TQId=%s" % tqId, conn = connObj )
if not retVal[ 'OK' ]:
return retVal
data = retVal[ 'Value' ]
if len( data ) == 0:
return S_OK( False )
return S_OK( retVal[ 'Value' ][0] )
def __deleteTQIfEmpty( self, args ):
( tqId, tqOwnerDN, tqOwnerGroup ) = args
retries = 3
while retries:
retries -= 1
result = self.deleteTaskQueueIfEmpty( tqId, tqOwnerDN, tqOwnerGroup )
if result[ 'OK' ]:
return
gLogger.error( "Could not delete TQ %s: %s" % ( tqId, result[ 'Message' ] ) )
def deleteTaskQueueIfEmpty( self, tqId, tqOwnerDN = False, tqOwnerGroup = False, connObj = False ):
"""
Try to delete a task queue if it is empty
"""
if not connObj:
retVal = self._getConnection()
if not retVal[ 'OK' ]:
return S_ERROR( "Can't insert job: %s" % retVal[ 'Message' ] )
connObj = retVal[ 'Value' ]
if not tqOwnerDN or not tqOwnerGroup:
retVal = self.__getOwnerForTaskQueue( tqId, connObj = connObj )
if not retVal[ 'OK' ]:
return retVal
data = retVal[ 'Value' ]
if not data:
return S_OK( False )
tqOwnerDN, tqOwnerGroup = data
sqlCmd = "DELETE FROM `tq_TaskQueues` WHERE Enabled >= 1 AND `tq_TaskQueues`.TQId = %s" % tqId
sqlCmd = "%s AND `tq_TaskQueues`.TQId not in ( SELECT DISTINCT TQId from `tq_Jobs` )" % sqlCmd
retVal = self._update( sqlCmd, conn = connObj )
if not retVal[ 'OK' ]:
return S_ERROR( "Could not delete task queue %s: %s" % ( tqId, retVal[ 'Message' ] ) )
delTQ = retVal[ 'Value' ]
if delTQ > 0:
for mvField in self.__multiValueDefFields:
retVal = self._update( "DELETE FROM `tq_TQTo%s` WHERE TQId = %s" % ( mvField, tqId ), conn = connObj )
if not retVal[ 'OK' ]:
return retVal
self.recalculateTQSharesForEntity( tqOwnerDN, tqOwnerGroup, connObj = connObj )
self.log.info( "Deleted empty and enabled TQ %s" % tqId )
return S_OK( True )
return S_OK( False )
def deleteTaskQueue( self, tqId, tqOwnerDN = False, tqOwnerGroup = False, connObj = False ):
"""
Try to delete a task queue even if it has jobs
"""
self.log.info( "Deleting TQ %s" % tqId )
if not connObj:
retVal = self._getConnection()
if not retVal[ 'OK' ]:
return S_ERROR( "Can't insert job: %s" % retVal[ 'Message' ] )
connObj = retVal[ 'Value' ]
if not tqOwnerDN or not tqOwnerGroup:
retVal = self.__getOwnerForTaskQueue( tqId, connObj = connObj )
if not retVal[ 'OK' ]:
return retVal
data = retVal[ 'Value' ]
if not data:
return S_OK( False )
tqOwnerDN, tqOwnerGroup = data
sqlCmd = "DELETE FROM `tq_TaskQueues` WHERE `tq_TaskQueues`.TQId = %s" % tqId
retVal = self._update( sqlCmd, conn = connObj )
if not retVal[ 'OK' ]:
return S_ERROR( "Could not delete task queue %s: %s" % ( tqId, retVal[ 'Message' ] ) )
delTQ = retVal[ 'Value' ]
sqlCmd = "DELETE FROM `tq_Jobs` WHERE `tq_Jobs`.TQId = %s" % tqId
retVal = self._update( sqlCmd, conn = connObj )
if not retVal[ 'OK' ]:
return S_ERROR( "Could not delete task queue %s: %s" % ( tqId, retVal[ 'Message' ] ) )
for field in self.__multiValueDefFields:
retVal = self._update( "DELETE FROM `tq_TQTo%s` WHERE TQId = %s" % ( field, tqId ), conn = connObj )
if not retVal[ 'OK' ]:
return retVal
if delTQ > 0:
self.recalculateTQSharesForEntity( tqOwnerDN, tqOwnerGroup, connObj = connObj )
return S_OK( True )
return S_OK( False )
def getMatchingTaskQueues( self, tqMatchDict, negativeCond = False ):
"""
Alias to expose the same method name as the Matcher
"""
return self.retrieveTaskQueuesThatMatch( tqMatchDict, negativeCond = negativeCond )
def getNumTaskQueues( self ):
"""
Get the number of task queues in the system
"""
sqlCmd = "SELECT COUNT( TQId ) FROM `tq_TaskQueues`"
retVal = self._query( sqlCmd )
if not retVal[ 'OK' ]:
return retVal
return S_OK( retVal[ 'Value' ][0][0] )
def retrieveTaskQueuesThatMatch( self, tqMatchDict, negativeCond = False ):
"""
Get the info of the task queues that match a resource
"""
result = self.matchAndGetTaskQueue( tqMatchDict, numQueuesToGet = 0, negativeCond = negativeCond )
if not result[ 'OK' ]:
return result
return self.retrieveTaskQueues( [ tqTuple[0] for tqTuple in result[ 'Value' ] ] )
def retrieveTaskQueues( self, tqIdList = False ):
"""
Get all the task queues
"""
sqlSelectEntries = [ "`tq_TaskQueues`.TQId", "`tq_TaskQueues`.Priority", "COUNT( `tq_Jobs`.TQId )" ]
sqlGroupEntries = [ "`tq_TaskQueues`.TQId", "`tq_TaskQueues`.Priority" ]
for field in self.__singleValueDefFields:
sqlSelectEntries.append( "`tq_TaskQueues`.%s" % field )
sqlGroupEntries.append( "`tq_TaskQueues`.%s" % field )
sqlCmd = "SELECT %s FROM `tq_TaskQueues`, `tq_Jobs`" % ", ".join( sqlSelectEntries )
sqlTQCond = ""
if tqIdList != False:
if len( tqIdList ) == 0:
return S_OK( {} )
else:
sqlTQCond += " AND `tq_TaskQueues`.TQId in ( %s )" % ", ".join( [ str( id_ ) for id_ in tqIdList ] )
sqlCmd = "%s WHERE `tq_TaskQueues`.TQId = `tq_Jobs`.TQId %s GROUP BY %s" % ( sqlCmd,
sqlTQCond,
", ".join( sqlGroupEntries ) )
retVal = self._query( sqlCmd )
if not retVal[ 'OK' ]:
return S_ERROR( "Can't retrieve task queues info: %s" % retVal[ 'Message' ] )
tqData = {}
for record in retVal[ 'Value' ]:
tqId = record[0]
tqData[ tqId ] = { 'Priority' : record[1], 'Jobs' : record[2] }
record = record[3:]
for iP in range( len( self.__singleValueDefFields ) ):
tqData[ tqId ][ self.__singleValueDefFields[ iP ] ] = record[ iP ]
tqNeedCleaning = False
for field in self.__multiValueDefFields:
table = "`tq_TQTo%s`" % field
sqlCmd = "SELECT %s.TQId, %s.Value FROM %s" % ( table, table, table )
retVal = self._query( sqlCmd )
if not retVal[ 'OK' ]:
return S_ERROR( "Can't retrieve task queues field % info: %s" % ( field, retVal[ 'Message' ] ) )
for record in retVal[ 'Value' ]:
tqId = record[0]
value = record[1]
if not tqId in tqData:
if tqIdList == False or tqId in tqIdList:
self.log.warn( "Task Queue %s is defined in field %s but does not exist, triggering a cleaning" % ( tqId, field ) )
tqNeedCleaning = True
else:
if field not in tqData[ tqId ]:
tqData[ tqId ][ field ] = []
tqData[ tqId ][ field ].append( value )
if tqNeedCleaning:
self.cleanOrphanedTaskQueues()
return S_OK( tqData )
def __updateGlobalShares( self ):
"""
Update internal structure for shares
"""
#Update group shares
self.__groupShares = self.getGroupShares()
#Apply corrections if enabled
if self.isSharesCorrectionEnabled():
result = self.getGroupsInTQs()
if not result[ 'OK' ]:
self.log.error( "Could not get groups in the TQs", result[ 'Message' ] )
activeGroups = result[ 'Value' ]
newShares = {}
for group in activeGroups:
if group in self.__groupShares:
newShares[ group ] = self.__groupShares[ group ]
newShares = self.__sharesCorrector.correctShares( newShares )
for group in self.__groupShares:
if group in newShares:
self.__groupShares[ group ] = newShares[ group ]
def recalculateTQSharesForAll( self ):
"""
Recalculate all priorities for TQ's
"""
if self.isSharesCorrectionEnabled():
self.log.info( "Updating correctors state" )
self.__sharesCorrector.update()
self.__updateGlobalShares()
self.log.info( "Recalculating shares for all TQs" )
retVal = self._getConnection()
if not retVal[ 'OK' ]:
return S_ERROR( "Can't insert job: %s" % retVal[ 'Message' ] )
result = self._query( "SELECT DISTINCT( OwnerGroup ) FROM `tq_TaskQueues`" )
if not result[ 'OK' ]:
return result
for group in [ r[0] for r in result[ 'Value' ] ]:
self.recalculateTQSharesForEntity( "all", group )
return S_OK()
def recalculateTQSharesForEntity( self, userDN, userGroup, connObj = False ):
"""
Recalculate the shares for a userDN/userGroup combo
"""
self.log.info( "Recalculating shares for %s@%s TQs" % ( userDN, userGroup ) )
if userGroup in self.__groupShares:
share = self.__groupShares[ userGroup ]
else:
share = float( DEFAULT_GROUP_SHARE )
if Properties.JOB_SHARING in CS.getPropertiesForGroup( userGroup ):
#If group has JobSharing just set prio for that entry, userDN is irrelevant
return self.__setPrioritiesForEntity( userDN, userGroup, share, connObj = connObj )
selSQL = "SELECT OwnerDN, COUNT(OwnerDN) FROM `tq_TaskQueues` WHERE OwnerGroup='%s' GROUP BY OwnerDN" % ( userGroup )
result = self._query( selSQL, conn = connObj )
if not result[ 'OK' ]:
return result
#Get owners in this group and the amount of times they appear
data = [ ( r[0], r[1] ) for r in result[ 'Value' ] if r ]
numOwners = len( data )
#If there are no owners there is nothing to do
if numOwners == 0:
return S_OK()
#Split the share amongst the number of owners
share /= numOwners
entitiesShares = dict( [ ( row[0], share ) for row in data ] )
#If the corrector is enabled let it work its magic
if self.isSharesCorrectionEnabled():
entitiesShares = self.__sharesCorrector.correctShares( entitiesShares, group = userGroup )
#Keep updating
owners = dict( data )
#If the user is already known and has more than 1 TQ, the rest of the users don't need to be modified
#(the number of owners didn't change)
if userDN in owners and owners[ userDN ] > 1:
return self.__setPrioritiesForEntity( userDN, userGroup, entitiesShares[ userDN ], connObj = connObj )
#Oops the number of owners may have changed so we recalculate the prio for all owners in the group
for userDN in owners:
self.__setPrioritiesForEntity( userDN, userGroup, entitiesShares[ userDN ], connObj = connObj )
return S_OK()
def __setPrioritiesForEntity( self, userDN, userGroup, share, connObj = False, consolidationFunc = "AVG" ):
"""
Set the priority for a userDN/userGroup combo given a split share
"""
self.log.info( "Setting priorities to %s@%s TQs" % ( userDN, userGroup ) )
tqCond = [ "t.OwnerGroup='%s'" % userGroup ]
allowBgTQs = gConfig.getValue( "/Registry/Groups/%s/AllowBackgroundTQs" % userGroup, False )
if Properties.JOB_SHARING not in CS.getPropertiesForGroup( userGroup ):
tqCond.append( "t.OwnerDN='%s'" % userDN )
tqCond.append( "t.TQId = j.TQId" )
if consolidationFunc == 'AVG':
selectSQL = "SELECT j.TQId, SUM( j.RealPriority )/COUNT(j.RealPriority) FROM `tq_TaskQueues` t, `tq_Jobs` j WHERE "
elif consolidationFunc == 'SUM':
selectSQL = "SELECT j.TQId, SUM( j.RealPriority ) FROM `tq_TaskQueues` t, `tq_Jobs` j WHERE "
else:
return S_ERROR( "Unknown consolidation func %s for setting priorities" % consolidationFunc )
selectSQL += " AND ".join( tqCond )
selectSQL += " GROUP BY t.TQId"
result = self._query( selectSQL, conn = connObj )
if not result[ 'OK' ]:
return result
tqDict = dict( result[ 'Value' ] )
if len( tqDict ) == 0:
return S_OK()
#Calculate Sum of priorities
totalPrio = 0
for k in tqDict:
if tqDict[k] > 0.1 or not allowBgTQs:
totalPrio += tqDict[ k ]
#Update prio for each TQ
for tqId in tqDict:
if tqDict[ tqId ] > 0.1 or not allowBgTQs:
prio = ( share / totalPrio ) * tqDict[ tqId ]
else:
prio = TQ_MIN_SHARE
prio = max( prio, TQ_MIN_SHARE )
tqDict[ tqId ] = prio
#Generate groups of TQs that will have the same prio = sum( prios ), more or less
result = self.retrieveTaskQueues( list( tqDict ) )
if not result[ 'OK' ]:
return result
allTQsData = result[ 'Value' ]
tqGroups = {}
for tqid in allTQsData:
tqData = allTQsData[ tqid ]
for field in ( 'Jobs', 'Priority' ) + self.__priorityIgnoredFields:
if field in tqData:
tqData.pop( field )
tqHash = []
for f in sorted( tqData ):
tqHash.append( "%s:%s" % ( f, tqData[ f ] ) )
tqHash = "|".join( tqHash )
if tqHash not in tqGroups:
tqGroups[ tqHash ] = []
tqGroups[ tqHash ].append( tqid )
tqGroups = [ tqGroups[ td ] for td in tqGroups ]
#Do the grouping
for tqGroup in tqGroups:
totalPrio = 0
if len( tqGroup ) < 2:
continue
for tqid in tqGroup:
totalPrio += tqDict[ tqid ]
for tqid in tqGroup:
tqDict[ tqid ] = totalPrio
#Group by priorities
prioDict = {}
for tqId in tqDict:
prio = tqDict[ tqId ]
if prio not in prioDict:
prioDict[ prio ] = []
prioDict[ prio ].append( tqId )
#Execute updates
for prio in prioDict:
tqList = ", ".join( [ str( tqId ) for tqId in prioDict[ prio ] ] )
updateSQL = "UPDATE `tq_TaskQueues` SET Priority=%.4f WHERE TQId in ( %s )" % ( prio, tqList )
self._update( updateSQL, conn = connObj )
return S_OK()
def getGroupShares( self ):
"""
Get all the shares as a DICT
"""
result = gConfig.getSections( "/Registry/Groups" )
if result[ 'OK' ]:
groups = result[ 'Value' ]
else:
groups = []
shares = {}
for group in groups:
shares[ group ] = gConfig.getValue( "/Registry/Groups/%s/JobShare" % group, DEFAULT_GROUP_SHARE )
return shares
def propagateTQSharesIfChanged( self ):
"""
If the shares have changed in the CS, recalculate priorities
"""
shares = self.getGroupShares()
if shares == self.__groupShares:
return S_OK()
self.__groupShares = shares
return self.recalculateTQSharesForAll()
def modifyJobsPriorities( self, jobPrioDict ):
"""
Modify the priority for some jobs
"""
for jId in jobPrioDict:
jobPrioDict[jId] = int( jobPrioDict[jId] )
maxJobsInQuery = 1000
jobsList = sorted( jobPrioDict )
prioDict = {}
for jId in jobsList:
prio = jobPrioDict[ jId ]
if not prio in prioDict:
prioDict[ prio ] = []
prioDict[ prio ].append( str( jId ) )
updated = 0
for prio in prioDict:
jobsList = prioDict[ prio ]
for i in range( 0, len( jobsList ), maxJobsInQuery ):
jobs = ",".join( jobsList[ i : i + maxJobsInQuery ] )
updateSQL = "UPDATE `tq_Jobs` SET `Priority`=%s, `RealPriority`=%f WHERE `JobId` in ( %s )" % ( prio, self.__hackJobPriority( prio ), jobs )
result = self._update( updateSQL )
if not result[ 'OK' ]:
return result
updated += result[ 'Value' ]
if not updated:
return S_OK()
return self.recalculateTQSharesForAll()
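#Illustrative sketch (not part of the original source): the batching idiom
#used in modifyJobsPriorities above, stepping through a list in fixed-size
#slices of at most maxJobsInQuery elements:
#
#  jobsList = [ str( j ) for j in range( 2500 ) ]
#  for i in range( 0, len( jobsList ), 1000 ):
#    batch = ",".join( jobsList[ i : i + 1000 ] )  # batches of 1000, 1000, 500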
|
coberger/DIRAC
|
WorkloadManagementSystem/DB/TaskQueueDB.py
|
Python
|
gpl-3.0
| 53,551
|
[
"DIRAC"
] |
f57268e72a9e393cb2270e84a61baad2a81bb72d87af6f2a2d6ca3e8572523a4
|
""" ProxyProvider implementation for the proxy generation using local (DIRAC)
CA credentials
"""
import os
import re
import glob
import shutil
import tempfile
import commands
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Security.X509Chain import X509Chain # pylint: disable=import-error
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.Resources.ProxyProvider.ProxyProvider import ProxyProvider
__RCSID__ = "$Id$"
userConf = """[ req ]
default_bits = 2048
encrypt_key = yes
distinguished_name = req_dn
prompt = no
req_extensions = v3_req
[ req_dn ]
C = %%s
O = %%s
OU = %%s
CN = %%s
emailAddress = %%s
[ v3_req ]
# Extensions for client certificates (`man x509v3_config`).
nsComment = "OpenSSL Generated Client Certificate"
keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
%s""" % ''
caConf = """[ ca ]
default_ca = CA_default
[ CA_default ]
dir = %%s
database = $dir/index.txt
serial = $dir/serial
new_certs_dir = $dir/newcerts
default_md = sha256
private_key = %%s
certificate = %%s
name_opt = ca_default
cert_opt = ca_default
default_days = 375
preserve = no
copy_extensions = copy
policy = policy_loose
[ policy_loose ]
# Allow the intermediate CA to sign a more diverse range of certificates.
# See the POLICY FORMAT section of the `ca` man page.
countryName = optional
stateOrProvinceName = optional
localityName = optional
organizationName = optional
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ usr_cert ]
basicConstraints = CA:FALSE
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
%s""" % ''
class DIRACCAProxyProvider(ProxyProvider):
def __init__(self, parameters=None):
super(DIRACCAProxyProvider, self).__init__(parameters)
def getProxy(self, userDict):
""" Generate user proxy
:param dict userDict: user description dictionary with possible fields:
FullName, UserName, DN, EMail, DiracGroup
:return: S_OK(basestring)/S_ERROR() -- basestring is a proxy string
"""
def __createProxy():
""" Create proxy
:return: S_OK()/S_ERROR()
"""
# Evaluate full name and e-mail of the user
fullName = userDict.get('FullName')
eMail = userDict.get('EMail')
if "DN" in userDict:
# Get the DN info as a dictionary
dnDict = dict([field.split('=') for field in userDict['DN'].lstrip('/').split('/')])
if not fullName:
fullName = dnDict.get('CN')
if not eMail:
eMail = dnDict.get('emailAddress')
if not fullName or not eMail:
return S_ERROR("Incomplete user information")
userConfFile = os.path.join(userDir, fullName.replace(' ', '_') + '.cnf')
userReqFile = os.path.join(userDir, fullName.replace(' ', '_') + '.req')
userKeyFile = os.path.join(userDir, fullName.replace(' ', '_') + '.key.pem')
userCertFile = os.path.join(userDir, fullName.replace(' ', '_') + '.cert.pem')
dnFields = {}
for field in ['C', 'O', 'OU']:
dnFields[field] = self.parameters.get(field)
# Write user configuration file
with open(userConfFile, "w") as f:
f.write(userConf % (dnFields['C'], dnFields['O'], dnFields['OU'], fullName, eMail))
# Create user certificate
status, output = commands.getstatusoutput('openssl genrsa -out %s 2048' % userKeyFile)
if status:
return S_ERROR(output)
status, output = commands.getstatusoutput('openssl req -config %s -key %s -new -out %s' %
(userConfFile, userKeyFile, userReqFile))
if status:
return S_ERROR(output)
cmd = 'openssl ca -config %s -extensions usr_cert -batch -days 375 -in %s -out %s'
cmd = cmd % (caConfigFile, userReqFile, userCertFile)
status, output = commands.getstatusoutput(cmd)
if status:
return S_ERROR(output)
chain = X509Chain()
result = chain.loadChainFromFile(userCertFile)
if not result['OK']:
return result
result = chain.loadKeyFromFile(userKeyFile)
if not result['OK']:
return result
result = chain.getCredentials()
if not result['OK']:
return result
userDN = result['Value']['subject']
# Add DIRAC group if requested
diracGroup = userDict.get('DiracGroup')
if diracGroup:
result = Registry.getGroupsForDN(userDN)
if not result['OK']:
return result
if diracGroup not in result['Value']:
return S_ERROR('Requested group is not valid for the user')
return chain.generateProxyToString(365 * 24 * 3600, diracGroup=diracGroup, rfc=True)
# Prepare CA
cfg = {}
caConfigFile = self.parameters.get('CAConfigFile')
if caConfigFile:
with open(caConfigFile, "r") as caCFG:
for line in caCFG:
if re.findall('=', re.sub(r'#.*', '', line)):
field, val = re.sub(r'#.*', '', line).replace(' ', '').rstrip().split('=')
if field in ['dir', 'database', 'serial', 'new_certs_dir', 'private_key', 'certificate']:
for i in ['dir', 'database', 'serial', 'new_certs_dir', 'private_key', 'certificate']:
if cfg.get(i):
val = val.replace('$%s' % i, cfg[i])
cfg[field] = val
workingDirectory = self.parameters.get('WorkingDirectory')
caWorkingDirectory = cfg.get('dir') or tempfile.mkdtemp(dir=workingDirectory)
certLocation = cfg.get('certificate') or self.parameters.get('CertFile')
keyLocation = cfg.get('private_key') or self.parameters.get('KeyFile')
# Write configuration file
if not caConfigFile:
caConfigFile = os.path.join(caWorkingDirectory, 'CA.cnf')
with open(caConfigFile, "w") as caCFG:
caCFG.write(caConf % (caWorkingDirectory, keyLocation, certLocation))
# Check directory for new certificates
newCertsDir = cfg.get('new_certs_dir') or os.path.join(caWorkingDirectory, 'newcerts')
if not os.path.exists(newCertsDir):
os.makedirs(newCertsDir)
# Empty the certificate database
indexTxt = cfg.get('database') or caWorkingDirectory + '/index.txt'
with open(indexTxt, 'w') as ind:
ind.write('')
# Write down serial
serialLocation = cfg.get('serial') or '%s/serial' % caWorkingDirectory
with open(serialLocation, 'w') as serialFile:
serialFile.write('1000')
# Create user proxy
userDir = tempfile.mkdtemp(dir=caWorkingDirectory)
result = __createProxy()
# Clean up temporary files
if cfg.get('dir'):
shutil.rmtree(userDir)
for f in os.listdir(newCertsDir):
os.remove(os.path.join(newCertsDir, f))
for f in os.listdir(caWorkingDirectory):
if re.match("%s..*" % os.path.basename(indexTxt), f) or f.endswith('.old'):
os.remove(os.path.join(caWorkingDirectory, f))
with open(indexTxt, 'w') as indx:
indx.write('')
with open(serialLocation, 'w') as serialFile:
serialFile.write('1000')
else:
shutil.rmtree(caWorkingDirectory)
return result
def getUserDN(self, userDict):
""" Get DN of the user certificate that will be created
:param dict userDict: dictionary with user information
:return: S_OK(basestring)/S_ERROR() -- basestring is the DN string
"""
if "DN" in userDict:
# Get the DN info as a dictionary
dnDict = dict([field.split('=') for field in userDict['DN'].lstrip('/').split('/')])
# check that the DN corresponds to the template
valid = True
for field in ['C', 'O', 'OU']:
if dnDict.get(field) != self.parameters.get(field):
valid = False
if not (dnDict.get('CN') and dnDict.get('emailAddress')):
valid = False
if valid:
return S_OK(userDict['DN'])
else:
return S_ERROR('Invalid DN')
dnParameters = dict(self.parameters)
dnParameters.update(userDict)
for field in ['C', 'O', 'OU', 'FullName', 'EMail']:
if field not in dnParameters:
return S_ERROR('Incomplete user information')
dn = "/C=%(C)s/O=%(O)s/OU=%(OU)s/CN=%(FullName)s/emailAddress=%(EMail)s" % dnParameters
return S_OK(dn)
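# Minimal usage sketch (illustrative only; all parameter values below are
# placeholders, not taken from this file):
#
#   pp = DIRACCAProxyProvider(parameters={'C': 'FR', 'O': 'DIRAC', 'OU': 'DI',
#                                         'CertFile': '/opt/ca/ca.cert.pem',
#                                         'KeyFile': '/opt/ca/ca.key.pem'})
#   result = pp.getUserDN({'FullName': 'Jane Doe', 'EMail': 'jane@example.org'})
#   # S_OK('/C=FR/O=DIRAC/OU=DI/CN=Jane Doe/emailAddress=jane@example.org')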
|
chaen/DIRAC
|
Resources/ProxyProvider/DIRACCAProxyProvider.py
|
Python
|
gpl-3.0
| 8,680
|
[
"DIRAC"
] |
8b5d92869ebe80dc08858ecf3c2baad699159a9ee849a536a450aac3f2df396b
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
**********************************
espressopp.integrator.FixPositions
**********************************
.. function:: espressopp.integrator.FixPositions(system, particleGroup, fixMask)
:param system:
:param particleGroup:
:param fixMask:
:type system:
:type particleGroup:
:type fixMask:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.Extension import *
from _espressopp import integrator_FixPositions
class FixPositionsLocal(ExtensionLocal, integrator_FixPositions):
def __init__(self, system, particleGroup, fixMask):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, integrator_FixPositions, system, particleGroup, fixMask)
if pmi.isController :
class FixPositions(Extension, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.integrator.FixPositionsLocal',
pmicall = ['setFixMask', 'getFixMask'],
pmiproperty = [ 'particleGroup' ]
)
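# Illustrative usage sketch (an assumption, not from the original file): pin
# the positions of a particle group during integration. Int3D(0, 0, 0) is
# assumed here to fix all three Cartesian components.
#
#   fix = espressopp.integrator.FixPositions(system, particleGroup,
#                                            espressopp.Int3D(0, 0, 0))
#   integrator.addExtension(fix)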
|
espressopp/espressopp
|
src/integrator/FixPositions.py
|
Python
|
gpl-3.0
| 2,020
|
[
"ESPResSo"
] |
bb8416cc0f2153fddd7ea134396c05ec76a7e9113f794d7a87ce2e21d194d7b7
|
import os
from copy import deepcopy
import sys
import cPickle
import numpy as np
import openbabel
import pybel
sys.path.append('/home/andersx/dev/charmm-dftb-py')
from sccdftb_api import run_charmm, ATOMS
def load_pickle(filename):
f = open(filename,"rb")
p = cPickle.load(f)
f.close()
return(p)
TYPEVALS = dict()
TYPEVALS["H"] = 1
TYPEVALS["C"] = 10
TYPEVALS["N"] = 100
TYPEVALS["O"] = 1000
TYPEVALS["S"] = 10000
def typeval_to_string(typeval):
inputval = deepcopy(typeval)
output = ""
for key in ["S", "O", "N", "C", "H"]:
n = inputval // TYPEVALS[key]
inputval -= n * TYPEVALS[key]
if n > 0:
output += "%1s%1i " %(key, n)
return "%-12s" % output
def get_typeval(obatom):
name = obatom.GetType()[0]
return TYPEVALS[name]
if __name__ == "__main__":
np.set_printoptions(formatter={'float': '{: 0.3f}'.format}, linewidth=1000000)
gaussian_mulliken = load_pickle("charges_gaussian.pickle")
# charmm_mulliken = load_pickle("charges_3ob_npa.pickle")
charmm_mulliken = load_pickle("charges_test.pickle")
path = "xyz_sorted/"
stats = dict()
for atom in ATOMS:
stats[atom] = dict()
listing = os.listdir(path)
for filename in sorted(listing):
if filename.endswith(".xyz"):
logfile = filename.replace(".xyz", ".log")
dftb_mulliken = charmm_mulliken[filename]
pbe_mulliken = gaussian_mulliken[logfile]
qdiff = np.array(dftb_mulliken) - np.array(pbe_mulliken)
max_qdiff = max(qdiff.min(), qdiff.max(), key=abs)
# print
# print "%-30s %7.4f" % (filename, max_qdiff), qdiff
# print "%39s" % "DFTB3/3OB", np.array(dftb_mulliken)
# print "%39s" % "PBE/aug-cc-pVTZ", np.array(pbe_mulliken)
mol = pybel.readfile("xyz", path + filename).next()
for i, atom in enumerate(mol):
type_int = 0
# print "%-6s bonds to: " % (atom.OBAtom.GetType()),
bonds = ""
for obatom in openbabel.OBAtomAtomIter(atom.OBAtom):
bonds += "%-6s" % obatom.GetType()
type_int += get_typeval(obatom)
while len(bonds) < 24:
bonds += "- "
# print "%-24s" % bonds,
# print "%7.3f ID: %05i" % (qdiff[i], type_int)
name = atom.OBAtom.GetType()[0]
if type_int not in stats[name].keys():
stats[name][type_int] = []
stats[name][type_int].append(qdiff[i])
for atom in ATOMS:
all_values = []
for bond in sorted(stats[atom]):
values = np.array(stats[atom][bond])
typeval_to_string(bond)
print "%2s -- %12s %7.4f %7.4f %3i" % (atom, typeval_to_string(bond), np.mean(values), np.std(values),
len(values))
all_values += stats[atom][bond]
if len(all_values) < 1:
continue
rmsd = np.sqrt(np.mean(np.square(np.array(all_values))))
mean = np.mean(np.array(all_values))
print "%s: RMSD = %4.2f mean = %4.2f" % (atom, rmsd, mean)
|
andersx/dftbfit
|
scripts/print_stats.py
|
Python
|
bsd-2-clause
| 3,263
|
[
"CHARMM",
"Pybel"
] |
82d061d7c81a27750971093eb1d0a01185bd324fbf9c896af40c90f9a3946efa
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# operations.py
#
# Copyright 2016 Bruno S <bruno@oac.unc.edu.ar>
#
# This file is part of ProperImage (https://github.com/toros-astro/ProperImage)
# License: BSD-3-Clause
# Full Text: https://github.com/toros-astro/ProperImage/blob/master/LICENSE.txt
#
"""operations module from ProperImage,
for coadding and subtracting astronomical images.
Written by Bruno SANCHEZ
PhD of Astromoy - UNC
bruno@oac.unc.edu.ar
Instituto de Astronomia Teorica y Experimental (IATE) UNC
Cordoba - Argentina
Office 301
"""
import logging
import time
import warnings
import sep
import numpy as np
import astroalign as aa
from astropy.stats import sigma_clipped_stats
from scipy import optimize
from scipy.ndimage import center_of_mass
from scipy.ndimage.fourier import fourier_shift
from multiprocessing import Process, Queue
from . import utils as u
from .single_image import SingleImage as si
try:
import cPickle as pickle # noqa
except ImportError:
import pickle
try:
import pyfftw
_fftwn = pyfftw.interfaces.numpy_fft.fftn # noqa
_ifftwn = pyfftw.interfaces.numpy_fft.ifftn # noqa
except ImportError:
_fftwn = np.fft.fft2
_ifftwn = np.fft.ifft2
aa.PIXEL_TOL = 0.5
eps = np.finfo(np.float64).eps
def subtract(
ref,
new,
align=False,
inf_loss=0.25,
smooth_psf=False,
beta=True,
shift=True,
iterative=False,
fitted_psf=True,
):
"""
Function that takes two SingleImage instances (a reference and a new
image) and performs a proper image subtraction between them
Parameters:
-----------
align : bool
Whether to align the images before subtracting, default to False
inf_loss : float
Value of information loss in PSF estimation, lower limit is 0,
upper is 1. Only valid if fitted_psf=False. Default is 0.25
smooth_psf : bool
Whether to smooth the PSF, using a noise reduction technique.
Default to False.
beta : bool
Specify if using the relative flux scale estimation.
Default to True.
shift : bool
Whether to include a shift parameter in the iterative
methodology, in order to correct for misalignments.
Default to True.
iterative : bool
Specify if an iterative estimation of the subtraction relative
flux scale must be used. Default to False.
fitted_psf : bool
Whether to use a Gaussian fitted PSF. Overrides the use of
auto-psf determination. Default to True.
Returns:
--------
D : np.ndarray(n, m) of float
Subtraction image, Zackay's decorrelated D.
P : np.ndarray(n, m) of float
Subtraction image PSF. This is a full PSF image, with a size equal to D
S_corr : np.ndarray of float
Subtraction image S, Zackay's cross-correlated D x P
mix_mask : np.ndarray of bool
Mask of bad pixels for the subtraction image, with True marking bad pixels
"""
logger = logging.getLogger()
if fitted_psf:
from .single_image import SingleImageGaussPSF as SI
logger.info("Using single psf, gaussian modeled")
else:
from .single_image import SingleImage as SI
if not isinstance(ref, SI):
try:
ref = SI(ref, smooth_psf=smooth_psf)
except: # noqa
try:
ref = SI(ref.data, smooth_psf=smooth_psf)
except: # noqa
raise
if not isinstance(new, SI):
try:
new = SI(new, smooth_psf=smooth_psf)
except: # noqa
try:
new = SI(new.data, smooth_psf=smooth_psf)
except: # noqa
raise
if align:
registrd, registrd_mask = aa.register(new.data, ref.data)
new._clean()
# should it be new = type(new)( ?
new = SI(
registrd[: ref.data.shape[0], : ref.data.shape[1]],
mask=registrd_mask[: ref.data.shape[0], : ref.data.shape[1]],
borders=False,
smooth_psf=smooth_psf,
)
# new.data = registered
# new.data.mask = registered.mask
# make sure that the alignment has delivered arrays of the same size
if new.data.data.shape != ref.data.data.shape:
raise ValueError("N and R arrays are of different size")
t0 = time.time()
mix_mask = np.ma.mask_or(new.data.mask, ref.data.mask)
zps, meanmags = u.transparency([ref, new])
ref.zp = zps[0]
new.zp = zps[1]
n_zp = new.zp
r_zp = ref.zp
a_ref, psf_ref = ref.get_variable_psf(inf_loss)
a_new, psf_new = new.get_variable_psf(inf_loss)
if fitted_psf:
# I already know that a_ref and a_new are None, both of them
# And each psf is a list, first element a render,
# second element a model
p_r = psf_ref[1]
p_n = psf_new[1]
p_r.x_mean = ref.data.data.shape[0] / 2.0
p_r.y_mean = ref.data.data.shape[1] / 2.0
p_n.x_mean = new.data.data.shape[0] / 2.0
p_n.y_mean = new.data.data.shape[1] / 2.0
p_r.bounding_box = None
p_n.bounding_box = None
p_n = p_n.render(np.zeros(new.data.data.shape))
p_r = p_r.render(np.zeros(ref.data.data.shape))
dx_ref, dy_ref = center_of_mass(p_r) # [0])
dx_new, dy_new = center_of_mass(p_n) # [0])
else:
p_r = psf_ref[0]
p_n = psf_new[0]
dx_ref, dy_ref = center_of_mass(p_r) # [0])
dx_new, dy_new = center_of_mass(p_n) # [0])
if dx_new < 0.0 or dy_new < 0.0:
raise ValueError("Imposible to acquire center of PSF inside stamp")
psf_ref_hat = _fftwn(p_r, s=ref.data.shape, norm="ortho")
psf_new_hat = _fftwn(p_n, s=new.data.shape, norm="ortho")
psf_ref_hat[np.where(psf_ref_hat.real == 0)] = eps
psf_new_hat[np.where(psf_new_hat.real == 0)] = eps
psf_ref_hat_conj = psf_ref_hat.conj()
psf_new_hat_conj = psf_new_hat.conj()
D_hat_r = fourier_shift(psf_new_hat * ref.interped_hat, (-dx_new, -dy_new))
D_hat_n = fourier_shift(psf_ref_hat * new.interped_hat, (-dx_ref, -dy_ref))
norm_b = ref.var ** 2 * psf_new_hat * psf_new_hat_conj
norm_a = new.var ** 2 * psf_ref_hat * psf_ref_hat_conj
new_back = sep.Background(new.interped).back()
ref_back = sep.Background(ref.interped).back()
gamma = new_back - ref_back
b = n_zp / r_zp
norm = np.sqrt(norm_a + norm_b * b ** 2)
if beta:
if shift: # beta==True & shift==True
def cost(vec):
b, dx, dy = vec
gammap = gamma / np.sqrt(new.var ** 2 + b ** 2 * ref.var ** 2)
norm = np.sqrt(norm_a + norm_b * b ** 2)
dhn = D_hat_n / norm
dhr = D_hat_r / norm
b_n = (
_ifftwn(dhn, norm="ortho")
- _ifftwn(fourier_shift(dhr, (dx, dy)), norm="ortho") * b
- np.roll(gammap, (int(round(dx)), int(round(dy))))
)
border = 100
cost = np.ma.MaskedArray(b_n.real, mask=mix_mask, fill_value=0)
cost = cost[border:-border, border:-border]
cost = np.sum(np.abs(cost / (cost.shape[0] * cost.shape[1])))
return cost
ti = time.time()
vec0 = [b, 0.0, 0.0]
bounds = ([0.1, -0.9, -0.9], [10.0, 0.9, 0.9])
solv_beta = optimize.least_squares(
cost,
vec0,
xtol=1e-5,
jac="3-point",
method="trf",
bounds=bounds,
)
tf = time.time()
if solv_beta.success:
logger.info(("Found that beta = {}".format(solv_beta.x)))
logger.info(("Took only {} awesome seconds".format(tf - ti)))
logger.info(
("The solution was with cost {}".format(solv_beta.cost))
)
b, dx, dy = solv_beta.x
else:
logger.info("Least squares could not find our beta :(")
logger.info("Beta is overriden to be the zp ratio again")
b = n_zp / r_zp
dx = 0.0
dy = 0.0
elif iterative: # beta==True & shift==False & iterative==True
bi = b
def F(b):
gammap = gamma / np.sqrt(new.var ** 2 + b ** 2 * ref.var ** 2)
norm = np.sqrt(norm_a + norm_b * b ** 2)
b_n = (
_ifftwn(D_hat_n / norm, norm="ortho")
- gammap
- b * _ifftwn(D_hat_r / norm, norm="ortho")
)
# robust_stats = lambda b: sigma_clipped_stats(
# b_n(b).real[100:-100, 100:-100])
cost = np.ma.MaskedArray(b_n.real, mask=mix_mask, fill_value=0)
return np.sum(np.abs(cost))
ti = time.time()
solv_beta = optimize.minimize_scalar(
F,
method="bounded",
bounds=[0.1, 10.0],
options={"maxiter": 1000},
)
tf = time.time()
if solv_beta.success:
logger.info(("Found that beta = {}".format(solv_beta.x)))
logger.info(("Took only {} awesome seconds".format(tf - tf)))
b = solv_beta.x
else:
logger.info("Least squares could not find our beta :(")
logger.info("Beta is overriden to be the zp ratio again")
b = n_zp / r_zp
dx = dy = 0.0
else: # beta==True & shift==False & iterative==False
bi = b
def F(b):
gammap = gamma / np.sqrt(new.var ** 2 + b ** 2 * ref.var ** 2)
norm = np.sqrt(norm_a + norm_b * b ** 2)
b_n = (
_ifftwn(D_hat_n / norm, norm="ortho")
- gammap
- b * _ifftwn(D_hat_r / norm, norm="ortho")
)
cost = np.ma.MaskedArray(b_n.real, mask=mix_mask, fill_value=0)
return np.sum(np.abs(cost))
ti = time.time()
solv_beta = optimize.least_squares(
F, bi, ftol=1e-8, bounds=[0.1, 10.0], jac="2-point"
)
tf = time.time()
if solv_beta.success:
logger.info(("Found that beta = {}".format(solv_beta.x)))
logger.info(("Took only {} awesome seconds".format(tf - tf)))
logger.info(
("The solution was with cost {}".format(solv_beta.cost))
)
b = solv_beta.x
else:
logger.info("Least squares could not find our beta :(")
logger.info("Beta is overriden to be the zp ratio again")
b = n_zp / r_zp
dx = dy = 0.0
else:
if shift: # beta==False & shift==True
bi = n_zp / r_zp
gammap = gamma / np.sqrt(new.var ** 2 + b ** 2 * ref.var ** 2)
norm = np.sqrt(norm_a + norm_b * b ** 2)
dhn = D_hat_n / norm
dhr = D_hat_r / norm
def cost(vec):
dx, dy = vec
b_n = (
_ifftwn(dhn, norm="ortho")
- _ifftwn(fourier_shift(dhr, (dx, dy)), norm="ortho") * b
- np.roll(gammap, (int(round(dx)), int(round(dy))))
)
border = 100
cost = np.ma.MaskedArray(b_n.real, mask=mix_mask, fill_value=0)
cost = cost[border:-border, border:-border]
cost = np.sum(np.abs(cost / (cost.shape[0] * cost.shape[1])))
return cost
ti = time.time()
vec0 = [0.0, 0.0]
bounds = ([-0.9, -0.9], [0.9, 0.9])
solv_beta = optimize.least_squares(
cost,
vec0,
xtol=1e-5,
jac="3-point",
method="trf",
bounds=bounds,
)
tf = time.time()
if solv_beta.success:
logger.info(("Found that shift = {}".format(solv_beta.x)))
logger.info(("Took only {} awesome seconds".format(tf - ti)))
logger.info(
("The solution was with cost {}".format(solv_beta.cost))
)
dx, dy = solv_beta.x
else:
logger.info("Least squares could not find our shift :(")
dx = 0.0
dy = 0.0
else: # beta==False & shift==False
b = new.zp / ref.zp
dx = 0.0
dy = 0.0
norm = norm_a + norm_b * b ** 2
if dx == 0.0 and dy == 0.0:
D_hat = (D_hat_n - b * D_hat_r) / np.sqrt(norm)
else:
D_hat = (D_hat_n - fourier_shift(b * D_hat_r, (dx, dy))) / np.sqrt(
norm
)
D = _ifftwn(D_hat, norm="ortho")
if np.any(np.isnan(D.real)):
pass
d_zp = b / np.sqrt(ref.var ** 2 * b ** 2 + new.var ** 2)
P_hat = (psf_ref_hat * psf_new_hat * b) / (np.sqrt(norm) * d_zp)
P = _ifftwn(P_hat, norm="ortho").real
dx_p, dy_p = center_of_mass(P)
dx_pk, dy_pk = [val[0] for val in np.where(P == np.max(P))]
if (np.abs(dx_p - dx_pk) > 30) or (np.abs(dy_p - dy_pk) > 30):
logger.info("Resetting PSF center of mass to peak")
dx_p = dx_pk
dy_p = dy_pk
S_hat = fourier_shift(d_zp * D_hat * P_hat.conj(), (dx_p, dy_p))
kr = _ifftwn(
new.zp * psf_ref_hat_conj * b * psf_new_hat * psf_new_hat_conj / norm,
norm="ortho",
)
kn = _ifftwn(
new.zp * psf_new_hat_conj * psf_ref_hat * psf_ref_hat_conj / norm,
norm="ortho",
)
V_en = _ifftwn(
_fftwn(new.data.filled(0) + 1.0, norm="ortho")
* _fftwn(kn ** 2, s=new.data.shape),
norm="ortho",
)
V_er = _ifftwn(
_fftwn(ref.data.filled(0) + 1.0, norm="ortho")
* _fftwn(kr ** 2, s=ref.data.shape),
norm="ortho",
)
S_corr = _ifftwn(S_hat, norm="ortho") / np.sqrt(V_en + V_er)
logger.info("S_corr sigma_clipped_stats ")
logger.info(
(
"mean = {}, median = {}, std = {}\n".format(
*sigma_clipped_stats(S_corr.real.flatten(), sigma=4.0)
)
)
)
logger.info(
("Subtraction performed in {} seconds\n\n".format(time.time() - t0))
)
return D, P, S_corr.real, mix_mask
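# Minimal usage sketch (illustrative only; the file names are placeholders):
#
#   from properimage.operations import subtract
#   D, P, S_corr, mask = subtract(ref="ref.fits", new="new.fits", align=True)
#
# D is the decorrelated difference image, P its PSF, S_corr the
# matched-filtered detection image, and mask flags the unreliable pixels.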
def diff(*args, **kwargs):
warnings.warn(
"This is being deprecated in favour of `subtract`", DeprecationWarning
)
return subtract(*args, **kwargs)
class StackCombinator(Process):
"""Combination engine.
An engine for image combination in parallel, using multiprocessing.Process
class.
Uses an ensemble of images and a queue to calculate the propercoadd of
the list of images.
Parameters
----------
img_list: list or tuple
list of SingleImage instances used in the combination process
queue: multiprocessing.Queue instance
an instance of multiprocessing.Queue class where to pickle the
intermediate results.
shape: shape of the images being coadded.
stack: boolean, default True
Whether to stack the results for coadd or just obtain individual
image calculations.
If True it will pickle in queue a coadded image of the chunk's images.
If False it will pickle in queue a list of individual matched filtered
images.
fourier: boolean, default False.
Whether to calculate individual fourier transform of each s_component
image.
If stack is True this parameter will be ignored.
If stack is False, and fourier is True, the pickled object will be a
tuple of two values, with the first one containing the list of
s_components, and the second one containing the list of fourier
transformed s_components.
Returns
-------
Combinator process
An instance of Combinator.
This can be launched like a multiprocessing.Process
Example
-------
queue1 = multiprocessing.Queue()
queue2 = multiprocessing.Queue()
p1 = Combinator(list1, queue1)
p2 = Combinator(list2, queue2)
p1.start()
p2.start()
#results are in queues
result1 = queue1.get()
result2 = queue2.get()
p1.join()
p2.join()
"""
def __init__(
self,
img_list,
queue,
shape,
stack=True,
fourier=False,
*args,
**kwargs,
):
super(StackCombinator, self).__init__(*args, **kwargs)
self.list_to_combine = img_list
self.queue = queue
self.global_shape = shape
logging.getLogger("StackCombinator").info(self.global_shape)
# self.zps = ensemble.transparencies
def run(self):
S_hat = np.zeros(self.global_shape).astype(np.complex128)
psf_hat_sum = np.zeros(self.global_shape).astype(np.complex128)
mix_mask = self.list_to_combine[0].data.mask
for an_img in self.list_to_combine:
np.add(an_img.s_hat_comp, S_hat, out=S_hat, casting="same_kind")
np.add(
((an_img.zp / an_img.var) ** 2) * an_img.psf_hat_sqnorm(),
psf_hat_sum,
out=psf_hat_sum,
) # , casting='same_kind')
# psf_hat_sum = ((an_img.zp/an_img.var)**2)*an_img.psf_hat_sqnorm()
mix_mask = np.ma.mask_or(mix_mask, an_img.data.mask)
serialized = pickle.dumps([S_hat, psf_hat_sum, mix_mask])
self.queue.put(serialized)
return
def coadd(si_list, align=True, inf_loss=0.2, n_procs=2):
"""Function that takes a list of SingleImage instances
and performs a stacking using properimage R estimator
"""
logger = logging.getLogger()
for i_img, animg in enumerate(si_list):
if not isinstance(animg, si):
si_list[i_img] = si(animg)
if align:
img_list = u._align_for_coadd(si_list)
for an_img in img_list:
an_img.update_sources()
else:
img_list = si_list
shapex = np.min([an_img.data.shape[0] for an_img in img_list])
shapey = np.min([an_img.data.shape[1] for an_img in img_list])
global_shape = (shapex, shapey)
zps, meanmags = u.transparency(img_list)
for j, an_img in enumerate(img_list):
an_img.zp = zps[j]
an_img._setup_kl_a_fields(inf_loss)
psf_shapes = [an_img.stamp_shape[0] for an_img in img_list]
psf_shape = np.max(psf_shapes)
psf_shape = (psf_shape, psf_shape)
if n_procs > 1:
queues = []
procs = []
for chunk in u.chunk_it(img_list, n_procs):
queue = Queue()
proc = StackCombinator(
chunk, queue, shape=global_shape, stack=True, fourier=False
)
logger.info("starting new process")
proc.start()
queues.append(queue)
procs.append(proc)
logger.info("all chunks started, and procs appended")
S_hat = np.zeros(global_shape, dtype=np.complex128)
P_hat = np.zeros(global_shape, dtype=np.complex128)
mix_mask = np.zeros(global_shape, dtype=bool)
for q in queues:
serialized = q.get()
logger.info("loading pickles")
s_hat_comp, psf_hat_sum, mask = pickle.loads(serialized)
np.add(s_hat_comp, S_hat, out=S_hat) # , casting='same_kind')
np.add(psf_hat_sum, P_hat, out=P_hat) # , casting='same_kind')
mix_mask = np.ma.mask_or(mix_mask, mask)
P_r_hat = np.sqrt(P_hat)
P_r = _ifftwn(fourier_shift(P_r_hat, psf_shape))
P_r = P_r / np.sum(P_r)
R = _ifftwn(S_hat / np.sqrt(P_hat))
logger.info("S calculated, now starting to join processes")
for proc in procs:
logger.info("waiting for procs to finish")
proc.join()
logger.info("processes finished, now returning R")
else:
S_hat = np.zeros(global_shape, dtype=np.complex128)
P_hat = np.zeros(global_shape, dtype=np.complex128)
mix_mask = img_list[0].data.mask
for an_img in img_list:
np.add(an_img.s_hat_comp, S_hat, out=S_hat)
np.add(
((an_img.zp / an_img.var) ** 2) * an_img.psf_hat_sqnorm(),
P_hat,
out=P_hat,
)
mix_mask = np.ma.mask_or(mix_mask, an_img.data.mask)
P_r_hat = np.sqrt(P_hat)
P_r = _ifftwn(fourier_shift(P_r_hat, psf_shape))
P_r = P_r / np.sum(P_r)
R = _ifftwn(S_hat / P_r_hat)
return R, P_r, mix_mask
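# Minimal usage sketch (illustrative only; the file names are placeholders):
#
#   from properimage.operations import coadd
#   R, P_r, mask = coadd(["img1.fits", "img2.fits", "img3.fits"],
#                        align=True, n_procs=2)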
def stack_R(*args, **kwargs):
warnings.warn(
"This is being deprecated in favour of `coadd`", DeprecationWarning
)
return coadd(*args, **kwargs)
|
toros-astro/ProperImage
|
properimage/operations.py
|
Python
|
bsd-3-clause
| 20,993
|
[
"Gaussian"
] |
aab16876eff77edf482243f59ead1d44447d8bcc74a686f8b7db5257e05cd146
|
#!/usr/bin/python
#=============================================================================================
# example files for reading in MD simulation files and performing
# statistical analyses according to manuscript "Simple tests for
# validity when sampling from thermodynamic ensembles", Michael
# R. Shirts.
#
# COPYRIGHT NOTICE
#
# Written by Michael R. Shirts <mrshirts@gmail.com>.
#
# Copyright (c) 2012 The University of Virginia. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it under the terms of
# the GNU General Public License as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
# =============================================================================================
#
#===================================================================================================
# IMPORTS
#===================================================================================================
import numpy
import timeseries
import checkensemble
import optparse, sys
from optparse import OptionParser
import readmdfiles
parser = OptionParser()
parser.add_option("-f", "--replica_data", dest="metafile", help="prefix of the replica datafiles")
parser.add_option("-k", "--nolikelihood", dest="bMaxLikelihood", action="store_false",default=True,
help="Don't run maximum likelihood analysis [default = run this analysis]")
parser.add_option("-l", "--nolinearfit", dest="bLinearFit", action="store_false",default=True,
help="Don't run linear fit analysis [default = run this analysis]")
parser.add_option("-n", "--nononlinearfit", dest="bNonLinearFit", action="store_false",default=True,
help="Don't run linear fit analysis [default = run this analysis]")
parser.add_option("-e", "--energytype", dest="type", default="total",
help="the type of energy that is being analyzed [default = %default]")
parser.add_option("-b", "--nboot", dest="nboots", type="int",default=200,
help="number of bootstrap samples performed [default = %default]")
parser.add_option("-i", "--nbins", dest="nbins",type = "int", default=30,
help="number of bins for bootstrapping [default = %default]")
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",default=False,
help="more verbosity")
parser.add_option("-g", "--figurename", dest="figname", default='figure.pdf',
help="name for the figure")
parser.add_option("-s", "--seed", dest="seed", type = "int", default=None,
help="random seed for bootstrap sampling")
parser.add_option("-c", "--efficiency", dest="efficiency", type = "float", default=None,
help="statistical efficiency to overwrite the calculated statistical efficiency")
parser.add_option("-u", "--useefficiency", dest="useg", type = "string", default='subsample',
help= "calculate the efficiency by scaling the uncertainty, or by subsampling the input data")
parser.add_option("--filetype", dest="filetype", type = "string", default='flatfile',
                  help= "specifies the type of the file analyzed. Options are gromacs .xvg, charmm output, desmond .ene, and flat files")
(options, args) = parser.parse_args()
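# Example invocation (added for illustration; the file name is hypothetical):
#   python analyze-replica.py -f replica_meta.txt -e total -b 200 \
#          -u subsample --filetype gromacs
# where replica_meta.txt is the metadata file read below.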
if options.metafile is None:
print "\nQuitting: No files were input!\n"
sys.exit()
filetypes_supported = ['flatfile','gromacs','charmm','desmond']
onlyE = ['potential', 'kinetic', 'total']
requireV = ['enthalpy', 'volume', 'jointEV']
requireN = ['helmholtz', 'number', 'jointEN']
alltypes = onlyE + requireV + requireN
type = options.type
if type in requireN:
print "Error: replica exchange testing not implemented for grand canonical ensemble yet."
sys.exit()
if not (type in alltypes):
print "type of energy %s isn't defined!" % (type)
print "Must be one of ",
print alltypes
sys.exit()
if type in onlyE:
analysis_type = 'dbeta-constV'
elif (type == 'enthalpy'):
analysis_type = 'dbeta-constP'
elif (type == 'volume'):
analysis_type = 'dpressure-constB'
elif (type == 'jointEV'):
analysis_type = 'dbeta-dpressure'
elif (type == 'helmholtz'):
analysis_type = 'dbeta-constmu'
elif (type == 'number'):
analysis_type = 'dmu-constB'
elif (type == 'jointEN'):
analysis_type = 'dbeta-dmu'
else:
print "analysis type %s not defined: I'll go with total energy" % (type)
analysis_type = 'dbeta-constV'
if (not(options.useg == 'scale' or options.useg == 'subsample')):
print "Error: for -u, only options \'scale\' and \'subsample\' allowed"
sys.exit()
#===================================================================================================
# Read metadata.
#===================================================================================================
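# (Added note; file names are hypothetical.)  Each non-comment line of the
# metadata file names one replica datafile followed by its state parameters,
# e.g. for a constant-pressure (enthalpy) analysis:
#   replica0.xvg   298.15   1.0
# i.e. datafile, temperature (K) and pressure; constant-volume analyses list
# only the temperature, and constant-beta volume analyses list only the pressure.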
infile = open(options.metafile, 'r')
lines = infile.readlines()
infile.close()
datafiles = []
T_k = []
P_k = []
K = 0
for line in lines:
if line[0] != '#':
elements = line.split()
K+=1;
numcol = len(elements)
printcol = numcol-1
datafiles.append(elements[0])
if analysis_type == 'dbeta-constV':
if (numcol != 2):
print "Warning! Expecting one temperature entry, getting a different number (%d) of entries!" % (printcol)
T_k.append(float(elements[1]))
elif analysis_type == 'dbeta-constP':
if (numcol != 3):
print "Warning! Expecting one temperature and one pressure entry, getting a different number (%d) of entries!" % (printcol)
T_k.append(float(elements[1]))
P_k.append(float(elements[2]))
elif analysis_type == 'dpressure-constB':
if (numcol != 2):
print "Warning! Expecting one pressure entry, getting a different number (%d) of entries!" % (printcol)
P_k.append(float(elements[1]))
elif analysis_type == 'dbeta-dpressure':
if (numcol != 3):
print "Warning! Expecting one temperature and one pressure entry, getting a different number (%d) of entries!" % (printcol)
T_k.append(float(elements[1]))
P_k.append(float(elements[2]))
T_k = numpy.array(T_k)
P_k = numpy.array(P_k)
#===================================================================================================
# CONSTANTS
#===================================================================================================
verbose = options.verbose
nboots = options.nboots
nbins = options.nbins
bMaxLikelihood = options.bMaxLikelihood
bNonLinearFit = options.bNonLinearFit
bLinearFit = options.bLinearFit
figname = options.figname
if not (options.filetype in filetypes_supported):
print "Error: for -filetype, I currently only know about filetypes",
print filetypes_supported
sys.exit()
if type[0:5] == 'joint':
bLinearFit = False
bNonLinearFit = False
bMaxLikelihood = True
print "For joint simulations, can only run maximum likelihood, overwriting other options"
if (verbose):
print "verbosity is %s" % (str(verbose))
print "Energy type is %s" % (type)
for k in range(K):
print "%dth temperature is %f" % (k,T_k[k])
if ((type == 'volume') or (type == 'enthalpy') or (type == 'jointEV')):
print "%dth pressure is %f" % (k,P_k[k])
print "Number of bootstraps is %d" % (nboots)
print "Number of bins (not used for maximum likelihood) is %d" % (nbins)
if (bMaxLikelihood):
print "Generating maximum likelihood statistics"
else:
print "Not generating maximum likelihood statistics"
if (bLinearFit):
print "Generating linear fit statistics"
else:
print "Not generating linear fit statistics"
if (bNonLinearFit):
print "Generating nonlinear fit statistics"
else:
print "Not generating nonlinear fit statistics"
print "Figures will be named %s" % (figname)
# Shouldn't need to modify below this for standard usage
# ------------------------
kB = 1.3806488*6.0221415/1000.0 # Boltzmann's constant (kJ/mol/K) - gromacs default
kJperkcal = 4.184 # unit conversion factor
nm3perA3 = 0.001
N_k = numpy.zeros([K],int) # number of samples at each state
# check just size of all files
N_size = numpy.zeros(K,int)
filenames = []
for k in range(K):
filename = datafiles[k]
filenames.append(filename)
print "checking size of file #%d named %s..." % (k+1,filenames[k])
infile = open(filename, 'r')
lines = infile.readlines()
infile.close()
N_size[k] = len(lines)
N_max = numpy.max(N_size)
U_kn = numpy.zeros([K,N_max], dtype=numpy.float64) # U_kn[k,n] is the energy of the sample k,n
V_kn = numpy.zeros([K,N_max], dtype=numpy.float64) # V_kn[k,n] is the volume of the sample k,n
N_kn = numpy.zeros([K,N_max], dtype=numpy.float64) # N_kn[k,n] is the particle number of sample k,n, but replica exchange doesn't support different chemical potentials yet.
for k in range(K):
# Read contents of file into memory.
print "Reading %s..." % filenames[k]
infile = open(filenames[k], 'r')
lines = infile.readlines()
infile.close()
if (options.filetype == 'flatfile'): # assumes kJ/mol energies, nm3 volumes
U_kn[k,:],V_kn[k,:],N_kn[k,:],N_k[k] = readmdfiles.read_flatfile(lines,type,N_max)
elif (options.filetype == 'gromacs'):
U_kn[k,:],V_kn[k,:],N_kn[k,:],N_k[k] = readmdfiles.read_gromacs(lines,type,N_max)
elif (options.filetype == 'charmm'):
U_kn[k,:],V_kn[k,:],N_kn[k,:],N_k[k] = readmdfiles.read_charmm(lines,type,N_max)
U_kn[k,:] *= kJperkcal
V_kn[k,:] *= nm3perA3
elif (options.filetype == 'desmond'):
U_kn[k,:],V_kn[k,:],N_kn[k,:],N_k[k] = readmdfiles.read_desmond(lines,type,N_max)
U_kn[k,:] *= kJperkcal
V_kn[k,:] *= nm3perA3
else:
print "The file type %s isn't defined!" % (options.filetype)
sys.exit()
# compute correlation times for the data
# Determine indices of uncorrelated samples from potential autocorrelation analysis at state k.
print "Now determining correlation time"
if (options.efficiency is None):
g = readmdfiles.getefficiency(N_k,U_kn,V_kn,N_kn,type)
else:
g = options.efficiency*numpy.ones(K)
print "statistical inefficiency taken from input options is %f" % (options.efficiency)
if (options.useg == 'subsample'):
readmdfiles.subsample(N_k,U_kn,V_kn,N_kn,g,type)
else:
print "statistical efficiencies used to scale the statistical uncertainties determined from all data"
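# (Added note.)  The statistical inefficiency g is roughly 1 + 2*tau, where tau
# is the autocorrelation time in frames.  With -u subsample the series are
# thinned to approximately independent samples (roughly every g-th frame is
# kept), whereas -u scale keeps every frame and instead inflates the reported
# uncertainties by about sqrt(g).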
figname = options.figname
title = options.figname
for k in range(K-1):
print 'Now analyzing replicas %d and %d' % (k,k+1)
twoN = numpy.array([N_k[k],N_k[k+1]])
if (type in onlyE) or (type == 'enthalpy') or (type == 'jointEV'):
twoT = numpy.array([T_k[k],T_k[k+1]])
else:
twoT = None
if type in requireV:
twoP = numpy.array([P_k[k],P_k[k+1]])
else:
twoP = None
twoU = U_kn[k:k+2,:]
twoV = V_kn[k:k+2,:]
checkensemble.ProbabilityAnalysis(twoN,type=analysis_type,T_k=twoT,P_k=twoP,U_kn=twoU,V_kn=twoV,nbins=nbins,
reptype='bootstrap',g=g,nboots=nboots,bMaxwell=(type=='kinetic'),figname='replica',bLinearFit=bLinearFit,bNonLinearFit=bNonLinearFit,bMaxLikelihood=bMaxLikelihood,seed=options.seed)
# now, we construct a graph with all of the lines. We could write
# the probability analysis to do it, but better to do new specially designed plot here.
|
shirtsgroup/checkensemble
|
examples/analyze-replica.py
|
Python
|
gpl-2.0
| 12,048
|
[
"CHARMM",
"Desmond",
"Gromacs"
] |
28d63fc9212cb62a38122deb520cfadd4a2c8560fc9585e711bb983718245a1a
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create a rendering window
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.SetSize(200, 200)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
wavelet = vtk.vtkRTAnalyticSource()
wavelet.SetWholeExtent(-100, 100, -100, 100, 0, 0)
wavelet.SetCenter(0, 0, 0)
wavelet.SetMaximum(255)
wavelet.SetStandardDeviation(.5)
wavelet.SetXFreq(60)
wavelet.SetYFreq(30)
wavelet.SetZFreq(40)
wavelet.SetXMag(10)
wavelet.SetYMag(18)
wavelet.SetZMag(5)
wavelet.SetSubsampleRate(1)
warp = vtk.vtkWarpScalar()
warp.SetInputConnection(wavelet.GetOutputPort())
mapper = vtk.vtkDataSetMapper()
mapper.SetInputConnection(warp.GetOutputPort())
mapper.SetScalarRange(75, 290)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
renderer.ResetCamera()
renderer.GetActiveCamera().Elevation(-10)
renWin.AddRenderer(renderer)
# render the image
#
iren.Initialize()
#iren.Start()
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Filters/General/Testing/Python/WarpScalarImage.py
|
Python
|
bsd-3-clause
| 1,131
|
[
"VTK"
] |
f057afed909b9046babb23cff99fb4e6cac465effb727a72245b08101677cffc
|
import math  # math.sqrt is used in the evidence denominator below
import numpy as np  # np is used throughout (it may also be re-exported by RandomVariables)
from numpy import linalg
from RandomVariables import *
from Hypotheses import HypothesisCollection
class LinearRegression:
""" Use Occam's razor to select among one of several possible hypotheses.
Each hypothesis models target values using
\[ t = y (x, w) + \epsilon, \]
where
\[ y (x, w) = \sum_{j = 0}^{M-1} w_j \phi_j (x) \]
is some choice of basis functions $\phi_j$, and $\epsilon$ is Gaussian noise.
"""
def __init__(self, collectionOfHypotheses, observationNoise):
assert isinstance(collectionOfHypotheses, HypothesisCollection)
assert np.isreal(observationNoise)
self.hypotheses = collectionOfHypotheses # list of hypotheses
self.numHyp = len(collectionOfHypotheses) # number of hypotheses
self.XHist = np.array([]) # list of all past x values: dynamic
self.THist = np.array([]) # list of all past t values: dynamic
self.sigma = observationNoise # Gaussian noise on the target values
self.parameter = [] # k-th entry: p(w|data, H_k)
# history of past fitting parameters m and S
self.mHist = [[] for i in range(self.numHyp)]
self.SHist = [[] for i in range(self.numHyp)]
self.probHyp = [[] for i in range(self.numHyp)]
# History of past Phi matrices, one per each hypothesis
self.Phis = [np.ndarray((0,self.hypotheses[i].M)) for i in range(self.numHyp)]
for k, hyp in enumerate(self.hypotheses):
assert isinstance(hyp.parameterPrior, DiagonalGaussianRV)
# set p(H_k|"nodata") = p(H_k)
self.probHyp[k].append(self.hypotheses.prior.evaluate(k))
# set $p(w|"no data", H_k) = p(w|H_k)$ for all k:
# (i.e. set the prior of the regression to the natural prior of the hypothesis)
self.parameter.append(GaussianRV(hyp.parameterPrior.mean, hyp.parameterPrior.variance))
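# (Added commentary; a sketch of the standard conjugate update that update()
# below implements, not part of the original class.)  With a Gaussian prior
# p(w|H_k) = N(m_0, S_0) and observation noise sigma, each new point (x, t)
# gives
#     S_N^{-1} = S_0^{-1} + Phi^T Phi / sigma^2
#     m_N      = S_N (S_0^{-1} m_0 + Phi^T t / sigma^2)
# and the model evidence used for hypothesis selection is approximated as
#     p(t|H_k) ~ p(t|w_MAP, H_k) * p(w_MAP|H_k) / sqrt(det(A / (2*pi)))
# with A = Phi^T Phi / sigma^2 + I / sigma_w^2, which is exactly the
# num1 * num2 / denom expression assembled in the model-selection loop.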
def update(self, newX, newT):
""" Bayesian linear regression update.
For every hypothesis k this method computes the posterior probability
p(w|THist, newT, H_k)
This depends strongly on all Gaussian assumptions!
:param newX: New value of the input variable (transposed)
:param newT: New value of the target variable (transposed)
:return:
"""
#############################################################
# first: parameter estimation for each hypothesis
#############################################################
self.XHist = np.append(self.XHist, newX)
self.THist = np.append(self.THist, newT)
for k, (hyp, currentPara) in enumerate(zip(self.hypotheses, self.parameter)):
# get Phi (depends on newX) from hypothesis k
Phi = hyp.evaluate(newX)
# new covariance matrix
SNInv = currentPara.inv_variance + np.dot(np.transpose(Phi), Phi) / self.sigma ** 2
temp = np.dot(currentPara.inv_variance, currentPara.mean) + \
np.transpose(Phi) * newT / self.sigma ** 2
currentPara.inv_variance = SNInv # update inverse variance and variance
# SN = linalg.inv(SNInv) is not needed, as setter of GaussianDistribution calculates it
mN = np.dot(currentPara.variance, temp)
# update all other variables
currentPara.mean = mN
self.mHist[k].append(mN)
self.SHist[k].append(currentPara.variance)
#############################################################
# second: model selection
#############################################################
unnormalizedEvidence = np.zeros((self.numHyp, 1))
for k, (hyp, currentPara) in enumerate(zip(self.hypotheses, self.parameter)):
# get sigma_W from hypothesis: SIGMA = sigmaWSQ * Id_M
sigmaWSQ = hyp.parameterPrior.factor
# Update Phi matrix from all past data with current data
self.Phis[k] = np.vstack((self.Phis[k], hyp.evaluate(newX)))
Phi = self.Phis[k] # alias
N = Phi.shape[0] # number of data points
M = Phi.shape[1] # number of parameters w
# model selection:
A = (np.dot(np.transpose(Phi), Phi) / self.sigma ** 2 + np.eye(M, M) / sigmaWSQ)
# numerator 1 of model selection formula
# w^T*Phi (the y-Value according to the fitted model)
modelMeanOfT = np.dot(Phi, currentPara.mean)
randomVariable = DiagonalGaussianRV(modelMeanOfT, self.sigma ** 2)
temp = self.THist.reshape(N, -1)
# the probability of t given y (when there is noise)
num1 = randomVariable.evaluate(temp)
# numerator 2 of model selection formula:
# just the prior probability of the MAP-estimation
num2 = hyp.parameterPrior.evaluate(currentPara.mean)
# denominator of model selection formula:
denom = math.sqrt(linalg.det(A / (2 * np.pi)))
# unnormalized Evidence
unnormalizedEvidence[k] = num1 * num2 / denom
# normalize and save
normalization = np.sum(unnormalizedEvidence)
for k, prob in enumerate(self.probHyp):
normed = unnormalizedEvidence[k] / normalization
prob.append(normed)
def update_old(self, newX, newT): #newX=x',newT=t'
""" Original implementation
:param newX:
:param newT:
:return:
"""
# ------------------------------------------------
# first: parameter estimation for each hypothesis
# ------------------------------------------------
self.XHist = np.append(self.XHist, newX)
self.THist = np.append(self.THist, newT)
for k in range(self.numHyp): # for every hypothesis
# get priors
m = self.parameter[k].mean
SInv = self.parameter[k].inv_variance
# get Phi (depends on newX) from hypothesis k
Phi = self.hypotheses[k].evaluate([newX])
# new covariance matrix
SNInv = SInv + np.dot(np.transpose(Phi), Phi)/self.sigma**2
# SN = linalg.inverse(SNInv) is not needed, as setter
# of class GaussianDistribution calculates it automatically
temp = np.dot(SInv, m) + np.transpose(Phi)*newT/self.sigma**2
SN = linalg.inv(SNInv)
mN = np.dot(SN, temp)
# update all variables
self.parameter[k].mean = mN
self.parameter[k].inv_variance = SNInv
# variance gets calculated inside class automatically
self.mHist[k].append(mN)
self.SHist[k].append(self.parameter[k].variance)
# --------------------------------------------------
# second: model selection
# --------------------------------------------------
# initialize all needed containers
PhiL = []
mL = []
SL = []
unnormalizedEvidence = np.zeros((self.numHyp, 1))
for k in range(self.numHyp):
# get sigma_W from hypothesis: SIGMA = sigmaWSQ * Id_M
sigmaWSQ = self.hypotheses[k].parameterPrior.factor
# build huge Phi matrix from all past data and insert in index k
# so PhiL is a list of Phi matrices
PhiL.append(self.hypotheses[k].evaluate(self.XHist))
Phi = PhiL[k] # matrix Phi for the current hypothesis
N = Phi.shape[0] # number of data points
M = Phi.shape[1] # number of parameters w
# model selection:
A = (np.dot(np.transpose(Phi), Phi)/self.sigma**2 +
np.eye(M, M)/sigmaWSQ)
currentPara = self.parameter[k]
# numerator 1 of model selection formula
# w^T*Phi (the y-Value according to the fitted model)
modelMeanOfT = np.dot(Phi, currentPara.mean)
randomVariable = GaussianRV(modelMeanOfT,
self.sigma**2*np.eye(N, N))
temp = self.THist.reshape(N, -1)
# the probability of t given y (when there is noise)
num1 = randomVariable.evaluate(temp)
currentHypo = self.hypotheses[k]
# numerator 2 of model selection formula:
# just the prior probability of the MAP-estimation
num2 = currentHypo.parameterPrior.evaluate(currentPara.mean)
# denominator of model selection formula:
denom = math.sqrt(linalg.det(A/(2*np.pi)))
# unnormalized Evidence
unnormalizedEvidence[k] = num1*num2/denom
normalization = np.sum(unnormalizedEvidence)
for k in range(self.numHyp): # normalize and save
normed = unnormalizedEvidence[k]/normalization
#print(normed)
self.probHyp[k].append(normed)
|
mdbenito/ModelSelection
|
src/ModelSelection.py
|
Python
|
gpl-3.0
| 9,042
|
[
"Gaussian"
] |
dd9d42e2553a9729f6faf8bfe31745148f8c48ea539ecbaf1ff3d061be2ae619
|
import numpy as np
import pytest
import psi4
from psi4.driver.procrouting.response.scf_products import (TDRSCFEngine,
TDUSCFEngine)
from .utils import compare_arrays, compare_values
def build_RHF_AB_C1_singlet(wfn):
mints = psi4.core.MintsHelper(wfn.basisset())
Co = wfn.Ca_subset("SO", "OCC")
Cv = wfn.Ca_subset("SO", "VIR")
V_iajb = mints.mo_eri(Co, Cv, Co, Cv).to_array()
V_abij = mints.mo_eri(Cv, Cv, Co, Co).to_array()
Fab = psi4.core.triplet(Cv, wfn.Fa(), Cv, True, False, False).to_array()
Fij = psi4.core.triplet(Co, wfn.Fa(), Co, True, False, False).to_array()
ni = Fij.shape[0]
na = Fab.shape[0]
nia = ni * na
A_ref = np.einsum("ab,ij->iajb", Fab, np.eye(ni))
A_ref -= np.einsum("ab,ij->iajb", np.eye(na), Fij)
A_ref += 2 * V_iajb - np.einsum("abij->iajb", V_abij)
B_ref = 2 * V_iajb - V_iajb.swapaxes(0, 2)
return A_ref, B_ref
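# (Added commentary, not part of the original test file.)  build_RHF_AB_C1_singlet
# above assembles the standard singlet RPA/TDA orbital-Hessian blocks for the
# C1 (no-symmetry) case,
#     A_{ia,jb} = d_{ij} F_{ab} - d_{ab} F_{ij} + 2 (ia|jb) - (ij|ab)
#     B_{ia,jb} = 2 (ia|jb) - (ib|ja)
# with F the Fock matrix in the occupied/virtual MO blocks and (pq|rs) the
# MO-basis two-electron integrals; the symmetry-aware builder below constructs
# the same matrices block-by-block per irrep.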
def build_RHF_AB_singlet(wfn):
mints = psi4.core.MintsHelper(wfn.basisset())
Co = wfn.Ca_subset("SO", "OCC")
Cv = wfn.Ca_subset("SO", "VIR")
Fab = psi4.core.triplet(Cv, wfn.Fa(), Cv, True, False, False).to_array()
Fij = psi4.core.triplet(Co, wfn.Fa(), Co, True, False, False).to_array()
# mo_eri can't handle systems with symmetry. We need to work around this.
ao_eri = mints.ao_eri()
ao2so = wfn.aotoso()
# The h'th irrep stores the block where ia has symmetry h.
# The elements are indexed by ov pairs. Elements are in ascending order
# of the occupied element of the pair.
A_blocks = []
B_blocks = []
for hjb in range(wfn.nirrep()):
hia = hjb
A_block = []
B_block = []
for hi in range(wfn.nirrep()):
A_block.append([])
B_block.append([])
ha = hia ^ hi
Ca = np.matmul(ao2so.nph[ha], Cv.nph[ha])
Ci = np.matmul(ao2so.nph[hi], Co.nph[hi])
for hj in range(wfn.nirrep()):
hb = hjb ^ hj
Cb = np.matmul(ao2so.nph[hb], Cv.nph[hb])
Cj = np.matmul(ao2so.nph[hj], Co.nph[hj])
V_iajb = np.einsum("pqrs, pP, qQ, rR, sS -> PQRS", ao_eri, Ci, Ca, Cj, Cb, optimize=True)
V_jaib = np.einsum("pqrs, pP, qQ, rR, sS -> PQRS", ao_eri, Cj, Ca, Ci, Cb, optimize=True)
V_abij = np.einsum("pqrs, pP, qQ, rR, sS -> PQRS", ao_eri, Ca, Cb, Ci, Cj, optimize=True)
A_ref = 2 * V_iajb - np.einsum("abij->iajb", V_abij)
if ha == hb and hi == hj:
A_ref += np.einsum("ab,ij->iajb", Fab[ha], np.eye(Fij[hi].shape[0]), optimize=True)
A_ref -= np.einsum("ij,ab->iajb", Fij[hi], np.eye(Fab[ha].shape[0]), optimize=True)
B_ref = 2 * V_iajb - V_jaib.swapaxes(0, 2)
shape_tuple = (A_ref.shape[0] * A_ref.shape[1], A_ref.shape[2] * A_ref.shape[3])
A_block[-1].append(A_ref.reshape(shape_tuple))
B_block[-1].append(B_ref.reshape(shape_tuple))
A_blocks.append(np.block(A_block))
B_blocks.append(np.block(B_block))
return A_blocks, B_blocks
def build_RHF_AB_C1_singlet_df(wfn):
orb = wfn.get_basisset("ORBITAL")
mints = psi4.core.MintsHelper(orb)
Co = wfn.Ca_subset("SO", "OCC")
Cv = wfn.Ca_subset("SO", "VIR")
zero_bas = psi4.core.BasisSet.zero_ao_basis_set()
aux = wfn.get_basisset("DF_BASIS_SCF")
Ppq = np.squeeze(mints.ao_eri(aux, zero_bas, orb, orb))
metric = mints.ao_eri(aux, zero_bas, aux, zero_bas)
metric.power(-0.5, 1.e-14)
metric = np.squeeze(metric)
Qpq = np.einsum("QP,Ppq->Qpq", metric, Ppq, optimize=True)
Qij = np.einsum("Qpq, pi, qj -> Qij", Qpq, Co, Co)
Qab = np.einsum("Qpq, pa, qb -> Qab", Qpq, Cv, Cv)
Qia = np.einsum("Qpq, pi, qa -> Qia", Qpq, Co, Cv)
V_iajb = np.einsum("Qia, Qjb -> iajb", Qia, Qia)
V_abij = np.einsum("Qab, Qij -> abij", Qab, Qij)
Fab = psi4.core.triplet(Cv, wfn.Fa(), Cv, True, False, False).to_array()
Fij = psi4.core.triplet(Co, wfn.Fa(), Co, True, False, False).to_array()
ni = Fij.shape[0]
na = Fab.shape[0]
nia = ni * na
A_ref = np.einsum("ab,ij->iajb", Fab, np.eye(ni))
A_ref -= np.einsum("ab,ij->iajb", np.eye(na), Fij)
A_ref += 2 * V_iajb - np.einsum("abij->iajb", V_abij)
B_ref = 2 * V_iajb - V_iajb.swapaxes(0, 2)
return A_ref, B_ref
def build_RHF_AB_singlet_df(wfn):
orb = wfn.get_basisset("ORBITAL")
mints = psi4.core.MintsHelper(wfn.basisset())
Co = wfn.Ca_subset("SO", "OCC")
Cv = wfn.Ca_subset("SO", "VIR")
Fab = psi4.core.triplet(Cv, wfn.Fa(), Cv, True, False, False).to_array()
Fij = psi4.core.triplet(Co, wfn.Fa(), Co, True, False, False).to_array()
zero_bas = psi4.core.BasisSet.zero_ao_basis_set()
aux = wfn.get_basisset("DF_BASIS_SCF")
Ppq = np.squeeze(mints.ao_eri(aux, zero_bas, orb, orb))
metric = mints.ao_eri(aux, zero_bas, aux, zero_bas)
metric.power(-0.5, 1.e-14)
metric = np.squeeze(metric)
Qpq = np.einsum("QP,Ppq->Qpq", metric, Ppq, optimize=True)
ao2so = wfn.aotoso()
# The h'th irrep stores the block where ia has symmetry h.
# The elements are indexed by ov pairs. Elements are in ascending order
# of the occupied element of the pair.
A_blocks = []
B_blocks = []
for hjb in range(wfn.nirrep()):
hia = hjb
A_block = []
B_block = []
for hi in range(wfn.nirrep()):
A_block.append([])
B_block.append([])
ha = hia ^ hi
Ca = np.matmul(ao2so.nph[ha], Cv.nph[ha])
Ci = np.matmul(ao2so.nph[hi], Co.nph[hi])
for hj in range(wfn.nirrep()):
hb = hjb ^ hj
Cb = np.matmul(ao2so.nph[hb], Cv.nph[hb])
Cj = np.matmul(ao2so.nph[hj], Co.nph[hj])
Qij = np.einsum("Ppq, pi, qj -> Pij", Qpq, Ci, Cj, optimize=True)
Qab = np.einsum("Ppq, pa, qb -> Pab", Qpq, Ca, Cb, optimize=True)
Qia = np.einsum("Ppq, pi, qa -> Pia", Qpq, Ci, Ca, optimize=True)
Qjb = np.einsum("Ppq, pj, qb -> Pjb", Qpq, Cj, Cb, optimize=True)
Qja = np.einsum("Ppq, pj, qa -> Pja", Qpq, Cj, Ca, optimize=True)
Qib = np.einsum("Ppq, pi, qb -> Pib", Qpq, Ci, Cb, optimize=True)
V_iajb = np.einsum("Pia, Pjb -> iajb", Qia, Qjb, optimize=True)
V_jaib = np.einsum("Pia, Pjb -> iajb", Qja, Qib, optimize=True)
V_abij = np.einsum("Pij, Pab -> abij", Qij, Qab, optimize=True)
A_ref = 2 * V_iajb - np.einsum("abij->iajb", V_abij)
if ha == hb and hi == hj:
A_ref += np.einsum("ab,ij->iajb", Fab[ha], np.eye(Fij[hi].shape[0]), optimize=True)
A_ref -= np.einsum("ij,ab->iajb", Fij[hi], np.eye(Fab[ha].shape[0]), optimize=True)
B_ref = 2 * V_iajb - V_jaib.swapaxes(0, 2)
shape_tuple = (A_ref.shape[0] * A_ref.shape[1], A_ref.shape[2] * A_ref.shape[3])
A_block[-1].append(A_ref.reshape(shape_tuple))
B_block[-1].append(B_ref.reshape(shape_tuple))
A_blocks.append(np.block(A_block))
B_blocks.append(np.block(B_block))
return A_blocks, B_blocks
def build_RHF_AB_C1_triplet(wfn):
mints = psi4.core.MintsHelper(wfn.basisset())
Co = wfn.Ca_subset("SO", "OCC")
Cv = wfn.Ca_subset("SO", "VIR")
V_iajb = mints.mo_eri(Co, Cv, Co, Cv).to_array()
V_abij = mints.mo_eri(Cv, Cv, Co, Co).to_array()
Fab = psi4.core.triplet(Cv, wfn.Fa(), Cv, True, False, False).to_array()
Fij = psi4.core.triplet(Co, wfn.Fa(), Co, True, False, False).to_array()
ni = Fij.shape[0]
na = Fab.shape[0]
nia = ni * na
A_ref = np.einsum("ab,ij->iajb", Fab, np.eye(ni))
A_ref -= np.einsum("ab,ij->iajb", np.eye(na), Fij)
A_ref -= np.einsum("abij->iajb", V_abij)
B_ref = -V_iajb.swapaxes(0, 2)
return A_ref, B_ref
def build_UHF_AB_C1(wfn):
mints = psi4.core.MintsHelper(wfn.basisset())
CI = wfn.Ca_subset("SO", "OCC")
CA = wfn.Ca_subset("SO", "VIR")
V_IAJB = mints.mo_eri(CI, CA, CI, CA).to_array()
V_ABIJ = mints.mo_eri(CA, CA, CI, CI).to_array()
FAB = psi4.core.triplet(CA, wfn.Fa(), CA, True, False, False).to_array()
FIJ = psi4.core.triplet(CI, wfn.Fa(), CI, True, False, False).to_array()
nI = FIJ.shape[0]
nA = FAB.shape[0]
nIA = nI * nA
A = {}
B = {}
A['IAJB'] = np.einsum("AB,IJ->IAJB", FAB, np.eye(nI))
A['IAJB'] -= np.einsum("AB,IJ->IAJB", np.eye(nA), FIJ)
A['IAJB'] += V_IAJB
A['IAJB'] -= np.einsum("ABIJ->IAJB", V_ABIJ)
B['IAJB'] = V_IAJB - V_IAJB.swapaxes(0, 2)
Ci = wfn.Cb_subset("SO", "OCC")
Ca = wfn.Cb_subset("SO", "VIR")
V_iajb = mints.mo_eri(Ci, Ca, Ci, Ca).to_array()
V_abij = mints.mo_eri(Ca, Ca, Ci, Ci).to_array()
Fab = psi4.core.triplet(Ca, wfn.Fb(), Ca, True, False, False).to_array()
Fij = psi4.core.triplet(Ci, wfn.Fb(), Ci, True, False, False).to_array()
ni = Fij.shape[0]
na = Fab.shape[0]
nia = ni * na
A['iajb'] = np.einsum("ab,ij->iajb", Fab, np.eye(ni))
A['iajb'] -= np.einsum("ab,ij->iajb", np.eye(na), Fij)
A['iajb'] += V_iajb
A['iajb'] -= np.einsum('abij->iajb', V_abij)
B['iajb'] = V_iajb - V_iajb.swapaxes(0, 2)
V_IAjb = mints.mo_eri(CI, CA, Ci, Ca).to_array()
V_iaJB = mints.mo_eri(Ci, Ca, CI, CA).to_array()
A['IAjb'] = V_IAjb
A['iaJB'] = V_iaJB
B['IAjb'] = V_IAjb
B['iaJB'] = V_iaJB
return A, B
@pytest.mark.unittest
@pytest.mark.tdscf
def test_restricted_TDA_singlet_c1():
"Build out the full CIS/TDA hamiltonian (A) col by col with the product engine"
h2o = psi4.geometry("""
O
H 1 0.96
H 1 0.96 2 104.5
symmetry c1
""")
psi4.set_options({"scf_type": "pk", 'save_jk': True})
e, wfn = psi4.energy("hf/cc-pvdz", molecule=h2o, return_wfn=True)
A_ref, _ = build_RHF_AB_C1_singlet(wfn)
ni, na, _, _ = A_ref.shape
nia = ni * na
A_ref = A_ref.reshape((nia, nia))
# Build engine
eng = TDRSCFEngine(wfn, ptype='tda', triplet=False)
# our "guess" vectors
ID = [psi4.core.Matrix.from_array(v.reshape((ni, na))) for v in tuple(np.eye(nia).T)]
A_test = np.column_stack([x.to_array().flatten() for x in eng.compute_products(ID)[0]])
assert compare_arrays(A_ref, A_test, 8, "RHF Ax C1 products")
@pytest.mark.unittest
@pytest.mark.tdscf
def test_restricted_TDA_singlet():
"Build out the full CIS/TDA hamiltonian (A) col by col with the product engine"
h2o = psi4.geometry("""
O
H 1 0.96
H 1 0.96 2 104.5
""")
psi4.set_options({"scf_type": "pk", 'save_jk': True})
e, wfn = psi4.energy("hf/cc-pvdz", molecule=h2o, return_wfn=True)
A_blocks, B_blocks = build_RHF_AB_singlet(wfn)
eng = TDRSCFEngine(wfn, ptype='tda', triplet=False)
vir_dim = wfn.nmopi() - wfn.doccpi()
for hia, A_block in enumerate(A_blocks):
ID = []
# Construct a matrix for each (O, V) pair with hia symmetry.
for hi in range(wfn.nirrep()):
for i in range(wfn.Ca_subset("SO", "OCC").coldim()[hi]):
for a in range(wfn.Ca_subset("SO", "VIR").coldim()[hi ^ hia]):
matrix = psi4.core.Matrix("Test Matrix", wfn.doccpi(), vir_dim, hia)
matrix.set(hi, i, a, 1)
ID.append(matrix)
x = eng.compute_products(ID)[0][0]
# Assemble the A values as a single (ia, jb) matrix, all possible ia and jb of symmetry hia.
A_test = np.column_stack([np.concatenate([y.flatten() for y in x.to_array()]) for x in eng.compute_products(ID)[0]])
assert compare_arrays(A_block, A_test, 8, "RHF Ax C2v products")
@pytest.mark.unittest
@pytest.mark.tdscf
def test_restricted_TDA_singlet_df_c1():
"Build out the full CIS/TDA hamiltonian (A) col by col with the product engine"
h2o = psi4.geometry("""
O
H 1 0.96
H 1 0.96 2 104.5
symmetry c1
""")
psi4.set_options({"scf_type": "df", 'save_jk': True})
e, wfn = psi4.energy("hf/cc-pvdz", molecule=h2o, return_wfn=True)
A_ref, _ = build_RHF_AB_C1_singlet_df(wfn)
ni, na, _, _ = A_ref.shape
nia = ni * na
A_ref = A_ref.reshape((nia, nia))
# Build engine
eng = TDRSCFEngine(wfn, ptype='tda', triplet=False)
# our "guess" vectors
ID = [psi4.core.Matrix.from_array(v.reshape((ni, na))) for v in tuple(np.eye(nia).T)]
A_test = np.column_stack([x.to_array().flatten() for x in eng.compute_products(ID)[0]])
assert compare_arrays(A_ref, A_test, 8, "DF-RHF Ax C1 products")
@pytest.mark.unittest
@pytest.mark.tdscf
def test_restricted_TDA_singlet_df():
"Build out the full CIS/TDA hamiltonian (A) col by col with the product engine"
h2o = psi4.geometry("""
O
H 1 0.96
H 1 0.96 2 104.5
""")
psi4.set_options({"scf_type": "df", 'save_jk': True})
e, wfn = psi4.energy("hf/cc-pvdz", molecule=h2o, return_wfn=True)
A_blocks, B_blocks = build_RHF_AB_singlet_df(wfn)
eng = TDRSCFEngine(wfn, ptype='tda', triplet=False)
vir_dim = wfn.nmopi() - wfn.doccpi()
for hia, A_block in enumerate(A_blocks):
ID = []
# Construct a matrix for each (O, V) pair with hia symmetry.
for hi in range(wfn.nirrep()):
for i in range(wfn.Ca_subset("SO", "OCC").coldim()[hi]):
for a in range(wfn.Ca_subset("SO", "VIR").coldim()[hi ^ hia]):
matrix = psi4.core.Matrix("Test Matrix", wfn.doccpi(), vir_dim, hia)
matrix.set(hi, i, a, 1)
ID.append(matrix)
x = eng.compute_products(ID)[0][0]
# Assemble the A values as a single (ia, jb) matrix, all possible ia and jb of symmetry hia.
A_test = np.column_stack([np.concatenate([y.flatten() for y in x.to_array()]) for x in eng.compute_products(ID)[0]])
assert compare_arrays(A_block, A_test, 8, "DF-RHF Ax C2v products")
@pytest.mark.unittest
@pytest.mark.tdscf
def test_restricted_TDA_triplet_c1():
"Build out the full CIS/TDA hamiltonian (A) col by col with the product engine"
h2o = psi4.geometry("""
O
H 1 0.96
H 1 0.96 2 104.5
symmetry c1
""")
psi4.set_options({"scf_type": "pk", 'save_jk': True})
e, wfn = psi4.energy("hf/cc-pvdz", molecule=h2o, return_wfn=True)
A_ref, _ = build_RHF_AB_C1_triplet(wfn)
ni, na, _, _ = A_ref.shape
nia = ni * na
A_ref = A_ref.reshape((nia, nia))
# Build engine
eng = TDRSCFEngine(wfn, ptype='tda', triplet=True)
# our "guess" vectors
ID = [psi4.core.Matrix.from_array(v.reshape((ni, na))) for v in tuple(np.eye(nia).T)]
A_test = np.column_stack([x.to_array().flatten() for x in eng.compute_products(ID)[0]])
assert compare_arrays(A_ref, A_test, 8, "RHF Ax C1 products")
@pytest.mark.unittest
@pytest.mark.tdscf
@pytest.mark.check_triplet
def test_RU_TDA_C1():
h2o = psi4.geometry("""0 1
O 0.000000 0.000000 0.135446
H -0.000000 0.866812 -0.541782
H -0.000000 -0.866812 -0.541782
symmetry c1
no_reorient
no_com
""")
psi4.set_options({"scf_type": "pk", 'save_jk': True})
e, wfn = psi4.energy("hf/sto-3g", molecule=h2o, return_wfn=True)
A_ref, _ = build_UHF_AB_C1(wfn)
ni, na, _, _ = A_ref['IAJB'].shape
nia = ni * na
A_sing_ref = A_ref['IAJB'] + A_ref['IAjb']
A_sing_ref = A_sing_ref.reshape(nia, nia)
A_trip_ref = A_ref['IAJB'] - A_ref['IAjb']
A_trip_ref = A_trip_ref.reshape(nia, nia)
sing_vals, _ = np.linalg.eigh(A_sing_ref)
trip_vals, _ = np.linalg.eigh(A_trip_ref)
trip_eng = TDRSCFEngine(wfn, ptype='tda', triplet=True)
sing_eng = TDRSCFEngine(wfn, ptype='tda', triplet=False)
ID = [psi4.core.Matrix.from_array(v.reshape((ni, na))) for v in tuple(np.eye(nia).T)]
psi4.core.print_out("\nA sing:\n" + str(A_sing_ref) + "\n\n")
psi4.core.print_out("\nA trip:\n" + str(A_trip_ref) + "\n\n")
A_trip_test = np.column_stack([x.to_array().flatten() for x in trip_eng.compute_products(ID)[0]])
assert compare_arrays(A_trip_ref, A_trip_test, 8, "Triplet Ax C1 products")
A_sing_test = np.column_stack([x.to_array().flatten() for x in sing_eng.compute_products(ID)[0]])
assert compare_arrays(A_sing_ref, A_sing_test, 8, "Singlet Ax C1 products")
sing_vals_2, _ = np.linalg.eigh(A_sing_test)
trip_vals_2, _ = np.linalg.eigh(A_trip_test)
psi4.core.print_out("\n\n SINGLET EIGENVALUES\n")
for x, y in zip(sing_vals, sing_vals_2):
psi4.core.print_out("{:10.6f} {:10.6f}\n".format(x, y))
# assert compare_values(x, y, 4, "Singlet ROOT")
psi4.core.print_out("\n\n Triplet EIGENVALUES\n")
for x, y in zip(trip_vals, trip_vals_2):
psi4.core.print_out("{:10.6f} {:10.6f}\n".format(x, y))
# assert compare_values(x, y, 4, "Triplet Root")
for x, y in zip(sing_vals, sing_vals_2):
assert compare_values(x, y, 4, "Singlet ROOT")
for x, y in zip(trip_vals, trip_vals_2):
assert compare_values(x, y, 4, "Triplet Root")
@pytest.mark.unittest
@pytest.mark.tdscf
def test_restricted_RPA_singlet_c1():
"Build out the full CIS/TDA hamiltonian (A) col by col with the product engine"
h2o = psi4.geometry("""
O
H 1 0.96
H 1 0.96 2 104.5
symmetry c1
""")
psi4.set_options({"scf_type": "pk", 'save_jk': True})
e, wfn = psi4.energy("hf/cc-pvdz", molecule=h2o, return_wfn=True)
A_ref, B_ref = build_RHF_AB_C1_singlet(wfn)
ni, na, _, _ = A_ref.shape
nia = ni * na
A_ref = A_ref.reshape((nia, nia))
B_ref = B_ref.reshape((nia, nia))
P_ref = A_ref + B_ref
M_ref = A_ref - B_ref
# Build engine
eng = TDRSCFEngine(wfn, ptype='rpa', triplet=False)
# our "guess" vectors
ID = [psi4.core.Matrix.from_array(v.reshape((ni, na))) for v in tuple(np.eye(nia).T)]
Px, Mx = eng.compute_products(ID)[:-1]
P_test = np.column_stack([x.to_array().flatten() for x in Px])
assert compare_arrays(P_ref, P_test, 8, "RHF (A+B)x C1 products")
M_test = np.column_stack([x.to_array().flatten() for x in Mx])
assert compare_arrays(M_ref, M_test, 8, "RHF (A-B)x C1 products")
@pytest.mark.unittest
@pytest.mark.tdscf
def test_restricted_RPA_triplet_c1():
"Build out the full CIS/TDA hamiltonian (A) col by col with the product engine"
h2o = psi4.geometry("""
O
H 1 0.96
H 1 0.96 2 104.5
symmetry c1
""")
psi4.set_options({"scf_type": "pk", 'save_jk': True})
e, wfn = psi4.energy("hf/cc-pvdz", molecule=h2o, return_wfn=True)
A_ref, B_ref = build_RHF_AB_C1_triplet(wfn)
ni, na, _, _ = A_ref.shape
nia = ni * na
A_ref = A_ref.reshape((nia, nia))
B_ref = B_ref.reshape((nia, nia))
P_ref = A_ref + B_ref
M_ref = A_ref - B_ref
# Build engine
eng = TDRSCFEngine(wfn, ptype='rpa', triplet=True)
# our "guess" vectors
ID = [psi4.core.Matrix.from_array(v.reshape((ni, na))) for v in tuple(np.eye(nia).T)]
Px, Mx = eng.compute_products(ID)[:-1]
P_test = np.column_stack([x.to_array().flatten() for x in Px])
assert compare_arrays(P_ref, P_test, 8, "RHF (A+B)x C1 products")
M_test = np.column_stack([x.to_array().flatten() for x in Mx])
assert compare_arrays(M_ref, M_test, 8, "RHF (A-B)x C1 products")
@pytest.mark.unittest
@pytest.mark.tdscf
def test_unrestricted_TDA_C1():
ch2 = psi4.geometry("""
0 3
c
h 1 1.0
h 1 1.0 2 125.0
symmetry c1
""")
psi4.set_options({"scf_type": "pk", 'reference': 'UHF', 'save_jk': True})
e, wfn = psi4.energy("hf/cc-pvdz", molecule=ch2, return_wfn=True)
A_ref, B_ref = build_UHF_AB_C1(wfn)
nI, nA, _, _ = A_ref['IAJB'].shape
nIA = nI * nA
ni, na, _, _ = A_ref['iajb'].shape
nia = ni * na
eng = TDUSCFEngine(wfn, ptype='tda')
X_jb = [psi4.core.Matrix.from_array(v.reshape((ni, na))) for v in tuple(np.eye(nia).T)]
zero_jb = [psi4.core.Matrix(ni, na) for x in range(nIA)]
X_JB = [psi4.core.Matrix.from_array(v.reshape((nI, nA))) for v in tuple(np.eye(nIA).T)]
zero_JB = [psi4.core.Matrix(nI, nA) for x in range(nia)]
# Guess Identity:
# X_I0 X_0I = X_I0 X_0I
# [ I{nOV x nOV} | 0{nOV x nov}] = [ X{KC,JB} | 0{KC, jb}]
# [ 0{nov x nOV} | I{nov x nov}] [ 0{kc,JB} | X{kc, jb}]
# Products:
# [ A{IA, KC} A{IA, kc}] [ I{KC, JB} | 0{KC,jb}] = [A x X_I0] = [ A_IAJB, A_iaJB]
# [ A{ia, KC} A{ia, kc}] [ O{kc, JB} | X{kc,jb}] [A x X_0I] = [ A_IAjb, A_iajb]
X_I0 = [[x, zero] for x, zero in zip(X_JB, zero_jb)]
X_0I = [[zero, x] for zero, x in zip(zero_JB, X_jb)]
Ax_I0 = eng.compute_products(X_I0)[0]
Ax_0I = eng.compute_products(X_0I)[0]
A_IAJB_test = np.column_stack([x[0].to_array().flatten() for x in Ax_I0])
assert compare_arrays(A_ref['IAJB'].reshape(nIA, nIA), A_IAJB_test, 8, "A_IAJB")
A_iaJB_test = np.column_stack([x[1].to_array().flatten() for x in Ax_I0])
assert compare_arrays(A_ref['iaJB'].reshape(nia, nIA), A_iaJB_test, 8, "A_iaJB")
A_IAjb_test = np.column_stack([x[0].to_array().flatten() for x in Ax_0I])
assert compare_arrays(A_ref['IAjb'].reshape(nIA, nia), A_IAjb_test, 8, "A_IAjb")
A_iajb_test = np.column_stack([x[1].to_array().flatten() for x in Ax_0I])
assert compare_arrays(A_ref['iajb'].reshape(nia, nia), A_iajb_test, 8, "A_iajb")
@pytest.mark.unittest
@pytest.mark.tdscf
def test_unrestricted_RPA_C1():
ch2 = psi4.geometry("""
0 3
c
h 1 1.0
h 1 1.0 2 125.0
symmetry c1
""")
psi4.set_options({"scf_type": "pk", 'reference': 'UHF', 'save_jk': True})
e, wfn = psi4.energy("hf/cc-pvdz", molecule=ch2, return_wfn=True)
A_ref, B_ref = build_UHF_AB_C1(wfn)
nI, nA, _, _ = A_ref['IAJB'].shape
nIA = nI * nA
ni, na, _, _ = A_ref['iajb'].shape
nia = ni * na
P_ref = {k: A_ref[k] + B_ref[k] for k in A_ref.keys()}
M_ref = {k: A_ref[k] - B_ref[k] for k in A_ref.keys()}
eng = TDUSCFEngine(wfn, ptype='rpa')
X_jb = [psi4.core.Matrix.from_array(v.reshape((ni, na))) for v in tuple(np.eye(nia).T)]
zero_jb = [psi4.core.Matrix(ni, na) for x in range(nIA)]
X_JB = [psi4.core.Matrix.from_array(v.reshape((nI, nA))) for v in tuple(np.eye(nIA).T)]
zero_JB = [psi4.core.Matrix(nI, nA) for x in range(nia)]
# Guess Identity:
# X_I0 X_0I = X_I0 X_0I
# [ I{nOV x nOV} | 0{nOV x nov}] = [ X{KC,JB} | 0{KC, jb}]
# [ 0{nov x nOV} | I{nov x nov}] [ 0{kc,JB} | X{kc, jb}]
# Products:
# [ A+/-B{IA, KC} A+/-B{IA, kc}] [ I{KC, JB} | 0{KC,jb}] = [A+/-B x X_I0] = [ (A+/-B)_IAJB, (A+/-B)_iaJB]
# [ A+/-B{ia, KC} A+/-B{ia, kc}] [ O{kc, JB} | X{kc,jb}] [A+/-B x X_0I] = [ (A+/-B)_IAjb, (A+/-B)_iajb]
X_I0 = [[x, zero] for x, zero in zip(X_JB, zero_jb)]
X_0I = [[zero, x] for zero, x in zip(zero_JB, X_jb)]
Px_I0, Mx_I0 = eng.compute_products(X_I0)[:-1]
Px_0I, Mx_0I = eng.compute_products(X_0I)[:-1]
P_IAJB_test = np.column_stack([x[0].to_array().flatten() for x in Px_I0])
assert compare_arrays(P_ref['IAJB'].reshape(nIA, nIA), P_IAJB_test, 8, "P_IAJB")
M_IAJB_test = np.column_stack([x[0].to_array().flatten() for x in Mx_I0])
assert compare_arrays(M_ref['IAJB'].reshape(nIA, nIA), M_IAJB_test, 8, "M_IAJB")
P_iaJB_test = np.column_stack([x[1].to_array().flatten() for x in Px_I0])
assert compare_arrays(P_ref['iaJB'].reshape(nia, nIA), P_iaJB_test, 8, "P_iaJB")
M_iaJB_test = np.column_stack([x[1].to_array().flatten() for x in Mx_I0])
assert compare_arrays(M_ref['iaJB'].reshape(nia, nIA), M_iaJB_test, 8, "M_iaJB")
P_IAjb_test = np.column_stack([x[0].to_array().flatten() for x in Px_0I])
assert compare_arrays(P_ref['IAjb'].reshape(nIA, nia), P_IAjb_test, 8, "P_IAjb")
M_IAjb_test = np.column_stack([x[0].to_array().flatten() for x in Mx_0I])
assert compare_arrays(M_ref['IAjb'].reshape(nIA, nia), M_IAjb_test, 8, "M_IAjb")
P_iajb_test = np.column_stack([x[1].to_array().flatten() for x in Px_0I])
assert compare_arrays(P_ref['iajb'].reshape(nia, nia), P_iajb_test, 8, "P_iajb")
M_iajb_test = np.column_stack([x[1].to_array().flatten() for x in Mx_0I])
assert compare_arrays(M_ref['iajb'].reshape(nia, nia), M_iajb_test, 8, "M_iajb")
|
lothian/psi4
|
tests/pytests/test_tdscf_products.py
|
Python
|
lgpl-3.0
| 24,422
|
[
"Psi4"
] |
d07a5416ed9b638d8dd26cb4b4fcac4e7bce843cca045c8e065fbf80c0945f64
|
# $HeadURL$
__RCSID__ = "$Id$"
import re
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.Subprocess import shellCall
from DIRAC.Core.Utilities import List
import DIRAC.Core.Security.File as File
from DIRAC.Core.Security.X509Chain import X509Chain
from DIRAC.Core.Security.BaseSecurity import BaseSecurity
class MyProxy( BaseSecurity ):
def uploadProxy( self, proxy = False, useDNAsUserName = False ):
"""
Upload a proxy to myproxy service.
proxy param can be:
: Default -> use current proxy
: string -> upload file specified as proxy
: X509Chain -> use chain
"""
retVal = File.multiProxyArgument( proxy )
if not retVal[ 'OK' ]:
return retVal
proxyDict = retVal[ 'Value' ]
chain = proxyDict[ 'chain' ]
proxyLocation = proxyDict[ 'file' ]
timeLeft = int( chain.getRemainingSecs()[ 'Value' ] / 3600 )
cmdArgs = [ '-n' ]
cmdArgs.append( '-s "%s"' % self._secServer )
cmdArgs.append( '-c "%s"' % ( timeLeft - 1 ) )
cmdArgs.append( '-t "%s"' % self._secMaxProxyHours )
cmdArgs.append( '-C "%s"' % proxyLocation )
cmdArgs.append( '-y "%s"' % proxyLocation )
if useDNAsUserName:
cmdArgs.append( '-d' )
else:
retVal = self._getUsername( chain )
if not retVal[ 'OK' ]:
File.deleteMultiProxy( proxyDict )
return retVal
mpUsername = retVal[ 'Value' ]
cmdArgs.append( '-l "%s"' % mpUsername )
mpEnv = self._getExternalCmdEnvironment()
#Hack to upload properly
mpEnv[ 'GT_PROXY_MODE' ] = 'old'
cmd = "myproxy-init %s" % " ".join( cmdArgs )
result = shellCall( self._secCmdTimeout, cmd, env = mpEnv )
File.deleteMultiProxy( proxyDict )
if not result['OK']:
errMsg = "Call to myproxy-init failed: %s" % result[ 'Message' ]
return S_ERROR( errMsg )
status, output, error = result['Value']
# Clean-up files
if status:
errMsg = "Call to myproxy-init failed"
extErrMsg = 'Command: %s; StdOut: %s; StdErr: %s' % ( cmd, output, error )
return S_ERROR( "%s %s" % ( errMsg, extErrMsg ) )
return S_OK()
def getDelegatedProxy( self, proxyChain, lifeTime = 604800, useDNAsUserName = False ):
"""
Get delegated proxy from MyProxy server
return S_OK( X509Chain ) / S_ERROR
"""
#TODO: Set the proxy coming in proxyString to be the proxy to use
#Get myproxy username diracgroup:diracuser
retVal = File.multiProxyArgument( proxyChain )
if not retVal[ 'OK' ]:
return retVal
proxyDict = retVal[ 'Value' ]
chain = proxyDict[ 'chain' ]
proxyLocation = proxyDict[ 'file' ]
retVal = self._generateTemporalFile()
if not retVal[ 'OK' ]:
File.deleteMultiProxy( proxyDict )
return retVal
newProxyLocation = retVal[ 'Value' ]
# myproxy-get-delegation works only with environment variables
cmdEnv = self._getExternalCmdEnvironment()
if self._secRunningFromTrustedHost:
cmdEnv['X509_USER_CERT'] = self._secCertLoc
cmdEnv['X509_USER_KEY'] = self._secKeyLoc
if 'X509_USER_PROXY' in cmdEnv:
del cmdEnv['X509_USER_PROXY']
else:
cmdEnv['X509_USER_PROXY'] = proxyLocation
cmdArgs = []
cmdArgs.append( "-s '%s'" % self._secServer )
cmdArgs.append( "-t '%s'" % ( int( lifeTime / 3600 ) ) )
cmdArgs.append( "-a '%s'" % proxyLocation )
cmdArgs.append( "-o '%s'" % newProxyLocation )
if useDNAsUserName:
cmdArgs.append( '-d' )
else:
retVal = self._getUsername( chain )
if not retVal[ 'OK' ]:
File.deleteMultiProxy( proxyDict )
return retVal
mpUsername = retVal[ 'Value' ]
cmdArgs.append( '-l "%s"' % mpUsername )
cmd = "myproxy-logon %s" % " ".join( cmdArgs )
gLogger.verbose( "myproxy-logon command:\n%s" % cmd )
result = shellCall( self._secCmdTimeout, cmd, env = cmdEnv )
File.deleteMultiProxy( proxyDict )
if not result['OK']:
errMsg = "Call to myproxy-logon failed: %s" % result[ 'Message' ]
File.deleteMultiProxy( proxyDict )
return S_ERROR( errMsg )
status, output, error = result['Value']
# Clean-up files
if status:
errMsg = "Call to myproxy-logon failed"
extErrMsg = 'Command: %s; StdOut: %s; StdErr: %s' % ( cmd, output, error )
File.deleteMultiProxy( proxyDict )
return S_ERROR( "%s %s" % ( errMsg, extErrMsg ) )
chain = X509Chain()
retVal = chain.loadProxyFromFile( newProxyLocation )
if not retVal[ 'OK' ]:
File.deleteMultiProxy( proxyDict )
return S_ERROR( "myproxy-logon failed when reading delegated file: %s" % retVal[ 'Message' ] )
File.deleteMultiProxy( proxyDict )
return S_OK( chain )
def getInfo( self, proxyChain, useDNAsUserName = False ):
"""
Get info from myproxy server
return S_OK( { 'username' : myproxyusername,
'owner' : owner DN,
'timeLeft' : secs left } ) / S_ERROR
"""
#TODO: Set the proxy coming in proxyString to be the proxy to use
#Get myproxy username diracgroup:diracuser
retVal = File.multiProxyArgument( proxyChain )
if not retVal[ 'OK' ]:
return retVal
proxyDict = retVal[ 'Value' ]
chain = proxyDict[ 'chain' ]
proxyLocation = proxyDict[ 'file' ]
# myproxy-get-delegation works only with environment variables
cmdEnv = self._getExternalCmdEnvironment()
if self._secRunningFromTrustedHost:
cmdEnv['X509_USER_CERT'] = self._secCertLoc
cmdEnv['X509_USER_KEY'] = self._secKeyLoc
if 'X509_USER_PROXY' in cmdEnv:
del cmdEnv['X509_USER_PROXY']
else:
cmdEnv['X509_USER_PROXY'] = proxyLocation
cmdArgs = []
cmdArgs.append( "-s '%s'" % self._secServer )
if useDNAsUserName:
cmdArgs.append( '-d' )
else:
retVal = self._getUsername( chain )
if not retVal[ 'OK' ]:
File.deleteMultiProxy( proxyDict )
return retVal
mpUsername = retVal[ 'Value' ]
cmdArgs.append( '-l "%s"' % mpUsername )
cmd = "myproxy-info %s" % " ".join( cmdArgs )
gLogger.verbose( "myproxy-info command:\n%s" % cmd )
result = shellCall( self._secCmdTimeout, cmd, env = cmdEnv )
File.deleteMultiProxy( proxyDict )
if not result['OK']:
errMsg = "Call to myproxy-info failed: %s" % result[ 'Message' ]
File.deleteMultiProxy( proxyDict )
return S_ERROR( errMsg )
status, output, error = result['Value']
# Clean-up files
if status:
errMsg = "Call to myproxy-info failed"
extErrMsg = 'Command: %s; StdOut: %s; StdErr: %s' % ( cmd, output, error )
return S_ERROR( "%s %s" % ( errMsg, extErrMsg ) )
infoDict = {}
usernameRE = re.compile( r"username\s*:\s*(\S*)" )
ownerRE = re.compile( r"owner\s*:\s*(\S*)" )
timeLeftRE = re.compile( r"timeleft\s*:\s*(\S*)" )
for line in List.fromChar( output, "\n" ):
match = usernameRE.search( line )
if match:
infoDict[ 'username' ] = match.group( 1 )
match = ownerRE.search( line )
if match:
infoDict[ 'owner' ] = match.group( 1 )
match = timeLeftRE.search( line )
if match:
try:
fields = List.fromChar( match.group( 1 ), ":" )
fields.reverse()
secsLeft = 0
for iP in range( len( fields ) ):
if iP == 0:
secsLeft += int( fields[ iP ] )
elif iP == 1:
secsLeft += int( fields[ iP ] ) * 60
elif iP == 2:
secsLeft += int( fields[ iP ] ) * 3600
infoDict[ 'timeLeft' ] = secsLeft
except Exception, x:
print x
return S_OK( infoDict )
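# Illustrative usage (added sketch, not part of the original module; it assumes
# the DIRAC security configuration already points at a reachable MyProxy
# server and that a valid proxy chain is available):
#   mp = MyProxy()
#   res = mp.uploadProxy()                # upload the current proxy
#   res = mp.getDelegatedProxy( chain )   # S_OK( X509Chain ) on success
#   res = mp.getInfo( chain )             # S_OK( {'username': ..., 'owner': ..., 'timeLeft': ...} )
#   if not res[ 'OK' ]:
#       print res[ 'Message' ]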
|
Sbalbp/DIRAC
|
Core/Security/MyProxy.py
|
Python
|
gpl-3.0
| 7,694
|
[
"DIRAC"
] |
03881f5932015e8cb71a63eff4313714a1e2abcfc3919f78ee0758cace2d3397
|
import threading
from collections import Counter
from pysamimport import pysam
from util import BadRead
from Queue import Queue
import time, math, sys
class Pileups(object):
def __init__(self,loci,samfiles,filter,chrreg):
self.loci = loci
self.samfiles = samfiles
self.filter = filter
self.chrreg = chrreg
class SerialPileups(Pileups):
def iterator(self):
samfiles = []
chrommap = []
for al in self.samfiles:
samfile = pysam.Samfile(al, "rb")
assert samfile.has_index(), "Cannot open BAM index for file %s"%al
samfiles.append(samfile)
chrommap.append(self.chrreg.chrommap(al))
for snvchr, snvpos, ref, alt, snvextra in self.loci:
cnts = Counter()
total = Counter()
reads = []
snvpos1 = snvpos - 1
for i, samfile in enumerate(samfiles):
try:
snvlabel = chrommap[i](snvchr)
if snvlabel != None:
for pileupcolumn in samfile.pileup(snvlabel, snvpos1, snvpos1 + 1, truncate=True):
total[i] += pileupcolumn.n
for pileupread in pileupcolumn.pileups:
try:
al, pos, base, nseg = self.filter.test(pileupread)
except BadRead, e:
cnts[(i, e.message)] += 1
continue
reads.append((al, pos, base, i))
cnts[(i, 'Good')] += 1
except ValueError:
pass
total[i] -= cnts[(i,"GapInQueryAtSNVLocus")]
# del cnts[(i,"GapInQueryAtSNVLocus")]
yield (snvchr, snvpos, ref, alt, total, reads, cnts)
class ThreadedPileups(Pileups):
def __init__(self,*args,**kw):
super(ThreadedPileups,self).__init__(*args)
self.tpb = kw.get('threadsperbam',1)
self.nb = len(self.samfiles)
self.nt = self.tpb*self.nb
self._queue = []
k = 0
for j in range(self.tpb):
for i in range(self.nb):
self._queue.append(Queue(20))
t = threading.Thread(target=self.worker,args=(i,j,k))
t.daemon = True
t.start()
k += 1
time.sleep(1)
def worker(self,i,j,k):
samfile = pysam.Samfile(self.samfiles[i], "rb")
assert samfile.has_index(), "Cannot open BAM index for file %s"%self.samfiles[i]
chrommap = self.chrreg.chrommap(self.samfiles[i])
# blocksize = int(math.ceil(len(self.loci)/self.tpb))
# for l in range(j*blocksize,min((j+1)*blocksize,len(self.loci))):
for l in range(j,len(self.loci),self.tpb):
snvchr, snvpos, ref, alt, snvextra = self.loci[l]
cnts = Counter()
total = Counter()
reads = []
snvpos1 = snvpos - 1
try:
snvlabel = chrommap(snvchr)
if snvlabel != None:
for pileupcolumn in samfile.pileup(snvlabel, snvpos1, snvpos1 + 1, truncate=True):
total[i] += pileupcolumn.n
for pileupread in pileupcolumn.pileups:
try:
al, pos, base, nseg = self.filter.test(pileupread)
except BadRead, e:
cnts[(i, e.message)] += 1
continue
reads.append((al, pos, base, i))
cnts[(i, 'Good')] += 1
except ValueError, e:
pass # raise e
total[i] -= cnts[(i,"GapInQueryAtSNVLocus")]
# del cnts[(i,"GapInQueryAtSNVLocus")]
# print >>sys.stderr, (snvchr, snvpos, ref, alt, total, cnts)
self._queue[k].put((snvchr, snvpos, ref, alt, total, reads, cnts))
return
def iterator(self):
k = 0
# for i in range(len(self.loci)):
for snvchr, snvpos, ref, alt, snvextra in self.loci:
cnts = Counter()
total = Counter()
reads = []
for i in range(len(self.samfiles)):
snvchri, snvposi, refi, alti, totali, readsi, cntsi = self._queue[k].get()
assert(snvchri == snvchr and snvposi == snvpos)
assert(i in totali or len(totali.keys()) == 0)
reads.extend(readsi)
cnts.update(cntsi)
total.update(totali)
self._queue[k].task_done()
k = (k+1)%self.nt
yield (snvchr, snvpos, ref, alt, total, reads, cnts)
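# Illustrative usage (added sketch; the variable names are hypothetical).  A
# caller supplies loci as (chrom, pos, ref, alt, extra) tuples, a list of
# indexed BAM paths, a read-filter object exposing .test(pileupread), and a
# chromosome-label registry providing .chrommap(bampath):
#   pileups = ThreadedPileups(loci, bamfiles, readfilter, chrreg, threadsperbam=2)
#   for chrom, pos, ref, alt, total, reads, cnts in pileups.iterator():
#       # total and cnts are Counters keyed by file index / (index, status);
#       # reads are (alignment, pos, base, fileindex) tuples
#       pass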
|
HorvathLab/NGS
|
attic/readCounts/src/pileups.py
|
Python
|
mit
| 4,532
|
[
"pysam"
] |
2b17f99d7b05719c872f85d26f5fccdfb701d8ee130ea369cc6b1af63d4aacc2
|
"""
This module defines the components for modified nodal analysis. The components
are defined at the bottom of this file.
Copyright 2015--2022 Michael Hayes, UCECE
"""
from __future__ import print_function
from .expr import expr
from .cexpr import ConstantDomainExpression
from .omegaexpr import AngularFourierDomainExpression
from .symbols import j, omega, jomega, s, t
from .functions import sqrt
from .sym import capitalize_name, omegasym
from .grammar import delimiters
from .immittancemixin import ImmittanceMixin
from .superpositioncurrent import SuperpositionCurrent
from .voltage import voltage
from .current import current
from .opts import Opts
import lcapy
import inspect
import sys
import sympy as sym
from warnings import warn
__all__ = ()
module = sys.modules[__name__]
cptaliases = {'E': 'VCVS', 'F': 'CCCS',
'G': 'VCCS', 'H': 'CCVS',
'r': 'Damper', 'm': 'Mass',
'k': 'Spring'}
class Cpt(ImmittanceMixin):
dependent_source = False
independent_source = False
reactive = False
need_branch_current = False
need_extra_branch_current = False
need_control_current = False
directive = False
flip_branch_current = False
ignore = False
add_series = False
add_parallel = False
equipotential_nodes = ()
is_transformer = False
def __init__(self, cct, namespace, defname, name, cpt_type, cpt_id, string,
opts_string, nodes, keyword, *args):
self.cct = cct
self.type = cpt_type
self.id = cpt_id
self.defname = defname
self.name = name
self.relname = name
self.namespace = ''
self.nodenames = nodes
self.relnodes = nodes
parts = name.split('.')
if len(parts) > 1:
self.namespace = '.'.join(parts[0:-1]) + '.'
self.relname = parts[-1]
self.relnodes = []
for node in nodes:
if node.startswith(self.namespace):
node = node[len(self.namespace):]
self.relnodes.append(node)
self._string = string
#self.net = string.split(';')[0]
self.args = args
self.explicit_args = args
self.classname = self.__class__.__name__
self.keyword = keyword
self.opts = Opts(opts_string)
self.nosim = 'nosim' in self.opts
# No defined cpt
if self.type in ('XX', 'Cable') or self.nosim:
self._cpt = lcapy.oneport.Dummy()
return
if ((args == () and self.type not in ('W', 'O', 'P', 'TP', 'TL'))
or (self.type in ('F', 'H', 'CCCS', 'CCVS') and len(args) == 1)
or (self.type == 'K' and len(args) == 2)):
# Default value is the component name
value = self.type + self.id
if False and self.type in ('V', 'I') and keyword[1] == '':
# This is too subtle and creates havoc with
# the expected behaviour of subs.
value = value[0].lower() + value[1:] + '(t)'
args += (value, )
self.args = args
classname = self.classname
# Handle aliases.
try:
classname = cptaliases[classname]
except KeyError:
pass
try:
newclass = getattr(lcapy.oneport, classname)
except AttributeError:
try:
newclass = getattr(lcapy.twoport, classname)
except AttributeError:
self._cpt = lcapy.oneport.Dummy()
return
self._cpt = newclass(*args)
def __repr__(self):
return self.__str__()
def __str__(self):
# TODO, use self._netmake() but fix for XX
return self._string
@property
def cpt(self):
return self._cpt
def _stamp(self, mna):
raise NotImplementedError('stamp method not implemented for %s' % self)
def _copy(self):
"""Make copy of net."""
return str(self)
def _arg_format(self, value):
"""Place value string inside curly braces if it contains a delimiter."""
string = str(value)
if string.startswith('{'):
return string
for delimiter in delimiters:
if delimiter in string:
return '{' + string + '}'
return string
def annotate(self, *args, **kwargs):
"""Annotate cpt by adding `kwargs` to opts.
For example, `color='blue'` or `'color=blue'`."""
s = ''
for arg in args:
s += ',' + arg
for key, val in kwargs.items():
s += ',' + key + '=' + val
opts = self.opts.copy()
opts.add(s)
return self._netmake(opts=opts)
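# Illustrative note (added): for a component parsed from 'R1 1 2',
# cpt.annotate(color='blue') rebuilds the net string with the extra drawing
# option, giving roughly 'R1 1 2; color=blue' (any options the component
# already carried are kept as well).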
def _kill(self):
"""Kill sources."""
return self._copy()
def _subs(self, subs_dict):
"""Substitute values using dictionary of substitutions.
If a scalar is passed, this is substituted for the component value.
For example, given a component, cpt, defined as 'R1 1 2' then
cpt.subs(5) and cpt.subs({'R1': 5}) are equivalent. In both
cases, the result is 'R1 1 2 5'."""
if not isinstance(subs_dict, dict):
subs_dict = {self.args[0]: subs_dict}
return self._netsubs(subs_dict=subs_dict)
def _initialize(self, ic):
"""Change initial condition to ic."""
return self._copy()
def _netsubs(self, node_map=None, zero=False, subs_dict=None):
"""Create a new net description using substitutions in `subs_dict`.
If `node_map` is not `None`, rename the nodes. If `zero` is `True`,
set args to zero."""
string = self.defname
field = 0
for node in self.relnodes:
if node_map is not None:
node = node_map[node]
string += ' ' + node
field += 1
if field == self.keyword[0]:
string += ' ' + self.keyword[1]
field += 1
if subs_dict is None:
args = self.explicit_args
else:
args = self.args
for arg in args:
if zero:
arg = 0
elif subs_dict is not None:
# Perform substitutions
arg = str(expr(arg).subs(subs_dict))
string += ' ' + self._arg_format(arg)
field += 1
if field == self.keyword[0]:
string += ' ' + self.keyword[1]
if len(args) == 0 and self.keyword[0] == 0:
string += ' ' + self.keyword[1]
opts_str = str(self.opts).strip()
if opts_str != '':
string += '; ' + opts_str
return string
def _rename_nodes(self, node_map):
"""Rename the nodes using dictionary node_map."""
return self._netsubs(node_map)
def _netmake1(self, name, nodes=None, args=None, opts=None):
if nodes is None:
nodes = self.relnodes
if args is None:
args = self.args
if not isinstance(args, tuple):
args = (args, )
if opts is None:
opts = self.opts
fmtargs = []
for arg in args:
fmtargs.append(self._arg_format(arg))
if len(fmtargs) == 1 and fmtargs[0] == name:
fmtargs = []
parts = [name]
parts.extend(nodes)
parts.extend(fmtargs)
# Insert keyword...
if self.keyword[0] == 0:
parts.append(self.keyword[1])
net = ' '.join(parts)
opts_str = str(opts).strip()
if opts_str != '':
net += '; ' + opts_str
return net
def _netmake(self, nodes=None, args=None, opts=None):
"""This keeps the same cpt name"""
return self._netmake1(self.namespace + self.relname, nodes, args, opts)
def _netmake_W(self, nodes=None, opts=None):
"""This is used for changing cpt name from L1 to W"""
return self._netmake1(self.namespace + 'W', nodes, args=(),
opts=opts)
def _netmake_O(self, nodes=None, opts=None):
"""This is used for changing cpt name from C1 to O"""
return self._netmake1(self.namespace + 'O', nodes, args=(),
opts=opts)
def _netmake_variant(self, newtype, nodes=None, args=None, opts=None,
suffix=''):
"""This is used for changing cpt name from C1 to ZC1"""
name = self.namespace + newtype + self.relname + suffix
return self._netmake1(name, nodes, args, opts)
def _select(self, kind=None):
"""Select domain kind for component."""
raise ValueError('Component not a source: %s' % self)
def _new_value(self, value, ic=None):
args = (value, )
if ic is not None:
args = (value, ic)
return self._netmake(args=args)
def _zero(self):
"""Zero value of the voltage source. This kills it but keeps it as a
voltage source in the netlist. This is required for dummy
voltage sources that are required to specify the controlling
current for CCVS and CCCS components."""
raise ValueError('Component not a source: %s' % self)
def _r_model(self):
"""Return resistive model of component."""
return self._copy()
def _s_model(self, var):
"""Return s-domain model of component."""
return self._copy()
def _ss_model(self):
"""Return state-space model of component."""
return self._copy()
def _pre_initial_model(self):
"""Return pre-initial model of component."""
return self._copy()
@property
def is_source(self):
"""Return True if component is a source (dependent or independent)"""
return self.dependent_source or self.independent_source
@property
def is_dependent_source(self):
"""Return True if component is a dependent source"""
return self.dependent_source
@property
def is_independent_source(self):
"""Return True if component is an independent source"""
return self.independent_source
@property
def _source_IV(self):
if self.cpt.is_voltage_source:
return self.cpt.Voc
elif self.cpt.is_current_source:
return self.cpt.Isc
else:
raise ValueError('%s is not a source' % self)
def in_parallel(self):
"""Return set of components in parallel with cpt."""
return self.cct.in_parallel(self.name)
def in_series(self):
"""Return set of components in series with cpt. Note, this
does not find components that do not share a node, for example,
R1 and R4 are not considered as being in series for
R1 + (R2 | R3) + R4."""
return self.cct.in_series(self.name)
@property
def is_causal(self):
"""Return True if causal component or if source produces
a causal signal."""
if self.is_source:
return self._source_IV.is_causal
return self.cpt.is_causal
@property
def is_dc(self):
"""Return True if source is dc."""
return self._source_IV.is_dc
@property
def is_ac(self):
"""Return True if source is ac."""
return self._source_IV.is_ac
@property
def has_ac(self):
"""Return True if source has ac component."""
return self._source_IV.has_ac
@property
def has_dc(self):
"""Return True if source has dc component."""
return self._source_IV.has_dc
@property
def has_noisy(self):
"""Return True if source has noisy component."""
return self._source_IV.has_noisy
@property
def has_s_transient(self):
"""Return True if source has transient component defined in s-domain."""
return self._source_IV.has_s_transient
@property
def has_t_transient(self):
"""Return True if source has transient component defined in time domain."""
return self._source_IV.has_t_transient
@property
def has_transient(self):
"""Return True if source has a transient component."""
return self._source_IV.has_transient
@property
def is_noisy(self):
"""Return True if source is noisy."""
if self.cpt.is_voltage_source:
return self.cpt.is_noisy
elif self.cpt.is_current_source:
return self.cpt.is_noisy
else:
raise ValueError('%s is not a source' % self)
@property
def is_noiseless(self):
"""Return True if component is noiseless."""
return self.cpt.is_noiseless
@property
def is_inductor(self):
"""Return True if component is an inductor."""
return self.cpt.is_inductor
@property
def is_capacitor(self):
"""Return True if component is a capacitor."""
return self.cpt.is_capacitor
@property
def is_reactance(self):
"""Return True if component is a capacitor or inductor."""
return self.is_capacitor or self.is_inductor
@property
def is_resistor(self):
"""Return True if component is a resistor."""
return self.cpt.is_resistor
@property
def is_conductor(self):
"""Return True if component is a conductor."""
return self.cpt.is_conductor
@property
def is_voltage_source(self):
"""Return True if component is a voltage source (dependent or
independent)"""
return self.cpt.is_voltage_source
@property
def is_dependent_voltage_source(self):
"""Return True if component is a dependent voltage source."""
return self.cpt.is_voltage_source and self.dependent_source
@property
def is_independent_voltage_source(self):
"""Return True if component is a independent voltage source."""
return self.cpt.is_voltage_source and self.independent_source
@property
def is_current_source(self):
"""Return True if component is a current source (dependent or
independent)"""
return self.cpt.is_current_source
@property
def is_dependent_current_source(self):
"""Return True if component is a dependent current source."""
return self.cpt.is_current_source and self.dependent_source
@property
def is_independent_current_source(self):
"""Return True if component is a independent current source."""
return self.cpt.is_current_source and self.independent_source
@property
def zeroic(self):
"""Return True if initial conditions are zero (or unspecified)."""
return self.cpt.zeroic
@property
def has_ic(self):
"""Return True if initial conditions are specified."""
return self.cpt.has_ic
@property
def I(self):
"""Current through component. The current is defined to be into the
positive node for passive devices and out of the positive node
for sources."""
return self.cct.get_I(self.name)
@property
def i(self):
"""Time-domain current through component. The current is
defined to be into the positive node for passive devices and
out of the positive node for sources."""
return self.cct.get_i(self.name)
@property
def V(self):
"""Voltage drop across component."""
return self.cct.get_Vd(self.nodenames[0], self.nodenames[1])
@property
def v(self):
"""Time-domain voltage drop across component."""
return self.cct.get_vd(self.nodenames[0], self.nodenames[1])
@property
def Isc(self):
"""Short-circuit current for component in isolation, i.e, current in
wire connected across component."""
return self.cpt.Isc.select(self.cct.kind)
@property
def isc(self):
"""Short-circuit time-domain current for component in isolation, i.e,
current in wire connected across component."""
return self.Isc.time()
@property
def Voc(self):
"""Open-circuit voltage for component in isolation."""
return self.cpt.Voc.select(self.cct.kind)
@property
def voc(self):
"""Open-circuit time-domain voltage for component in isolation."""
return self.Voc.time()
@property
def V0(self):
"""Initial voltage (for capacitors only)."""
return voltage(0)
@property
def I0(self):
"""Initial current (for inductors only)."""
return current(0)
@property
def admittance(self):
"""Self admittance of component.
The admittance is expressed in jomega form for AC circuits
and in the s-domain for transient circuits.
For the driving-point admittance measured across the component
use .dpY or .oneport().Y"""
return self.cpt.admittance._select(self.cct.kind)
@property
def impedance(self):
"""Self impedance of component.
The impedance is expressed in jomega form for AC circuits
and in the s-domain for transient circuits.
For the driving-point impedance measured across the component
use .dpZ or .oneport().Z"""
return self.cpt.impedance._select(self.cct.kind)
@property
def dpIsc(self):
"""Driving-point short-circuit current, i.e, current in wire
connected across component connected in-circuit.
"""
return self.oneport().Isc
@property
def dpisc(self):
"""Driving-point short-circuit time-domain current i.e, current in
wire connected across component in-circuit."""
return self.dpIsc.time()
@property
def dpVoc(self):
"""Driving-point open-circuit voltage across component in-circuit."""
return self.oneport().V
@property
def dpvoc(self):
"""Driving-point open-circuit time-domain voltage across component in
circuit."""
return self.dpVoc.time()
@property
def dpY(self):
"""Driving-point admittance measured across component in-circuit. For
the admittance of the component in isolation use .Y"""
return self.cct.admittance(self.nodenames[1], self.nodenames[0])
@property
def dpZ(self):
"""Driving-point impedance measured across component in-circuit. For
the impedance of the component in isolation use .Z"""
return self.cct.impedance(self.nodenames[1], self.nodenames[0])
def _dummy_node(self):
return self.cct._dummy_node()
def oneport(self):
"""Create oneport object."""
return self.cct.oneport(self.nodenames[1], self.nodenames[0])
def thevenin(self):
"""Create Thevenin oneport object."""
return self.cct.thevenin(self.nodenames[1], self.nodenames[0])
def norton(self):
"""Create Norton oneport object."""
return self.cct.norton(self.nodenames[1], self.nodenames[0])
def transfer(self, cpt):
"""Create transfer function for the s-domain voltage across the
specified cpt divided by the s-domain voltage across self."""
if isinstance(cpt, str):
cpt = self.cct._elements[cpt]
return self.cct.transfer(self.nodenames[1], self.nodenames[0],
cpt.nodenames[1], cpt.nodenames[0])
@property
def nodes(self):
"""Return list of nodes for this component. See also
nodenames."""
return [self.cct.nodes[nodename] for nodename in self.nodenames]
def connected(self):
"""Return list of components connected to this component."""
cpts = set()
for node in self.nodes:
cpts = cpts.union(set(node.connected))
return list(cpts)
def is_connected(self, cpt):
"""Return True if cpt is connected to this component."""
if isinstance(cpt, str):
for cpt1 in self.connected():
if cpt1.name == cpt:
return True
return False
return cpt in self.connected()
def open_circuit(self):
"""Apply open-circuit in series with component. Returns name of
open-circuit component."""
dummy_node = self._dummy_node()
net = self._netmake((dummy_node, ) + self.relnodes[1:])
self.cct.remove(self.name)
self.cct.add(net)
self.cct.add('O? %s %s' % (self.relnodes[0], dummy_node))
return self.cct.last_added()
def short_circuit(self):
"""Apply short-circuit across component. Returns name of voltage
source component used as the short."""
parallel_set = self.in_parallel()
for cptname in parallel_set:
cpt = self.cct.elements[cptname]
if cpt.is_voltage_source:
if cpt.value == 0:
warn('Component %s already shorted by %s' % (self.name, cptname))
else:
warn('Shorting voltage source %s in parallel with %s' % (cptname, self.name))
elif cpt.is_current_source:
warn('Shorting current source %s in parallel with %s' % (cptname, self.name))
# Could add wire or zero ohm resistor but then could not
# determine current through the short. So instead add a
# voltage source.
self.cct.add('V? %s %s 0' % (self.nodes[0].name, self.nodes[1].name))
return self.cct.last_added()
class Invalid(Cpt):
pass
class NonLinear(Invalid):
def _stamp(self, mna):
raise NotImplementedError('Cannot analyse non-linear component: %s' % self)
class TimeVarying(Invalid):
def _stamp(self, mna):
raise NotImplementedError('Cannot analyse time-varying component: %s' % self)
class Logic(Invalid):
def _stamp(self, mna):
raise NotImplementedError('Cannot analyse logic component: %s' % self)
class Misc(Invalid):
def _stamp(self, mna):
raise NotImplementedError('Cannot analyse misc component: %s' % self)
class Dummy(Cpt):
causal = True
dc = False
ac = False
zeroic = True
has_ic = None
noisy = False
class XX(Dummy):
directive = True
ignore = True
def _stamp(self, mna):
pass
def _subs(self, subs_dict):
return self._copy()
def _rename_nodes(self, node_map):
"""Rename the nodes using dictionary node_map."""
return self._copy()
def __str__(self):
return self._string
class A(XX):
pass
class IndependentSource(Cpt):
independent_source = True
def _zero(self):
"""Zero value of the source. For a voltage source this makes it a
short-circuit; for a current source this makes it
open-circuit. This effectively kills the source but keeps it
as a source in the netlist. This is needed for dummy voltage
sources that specify the controlling current for CCVS and CCCS
components.
"""
return self._netsubs(zero=True)
class DependentSource(Dummy):
dependent_source = True
def _zero(self):
return self._copy()
class RLC(Cpt):
def _s_model(self, var):
if self.Voc == 0:
return self._netmake_variant('Z', args=self.Z(var))
dummy_node = self._dummy_node()
opts = self.opts.copy()
# Strip voltage labels and save for open-circuit cpt
# in parallel with Z and V.
voltage_opts = opts.strip_voltage_labels()
znet = self._netmake_variant('Z', nodes=(self.relnodes[0], dummy_node),
args=self.Z(var), opts=opts)
# Strip voltage and current labels from voltage source.
opts.strip_all_labels()
vnet = self._netmake_variant('V', nodes=(dummy_node, self.relnodes[1]),
args=self.Voc.laplace()(var), opts=opts)
if voltage_opts == {}:
return znet + '\n' + vnet
# Create open circuit in parallel to the Z and V
# that has the voltage labels.
opts = self.opts.copy()
opts.strip_current_labels()
# Need to convert voltage labels to s-domain.
# v(t) -> V(s)
# v_C -> V_C
# v_L(t) -> V_L(s)
for opt, val in voltage_opts.items():
opts[opt] = capitalize_name(val)
onet = self._netmake_O(opts=opts)
return znet + '\n' + vnet + '\n' + onet
class RC(RLC):
def _noisy(self, T='T'):
dummy_node = self._dummy_node()
opts = self.opts.copy()
# Should check for namespace conflict if user has defined
# a noiseless resistor.
rnet = self._netmake_variant('N', nodes=(self.relnodes[0], dummy_node),
args=str(self.R), opts=opts)
# Use k_B for Boltzmann's constant to avoid clash with k symbol
# for discrete frequency
Vn = 'sqrt(4 * k_B * %s * %s)' % (T, self.args[0])
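# This is the Johnson-Nyquist thermal noise voltage spectral
# density, sqrt(4 k_B T R), with units of V/sqrt(Hz).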
vnet = self._netmake_variant('Vn', nodes=(dummy_node, self.relnodes[1]),
args=('noise', Vn), opts=opts)
return rnet + '\n' + vnet
def _stamp(self, mna):
# Inductors could also be added with this stamp, but when there is
# coupling it is easier to generate a stamp that uses the branch
# current through the L.
n1, n2 = mna._cpt_node_indexes(self)
if self.type == 'C' and mna.kind == 'dc':
Y = 0
else:
Y = self.Y.sympy
if n1 >= 0 and n2 >= 0:
mna._G[n1, n2] -= Y
mna._G[n2, n1] -= Y
if n1 >= 0:
mna._G[n1, n1] += Y
if n2 >= 0:
mna._G[n2, n2] += Y
if mna.kind == 'ivp' and self.cpt.has_ic:
I = self.Isc.sympy
if n1 >= 0:
mna._Is[n1] += I
if n2 >= 0:
mna._Is[n2] -= I
class C(RC):
reactive = True
add_parallel = True
@property
def C(self):
return self.cpt.C
def _kill(self):
"""Kill implicit sources due to initial conditions."""
return self._netmake(args=self.args[0])
def _initialize(self, ic):
"""Change initial condition to ic."""
return self._netmake(args=(self.args[0], ic))
def _pre_initial_model(self):
return self._netmake_variant('V', args=self.cpt.v0)
def _r_model(self):
dummy_node = self._dummy_node()
opts = self.opts.copy()
# Use Thevenin model. This will require the current through
# the voltage source to be explicitly computed.
Req = 'R%seq' % self.name
Veq = 'V%seq' % self.name
opts.strip_voltage_labels()
rnet = self._netmake_variant('R', suffix='eq',
nodes=(self.relnodes[0], dummy_node),
args=Req, opts=opts)
opts.strip_current_labels()
vnet = self._netmake_variant('V', suffix='eq',
nodes=(dummy_node, self.relnodes[1]),
args=('dc', Veq), opts=opts)
# TODO: the voltage labels should be added across an
# open-circuit object.
return rnet + '\n' + vnet
def _ss_model(self):
# Perhaps mangle name to ensure it does not conflict
# with another voltage source?
return self._netmake_variant('V_', args='v_%s(t)' % self.relname)
@property
def V0(self):
"""Initial voltage (for capacitors only)."""
if self.cct.kind == 'ivp' and self.cpt.has_ic:
return voltage(self.cpt.v0 / s)
return voltage(0)
class CPE(RC):
# If n == 0, then not reactive
reactive = True
class VCVS(DependentSource):
"""VCVS"""
need_branch_current = True
def _stamp(self, mna):
n1, n2, n3, n4 = mna._cpt_node_indexes(self)
m = mna._cpt_branch_index(self)
if n1 >= 0:
mna._B[n1, m] += 1
mna._C[m, n1] += 1
if n2 >= 0:
mna._B[n2, m] -= 1
mna._C[m, n2] -= 1
Ad = ConstantDomainExpression(self.args[0]).sympy
if len(self.args) > 1:
Ac = ConstantDomainExpression(self.args[1]).sympy
else:
Ac = 0
Ap = (Ac / 2 + Ad)
Am = (Ac / 2 - Ad)
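# The branch constraint row gives
#   V(n1) - V(n2) = Ad * (V(n3) - V(n4)) + Ac * (V(n3) + V(n4)) / 2,
# i.e. differential gain Ad plus common-mode gain Ac.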
if n3 >= 0:
mna._C[m, n3] -= Ap
if n4 >= 0:
mna._C[m, n4] -= Am
def _kill(self):
newopts = self.opts.copy()
newopts.strip_current_labels()
newopts.strip_labels()
return self._netmake_W(opts=newopts)
class CCCS(DependentSource):
"""CCCS"""
need_control_current = True
def _stamp(self, mna):
n1, n2 = mna._cpt_node_indexes(self)
m = mna._branch_index(self.args[0])
F = ConstantDomainExpression(self.args[1]).sympy
if n1 >= 0:
mna._B[n1, m] -= F
if n2 >= 0:
mna._B[n2, m] += F
def _kill(self):
newopts = self.opts.copy()
newopts.strip_voltage_labels()
newopts.strip_labels()
return self._netmake_O(opts=newopts)
class FB(Misc):
"""Ferrite bead"""
pass
class VCCS(DependentSource):
"""VCCS"""
def _stamp(self, mna):
n1, n2, n3, n4 = mna._cpt_node_indexes(self)
G = ConstantDomainExpression(self.args[0]).sympy
if n1 >= 0 and n3 >= 0:
mna._G[n1, n3] -= G
if n1 >= 0 and n4 >= 0:
mna._G[n1, n4] += G
if n2 >= 0 and n3 >= 0:
mna._G[n2, n3] += G
if n2 >= 0 and n4 >= 0:
mna._G[n2, n4] -= G
def _kill(self):
newopts = self.opts.copy()
newopts.strip_voltage_labels()
newopts.strip_labels()
return self._netmake_O(opts=newopts)
class GY(Dummy):
"""Gyrator"""
need_branch_current = True
need_extra_branch_current = True
def _stamp(self, mna):
n1, n2, n3, n4 = mna._cpt_node_indexes(self)
m1 = mna._branch_index(self.defname + 'X')
m2 = mna._cpt_branch_index(self)
# m1 is the input branch
# m2 is the output branch
# GY.I gives the current through the output branch
# Could generalise to have different input and output
# impedances, Z1 and Z2, but if Z1 != Z2 then the device is
# not passive.
# V2 = -I1 Z2 V1 = I2 Z1
# where V2 = V[n1] - V[n2] and V1 = V[n3] - V[n4]
Z1 = ConstantDomainExpression(self.args[0]).sympy
Z2 = Z1
if n1 >= 0:
mna._B[n1, m2] += 1
mna._C[m1, n1] += 1
if n2 >= 0:
mna._B[n2, m2] -= 1
mna._C[m1, n2] -= 1
if n3 >= 0:
mna._B[n3, m1] += 1
mna._C[m2, n3] += 1
if n4 >= 0:
mna._B[n4, m1] -= 1
mna._C[m2, n4] -= 1
mna._D[m1, m1] += Z2
mna._D[m2, m2] -= Z1
class TVtriode(Dummy):
"""Triode"""
need_branch_current = True
need_extra_branch_current = True
def _stamp(self, mna):
n1, n2, n3 = mna._cpt_node_indexes(self)
m1 = mna._branch_index(self.defname + 'X')
m2 = mna._cpt_branch_index(self)
# m1 is the input branch
# m2 is the output branch
# TVtriode.I gives the current through the output branch
# Could generalise to have different input and output
# impedances, Z1 and Z2, but if Z1 != Z2 then the device is
# not passive.
# V2 = -I1 Z2 and V1 = I2 Z1
# where V2 = V[n1] - V[n2] and V1 = V[n3]
Z1 = ConstantDomainExpression(self.args[0]).sympy
Z2 = Z1
if n1 >= 0:
mna._B[n1, m2] += 1
mna._C[m1, n1] += 1
if n2 >= 0:
mna._B[n2, m2] -= 1
mna._C[m1, n2] -= 1
if n3 >= 0:
mna._B[n3, m1] += 1
mna._C[m2, n3] += 1
mna._D[m1, m1] += Z2
mna._D[m2, m2] -= Z1
class CCVS(DependentSource):
"""CCVS"""
need_branch_current = True
need_control_current = True
def _stamp(self, mna):
n1, n2 = mna._cpt_node_indexes(self)
m = mna._cpt_branch_index(self)
if n1 >= 0:
mna._B[n1, m] += 1
mna._C[m, n1] += 1
if n2 >= 0:
mna._B[n2, m] -= 1
mna._C[m, n2] -= 1
mc = mna._branch_index(self.args[0])
G = ConstantDomainExpression(self.args[1]).sympy
mna._D[m, mc] -= G
def _kill(self):
newopts = self.opts.copy()
newopts.strip_current_labels()
newopts.strip_labels()
return self._netmake_O(opts=newopts)
class I(IndependentSource):
add_parallel = True
def _select(self, kind=None):
"""Select domain kind for component."""
return self._netmake(args=self.cpt.Isc.netval(kind))
def _kill(self):
newopts = self.opts.copy()
newopts.strip_voltage_labels()
newopts.strip_labels()
return self._netmake_O(opts=newopts)
def _stamp(self, mna):
n1, n2 = mna._cpt_node_indexes(self)
I = self.Isc.sympy
if n1 >= 0:
mna._Is[n1] += I
if n2 >= 0:
mna._Is[n2] -= I
def _ss_model(self):
return self._netmake(args='%s(t)' % self.relname.lower())
def _s_model(self, var):
return self._netmake(args=self.Isc.laplace()(var))
def _pre_initial_model(self):
return self._netmake(args=self.cpt.Isc.pre_initial_value())
class K(Dummy):
def __init__(self, cct, namespace, defname, name, cpt_type, cpt_id, string,
opts_string, nodes, keyword, *args):
self.Lname1 = args[0]
self.Lname2 = args[1]
super(K, self).__init__(cct, namespace, defname, name,
cpt_type, cpt_id, string,
opts_string, nodes, keyword, *args)
def _stamp(self, mna):
from .sym import ssym
if mna.kind == 'dc':
return
if mna.kind in ('t', 'time'):
raise RuntimeError('Should not be evaluating mutual inductance in'
' time domain')
L1 = self.Lname1
L2 = self.Lname2
K = self.cpt.K
ZL1 = mna.cct.elements[L1].Z.sympy
ZL2 = mna.cct.elements[L2].Z.sympy
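# The mutual impedance is ZM = s * M with M = K * sqrt(L1 * L2);
# the sqrt(ZL1 * ZL2 / s**2) * s form below keeps this exact for
# symbolic s.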
if mna.kind in ('s', 'ivp', 'laplace'):
# FIXME, generalise for other domains...
ZM = K.sympy * sym.sqrt(ZL1 * ZL2 / ssym**2) * ssym
else:
ZM = K.sympy * sym.sqrt(ZL1 * ZL2)
m1 = mna._branch_index(L1)
m2 = mna._branch_index(L2)
mna._D[m1, m2] += -ZM
mna._D[m2, m1] += -ZM
class L(RLC):
need_branch_current = True
reactive = True
add_series = True
def _r_model(self):
dummy_node = self._dummy_node()
opts = self.opts.copy()
# Use Thevenin model. This will require the current through
# the voltage source to be explicitly computed.
Req = 'R%seq' % self.name
Veq = 'V%seq' % self.name
opts.strip_voltage_labels()
rnet = self._netmake_variant('R', suffix='eq',
nodes=(self.relnodes[0], dummy_node),
args=Req, opts=opts)
opts.strip_current_labels()
vnet = self._netmake_variant('V', suffix='eq',
nodes=(dummy_node, self.relnodes[1]),
args=('dc', Veq), opts=opts)
# TODO: the voltage labels should be added across an
# open-circuit object.
return rnet + '\n' + vnet
@property
def I0(self):
"""Initial current (for capacitors only)."""
if self.cct.kind == 'ivp' and self.cpt.has_ic:
return current(self.cpt.i0 / s)
return current(0)
@property
def L(self):
return self.cpt.L
def _kill(self):
"""Kill implicit sources due to initial conditions."""
return self._netmake(args=self.args[0])
def _initialize(self, ic):
"""Change initial condition to ic."""
return self._netmake(args=(self.args[0], ic))
def _stamp(self, mna):
# This formulation adds the inductor current to the unknowns
n1, n2 = mna._cpt_node_indexes(self)
m = mna._cpt_branch_index(self)
if n1 >= 0:
mna._B[n1, m] = 1
mna._C[m, n1] = 1
if n2 >= 0:
mna._B[n2, m] = -1
mna._C[m, n2] = -1
if mna.kind == 'dc':
Z = 0
else:
Z = self.Z.sympy
mna._D[m, m] += -Z
if mna.kind == 'ivp' and self.cpt.has_ic:
V = self.Voc.sympy
mna._Es[m] += V
def _ss_model(self):
# Perhaps mangle name to ensure it does not conflict
# with another current source?
return self._netmake_variant('I_', args='-i_%s(t)' % self.relname)
def _pre_initial_model(self):
return self._netmake_variant('I', args=self.cpt.i0)
class O(Dummy):
"""Open circuit"""
def _stamp(self, mna):
pass
@property
def I(self):
return SuperpositionCurrent(0)
@property
def i(self):
return SuperpositionCurrent(0)(t)
class P(O):
"""Port"""
pass
class R(RC):
add_series = True
def _r_model(self):
return self._copy()
class NR(R):
add_series = True
def _r_model(self):
return self._copy()
class RV(RC):
# TODO. Can simulate as series resistors (1 - alpha) R and alpha R.
pass
class SPpp(Dummy):
need_branch_current = True
def _stamp(self, mna):
n1, n2, n3 = mna._cpt_node_indexes(self)
m = mna._cpt_branch_index(self)
if n3 >= 0:
mna._B[n3, m] += 1
mna._C[m, n3] += 1
if n1 >= 0:
mna._C[m, n1] -= 1
if n2 >= 0:
mna._C[m, n2] -= 1
class SPpm(Dummy):
need_branch_current = True
def _stamp(self, mna):
n1, n2, n3 = mna._cpt_node_indexes(self)
m = mna._cpt_branch_index(self)
if n3 >= 0:
mna._B[n3, m] += 1
mna._C[m, n3] += 1
if n1 >= 0:
mna._C[m, n1] -= 1
if n2 >= 0:
mna._C[m, n2] += 1
class SPppp(Dummy):
need_branch_current = True
def _stamp(self, mna):
n1, n2, n3, n4 = mna._cpt_node_indexes(self)
m = mna._cpt_branch_index(self)
if n3 >= 0:
mna._B[n3, m] += 1
mna._C[m, n3] += 1
if n1 >= 0:
mna._C[m, n1] -= 1
if n2 >= 0:
mna._C[m, n2] -= 1
if n4 >= 0:
mna._C[m, n4] -= 1
class SPpmm(Dummy):
need_branch_current = True
def _stamp(self, mna):
n1, n2, n3, n4 = mna._cpt_node_indexes(self)
m = mna._cpt_branch_index(self)
if n3 >= 0:
mna._B[n3, m] += 1
mna._C[m, n3] += 1
if n1 >= 0:
mna._C[m, n1] -= 1
if n2 >= 0:
mna._C[m, n2] += 1
if n4 >= 0:
mna._C[m, n4] += 1
class SPppm(Dummy):
need_branch_current = True
def _stamp(self, mna):
n1, n2, n3, n4 = mna._cpt_node_indexes(self)
m = mna._cpt_branch_index(self)
if n3 >= 0:
mna._B[n3, m] += 1
mna._C[m, n3] += 1
if n1 >= 0:
mna._C[m, n1] -= 1
if n2 >= 0:
mna._C[m, n2] -= 1
if n4 >= 0:
mna._C[m, n4] += 1
class TF(Cpt):
"""Transformer"""
need_branch_current = True
is_transformer = True
def _stamp(self, mna):
n1, n2, n3, n4 = mna._cpt_node_indexes(self)
m = mna._cpt_branch_index(self)
if n1 >= 0:
mna._B[n1, m] += 1
mna._C[m, n1] += 1
if n2 >= 0:
mna._B[n2, m] -= 1
mna._C[m, n2] -= 1
# Voltage gain = 1 / a where a = N_1 / N_2
# is the turns-ratio.
T = self.cpt.alpha.sympy
if n3 >= 0:
mna._B[n3, m] -= T
mna._C[m, n3] -= T
if n4 >= 0:
mna._B[n4, m] += T
mna._C[m, n4] += T
class TFtap(Cpt):
"""Tapped transformer"""
def _stamp(self, mna):
raise NotImplementedError('Cannot analyse tapped transformer %s' % self)
class TL(Cpt):
"""Transmission line"""
reactive = True
need_branch_current = True
def _stamp(self, mna):
if mna.kind != 's':
raise ValueError('Only Laplace domain currently supported for TL')
cpt = self.cpt
m = mna._cpt_branch_index(self)
n4, n3, n2, n1 = mna._cpt_node_indexes(self)
# TODO, tweak values if doing phasor analysis
A11 = cpt.A11.sympy
A12 = cpt.A12.sympy
A21 = cpt.A21.sympy
A22 = cpt.A22.sympy
# This stamp is the same as an A twoport.
if n1 >= 0:
if n3 >= 0:
mna._G[n1, n3] += A21
if n4 >= 0:
mna._G[n1, n4] -= A21
mna._B[n1, m] += A22
if n2 >= 0:
if n3 >= 0:
mna._G[n2, n3] -= A21
if n4 >= 0:
mna._G[n2, n4] += A21
mna._B[n2, m] -= A22
if n3 >= 0:
mna._B[n3, m] -= 1
if n4 >= 0:
mna._B[n4, m] += 1
if n1 >= 0:
mna._C[m, n1] -= 1
if n2 >= 0:
mna._C[m, n2] += 1
if n3 >= 0:
mna._C[m, n3] += A11
if n4 >= 0:
mna._C[m, n4] -= A11
mna._D[m, m] += A12
class Cable(Cpt):
"""Cable"""
equipotential_nodes = (('in+', 'out+'), ('in-', 'out-'), ('in', 'out'),
('ignd', 'ognd', 'b', 't'), ('mid', 'out'))
def _stamp(self, mna):
pass
class TP(Misc):
"""Two port"""
# TODO
pass
class TPCpt(Cpt):
pass
class TPA(TPCpt):
"""A-parameter two port"""
need_branch_current = True
def _stamp(self, mna):
cpt = self.cpt
if cpt.V1a != 0 or cpt.I1a != 0:
raise ValueError('Sources not supported yet for %s' % self)
m = mna._cpt_branch_index(self)
n4, n3, n2, n1 = mna._cpt_node_indexes(self)
A11, A12, A21, A22 = cpt.A11.sympy, cpt.A12.sympy, cpt.A21.sympy, cpt.A22.sympy
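# With the chain (ABCD) parameter convention assumed here, the port
# quantities satisfy V1 = A11 V2 + A12 I2' and I1 = A21 V2 + A22 I2',
# where I2' is the port-2 current taken flowing out of the port
# (the exact signs follow lcapy's port conventions).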
if n1 >= 0:
if n3 >= 0:
mna._G[n1, n3] += A21
if n4 >= 0:
mna._G[n1, n4] -= A21
mna._B[n1, m] += A22
if n2 >= 0:
if n3 >= 0:
mna._G[n2, n3] -= A21
if n4 >= 0:
mna._G[n2, n4] += A21
mna._B[n2, m] -= A22
if n3 >= 0:
mna._B[n3, m] -= 1
if n4 >= 0:
mna._B[n4, m] += 1
if n1 >= 0:
mna._C[m, n1] -= 1
if n2 >= 0:
mna._C[m, n2] += 1
if n3 >= 0:
mna._C[m, n3] += A11
if n4 >= 0:
mna._C[m, n4] -= A11
mna._D[m, m] += A12
class TPB(TPA):
"""B-parameter two port"""
def _stamp(self, mna):
if self.cpt.V2b != 0 or self.cpt.I2b != 0:
raise ValueError('Sources not supported yet for %s' % self)
super(TPB, self)._stamp(mna)
class TPG(TPA):
"""G-parameter two port"""
# TODO, create G stamp directly
def _stamp(self, mna):
if self.cpt.I1g != 0 or self.cpt.V2g != 0:
raise ValueError('Sources not supported yet for %s' % self)
super(TPG, self)._stamp(mna)
class TPH(TPA):
"""H-parameter two port"""
# TODO, create H stamp directly
def _stamp(self, mna):
if self.cpt.V1h != 0 or self.cpt.I2h != 0:
raise ValueError('Sources not supported yet for %s' % self)
super(TPH, self)._stamp(mna)
class TPY(TPCpt):
"""Y-parameter two port"""
def _stamp(self, mna):
cpt = self.cpt
if cpt.I1y != 0 or cpt.I2y != 0:
raise ValueError('Sources not supported yet for %s' % self)
n3, n4, n1, n2 = mna._cpt_node_indexes(self)
Y11, Y12, Y21, Y22 = cpt.Y11.sympy, cpt.Y12.sympy, cpt.Y21.sympy, cpt.Y22.sympy
if n1 >= 0:
mna._G[n1, n1] += Y11
if n2 >= 0:
mna._G[n1, n2] -= Y11
if n3 >= 0:
mna._G[n1, n3] += Y12
if n4 >= 0:
mna._G[n1, n4] -= Y12
if n2 >= 0:
if n1 >= 0:
mna._G[n2, n1] -= Y11
mna._G[n2, n2] += Y11
if n3 >= 0:
mna._G[n2, n3] -= Y12
if n4 >= 0:
mna._G[n2, n4] += Y12
if n3 >= 0:
if n1 >= 0:
mna._G[n3, n1] += Y21
if n2 >= 0:
mna._G[n3, n2] -= Y21
mna._G[n3, n3] += Y22
if n4 >= 0:
mna._G[n3, n4] -= Y22
if n4 >= 0:
if n1 >= 0:
mna._G[n4, n1] -= Y21
if n2 >= 0:
mna._G[n4, n2] += Y21
if n3 >= 0:
mna._G[n4, n3] -= Y22
mna._G[n4, n4] += Y22
class TPZ(TPY):
"""Z-parameter two port"""
# TODO, create Z stamp directly
def _stamp(self, mna):
if self.cpt.V1z != 0 or self.cpt.V2z != 0:
raise ValueError('Sources not supported yet for %s' % self)
super(TPZ, self)._stamp(mna)
class TR(Dummy):
"""Transfer function. This is equivalent to a VCVS with the input and
output referenced to node 0."""
need_branch_current = True
def _stamp(self, mna):
n1, n2 = mna._cpt_node_indexes(self)
m = mna._cpt_branch_index(self)
if n2 >= 0:
mna._B[n2, m] += 1
mna._C[m, n2] += 1
A = ConstantDomainExpression(self.args[0]).sympy
if n1 >= 0:
mna._C[m, n1] -= A
class V(IndependentSource):
need_branch_current = True
flip_branch_current = True
add_series = True
def _select(self, kind=None):
"""Select domain kind for component."""
return self._netmake(args=self.cpt.Voc.netval(kind))
def _kill(self):
newopts = self.opts.copy()
newopts.strip_current_labels()
newopts.strip_labels()
return self._netmake_W(opts=newopts)
def _stamp(self, mna):
n1, n2 = mna._cpt_node_indexes(self)
m = mna._cpt_branch_index(self)
if n1 >= 0:
mna._B[n1, m] += 1
mna._C[m, n1] += 1
if n2 >= 0:
mna._B[n2, m] -= 1
mna._C[m, n2] -= 1
V = self.Voc.sympy
mna._Es[m] += V
def _ss_model(self):
return self._netmake(args='%s(t)' % self.relname.lower())
def _s_model(self, var):
return self._netmake(args=self.cpt.Voc.laplace()(var))
def _pre_initial_model(self):
return self._netmake(args=self.cpt.Voc.pre_initial_value())
class W(Dummy):
"""Wire"""
def _stamp(self, mna):
pass
@property
def I(self):
raise ValueError('Cannot determine current through wire, use a 0 V voltage source')
@property
def i(self):
raise ValueError('Cannot determine current through wire, use a 0 V voltage source')
class XT(Misc):
"""Crystal"""
reactive = True
class Y(RC):
"""Admittance"""
reactive = True
add_parallel = True
class Z(RC):
"""Impedance"""
reactive = True
add_series = True
classes = {}
def defcpt(name, base, docstring):
if isinstance(base, str):
base = classes[base]
newclass = type(name, (base, ), {'__doc__': docstring})
classes[name] = newclass
def make(classname, parent, name, cpt_type, cpt_id,
string, opts_string, nodes, *args):
# Create instance of component object
newclass = classes[classname]
cpt = newclass(parent, name, cpt_type, cpt_id, string, opts_string,
nodes, *args)
return cpt
# Dynamically create classes.
defcpt('A', Misc, 'Annotation')
defcpt('ADC', Misc, 'ADC')
defcpt('AM', W, 'Ammeter')
defcpt('BAT', V, 'Battery')
defcpt('D', NonLinear, 'Diode')
defcpt('DAC', Misc, 'DAC')
defcpt('Dled', 'D', 'LED')
defcpt('Dphoto', 'D', 'Photo diode')
defcpt('Dschottky', 'D', 'Schottky diode')
defcpt('Dtunnel', 'D', 'Tunnel diode')
defcpt('Dzener', 'D', 'Zener diode')
defcpt('E', VCVS, 'VCVS')
defcpt('Eopamp', VCVS, 'Opamp')
defcpt('Efdopamp', VCVS, 'Fully differential opamp')
defcpt('Eamp', VCVS, 'Amplifier')
defcpt('F', CCCS, 'CCCS')
defcpt('FS', Misc, 'Fuse')
defcpt('G', VCCS, 'VCCS')
defcpt('H', CCVS, 'CCVS')
defcpt('sI', I, 's-domain current source')
defcpt('Isin', I, 'Sinusoidal current source')
defcpt('Idc', I, 'DC current source')
defcpt('Istep', I, 'Step current source')
defcpt('Iac', I, 'AC current source')
defcpt('Inoise', I, 'Noise current source')
defcpt('J', NonLinear, 'N JFET transistor')
defcpt('Jnjf', 'J', 'N JFET transistor')
defcpt('Jpjf', 'J', 'P JFET transistor')
defcpt('M', NonLinear, 'N MOSFET transistor')
defcpt('Mnmos', 'M', 'N channel MOSFET transistor')
defcpt('Mpmos', 'M', 'P channel MOSFET transistor')
defcpt('MISC', Misc, 'Generic circuitikz bipole')
defcpt('MT', Misc, 'Motor')
defcpt('MX', Misc, 'Mixer')
defcpt('Q', NonLinear, 'NPN transistor')
defcpt('Qpnp', 'Q', 'PNP transistor')
defcpt('Qnpn', 'Q', 'NPN transistor')
defcpt('Sbox', Misc, 'Box shape')
defcpt('Scircle', Misc, 'Circle shape')
defcpt('Sellipse', Misc, 'Ellipse shape')
defcpt('Striangle', Misc, 'Triangle shape')
defcpt('SW', TimeVarying, 'Switch')
defcpt('SWno', 'SW', 'Normally open switch')
defcpt('SWnc', 'SW', 'Normally closed switch')
defcpt('SWpush', 'SW', 'Pushbutton switch')
defcpt('SWspdt', 'SW', 'SPDT switch')
defcpt('TFcore', TF, 'Transformer with core')
defcpt('TFtapcore', TFtap, 'Tapped transformer with core')
defcpt('TLlossless', TL, 'Lossless transmission line')
defcpt('Ubuffer', Logic, 'Buffer')
defcpt('Upbuffer', Logic, 'Buffer with power supplies')
defcpt('Uinverter', Logic, 'Inverter')
defcpt('Upinverter', Logic, 'Inverter with power supplies')
defcpt('Uinamp', Misc, 'Instrumentation amplifier')
defcpt('Uisoamp', Misc, 'Isolated amplifier')
defcpt('Udiffamp', Misc, 'Differential amplifier')
defcpt('Udiffdriver', Misc, 'Differential driver')
defcpt('Ufdopamp', Misc, 'Fully differential opamp')
defcpt('Uopamp', Misc, 'Opamp')
defcpt('Uregulator', Misc, 'Regulator')
defcpt('Uadc', Misc, 'ADC')
defcpt('Udac', Misc, 'DAC')
defcpt('Ubox', Misc, 'Box')
defcpt('Ucircle', Misc, 'Circle')
defcpt('Ubox4', Misc, 'Box')
defcpt('Ubox12', Misc, 'Box')
defcpt('Ucircle4', Misc, 'Circle')
defcpt('Uchip1313', Logic, 'General purpose chip')
defcpt('Uchip2121', Logic, 'General purpose chip')
defcpt('Uchip2222', Logic, 'General purpose chip')
defcpt('Uchip3131', Logic, 'General purpose chip')
defcpt('Uchip3333', Logic, 'General purpose chip')
defcpt('Uchip4141', Logic, 'General purpose chip')
defcpt('Uchip4444', Logic, 'General purpose chip')
defcpt('Uchip8181', Logic, 'General purpose chip')
defcpt('Uchip8888', Logic, 'General purpose chip')
defcpt('Umux21', Logic, '2-1 multiplexer')
defcpt('Umux41', Logic, '4-1 multiplexer')
defcpt('Umux42', Logic, '4-2 multiplexer')
defcpt('Udff', Misc, 'D flip-flop')
defcpt('Ujkff', Misc, 'JK flip-flop')
defcpt('Urslatch', Misc, 'RS latch')
defcpt('sV', V, 's-domain voltage source')
defcpt('Vsin', V, 'Sinusoidal voltage source')
defcpt('Vdc', V, 'DC voltage source')
defcpt('Vstep', V, 'Step voltage source')
defcpt('Vac', V, 'AC voltage source')
defcpt('Vnoise', V, 'Noise voltage source')
defcpt('VM', O, 'Voltmeter')
# Let's choose mechanical analogue II (the impedance analogue) where
# force is equivalent to voltage and velocity is equivalent to
# current. With this analogy, parallel and series connections are swapped.
defcpt('m', L, 'Mass')
defcpt('k', C, 'Spring')
defcpt('r', R, 'Damper')
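# For example (an illustrative sketch, not taken from the library), a
# step force driving a mass restrained by a spring could be netlisted
# with the force source, mass and spring all in series:
#   V1 1 0 step; force source
#   m1 1 2; mass
#   k1 2 0; spring
# The common branch current represents the common velocity, and the
# voltage drops represent the element forces that sum to the applied
# force.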
# Append classes defined in this module but not imported.
clsmembers = inspect.getmembers(module, lambda member: inspect.isclass(member) and member.__module__ == __name__)
for name, cls in clsmembers:
classes[name] = cls
|
mph-/lcapy
|
lcapy/mnacpts.py
|
Python
|
lgpl-2.1
| 52,253
|
[
"CRYSTAL"
] |
c30b2539483228a399630644726eb0f0f0458201d6c4e51e3218ff4b34249b66
|
#!/usr/bin/env python
# Python script to iterate through the bowtie2 read mapping log to determine the proportion of aligned reads
import sys
import subprocess
import re
import argparse
import os
## Define functions
# Define a function to deal with the bowtie logs
def bowtie_log(bowlog):
# Establish the list first
toprintline = []
for l in bowlog:
l = l.strip()
# Retrieve the sample name
if l.startswith('Starting mapping for sample'):
# This is the first line of information, so append the data
## from the previous iteration, if present
if toprintline:
toprint.append(toprintline)
toprintline = []
tmp = l.split()
sample = tmp[4].strip("_:")
toprintline.append(sample)
# Retrieve the number of reads
if re.search('reads; of these', l):
tmp = l.split()
reads = tmp[0]
toprintline.append(reads)
# Retrieve the number of reads that aligned zero times
if re.search('aligned 0 times', l):
tmp = l.split()
reads = tmp[0]
toprintline.append(reads)
# Retrieve the number of reads that aligned once
if re.search("aligned exactly 1 time", l):
tmp = l.split()
reads = tmp[0]
toprintline.append(reads)
# Retrieve the number of reads that aligned more than once
if re.search("aligned >1 time", l):
tmp = l.split()
reads = tmp[0]
toprintline.append(reads)
# After the loop, append the data from the final sample (otherwise
# the last sample in the log would be dropped)
if toprintline:
toprint.append(toprintline)
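# A typical bowtie2 summary block matched by the regexes above looks
# like this (values illustrative):
#   20000 reads; of these:
#     20000 (100.00%) were unpaired; of these:
#       1247 (6.24%) aligned 0 times
#       18739 (93.69%) aligned exactly 1 time
#       14 (0.07%) aligned >1 times
#   100.00% overall alignment rate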
# Argument parser
parser = argparse.ArgumentParser()
# Add arguments
# Input options: either a single log file or a directory of log files
inputgroup = parser.add_mutually_exclusive_group()
inputgroup.add_argument('-i',
'--log-file',
metavar = 'LOG',
help = 'The read mapping log file (this is the stderr from the run_read_mapping.sh script)',
required = False)
inputgroup.add_argument('-d',
'--log_directory',
metavar = 'DIR',
help = 'Head directory containing the logs from the run_read_mapping.sh script',
required = False)
parser.add_argument('-o',
'--output',
metavar = 'OUTFILE',
help = 'Output filename (including extension)',
required = True)
parser.add_argument('-g',
'--gbarleys_directory',
metavar = 'GDIR',
help = 'Complete filepath to the GBarleyS directory',
required = True)
parser.add_argument('-p',
'--output_PDF',
help = 'Optional flag to print a PDF of the graphs from the log file (requires that accompanying R script be present in the GBarleyS directory)',
action = 'store_true',
required = False)
# Parse the arguments
args = parser.parse_args()
# Get the output filename
filename = args.output
# Dealing with new data from the log files
toprintmap = [] # List of file data lists
# Add a header
toprintmap.append(['Filename', 'Total.reads', 'Reads.not.aligned', 'Reads.once.aligned', 'Reads.multiple.aligned'])
# Run the following if the input is a directory
if args.log_directory:
# Collect the dirname of the directory containing the log files
dirname = os.path.dirname(args.log_directory)
# Create a list of directory files to work with
filelist = []
# Listing roots, subdirs, and files in the head directory containing the log files
for root, subdir, files in os.walk(args.log_directory):
filelist.append([root, subdir, files])
newfilelist = []
# Iterate through filelist to assemble filepaths
for item in filelist:
root = item[0]
subdir = item[1]
files = item[2]
# Skip if files is empty
if not files:
continue
else:
for f in files:
newfilelist.append('/'.join([root, f]))
# For each file in the list, parse it
for f in newfilelist:
# Open the file
with open(f, 'r') as infile:
# Run the function
toprint = []
bowtie_log(infile)
# Append each line to the master list
for line in toprint:
toprintmap.append(line)
# If just a file is given, just use the file
elif args.log_file:
newfilelist = args.log_file
with open(newfilelist, 'r') as infile:
# Run the function
toprint = []
bowtie_log(infile)
# toprint is a list of rows, so extend rather than append
toprintmap.extend(toprint)
else:
print "No input was given. Stopping."
sys.exit(1)
# Open a file to print
handle = open(filename, 'w')
# Print to the file
for line in toprintmap:
handle.write('\t'.join(line) + '\n')
# Close the file
handle.close()
# Set the GBarleyS directory so the graphing function script can be found
graph_function = args.gbarleys_directory + '/Pipeline_Scripts/.ancillary/pipeline_graphing_functions.R'
if args.output_PDF:
# Notify the user
print "Sending the data to R and outputting a PDF."
# Set the command for shell
cmd = ['/soft/R/3.1.1/bin/Rscript', graph_function, filename, 'readmap']
p = subprocess.Popen(cmd)
p.wait()
|
neyhartj/GBarleyS
|
Pipeline_Scripts/read_mapping_stats_parser.py
|
Python
|
gpl-3.0
| 5,425
|
[
"Bowtie"
] |
fd2ae098930b0c96de69c20d667a74917c5fae26f4ca6212d91cce6355a86073
|
#!/usr/local/bin/env python
"""
Test storageinterface.py facility.
The tests are written against the NetCDF storage handler (the default)
for its asserts. Testing the storage handlers themselves is left to the
test_storage_iodrivers.py file.
"""
# =============================================================================================
# GLOBAL IMPORTS
# =============================================================================================
import numpy as np
try:
from openmm import unit
except ImportError: # OpenMM < 7.6
from simtk import unit
from openmmtools.utils import temporary_directory
from nose import tools
from openmmtools.storage import StorageInterface, NetCDFIODriver
# =============================================================================================
# TEST HELPER FUNCTIONS
# =============================================================================================
def spawn_driver(path):
"""Create a driver that is used to test the StorageInterface class at path location"""
return NetCDFIODriver(path)
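# A minimal usage sketch (paths and names illustrative): variables and
# nested groups are created lazily through attribute access, e.g.
#     si = StorageInterface(spawn_driver('example.nc'))
#     si.group.value.write(4)       # creates /group/value on first write
#     assert si.group.value.read() == 4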
# =============================================================================================
# STORAGE INTERFACE TESTING FUNCTIONS
# =============================================================================================
def test_storage_interface_creation():
"""Test that the storage interface can create a top level file and read from it"""
with temporary_directory() as tmp_dir:
test_store = tmp_dir + '/teststore.nc'
driver = spawn_driver(test_store)
si = StorageInterface(driver)
si.add_metadata('name', 'data')
assert si.storage_driver.ncfile.getncattr('name') == 'data'
@tools.raises(Exception)
def test_read_trap():
"""Test that attempting to read a non-existent file fails"""
with temporary_directory() as tmp_dir:
test_store = tmp_dir + '/teststore.nc'
driver = spawn_driver(test_store)
si = StorageInterface(driver)
si.var1.read()
def test_variable_write_read():
"""Test that a variable can be create and written to file"""
with temporary_directory() as tmp_dir:
test_store = tmp_dir + '/teststore.nc'
driver = spawn_driver(test_store)
si = StorageInterface(driver)
input_data = 4
si.four.write(input_data)
output_data = si.four.read()
assert output_data == input_data
def test_variable_append_read():
"""Test that a variable can be create and written to file"""
with temporary_directory() as tmp_dir:
test_store = tmp_dir + '/teststore.nc'
driver = spawn_driver(test_store)
si = StorageInterface(driver)
input_data = np.eye(3) * 4.0
si.four.append(input_data)
si.four.append(input_data)
output_data = si.four.read()
assert np.all(output_data[0] == input_data)
assert np.all(output_data[1] == input_data)
def test_at_index_write():
"""Test that writing at a specific index of appended data works"""
with temporary_directory() as tmp_dir:
test_store = tmp_dir + '/teststore.nc'
driver = spawn_driver(test_store)
si = StorageInterface(driver)
input_data = 4
overwrite_data = 5
for i in range(3):
si.four.append(input_data)
si.four.write(overwrite_data, at_index=1) # Sacrilege, I know -LNN
output_data = si.four.read()
assert np.all(output_data[0] == input_data)
assert np.all(output_data[2] == input_data)
assert np.all(output_data[1] == overwrite_data)
def test_unbound_read():
"""Test that a variable can read from the file without previous binding"""
with temporary_directory() as tmp_dir:
test_store = tmp_dir + '/teststore.nc'
driver = spawn_driver(test_store)
si = StorageInterface(driver)
input_data = 4*unit.kelvin
si.four.write(input_data)
si.storage_driver.close()
del si
driver = spawn_driver(test_store)
si = StorageInterface(driver)
output_data = si.four.read()
assert input_data == output_data
def test_directory_creation():
"""Test that automatic directory-like objects are created on the fly"""
with temporary_directory() as tmp_dir:
test_store = tmp_dir + '/teststore.nc'
driver = spawn_driver(test_store)
si = StorageInterface(driver)
input_data = 'four'
si.dir0.dir1.dir2.var.write(input_data)
ncfile = si.storage_driver.ncfile
target = ncfile
for i in range(3):
my_dir = 'dir{}'.format(i)
assert my_dir in target.groups
target = target.groups[my_dir]
si.storage_driver.close()
del si
driver = spawn_driver(test_store)
si = StorageInterface(driver)
target = si
for i in range(3):
my_dir = 'dir{}'.format(i)
target = getattr(target, my_dir)
assert target.var.read() == input_data
def test_multi_variable_creation():
"""Test that multiple variables can be created in a single directory structure"""
with temporary_directory() as tmp_dir:
test_store = tmp_dir + '/teststore.nc'
driver = spawn_driver(test_store)
si = StorageInterface(driver)
input_data = [4.0, 4.0, 4.0]
si.dir0.var0.write(input_data)
si.dir0.var1.append(input_data)
si.dir0.var1.append(input_data)
si.storage_driver.close()
del si, driver
driver = spawn_driver(test_store)
si = StorageInterface(driver)
assert si.dir0.var0.read() == input_data
app_data = si.dir0.var1.read()
assert app_data[0] == input_data
assert app_data[1] == input_data
def test_metadata_creation():
"""Test that metadata can be added to variables and directories"""
with temporary_directory() as tmp_dir:
test_store = tmp_dir + '/teststore.nc'
driver = spawn_driver(test_store)
si = StorageInterface(driver)
input_data = 4
si.dir0.var1.write(input_data)
si.dir0.add_metadata('AmIAGroup', 'yes')
si.dir0.var1.add_metadata('AmIAGroup', 'no')
dir0 = si.storage_driver.ncfile.groups['dir0']
var1 = dir0.variables['var1']
assert dir0.getncattr('AmIAGroup') == 'yes'
assert var1.getncattr('AmIAGroup') == 'no'
|
choderalab/openmmtools
|
openmmtools/tests/test_storage_interface.py
|
Python
|
mit
| 6,466
|
[
"NetCDF",
"OpenMM"
] |
1eb7fd7e54df72315f8df4ad8f28d7c81a344056fffc1b1ad56c8c994d30878e
|
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Oliver Backhouse <olbackhouse@gmail.com>
# George Booth <george.booth@kcl.ac.uk>
#
'''
Auxiliary second-order Green's function perturbation theory
============================================================
The AGF2 method permits the computation of quasiparticle excitations and
ground-state properties at the AGF2(None,0) level.
When using results of this code for publications, please cite the following papers:
"Wave function perspective and efficient truncation of renormalized second-order perturbation theory", O. J. Backhouse, M. Nusspickel and G. H. Booth, J. Chem. Theory Comput., 16, 1090 (2020).
"Efficient excitations and spectra within a perturbative renormalization approach", O. J. Backhouse and G. H. Booth, J. Chem. Theory Comput., 16, 6294 (2020).
Simple usage::
>>> from pyscf import gto, scf, agf2
>>> mol = gto.M(atom='H 0 0 0; H 0 0 1')
>>> mf = scf.RHF(mol).run()
>>> gf2 = agf2.AGF2(mf).run()
>>> gf2.ipagf2()
:func:`agf2.AGF2` returns an instance of the AGF2 class. Valid parameters
to control the AGF2 method are:
verbose : int
Print level. Default value equals to :class:`Mole.verbose`
max_memory : float or int
Allowed memory in MB. Default value equals to :class:`Mole.max_memory`
conv_tol : float
Convergence threshold for AGF2 energy. Default value is 1e-7
conv_tol_rdm1 : float
Convergence threshold for first-order reduced density matrix.
Default value is 1e-6.
conv_tol_nelec : float
Convergence threshold for the number of electrons. Default
value is 1e-6.
max_cycle : int
Maximum number of AGF2 iterations. Default value is 50.
max_cycle_outer : int
Maximum number of outer Fock loop iterations. Default
value is 20.
max_cycle_inner : int
Maximum number of inner Fock loop iterations. Default
value is 50.
weight_tol : float
Threshold in spectral weight of auxiliaries to be considered
zero. Default 1e-11.
diis_space : int
DIIS space size for Fock loop iterations. Default value is 6.
diis_min_space : int
Minimum space of DIIS. Default value is 1.
Saved results
e_corr : float
AGF2 correlation energy
e_tot : float
Total energy (HF + correlation)
e_1b : float
One-body part of :attr:`e_tot`
e_2b : float
Two-body part of :attr:`e_tot`
e_init : float
Initial correlation energy (truncated MP2)
converged : bool
Whether convergence was successful
se : SelfEnergy
Auxiliaries of the self-energy
gf : GreensFunction
Auxiliaries of the Green's function
'''
from pyscf import scf, lib
from pyscf.agf2 import aux_space, ragf2, uagf2, dfragf2, dfuagf2, ragf2_slow, uagf2_slow
from pyscf.agf2.aux_space import AuxiliarySpace, GreensFunction, SelfEnergy
# Backwards compatibility:
aux = aux_space
def AGF2(mf, nmom=(None,0), frozen=None, mo_energy=None, mo_coeff=None, mo_occ=None):
if isinstance(mf, scf.uhf.UHF):
return UAGF2(mf, nmom, frozen, mo_energy, mo_coeff, mo_occ)
elif isinstance(mf, scf.rohf.ROHF):
lib.logger.warn(mf, 'RAGF2 method does not support ROHF reference. '
'Converting to UHF and using UAGF2.')
mf = scf.addons.convert_to_uhf(mf)
return UAGF2(mf, nmom, frozen, mo_energy, mo_coeff, mo_occ)
elif isinstance(mf, scf.rhf.RHF):
return RAGF2(mf, nmom, frozen, mo_energy, mo_coeff, mo_occ)
else:
raise RuntimeError('AGF2 code only supports RHF, ROHF and UHF references')
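# For example (illustrative), an ROHF reference is converted on the fly:
#
#     >>> mf = scf.ROHF(mol).run()
#     >>> gf2 = AGF2(mf)  # warns, converts to UHF, returns a UAGF2 object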
AGF2.__doc__ = ragf2.RAGF2.__doc__
def RAGF2(mf, nmom=(None,0), frozen=None, mo_energy=None, mo_coeff=None, mo_occ=None):
if nmom != (None,0): # redundant
if nmom[1] == 0 and nmom[0] != 0:
nmom = (None,0)
if nmom != (None,0) and getattr(mf, 'with_df', None) is not None:
raise RuntimeError('AGF2 with custom moment orders does not '
'support density fitting.')
elif nmom != (None,0):
lib.logger.warn(mf, 'AGF2 called with custom moment orders - '
'falling back on _slow implementations.')
return ragf2_slow.RAGF2(mf, nmom, frozen, mo_energy, mo_coeff, mo_occ)
elif getattr(mf, 'with_df', None) is not None:
return dfragf2.DFRAGF2(mf, frozen, mo_energy, mo_coeff, mo_occ)
else:
return ragf2.RAGF2(mf, frozen, mo_energy, mo_coeff, mo_occ)
RAGF2.__doc__ = ragf2.RAGF2.__doc__
def UAGF2(mf, nmom=(None,0), frozen=None, mo_energy=None, mo_coeff=None, mo_occ=None):
if nmom != (None,0): # redundant
if nmom[1] == 0 and nmom[0] != 0:
nmom = (None,0)
if nmom != (None,0) and getattr(mf, 'with_df', None) is not None:
raise RuntimeError('AGF2 with custom moment orders does not '
'support density fitting.')
elif nmom != (None,0):
lib.logger.warn(mf, 'AGF2 called with custom moment orders - '
'falling back on _slow implementations.')
return uagf2_slow.UAGF2(mf, nmom, frozen, mo_energy, mo_coeff, mo_occ)
elif getattr(mf, 'with_df', None) is not None:
return dfuagf2.DFUAGF2(mf, frozen, mo_energy, mo_coeff, mo_occ)
else:
return uagf2.UAGF2(mf, frozen, mo_energy, mo_coeff, mo_occ)
UAGF2.__doc__ = uagf2.UAGF2.__doc__
|
sunqm/pyscf
|
pyscf/agf2/__init__.py
|
Python
|
apache-2.0
| 6,029
|
[
"PySCF"
] |
1b421d2fc38d589c87ecf79201495610b3b493fe47cb6d83b5fc33b0a1b266fd
|
#!/usr/bin/env python
import numpy as np
def tran_op(op, tmat):
"""
transform quantum operator from representation A to
another representation B
Args:
op: the matrix form of operator in representation A
tmat: the unitary transform matrix
"""
return np.dot(np.dot(np.conj(np.transpose(tmat)), op), tmat)
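# A minimal usage sketch (names illustrative): given an operator op_c in
# the complex spherical harmonics basis of a d shell,
#     op_r = tran_op(op_c, tmat_c2r('d'))
# returns the same operator in the real spherical harmonics basis, since
# tran_op evaluates tmat^dagger . op . tmat.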
def tmat_c2r(case, ispin=False):
"""
the transform matrix from complex spherical harmonics to
real spherical harmonics
Args:
case: label for different systems
ispin: whether to include spin or not
"""
sqrt2 = np.sqrt(2.0)
ci = np.complex128(0.0+1.0j)
cone = np.complex128(1.0+0.0j)
if case.strip() == 's':
nband = 1
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
t_c2r[0,0] = cone
elif case.strip() == 'p':
nband = 3
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
# px=1/sqrt(2)( |1,-1> - |1,1> )
t_c2r[0,0] = cone/sqrt2
t_c2r[2,0] = -cone/sqrt2
# py=i/sqrt(2)( |1,-1> + |1,1> )
t_c2r[0,1] = ci/sqrt2
t_c2r[2,1] = ci/sqrt2
# pz=|1,0>
t_c2r[1,2] = cone
elif case.strip() == 'pwien': # in wien by default px py pz
nband = 3
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
# px=1/sqrt(2)( |1,-1> - |1,1> )
t_c2r[0,0] = cone/sqrt2
t_c2r[2,0] = -cone/sqrt2
# py=i/sqrt(2)( |1,-1> + |1,1> )
t_c2r[0,1] = ci/sqrt2
t_c2r[2,1] = ci/sqrt2
# pz=|1,0>
t_c2r[1,2] = cone
elif case.strip() == 'pwann': # p in wannier basis vasp
nband = 3
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
t_r2w = np.zeros((nband, nband), dtype=np.complex128)
t_c2w = np.zeros((nband, nband), dtype=np.complex128)
# px=1/sqrt(2)( |1,-1> - |1,1> )
t_c2r[0,0] = cone/sqrt2
t_c2r[2,0] = -cone/sqrt2
# py=i/sqrt(2)( |1,-1> + |1,1> )
t_c2r[0,1] = ci/sqrt2
t_c2r[2,1] = ci/sqrt2
# pz=|1,0>
t_c2r[1,2] = cone
# pz = (px,py,pz) (0,0,1)^T
t_r2w[2,0] = cone
# px = (px,py,pz) (1,0,0)^T
t_r2w[0,1] = cone
# py = (px,py,pz) (0,1,0)^T
t_r2w[1,2] = cone
t_c2w = np.dot(t_c2r,t_r2w)
t_c2r = t_c2w
elif case.strip() == 't2g':
nband = 3
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
# dzx --> py=i/sqrt(2)( |1,-1> + |1,1> )
t_c2r[0,0] = ci/sqrt2
t_c2r[2,0] = ci/sqrt2
# dzy --> px=1/sqrt(2)( |1,-1> - |1,1> )
t_c2r[0,1] = cone/sqrt2
t_c2r[2,1] = -cone/sqrt2
# dxy --> pz=|1,0>
t_c2r[1,2] = cone
elif case.strip() == 'd':
nband = 5
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
# dz2=|2,0>
t_c2r[2,0] = cone
# dzx=1/sqrt(2)( |2,-1> - |2,1> )
t_c2r[1,1] = cone/sqrt2
t_c2r[3,1] = -cone/sqrt2
# dzy=i/sqrt(2)( |2,-1> + |2,1> )
t_c2r[1,2] = ci/sqrt2
t_c2r[3,2] = ci/sqrt2
# dx2-y2=1/sqrt(2)( |2,-2> + |2,2> )
t_c2r[0,3] = cone/sqrt2
t_c2r[4,3] = cone/sqrt2
# dxy=i/sqrt(2)( |2,-2> - |2,2> )
t_c2r[0,4] = ci/sqrt2
t_c2r[4,4] = -ci/sqrt2
elif case.strip() == 'dwien': # by default wien2k: dxy dzx dyz dx2-y2 dz2
nband = 5
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
# dz2=|2,4>
t_c2r[2,4] = cone
# dzx=1/sqrt(2)( |2,-1> - |2,1> )
t_c2r[1,1] = cone/sqrt2
t_c2r[3,1] = -cone/sqrt2
# dzy=i/sqrt(2)( |2,-1> + |2,1> )
t_c2r[1,2] = ci/sqrt2
t_c2r[3,2] = ci/sqrt2
# dx2-y2=1/sqrt(2)( |2,-2> + |2,2> )
t_c2r[0,3] = cone/sqrt2
t_c2r[4,3] = cone/sqrt2
# dxy=i/sqrt(2)( |2,-2> - |2,2> )
t_c2r[0,0] = ci/sqrt2
t_c2r[4,0] = -ci/sqrt2
elif case.strip() == 'f':
nband = 7
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
# fz3 = |3,0>
t_c2r[3, 0] = cone
# fxz2 = 1/sqrt(2)( |3,-1> - |3,1> )
t_c2r[2, 1] = cone/sqrt2
t_c2r[4, 1] = -cone/sqrt2
# fyz2 = i/sqrt(2)( |3,-1> + |3,1> )
t_c2r[2, 2] = ci/sqrt2
t_c2r[4, 2] = ci/sqrt2
# fz(x2-y2) = 1/sqrt(2)( |3,-2> + |3,2> )
t_c2r[1, 3] = cone/sqrt2
t_c2r[5, 3] = cone/sqrt2
# fxyz = i/sqrt(2)( |3,-2> - |3,2> )
t_c2r[1, 4] = ci/sqrt2
t_c2r[5, 4] = -ci/sqrt2
# fx(x2-3y2) = 1/sqrt(2) ( |3,-3> - |3,3> )
t_c2r[0, 5] = cone/sqrt2
t_c2r[6, 5] = -cone/sqrt2
# fy(3x2-y2) = i/sqrt(2) ( |3,-3> + |3,3> )
t_c2r[0, 6] = ci/sqrt2
t_c2r[6, 6] = ci/sqrt2
elif case.strip() == 'fwien': # fxz2 fyz2 fz3 fx(x2-3y2) fy(3x2-y2) fz(x2-y2) fxyz
nband = 7
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
# fz3 = |3,0>
t_c2r[3, 2] = cone
# fxz2 = 1/sqrt(2)( |3,-1> - |3,1> )
t_c2r[2, 0] = cone/sqrt2
t_c2r[4, 0] = -cone/sqrt2
# fyz2 = i/sqrt(2)( |3,-1> + |3,1> )
t_c2r[2, 1] = ci/sqrt2
t_c2r[4, 1] = ci/sqrt2
# fz(x2-y2) = 1/sqrt(2)( |3,-2> + |3,2> )
t_c2r[1, 5] = cone/sqrt2
t_c2r[5, 5] = cone/sqrt2
# fxyz = i/sqrt(2)( |3,-2> - |3,2> )
t_c2r[1, 6] = ci/sqrt2
t_c2r[5, 6] = -ci/sqrt2
# fx(x2-3y2) = 1/sqrt(2) ( |3,-3> - |3,3> )
t_c2r[0, 3] = cone/sqrt2
t_c2r[6, 3] = -cone/sqrt2
# fy(3x2-y2) = i/sqrt(2) ( |3,-3> + |3,3> )
t_c2r[0, 4] = ci/sqrt2
t_c2r[6, 4] = ci/sqrt2
else:
print "don't support t_c2r for this case: ", case
return
if ispin:
norbs=2*nband
t_c2r_spin = np.zeros((norbs,norbs), dtype=np.complex128)
t_c2r_spin[0:norbs:2,0:norbs:2] = t_c2r
t_c2r_spin[1:norbs:2,1:norbs:2] = t_c2r
return t_c2r_spin
else:
return t_c2r
def tmat_r2c(case, ispin=False):
"""
the transform matrix from real spherical harmonics to
complex spherical harmonics
Args:
case: label for different systems
ispin: whether to include spin or not
"""
return np.conj(np.transpose(tmat_c2r(case, ispin)))
def tmat_r2cub(ispin=False):
"""
the transform matrix from real spherical harmonics to the cubic
spherical harmonics, just for f system
Args:
ispin: whether to include spin or not
"""
a = np.sqrt(10.0) / 4.0 + 0.0j
b = np.sqrt(6.0) / 4.0 + 0.0j
c = 1.0 + 0.0j
nband = 7
t_r2cub = np.zeros((nband,nband), dtype=np.complex128)
# fx3 = -sqrt(6)/4 fxz2 + sqrt(10)/4 fx(x2-3y2)
t_r2cub[1, 0] = -b
t_r2cub[5, 0] = a
# fy3 = -sqrt(6)/4 fyz2 - sqrt(10)/4 fy(3x2-y2)
t_r2cub[2, 1] = -b
t_r2cub[6, 1] = -a
# fz3 = fz3
t_r2cub[0, 2] = c
# fx(y2-z2) = -sqrt(10)/4 fxz2 - sqrt(6)/4 fx(x2-3y2)
t_r2cub[1, 3] = -a
t_r2cub[5, 3] = -b
# fy(z2-x2) = sqrt(10)/4 fyz2 - sqrt(6)/4 fy(3x2-y2)
t_r2cub[2, 4] = a
t_r2cub[6, 4] = -b
# fz(x2-y2) = fz(x2-y2)
t_r2cub[3, 5] = c
# fxyz = fxyz
t_r2cub[4, 6] = c
if ispin:
norbs = 2 * nband
t_r2cub_spin = np.zeros((norbs, norbs), dtype=np.complex128)
t_r2cub_spin[0:norbs:2,0:norbs:2] = t_r2cub
t_r2cub_spin[1:norbs:2,1:norbs:2] = t_r2cub
return t_r2cub_spin
else:
return t_r2cub
def tmat_cub2r(ispin=False):
"""
the transform matrix from cubic spherical harmonics to
real spherical harmonics, just for f system
Args:
ispin: whether to include spin or not
"""
return np.conj( np.transpose( tmat_r2cub(ispin) ) )
def tmat_c2j(l):
"""
the transform matrix from complex shperical harmonics to
j2,jz basis
Args:
case: label for different systems
"""
if l == 1:
t_c2j = np.zeros((6, 6), dtype=np.complex128)
t_c2j[0,0] = -np.sqrt(2.0/3.0)
t_c2j[3,0] = np.sqrt(1.0/3.0)
t_c2j[2,1] = -np.sqrt(1.0/3.0)
t_c2j[5,1] = np.sqrt(2.0/3.0)
t_c2j[1,2] = 1.0
t_c2j[0,3] = np.sqrt(1.0/3.0)
t_c2j[3,3] = np.sqrt(2.0/3.0)
t_c2j[2,4] = np.sqrt(2.0/3.0)
t_c2j[5,4] = np.sqrt(1.0/3.0)
t_c2j[4,5] = 1.0
return t_c2j
elif l == 2:
t_c2j = np.zeros((10, 10), dtype=np.complex128)
t_c2j[0,0] = -np.sqrt(4.0/5.0)
t_c2j[3,0] = np.sqrt(1.0/5.0)
t_c2j[2,1] = -np.sqrt(3.0/5.0)
t_c2j[5,1] = np.sqrt(2.0/5.0)
t_c2j[4,2] = -np.sqrt(2.0/5.0)
t_c2j[7,2] = np.sqrt(3.0/5.0)
t_c2j[6,3] = -np.sqrt(1.0/5.0)
t_c2j[9,3] = np.sqrt(4.0/5.0)
t_c2j[1,4] = 1.0
t_c2j[0,5] = np.sqrt(1.0/5.0)
t_c2j[3,5] = np.sqrt(4.0/5.0)
t_c2j[2,6] = np.sqrt(2.0/5.0)
t_c2j[5,6] = np.sqrt(3.0/5.0)
t_c2j[4,7] = np.sqrt(3.0/5.0)
t_c2j[7,7] = np.sqrt(2.0/5.0)
t_c2j[6,8] = np.sqrt(4.0/5.0)
t_c2j[9,8] = np.sqrt(1.0/5.0)
t_c2j[8,9] = 1.0
return t_c2j
elif l == 3:
t_c2j = np.zeros((14,14), dtype=np.complex128)
t_c2j[0,0] = -np.sqrt(6.0/7.0)
t_c2j[3,0] = np.sqrt(1.0/7.0)
t_c2j[2,1] = -np.sqrt(5.0/7.0)
t_c2j[5,1] = np.sqrt(2.0/7.0)
t_c2j[4,2] = -np.sqrt(4.0/7.0)
t_c2j[7,2] = np.sqrt(3.0/7.0)
t_c2j[6,3] = -np.sqrt(3.0/7.0)
t_c2j[9,3] = np.sqrt(4.0/7.0)
t_c2j[8,4] = -np.sqrt(2.0/7.0)
t_c2j[11,4] = np.sqrt(5.0/7.0)
t_c2j[10,5] = -np.sqrt(1.0/7.0)
t_c2j[13,5] = np.sqrt(6.0/7.0)
t_c2j[1,6] = 1.0
t_c2j[0,7] = np.sqrt(1.0/7.0)
t_c2j[3,7] = np.sqrt(6.0/7.0)
t_c2j[2,8] = np.sqrt(2.0/7.0)
t_c2j[5,8] = np.sqrt(5.0/7.0)
t_c2j[4,9] = np.sqrt(3.0/7.0)
t_c2j[7,9] = np.sqrt(4.0/7.0)
t_c2j[6,10] = np.sqrt(4.0/7.0)
t_c2j[9,10] = np.sqrt(3.0/7.0)
t_c2j[8,11] = np.sqrt(5.0/7.0)
t_c2j[11,11] = np.sqrt(2.0/7.0)
t_c2j[10,12] = np.sqrt(6.0/7.0)
t_c2j[13,12] = np.sqrt(1.0/7.0)
t_c2j[12,13] = 1.0
return t_c2j
else:
print "NOT Implemented !!!"
def fourier_hr2hk(norbs, nkpt, kvec, nrpt, rvec, deg_rpt, hr):
"""
Fourier transform from R-space to K-space
Args:
norbs: number of orbitals
nkpt: number of K-points
kvec: fractional coordinate for K-points
nrpt: number of R-points
rvec: fractional coordinate for R-points
deg_rpt: the degeneracy of each R-point
hr: Hamiltonian in R-space
Return:
hk: Hamiltonian in K-space
"""
hk = np.zeros((nkpt, norbs, norbs), dtype=np.complex128)
for i in range(nkpt):
#print "kvec", i, kvec[i,:]
for j in range(nrpt):
coef = 2*np.pi*np.dot(kvec[i,:], np.float64(rvec[j,:]))
ratio = (np.cos(coef) + np.sin(coef) * 1j) / np.float64(deg_rpt[j])
hk[i,:,:] = hk[i,:,:] + ratio * hr[j,:,:]
return hk
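# A small illustrative example (a sketch, with made-up values): for a
# one-band chain with hopping t on R = (+/-1, 0, 0), the transform above
# reduces to hk(k) = 2*t*cos(2*pi*k).
# >>> t = -1.0
# >>> rvec = np.array([[1, 0, 0], [-1, 0, 0]])
# >>> deg_rpt = np.array([1, 1])
# >>> hr = np.array([[[t]], [[t]]], dtype=np.complex128)
# >>> kvec = np.array([[0.0, 0.0, 0.0], [0.25, 0.0, 0.0], [0.5, 0.0, 0.0]])
# >>> hk = fourier_hr2hk(1, 3, kvec, 2, rvec, deg_rpt, hr)
# >>> np.allclose(hk[:, 0, 0], [-2.0, 0.0, 2.0])
# True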
def fourier_hr2h1k(norbs, kvec, nrpt, rvec, deg_rpt, hr):
"""
Fourier transform from R-space to K-space for a single K-point
Args:
norbs: number of orbitals
kvec: fractional coordinate of a single K-point
nrpt: number of R-points
rvec: fractional coordinate for R-points
deg_rpt: the degeneracy of each R-point
hr: Hamiltonian in R-space
Return:
hk: Hamiltonian at the given K-point
"""
hk = np.zeros((norbs, norbs), dtype=np.complex128)
for i in range(nrpt):
coef = 2*np.pi*np.dot(kvec, np.float64(rvec[i,:]))
ratio = (np.cos(coef) + np.sin(coef) * 1.0j) / np.float64(deg_rpt[i])
hk[:,:] = hk[:,:] + ratio * hr[i,:,:]
return hk
def myfourier_hr2hk(norbs, nkpt, kvec, nrpt, rvec, deg_rpt, hr):
"""
Fourier transform from R-space to K-space
Args:
norbs: number of orbitals
nkpt: number of K-points
kvec: fractional coordinate for K-points
nrpt: number of R-points
rvec: fractional coordinate for R-points
deg_rpt: the degeneracy of each R-point
hr: Hamiltonian in R-space
Return:
hk: Hamiltonian in K-space
"""
print "ALERT: Gauge changed: not exp(-ikR) but exp(ikR)*exp(ik*tau_mu)"
hk = np.zeros((nkpt, norbs, norbs), dtype=np.complex128)
for i in range(nkpt):
for j in range(nrpt):
coef = 2*np.pi*np.dot(kvec[i,:], rvec[j,:])
ratio = (np.cos(coef) + np.sin(coef) * 1j) / float(deg_rpt[j])
hk[i,:,:] = hk[i,:,:] + ratio * hr[j,:,:]
return hk
def fourier_hr2hk_gauge(norbs, nkpt, kvec, nrpt, rvec, deg_rpt, hr):
"""
Fourier transform from R-space to K-space
Args:
norbs: number of orbitals
nkpt: number of K-points
kvec: fractional coordinate for K-points
nrpt: number of R-points
rvec: fractional coordinate for R-points
deg_rpt: the degeneracy of each R-point
hr: Hamiltonian in R-space
Return:
hk: Hamiltonian in K-space
"""
hk = np.zeros((nkpt, norbs, norbs), dtype=np.complex128)
for i in range(nkpt):
print "kvec", i, kvec[i,:]
for j in range(nrpt):
coef = 2*np.pi*np.dot(kvec[i,:], rvec[j,:])
ratio = (np.cos(coef) + np.sin(coef) * 1j) / float(deg_rpt[j])
hk[i,:,:] = hk[i,:,:] + ratio * hr[j,:,:]
return hk
|
quanshengwu/wannier_tools
|
utility/wannhr_symm/lib/tran.py
|
Python
|
gpl-3.0
| 13,790
|
[
"VASP",
"WIEN2k"
] |
2a6a510bba3e4c8a8136ca811037f0085dcce624be0325a5c649da9e41b1c67c
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
from robot.model import SuiteVisitor, TagPattern
from robot.utils import Matcher, plural_or_not
def KeywordRemover(how):
upper = how.upper()
if upper.startswith('NAME:'):
return ByNameKeywordRemover(pattern=how[5:])
if upper.startswith('TAG:'):
return ByTagKeywordRemover(pattern=how[4:])
try:
return {'ALL': AllKeywordsRemover,
'PASSED': PassedKeywordRemover,
'FOR': ForLoopItemsRemover,
'WUKS': WaitUntilKeywordSucceedsRemover}[upper]()
except KeyError:
raise DataError("Expected 'ALL', 'PASSED', 'NAME:<pattern>', 'FOR', "
"or 'WUKS' but got '%s'." % how)
class _KeywordRemover(SuiteVisitor):
_message = 'Keyword data removed using --RemoveKeywords option.'
def __init__(self):
self._removal_message = RemovalMessage(self._message)
def _clear_content(self, kw):
kw.keywords = []
kw.messages = []
self._removal_message.set(kw)
def _failed_or_warning_or_error(self, item):
return not item.passed or self._warning_or_error(item)
def _warning_or_error(self, item):
finder = WarningAndErrorFinder()
item.visit(finder)
return finder.found
class AllKeywordsRemover(_KeywordRemover):
def visit_keyword(self, keyword):
self._clear_content(keyword)
class PassedKeywordRemover(_KeywordRemover):
def start_suite(self, suite):
if not suite.statistics.all.failed:
for keyword in suite.keywords:
if not self._warning_or_error(keyword):
self._clear_content(keyword)
def visit_test(self, test):
if not self._failed_or_warning_or_error(test):
for keyword in test.keywords:
self._clear_content(keyword)
def visit_keyword(self, keyword):
pass
class ByNameKeywordRemover(_KeywordRemover):
def __init__(self, pattern):
_KeywordRemover.__init__(self)
self._matcher = Matcher(pattern, ignore='_')
def start_keyword(self, kw):
if self._matcher.match(kw.name) and not self._warning_or_error(kw):
self._clear_content(kw)
class ByTagKeywordRemover(_KeywordRemover):
def __init__(self, pattern):
_KeywordRemover.__init__(self)
self._pattern = TagPattern(pattern)
def start_keyword(self, kw):
if self._pattern.match(kw.tags) and not self._warning_or_error(kw):
self._clear_content(kw)
class ForLoopItemsRemover(_KeywordRemover):
_message = '%d passing step%s removed using --RemoveKeywords option.'
def start_keyword(self, kw):
if kw.type == kw.FOR_LOOP_TYPE:
before = len(kw.keywords)
kw.keywords = self._remove_keywords(kw.keywords)
self._removal_message.set_if_removed(kw, before)
def _remove_keywords(self, keywords):
return [kw for kw in keywords
if self._failed_or_warning_or_error(kw) or kw is keywords[-1]]
class WaitUntilKeywordSucceedsRemover(_KeywordRemover):
_message = '%d failing step%s removed using --RemoveKeywords option.'
def start_keyword(self, kw):
if kw.name == 'BuiltIn.Wait Until Keyword Succeeds' and kw.keywords:
before = len(kw.keywords)
kw.keywords = self._remove_keywords(list(kw.keywords))
self._removal_message.set_if_removed(kw, before)
def _remove_keywords(self, keywords):
include_from_end = 2 if keywords[-1].passed else 1
return self._kws_with_warnings(keywords[:-include_from_end]) \
+ keywords[-include_from_end:]
def _kws_with_warnings(self, keywords):
return [kw for kw in keywords if self._warning_or_error(kw)]
class WarningAndErrorFinder(SuiteVisitor):
def __init__(self):
self.found = False
def start_suite(self, suite):
return not self.found
def start_test(self, test):
return not self.found
def start_keyword(self, keyword):
return not self.found
def visit_message(self, msg):
if msg.level in ('WARN', 'ERROR'):
self.found = True
class RemovalMessage(object):
def __init__(self, message):
self._message = message
def set_if_removed(self, kw, len_before):
removed = len_before - len(kw.keywords)
if removed:
self.set(kw, self._message % (removed, plural_or_not(removed)))
def set(self, kw, message=None):
kw.doc = ('%s\n\n_%s_' % (kw.doc, message or self._message)).strip()
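# For example (values illustrative): when ForLoopItemsRemover drops 4 passing
# items, set_if_removed appends the note
# '_4 passing steps removed using --RemoveKeywords option._' to kw.doc.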
|
alexandrul-ci/robotframework
|
src/robot/result/keywordremover.py
|
Python
|
apache-2.0
| 5,240
|
[
"VisIt"
] |
f20a22e550168b134ff1faf4128e5c36ea716431aa142617a78c4cd0898bbd0c
|
#!/usr/bin/python
# Last Change: Tue Jul 17 11:00 PM 2007 J
"""Module implementing GM, a class which represents Gaussian mixtures.
GM instances can be used to create, sample mixtures. They also provide
different plotting facilities, such as isodensity contour for multi dimensional
models, ellipses of confidence."""
__docformat__ = 'restructuredtext'
import numpy as N
from numpy.random import randn, rand
import numpy.linalg as lin
import densities as D
import misc
# Right now, two main usages of a Gaussian Model are possible
# - init a Gaussian Model with meta-parameters, and trains it
# - set-up a Gaussian Model to sample it, draw ellipsoides
# of confidences. In this case, we would like to init it with
# known values of parameters. This can be done with the class method
# fromval
# TODO:
# - change bounds methods of GM class instantiations so that it cannot
# be used as long as w, mu and va are not set
# - We have to use scipy now for chisquare pdf, so there may be other
# methods to be used, ie for implementing random index.
# - there is no check on the internal state of the GM, that is whether the
# w, mu and va values make sense (eg singular values)
# - plot1d is still very rough. There should be a sensible way to modify
# the result plot (maybe return a dict with global pdf, component pdf and
# fill matplotlib handles). Should be coherent with plot
class GmParamError(Exception):
"""Exception raised for errors in gmm params
Attributes:
message -- explanation of the error
"""
def __init__(self, message):
Exception.__init__(self)
self.message = message
def __str__(self):
return self.message
class GM:
"""Gaussian Mixture class. This is a simple container class
to hold Gaussian Mixture parameters (weights, mean, etc...).
It can also draw itself (confidence ellipses) and sample itself.
"""
# I am not sure it is useful to have a spherical mode...
_cov_mod = ['diag', 'full']
#===============================
# Methods to construct a mixture
#===============================
def __init__(self, d, k, mode = 'diag'):
"""Init a Gaussian Mixture.
:Parameters:
d : int
dimension of the mixture.
k : int
number of components in the mixture.
mode : string
mode of covariance
:Returns:
an instance of GM.
Note
----
Only full and diag mode are supported for now.
:SeeAlso:
If you want to build a Gaussian Mixture with known weights, means
and variances, you can use the GM.fromvalues class method directly"""
if mode not in self._cov_mod:
raise GmParamError("mode %s not recognized" + str(mode))
self.d = d
self.k = k
self.mode = mode
# Init to 0 all parameters, with the right dimensions.
# Not sure this is useful in python from an efficiency POV ?
self.w = N.zeros(k)
self.mu = N.zeros((k, d))
if mode == 'diag':
self.va = N.zeros((k, d))
elif mode == 'full':
self.va = N.zeros((k * d, d))
self.__is_valid = False
if d > 1:
self.__is1d = False
else:
self.__is1d = True
def set_param(self, weights, mu, sigma):
"""Set parameters of the model.
Args should be conformant with the meta-parameters d and k given during
initialisation.
:Parameters:
weights : ndarray
weights of the mixture (k elements)
mu : ndarray
means of the mixture. One component's mean per row, k row for k
components.
sigma : ndarray
variances of the mixture. For diagonal models, one row contains
the diagonal elements of the covariance matrix. For full
covariance, d rows for one variance.
Examples
--------
Create a 3 component, 2 dimension mixture with full covariance matrices
>>> w = numpy.array([0.2, 0.5, 0.3])
>>> mu = numpy.array([[0., 0.], [1., 1.], [2., 2.]])
>>> va = numpy.array([[1., 0.], [0., 1.], [2., 0.5], [0.5, 1.],
... [1., 0.5], [0.5, 2.]])
>>> gm = GM(2, 3, 'full')
>>> gm.set_param(w, mu, va)
:SeeAlso:
If you know already the parameters when creating the model, you can
simply use the method class GM.fromvalues."""
#XXX: when fromvalues is called, parameters are checked twice...
k, d, mode = check_gmm_param(weights, mu, sigma)
if not k == self.k:
raise GmParamError("Number of given components is %d, expected %d"
% (k, self.k))
if not d == self.d:
raise GmParamError("Dimension of the given model is %d, "\
"expected %d" % (d, self.d))
if not mode == self.mode and not d == 1:
raise GmParamError("Given covariance mode is %s, expected %s"
% (mode, self.mode))
self.w = weights
self.mu = mu
self.va = sigma
self.__is_valid = True
@classmethod
def fromvalues(cls, weights, mu, sigma):
"""This class method can be used to create a GM model
directly from its parameters weights, mean and variance
:Parameters:
weights : ndarray
weights of the mixture (k elements)
mu : ndarray
means of the mixture. One component's mean per row, k row for k
components.
sigma : ndarray
variances of the mixture. For diagonal models, one row contains
the diagonal elements of the covariance matrix. For full
covariance, d rows for one variance.
:Returns:
gm : GM
an instance of GM.
Examples
--------
>>> w, mu, va = GM.gen_param(d, k)
>>> gm = GM(d, k)
>>> gm.set_param(w, mu, va)
and
>>> w, mu, va = GM.gen_param(d, k)
>>> gm = GM.fromvalues(w, mu, va)
are strictly equivalent."""
k, d, mode = check_gmm_param(weights, mu, sigma)
res = cls(d, k, mode)
res.set_param(weights, mu, sigma)
return res
#=====================================================
# Fundamental facilities (sampling, confidence, etc..)
#=====================================================
def sample(self, nframes):
""" Sample nframes frames from the model.
:Parameters:
nframes : int
number of samples to draw.
:Returns:
samples : ndarray
samples in the format one sample per row (nframes, d)."""
if not self.__is_valid:
raise GmParamError("""Parameters of the model has not been
set yet, please set them using self.set_param()""")
# State index (ie hidden var)
sti = gen_rand_index(self.w, nframes)
# standard gaussian samples
x = randn(nframes, self.d)
if self.mode == 'diag':
x = self.mu[sti, :] + x * N.sqrt(self.va[sti, :])
elif self.mode == 'full':
# Faster:
cho = N.zeros((self.k, self.va.shape[1], self.va.shape[1]))
for i in range(self.k):
# Using cholesky looks more stable than sqrtm; sqrtm is not
# available in numpy anyway, only in scipy...
cho[i] = lin.cholesky(self.va[i*self.d:i*self.d+self.d, :])
for s in range(self.k):
tmpind = N.where(sti == s)[0]
x[tmpind] = N.dot(x[tmpind], cho[s].T) + self.mu[s]
else:
raise GmParamError("cov matrix mode not recognized, "\
"this is a bug !")
return x
def conf_ellipses(self, dim = misc.DEF_VIS_DIM, npoints = misc.DEF_ELL_NP,
level = misc.DEF_LEVEL):
"""Returns a list of confidence ellipsoids describing the Gmm
defined by mu and va. Check densities.gauss_ell for details
:Parameters:
dim : sequence
sequences of two integers which represent the dimensions where to
project the ellipsoid.
npoints : int
number of points to generate for the ellipse.
level : float
level of confidence (between 0 and 1).
:Returns:
xe : sequence
a list of x coordinates for the ellipses (Xe[i] is the array
containing x coordinates of the ith Gaussian)
ye : sequence
a list of y coordinates for the ellipses.
Examples
--------
Suppose we have w, mu and va as parameters for a mixture, then:
>>> gm = GM(d, k)
>>> gm.set_param(w, mu, va)
>>> X = gm.sample(1000)
>>> Xe, Ye = gm.conf_ellipses()
>>> pylab.plot(X[:,0], X[:, 1], '.')
>>> for k in range(len(w)):
... pylab.plot(Xe[k], Ye[k], 'r')
Will plot samples X drawn from the mixture model, and
plot the ellipses of equi-probability from the mean with
default level of confidence."""
if self.__is1d:
raise ValueError("This function does not make sense for 1d "
"mixtures.")
if not self.__is_valid:
raise GmParamError("""Parameters of the model has not been
set yet, please set them using self.set_param()""")
xe = []
ye = []
if self.mode == 'diag':
for i in range(self.k):
x, y = D.gauss_ell(self.mu[i, :], self.va[i, :],
dim, npoints, level)
xe.append(x)
ye.append(y)
elif self.mode == 'full':
for i in range(self.k):
x, y = D.gauss_ell(self.mu[i, :],
self.va[i*self.d:i*self.d+self.d, :],
dim, npoints, level)
xe.append(x)
ye.append(y)
return xe, ye
def check_state(self):
"""Returns true if the parameters of the model are valid.
For Gaussian mixtures, this means weights summing to 1 and variances
being positive definite.
"""
if not self.__is_valid:
raise GmParamError("Parameters of the model has not been"\
"set yet, please set them using self.set_param()")
# Check condition number for cov matrix
if self.mode == 'diag':
tinfo = N.finfo(self.va.dtype)
if N.any(self.va < tinfo.eps):
raise GmParamError("variances are singular")
elif self.mode == 'full':
try:
d = self.d
for i in range(self.k):
N.linalg.cholesky(self.va[i*d:i*d+d, :])
except N.linalg.LinAlgError:
raise GmParamError("matrix %d is singular " % i)
else:
raise GmParamError("Unknown mode")
return True
@classmethod
def gen_param(cls, d, nc, mode = 'diag', spread = 1):
"""Generate random, valid parameters for a gaussian mixture model.
:Parameters:
d : int
the dimension
nc : int
the number of components
mode : string
covariance matrix mode ('full' or 'diag').
:Returns:
w : ndarray
weights of the mixture
mu : ndarray
means of the mixture
va : ndarray
variances of the mixture
Notes
-----
This is a class method.
"""
w = N.abs(randn(nc))
w = w / sum(w, 0)
mu = spread * N.sqrt(d) * randn(nc, d)
if mode == 'diag':
va = N.abs(randn(nc, d))
elif mode == 'full':
# If A is invertible, A'A is positive definite
va = randn(nc * d, d)
for k in range(nc):
va[k*d:k*d+d] = N.dot( va[k*d:k*d+d],
va[k*d:k*d+d].transpose())
else:
raise GmParamError('cov matrix mode not recognized')
return w, mu, va
#gen_param = classmethod(gen_param)
def pdf(self, x, log = False):
"""Computes the pdf of the model at given points.
:Parameters:
x : ndarray
points where to estimate the pdf. One row for one
multi-dimensional sample (eg to estimate the pdf at 100
different points in 10 dimensions, data's shape should be (100,
10)).
log : bool
If true, returns the log pdf instead of the pdf.
:Returns:
y : ndarray
the pdf at points x."""
if log:
return D.logsumexp(
D.multiple_gauss_den(x, self.mu, self.va, log = True)
+ N.log(self.w))
else:
return N.sum(D.multiple_gauss_den(x, self.mu, self.va) * self.w, 1)
def pdf_comp(self, x, cid, log = False):
"""Computes the pdf of the model at given points, at given component.
:Parameters:
x : ndarray
points where to estimate the pdf. One row for one
multi-dimensional sample (eg to estimate the pdf at 100
different points in 10 dimensions, data's shape should be (100,
10)).
cid: int
the component index.
log : bool
If true, returns the log pdf instead of the pdf.
:Returns:
y : ndarray
the pdf at points x."""
if self.mode == 'diag':
va = self.va[cid]
elif self.mode == 'full':
va = self.va[cid*self.d:(cid+1)*self.d]
else:
raise GmParamError("""var mode %s not supported""" % self.mode)
if log:
return D.gauss_den(x, self.mu[cid], va, log = True) \
+ N.log(self.w[cid])
else:
return D.multiple_gauss_den(x, self.mu[cid], va) * self.w[cid]
#=================
# Plotting methods
#=================
def plot(self, dim = misc.DEF_VIS_DIM, npoints = misc.DEF_ELL_NP,
level = misc.DEF_LEVEL):
"""Plot the ellipsoides directly for the model
Returns a list of lines handle, so that their style can be modified. By
default, the style is red color, and nolegend for all of them.
:Parameters:
dim : sequence
sequence of two integers, the dimensions of interest.
npoints : int
Number of points to use for the ellipsoids.
level : float
level of confidence (to use with fill argument)
:Returns:
h : sequence
Returns a list of lines handle so that their properties
can be modified (eg color, label, etc...):
Note
----
Does not work for 1d. Requires matplotlib
:SeeAlso:
conf_ellipses is used to compute the ellipses. Use this if you want
to plot with something else than matplotlib."""
if self.__is1d:
raise ValueError("This function does not make sense for 1d "
"mixtures.")
if not self.__is_valid:
raise GmParamError("""Parameters of the model has not been
set yet, please set them using self.set_param()""")
k = self.k
xe, ye = self.conf_ellipses(dim, npoints, level)
try:
import pylab as P
return [P.plot(xe[i], ye[i], 'r', label='_nolegend_')[0] for i in
range(k)]
except ImportError:
raise GmParamError("matplotlib not found, cannot plot...")
def plot1d(self, level = misc.DEF_LEVEL, fill = False, gpdf = False):
"""Plots the pdf of each component of the 1d mixture.
:Parameters:
level : float
level of confidence (to use with fill argument)
fill : bool
if True, the area of the pdf corresponding to the given
confidence intervals is filled.
gpdf : bool
if True, the global pdf is plotted.
:Returns:
h : dict
Returns a dictionary h of plot handles so that their properties
can be modified (eg color, label, etc...):
- h['pdf'] is a list of lines, one line per component pdf
- h['gpdf'] is the line for the global pdf
- h['conf'] is a list of filling area
"""
if not self.__is1d:
raise ValueError("This function does not make sense for "\
"mixtures which are not unidimensional")
from scipy.stats import norm
pval = N.sqrt(self.va[:, 0]) * norm(0, 1).ppf((1+level)/2)
# Compute reasonable min/max for the normal pdf: [-mc * std, mc * std]
# gives the range we are taking in account for each gaussian
mc = 3
std = N.sqrt(self.va[:, 0])
mi = N.amin(self.mu[:, 0] - mc * std)
ma = N.amax(self.mu[:, 0] + mc * std)
np = 500
x = N.linspace(mi, ma, np)
# Prepare the dic of plot handles to return
ks = ['pdf', 'conf', 'gpdf']
hp = dict((i, []) for i in ks)
# Compute the densities
y = D.multiple_gauss_den(x[:, N.newaxis], self.mu, self.va, \
log = True) \
+ N.log(self.w)
yt = self.pdf(x[:, N.newaxis])
try:
import pylab as P
for c in range(self.k):
h = P.plot(x, N.exp(y[:, c]), 'r', label ='_nolegend_')
hp['pdf'].extend(h)
if fill:
# Compute x coordinates of filled area
id1 = -pval[c] + self.mu[c]
id2 = pval[c] + self.mu[c]
xc = x[N.where(x > id1)[0]]
xc = xc[N.where(xc < id2)[0]]
# Compute the graph for filling
yf = self.pdf_comp(xc[:, N.newaxis], c)
xc = N.concatenate(([xc[0]], xc, [xc[-1]]))
yf = N.concatenate(([0], yf, [0]))
h = P.fill(xc, yf, facecolor = 'b', alpha = 0.1,
label='_nolegend_')
hp['conf'].extend(h)
if gpdf:
h = P.plot(x, yt, 'r:', label='_nolegend_')
hp['gpdf'] = h
return hp
except ImportError:
raise GmParamError("matplotlib not found, cannot plot...")
def density_on_grid(self, dim = misc.DEF_VIS_DIM, nx = 50, ny = 50,
nl = 20, maxlevel = 0.95, v = None):
"""Do all the necessary computation for contour plot of mixture's
density.
:Parameters:
dim : sequence
sequence of two integers, the dimensions of interest.
nx : int
Number of points to use for the x axis of the grid
ny : int
Number of points to use for the y axis of the grid
nl : int
Number of contour to plot.
:Returns:
X : ndarray
points of the x axis of the grid
Y : ndarray
points of the y axis of the grid
Z : ndarray
values of the density on X and Y
V : ndarray
Contour values to display.
Note
----
X, Y, Z and V are as expected by matplotlib contour function."""
if self.__is1d:
raise ValueError("This function does not make sense for 1d "
"mixtures.")
# Ok, it is a bit gory. Basically, we want to compute the size of the
# grid. We use conf_ellipse, which will return a couple of points for
# each component, and we can find a grid size which then is just big
# enough to contain all ellipses. This won't work well if two
# ellipsoids are crossing each other a lot (because this assumes that
# at a given point, one component is largely dominant for its
# contribution to the pdf).
xe, ye = self.conf_ellipses(level = maxlevel, dim = dim)
ax = [N.min(xe), N.max(xe), N.min(ye), N.max(ye)]
w = ax[1] - ax[0]
h = ax[3] - ax[2]
x, y, lden = self._densityctr(N.linspace(ax[0]-0.2*w,
ax[1]+0.2*w, nx),
N.linspace(ax[2]-0.2*h,
ax[3]+0.2*h, ny),
dim = dim)
# XXX: how to find "good" values for level ?
if v is None:
v = N.linspace(-5, N.max(lden), nl)
return x, y, lden, N.array(v)
def _densityctr(self, rangex, rangey, dim = misc.DEF_VIS_DIM):
"""Helper function to compute density contours on a grid."""
gr = N.meshgrid(rangex, rangey)
x = gr[0].flatten()
y = gr[1].flatten()
xdata = N.concatenate((x[:, N.newaxis], y[:, N.newaxis]), axis = 1)
dmu = self.mu[:, dim]
dva = self._get_va(dim)
den = GM.fromvalues(self.w, dmu, dva).pdf(xdata, log = True)
den = den.reshape(len(rangey), len(rangex))
return gr[0], gr[1], den
def _get_va(self, dim):
"""Returns variance limited do 2 dimension in tuple dim."""
assert len(dim) == 2
dim = N.array(dim)
if (dim < 0).any() or (dim >= self.d).any():
raise ValueError("dim elements should be between 0 and dimension"
" of the mixture.")
if self.mode == 'diag':
return self.va[:, dim]
elif self.mode == 'full':
ld = dim.size
vaselid = N.empty((ld * self.k, ld), N.int)
for i in range(self.k):
vaselid[ld*i] = dim[0] + i * self.d
vaselid[ld*i+1] = dim[1] + i * self.d
vadid = N.empty((ld * self.k, ld), N.int)
for i in range(self.k):
vadid[ld*i] = dim
vadid[ld*i+1] = dim
return self.va[vaselid, vadid]
else:
raise ValueError("Unkown mode")
# Syntactic sugar
def __repr__(self):
msg = ""
msg += "Gaussian Mixture:\n"
msg += " -> %d dimensions\n" % self.d
msg += " -> %d components\n" % self.k
msg += " -> %s covariance \n" % self.mode
if self.__is_valid:
msg += "Has initial values"""
else:
msg += "Has no initial values yet"""
return msg
def __str__(self):
return self.__repr__()
# Function to generate a random index: this is kept outside any class,
# as the function can be useful for other
def gen_rand_index(p, n):
"""Generate a N samples vector containing random index between 1
and length(p), each index i with probability p(i)"""
# TODO Check args here
# TODO: check each value of inverse distribution is
# different
invcdf = N.cumsum(p)
uni = rand(n)
index = N.zeros(n, dtype=int)
# This one should be a bit faster
for k in range(len(p)-1, 0, -1):
blop = N.where(N.logical_and(invcdf[k-1] <= uni,
uni < invcdf[k]))
index[blop] = k
return index
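# A quick illustration (a sketch with example values): the empirical
# frequencies of the generated indices should approach p for large n.
# >>> p = N.array([0.2, 0.5, 0.3])
# >>> idx = gen_rand_index(p, 100000)
# >>> N.bincount(idx) / 100000. # roughly array([ 0.2, 0.5, 0.3])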
def check_gmm_param(w, mu, va):
"""Check that w, mu and va are valid parameters for
a mixture of gaussian.
w should sum to 1, there should be the same number of components in each
param, the variances should be positive definite, etc...
:Parameters:
w : ndarray
vector or list of weights of the mixture (K elements)
mu : ndarray
matrix: K * d
va : ndarray
list of variances (vector K * d or square matrices Kd * d)
:Returns:
k : int
number of components
d : int
dimension
mode : string
'diag' if diagonal covariance, 'full' for full matrices
"""
# Check that w is valid
if not len(w.shape) == 1:
raise GmParamError('weight should be a rank 1 array')
if N.fabs(N.sum(w) - 1) > misc.MAX_DBL_DEV:
raise GmParamError('weight does not sum to 1')
# Check that mean and va have the same number of components
k = len(w)
if N.ndim(mu) < 2:
msg = "mu should be a K,d matrix, and a row vector if only 1 comp"
raise GmParamError(msg)
if N.ndim(va) < 2:
msg = """va should be a K,d / K *d, d matrix, and a row vector if
only 1 diag comp"""
raise GmParamError(msg)
(km, d) = mu.shape
(ka, da) = va.shape
if not k == km:
msg = "not same number of component in mean and weights"
raise GmParamError(msg)
if not d == da:
msg = "not same number of dimensions in mean and variances"
raise GmParamError(msg)
if km == ka:
mode = 'diag'
else:
mode = 'full'
if not ka == km*d:
msg = "not same number of dimensions in mean and variances"
raise GmParamError(msg)
return k, d, mode
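# A small example of the expected output (a sketch): a two component,
# three dimensional diagonal model is identified as such.
# >>> w = N.array([0.4, 0.6])
# >>> mu = N.zeros((2, 3))
# >>> va = N.ones((2, 3))
# >>> check_gmm_param(w, mu, va)
# (2, 3, 'diag')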
if __name__ == '__main__':
pass
|
jhmadhav/pynopticon
|
src/em/gauss_mix.py
|
Python
|
gpl-3.0
| 26,027
|
[
"Gaussian"
] |
bf194d7b2985e4bbc0716a7c129a918311e7ecea2a250d1d34fa8152bdde94fe
|
from __future__ import absolute_import
from __future__ import print_function
import unittest
import os
import numpy as np
from pymatgen.util.testing import PymatgenTest
from pymatgen.util.coord import in_coord_list, in_coord_list_pbc
from pymatgen.core.surface import generate_all_slabs
from pymatgen.analysis.adsorption import *
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen import Structure, Lattice, Molecule
import json
from six.moves import zip
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class AdsorbateSiteFinderTest(PymatgenTest):
def setUp(self):
self.structure = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3.5),
["Ni"], [[0, 0, 0]])
slabs = generate_all_slabs(self.structure, max_index=2,
min_slab_size=6.0, min_vacuum_size=15.0,
max_normal_search=1, center_slab=True)
self.slab_dict = {''.join([str(i) for i in slab.miller_index]):
slab for slab in slabs}
self.asf_211 = AdsorbateSiteFinder(self.slab_dict["211"])
self.asf_100 = AdsorbateSiteFinder(self.slab_dict["100"])
self.asf_111 = AdsorbateSiteFinder(self.slab_dict["111"])
self.asf_111_bottom = AdsorbateSiteFinder(self.slab_dict["111"],
top_surface=False)
self.asf_110 = AdsorbateSiteFinder(self.slab_dict["110"])
def test_init(self):
asf_100 = AdsorbateSiteFinder(self.slab_dict["100"])
asf_111 = AdsorbateSiteFinder(self.slab_dict["111"])
def test_from_bulk_and_miller(self):
# Standard site finding
asf = AdsorbateSiteFinder.from_bulk_and_miller(self.structure, (1, 1, 1))
sites = asf.find_adsorption_sites()
self.assertEqual(len(sites['hollow']), 2)
self.assertEqual(len(sites['bridge']), 1)
self.assertEqual(len(sites['ontop']), 1)
self.assertEqual(len(sites['all']), 4)
asf = AdsorbateSiteFinder.from_bulk_and_miller(self.structure, (1, 0, 0))
sites = asf.find_adsorption_sites()
self.assertEqual(len(sites['all']), 3)
self.assertEqual(len(sites['bridge']), 2)
asf = AdsorbateSiteFinder.from_bulk_and_miller(self.structure, (1, 1, 0),
undercoord_threshold=0.1)
self.assertEqual(len(asf.surface_sites), 1)
# Subsurface site finding
asf = AdsorbateSiteFinder.from_bulk_and_miller(self.structure, (1, 1, 1))
sites = asf.find_adsorption_sites(positions=["ontop", "subsurface", "bridge"])
self.assertEqual(len(sites['all']), 4)
self.assertEqual(len(sites['subsurface']), 3)
def test_find_adsorption_sites(self):
sites = self.asf_100.find_adsorption_sites()
self.assertEqual(len(sites['all']), 3)
self.assertEqual(len(sites['hollow']), 0)
self.assertEqual(len(sites['bridge']), 2)
self.assertEqual(len(sites['ontop']), 1)
sites = self.asf_111.find_adsorption_sites()
self.assertEqual(len(sites['all']), 4)
sites = self.asf_110.find_adsorption_sites()
self.assertEqual(len(sites['all']), 4)
sites = self.asf_211.find_adsorption_sites()
def test_generate_adsorption_structures(self):
co = Molecule("CO", [[0, 0, 0], [0, 0, 1.23]])
structures = self.asf_111.generate_adsorption_structures(co, repeat=[2, 2, 1])
self.assertEqual(len(structures), 4)
sites = self.asf_111.find_adsorption_sites()
# Check repeat functionality
self.assertEqual(len([site for site in structures[0] if
site.properties['surface_properties'] != 'adsorbate']),
4*len(self.asf_111.slab))
for n, structure in enumerate(structures):
self.assertArrayAlmostEqual(structure[-2].coords, sites['all'][n])
find_args = {"positions":["hollow"]}
structures_hollow = self.asf_111.\
generate_adsorption_structures(co, find_args=find_args)
self.assertEqual(len(structures_hollow), len(sites['hollow']))
for n, structure in enumerate(structures_hollow):
self.assertTrue(in_coord_list(sites['hollow'], structure[-2].coords))
# check that adsorption happens on the bottom
# surface when top_surface=False
o = Molecule("O", [[0, 0, 0]])
adslabs = self.asf_111_bottom.generate_adsorption_structures(o)
for adslab in adslabs:
sites = sorted(adslab, key=lambda site: site.frac_coords[2])
self.assertTrue(sites[0].species_string == "O")
def test_adsorb_both_surfaces(self):
o = Molecule("O", [[0, 0, 0]])
adslabs = adsorb_both_surfaces(self.slab_dict["111"], o)
for adslab in adslabs:
sites = sorted(adslab, key=lambda site: site.frac_coords[2])
self.assertTrue(sites[0].species_string == "O")
self.assertTrue(sites[-1].species_string == "O")
self.assertTrue(adslab.is_symmetric())
def test_functions(self):
slab = self.slab_dict["111"]
rot = get_rot(slab)
reoriented = reorient_z(slab)
self.assertArrayAlmostEqual(slab.frac_coords[0],
cart_to_frac(slab.lattice,
slab.cart_coords[0]))
self.assertArrayAlmostEqual(slab.cart_coords[0],
frac_to_cart(slab.lattice,
slab.frac_coords[0]))
if __name__ == '__main__':
unittest.main()
|
matk86/pymatgen
|
pymatgen/analysis/tests/test_adsorption.py
|
Python
|
mit
| 5,781
|
[
"pymatgen"
] |
6ac5e08a4acca1d3b28db07ad2641c86c37fbd3af3d25b26d2ef39e3c4919504
|
# -*- coding: utf-8 -*-
#
# IRCrypt: Secure Encryption Layer Atop IRC
# =========================================
#
# Copyright (C) 2013-2014
# Lars Kiesow <lkiesow@uos.de>
# Sven Haardiek <sven@haardiek.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# == About ==================================================================
#
# The weechat IRCrypt script will send messages encrypted to all channels for
# which a passphrase is set. A channel can either be a regular IRC multi-user
# channel (i.e. #IRCrypt) or another users nickname.
#
# == Project ================================================================
#
# This script is part of the IRCrypt project. For more information or to
# participate, please visit
#
# https://github.com/IRCrypt
#
#
# To report bugs, make suggestions, etc. for this particular plug-in, please
# have a look at:
#
# https://github.com/IRCrypt/ircrypt-weechat
#
import weechat
import subprocess
import base64
import time
# Constants used in this script
SCRIPT_NAME = 'ircrypt'
SCRIPT_AUTHOR = 'Sven Haardiek <sven@haardiek.de>, Lars Kiesow <lkiesow@uos.de>'
SCRIPT_VERSION = 'SNAPSHOT'
SCRIPT_LICENSE = 'GPL3'
SCRIPT_DESC = 'IRCrypt: Encryption layer for IRC'
SCRIPT_HELP_TEXT = '''
%(bold)sIRCrypt command options: %(normal)s
list List set keys, public key ids and ciphers
set-key [-server <server>] <target> <key> Set key for target
remove-key [-server <server>] <target> Remove key for target
set-cipher [-server <server>] <target> <cipher> Set specific cipher for target
remove-cipher [-server <server>] <target> Remove specific cipher for target
plain [-server <s>] [-channel <ch>] <msg> Send unencrypted message
%(bold)sExamples: %(normal)s
Set the key for a channel:
/ircrypt set-key -server freenet #IRCrypt key
Remove the key:
/ircrypt remove-key #IRCrypt
Switch to a specific cipher for a channel:
/ircrypt set-cipher -server freenode #IRCrypt TWOFISH
Unset the specific cipher for a channel:
/ircrypt remove-cipher #IRCrypt
Send unencrypted “Hello” to current channel
/ircrypt plain Hello
%(bold)sConfiguration: %(normal)s
Tip: You can list all options and what they are currently set to by executing:
/set ircrypt.*
%(bold)sircrypt.marker.encrypted %(normal)s
If you add 'ircrypt' to weechat.bar.status.items, these option will set a
string which is displayed in the status bar of an encrypted channels,
indicating that the current channel is encrypted.
If “{{cipher}}” is used as part of this string, it will be replaced by the
cipher currently used by oneself for that particular channel.
It is worth noting that you probably don't want to replace the whole value of
that option but extend it instead in a way like:
/set weechat.bar.status.items {{currentContent}},ircrypt
%(bold)sircrypt.marker.unencrypted %(normal)s
This option will set a string which is displayed before each message that is
send unencrypted in a channel for which a key is set. So you know when
someone is talking to you without encryption.
%(bold)sircrypt.general.binary %(normal)s
This will set the GnuPG binary used for encryption and decryption. IRCrypt
will try to set this automatically.
''' % {'bold': weechat.color('bold'), 'normal': weechat.color('-bold')}
MAX_PART_LEN = 300
MSG_PART_TIMEOUT = 300 # 5min
# Global variables and memory used to store message parts, pending requests,
# configuration options, keys, etc.
ircrypt_msg_memory = {}
ircrypt_config_file = None
ircrypt_config_section = {}
ircrypt_config_option = {}
ircrypt_keys = {}
ircrypt_cipher = {}
ircrypt_message_plain = {}
class MessageParts:
'''Class used for storing parts of messages which were split after encryption
due to their length.'''
modified = 0
last_id = None
message = ''
def update(self, id, msg):
'''This method updates an already existing message part by adding a new
part to the old ones and updating the identifier of the latest received
message part.
'''
# Check if id is correct. If not, throw away old parts:
if self.last_id and self.last_id != id + 1:
self.message = ''
# Check if there are old message parts which, due to their age,
# probably do not belong to this message:
if time.time() - self.modified > MSG_PART_TIMEOUT:
self.message = ''
self.last_id = id
self.message = msg + self.message
self.modified = time.time()
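# A minimal sketch of the intended use (illustrative values): parts arrive
# highest id first and each new part is prepended, so for a message split as
# 'hell' / 'o wo' / 'rld' (parts 0, 1 and 2):
# >>> mp = MessageParts()
# >>> mp.update(2, 'rld')
# >>> mp.update(1, 'o wo')
# >>> mp.message # 'o world' -- part 0 is prepended by the caller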
def ircrypt_gnupg(stdin, *args):
'''Try to execute gpg with given input and options.
:param stdin: Input for GnuPG
:param args: Additional command line options for GnuPG
:returns: Tuple containing returncode, stdout and stderr
'''
gnupg = weechat.config_string(weechat.config_get('ircrypt.general.binary'))
if not gnupg:
return (99, '', 'GnuPG could not be found')
p = subprocess.Popen([gnupg, '--batch', '--no-tty'] + list(args),
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate(stdin)
return (p.returncode, out, err)
def ircrypt_split_msg(cmd, pre, msg):
'''Convert encrypted message in MAX_PART_LEN sized blocks
'''
msg = msg.rstrip()
return '\n'.join(['%s:>%s-%i %s' %
(cmd, pre, i // MAX_PART_LEN, msg[i:i + MAX_PART_LEN])
for i in range(0, len(msg), MAX_PART_LEN)][::-1])
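# A rough illustration (values made up): a 650 character payload is cut into
# three 300 character blocks, emitted highest part number first so the
# receiver can treat part 0 as the final piece.
# >>> parts = ircrypt_split_msg('PRIVMSG #IRCrypt ', 'CRY', 'x' * 650)
# >>> parts.split('\n')[0].startswith('PRIVMSG #IRCrypt :>CRY-2')
# True
# >>> parts.split('\n')[-1].startswith('PRIVMSG #IRCrypt :>CRY-0')
# True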
def ircrypt_error(msg, buf):
'''Print errors to a given buffer. Errors are printed in red and have the
weechat error prefix.
'''
weechat.prnt(buf, weechat.prefix('error') + weechat.color('red') +
('\n' + weechat.color('red')).join(msg.split('\n')))
def ircrypt_warn(msg, buf=''):
'''Print warnings. If no buffer is set, the default weechat buffer is used.
Warnings are printed in gray without a marker.
'''
weechat.prnt(buf, weechat.color('gray') +
('\n' + weechat.color('gray')).join(msg.split('\n')))
def ircrypt_info(msg, buf=None):
'''Print info message to the specified buffer. If no buffer is set, the current
foreground buffer is used to print the message.
'''
if buf is None:
buf = weechat.current_buffer()
weechat.prnt(buf, msg)
def ircrypt_decrypt_hook(data, msgtype, server, args):
'''Hook for incoming PRIVMSG commands.
This method will parse the input, check if it is an encrypted message and
call the appropriate decryption methods if necessary.
:param data:
:param msgtype:
:param server: IRC server the message comes from.
:param args: IRC command line.
'''
info = weechat.info_get_hashtable('irc_message_parse', {'message': args})
# Check if channel is own nick and if change channel to nick of sender
if info['channel'][0] not in '#&':
info['channel'] = info['nick']
# Get key
key = ircrypt_keys.get(('%s/%s' % (server, info['channel'])).lower())
# Return everything as it is if we have no key
if not key:
return args
if '>CRY-' not in args:
# key exists but '>CRY-' is not part of the message: flag it as unencrypted
pre, message = args.split(' :', 1)
marker = weechat.config_string(ircrypt_config_option['unencrypted'])
return '%s :%s %s' % (pre, marker, message)
# if key exists and '>CRY-' is part of the message, start symmetric decryption
pre, message = args.split('>CRY-', 1)
number, message = message.split(' ', 1)
# Get key for the message memory
catchword = '%s.%s.%s' % (server, info['channel'], info['nick'])
# Decrypt only if we got last part of the message
# otherwise put the message into a global memory and quit
if int(number) != 0:
if catchword not in ircrypt_msg_memory:
ircrypt_msg_memory[catchword] = MessageParts()
ircrypt_msg_memory[catchword].update(int(number), message)
return ''
# Get whole message
try:
message = message + ircrypt_msg_memory[catchword].message
del ircrypt_msg_memory[catchword]
except KeyError:
pass
# Get message buffer in case we need to print an error
buf = weechat.buffer_search('irc', '%s.%s' % (server, info['channel']))
# Decode base64 encoded message
try:
message = base64.b64decode(message)
except:
ircrypt_error('Could not Base64 decode message.', buf)
return args
# Decrypt
try:
message = (key).encode('utf-8') + b'\n' + message
except:
# For Python 2.x
message = key + b'\n' + message
(ret, out, err) = ircrypt_gnupg(message,
'--passphrase-fd', '-', '-q', '-d')
# Get and print GPG errors/warnings
if ret:
ircrypt_error(err.decode('utf-8'), buf)
return args
if err:
ircrypt_warn(err.decode('utf-8'))
# We expect IRC commands. Hence there should never be more than one line
out = out.replace(b'\r', b' ').replace(b'\n', b' ')
return pre + out.decode('utf-8')
def ircrypt_encrypt_hook(data, msgtype, server, args):
'''Hook for outgoing PRIVMSG commands.
This method will call the appropriate methods for encrypting the outgoing
messages either symmetric or asymmetric
:param data:
:param msgtype:
:param server: IRC server the message comes from.
:param args: IRC command line.
'''
info = weechat.info_get_hashtable("irc_message_parse", {"message": args})
# check if this message is to be send as plain text
plain = ircrypt_message_plain.get('%s/%s' % (server, info['channel']))
if plain:
del ircrypt_message_plain['%s/%s' % (server, info['channel'])]
if (time.time() - plain[0]) < 5 \
and args == 'PRIVMSG %s :%s' % (info['channel'], plain[1]):
args = args.replace('PRIVMSG %s :%s ' % (
info['channel'],
weechat.config_string(ircrypt_config_option['unencrypted'])),
'PRIVMSG %s :' % info['channel'])
return args
# check symmetric key
key = ircrypt_keys.get(('%s/%s' % (server, info['channel'])).lower())
if not key:
# No key -> don't encrypt
return args
# Get cipher
cipher = ircrypt_cipher.get(('%s/%s' % (server, info['channel'])).lower(),
weechat.config_string(ircrypt_config_option['sym_cipher']))
# Get prefix and message
pre, message = args.split(':', 1)
# encrypt message
try:
inp = key.encode('utf-8') + b'\n' + message.encode('utf-8')
except:
inp = key + b'\n' + message
(ret, out, err) = ircrypt_gnupg(inp,
'--symmetric', '--cipher-algo', cipher, '--passphrase-fd', '-')
# Get and print GPG errors/warnings
if ret:
buf = weechat.buffer_search('irc', '%s.%s' % (server, info['channel']))
ircrypt_error(err.decode('utf-8'), buf)
return args
if err:
ircrypt_warn(err.decode('utf-8'))
# Ensure the generated messages are not too long and send them
return ircrypt_split_msg(pre, 'CRY', base64.b64encode(out).decode('utf-8'))
def ircrypt_config_init():
''' This method initializes the configuration file. It creates sections and
options in memory and prepares the handling of key sections.
'''
global ircrypt_config_file
ircrypt_config_file = weechat.config_new('ircrypt', 'ircrypt_config_reload_cb', '')
if not ircrypt_config_file:
return
# marker
ircrypt_config_section['marker'] = weechat.config_new_section(
ircrypt_config_file, 'marker', 0, 0, '', '', '', '', '', '', '', '',
'', '')
if not ircrypt_config_section['marker']:
weechat.config_free(ircrypt_config_file)
return
ircrypt_config_option['encrypted'] = weechat.config_new_option(
ircrypt_config_file, ircrypt_config_section['marker'],
'encrypted', 'string', 'Marker for encrypted messages', '', 0, 0,
'encrypted', 'encrypted', 0, '', '', '', '', '', '')
ircrypt_config_option['unencrypted'] = weechat.config_new_option(
ircrypt_config_file, ircrypt_config_section['marker'], 'unencrypted',
'string', 'Marker for unencrypted messages received in an encrypted channel',
'', 0, 0, '', 'u', 0, '', '', '', '', '', '')
# cipher options
ircrypt_config_section['cipher'] = weechat.config_new_section(
ircrypt_config_file, 'cipher', 0, 0, '', '', '', '', '', '', '', '',
'', '')
if not ircrypt_config_section['cipher']:
weechat.config_free(ircrypt_config_file)
return
ircrypt_config_option['sym_cipher'] = weechat.config_new_option(
ircrypt_config_file, ircrypt_config_section['cipher'],
'sym_cipher', 'string', 'symmetric cipher used by default', '', 0, 0,
'TWOFISH', 'TWOFISH', 0, '', '', '', '', '', '')
# general options
ircrypt_config_section['general'] = weechat.config_new_section(
ircrypt_config_file, 'general', 0, 0, '', '', '', '', '', '', '', '',
'', '')
if not ircrypt_config_section['general']:
weechat.config_free(ircrypt_config_file)
return
ircrypt_config_option['binary'] = weechat.config_new_option(
ircrypt_config_file, ircrypt_config_section['general'],
'binary', 'string', 'GnuPG binary to use', '', 0, 0,
'', '', 0, '', '', '', '', '', '')
# keys
ircrypt_config_section['keys'] = weechat.config_new_section(
ircrypt_config_file, 'keys', 0, 0, 'ircrypt_config_keys_read_cb', '',
'ircrypt_config_keys_write_cb', '', '', '', '', '', '', '')
if not ircrypt_config_section['keys']:
weechat.config_free(ircrypt_config_file)
# Special Ciphers
ircrypt_config_section['special_cipher'] = weechat.config_new_section(
ircrypt_config_file, 'special_cipher', 0, 0,
'ircrypt_config_special_cipher_read_cb', '',
'ircrypt_config_special_cipher_write_cb', '', '', '', '', '', '', '')
if not ircrypt_config_section['special_cipher']:
weechat.config_free(ircrypt_config_file)
def ircrypt_config_reload_cb(data, config_file):
'''Handle a reload of the configuration file.
'''
global ircrypt_keys, ircrypt_cipher
# Forget Keys and ciphers to make sure they are properly reloaded and no old
# ones are left
ircrypt_keys = {}
ircrypt_cipher = {}
return weechat.config_reload(config_file)
def ircrypt_config_read():
''' Read IRCrypt configuration file (ircrypt.conf).
'''
return weechat.config_read(ircrypt_config_file)
def ircrypt_config_write():
''' Write IRCrypt configuration file (ircrypt.conf) to disk.
'''
return weechat.config_write(ircrypt_config_file)
def ircrypt_config_keys_read_cb(data, config_file, section_name, option_name,
value):
'''Read elements of the key section from the configuration file.
'''
ircrypt_keys[option_name.lower()] = value
return weechat.WEECHAT_CONFIG_OPTION_SET_OK_CHANGED
def ircrypt_config_keys_write_cb(data, config_file, section_name):
'''Write passphrases to the key section of the configuration file.
'''
weechat.config_write_line(config_file, section_name, '')
for target, key in sorted(list(ircrypt_keys.items())):
weechat.config_write_line(config_file, target.lower(), key)
return weechat.WEECHAT_RC_OK
def ircrypt_config_special_cipher_read_cb(data, config_file, section_name,
option_name, value):
'''Read elements of the key section from the configuration file.
'''
ircrypt_cipher[option_name.lower()] = value
return weechat.WEECHAT_CONFIG_OPTION_SET_OK_CHANGED
def ircrypt_config_special_cipher_write_cb(data, config_file, section_name):
'''Write passphrases to the key section of the configuration file.
'''
weechat.config_write_line(config_file, section_name, '')
for target, cipher in sorted(list(ircrypt_cipher.items())):
weechat.config_write_line(config_file, target.lower(), cipher)
return weechat.WEECHAT_RC_OK
def ircrypt_command_list():
'''List set keys and channel specific ciphers.
'''
# List keys
keys = '\n'.join([' %s : %s' % x for x in ircrypt_keys.items()])
ircrypt_info('Symmetric Keys:\n' + keys if keys else 'No symmetric keys set')
# List channel specific ciphers
ciphers = '\n'.join([' %s : %s' % x for x in ircrypt_cipher.items()])
ircrypt_info('Special ciphers:\n' + ciphers if ciphers
else 'No special ciphers set')
return weechat.WEECHAT_RC_OK
def ircrypt_command_set_keys(target, key):
'''Set key for target.
:param target: server/channel combination
:param key: Key to use for target
'''
ircrypt_keys[target.lower()] = key
ircrypt_info('Set key for %s' % target)
return weechat.WEECHAT_RC_OK
def ircrypt_command_remove_keys(target):
'''Remove key for target.
:param target: server/channel combination
'''
try:
del ircrypt_keys[target.lower()]
ircrypt_info('Removed key for %s' % target)
except KeyError:
ircrypt_info('No existing key for %s.' % target)
return weechat.WEECHAT_RC_OK
def ircrypt_command_set_cip(target, cipher):
'''Set cipher for target.
:param target: server/channel combination
:param cipher: Cipher to use for target
'''
ircrypt_cipher[target.lower()] = cipher
ircrypt_info('Set cipher %s for %s' % (cipher, target))
return weechat.WEECHAT_RC_OK
def ircrypt_command_remove_cip(target):
'''Remove cipher for target.
:param target: server/channel combination
'''
try:
del ircrypt_cipher[target.lower()]
ircrypt_info('Removed special cipher. Using default cipher for %s instead.' % target)
except KeyError:
ircrypt_info('No special cipher set for %s.' % target)
return weechat.WEECHAT_RC_OK
def ircrypt_command_plain(buffer, server, args, argv):
'''Send unencrypted message
'''
channel = ''
if (len(argv) > 2 and argv[1] == '-channel'):
channel = argv[2]
args = (args.split(' ', 2) + [''])[2]
else:
# Try to determine the server automatically
channel = weechat.buffer_get_string(buffer, 'localvar_channel')
# If there is no text, just ignore the command
if not args:
return weechat.WEECHAT_RC_OK
marker = weechat.config_string(ircrypt_config_option['unencrypted'])
msg = marker + ' ' + args.split(' ', 1)[-1]
ircrypt_message_plain['%s/%s' % (server, channel)] = (time.time(), msg)
weechat.command('', '/msg -server %s %s %s' % (server, channel, msg))
return weechat.WEECHAT_RC_OK
def ircrypt_command(data, buffer, args):
'''Hook to handle the /ircrypt weechat command.
'''
argv = args.split()
# list
if not argv or argv == ['list']:
return ircrypt_command_list()
# Check if a server was set
if (len(argv) > 2 and argv[1] == '-server'):
server = argv[2]
del argv[1:3]
args = (args.split(' ', 2) + [''])[2]
else:
# Try to determine the server automatically
server = weechat.buffer_get_string(buffer, 'localvar_server')
# All remaining commands need a server name
if not server:
ircrypt_error('Unknown Server. Please use -server to specify server', buffer)
return weechat.WEECHAT_RC_ERROR
if argv[:1] == ['plain']:
return ircrypt_command_plain(buffer, server, args, argv)
try:
target = '%s/%s' % (server, argv[1])
except:
ircrypt_error('Unknown command. Try /help ircrypt', buffer)
return weechat.WEECHAT_RC_OK
# Set keys
if argv[:1] == ['set-key']:
if len(argv) < 3:
return weechat.WEECHAT_RC_ERROR
return ircrypt_command_set_keys(target, ' '.join(argv[2:]))
# Remove keys
if argv[:1] == ['remove-key']:
if len(argv) != 2:
return weechat.WEECHAT_RC_ERROR
return ircrypt_command_remove_keys(target)
# Set special cipher for channel
if argv[:1] == ['set-cipher']:
if len(argv) < 3:
return weechat.WEECHAT_RC_ERROR
return ircrypt_command_set_cip(target, ' '.join(argv[2:]))
# Remove special cipher for channel
if argv[:1] == ['remove-cipher']:
if len(argv) != 2:
return weechat.WEECHAT_RC_ERROR
return ircrypt_command_remove_cip(target)
ircrypt_error('Unknown command. Try /help ircrypt', buffer)
return weechat.WEECHAT_RC_OK
def ircrypt_encryption_statusbar(*args):
'''This method will set the “ircrypt” element of the status bar if
encryption is enabled for the current channel. The placeholder {{cipher}}
can be used, which will be replaced with the cipher used for the current
channel.
'''
channel = weechat.buffer_get_string(weechat.current_buffer(), 'localvar_channel')
server = weechat.buffer_get_string(weechat.current_buffer(), 'localvar_server')
key = ircrypt_keys.get(('%s/%s' % (server, channel)).lower())
# Return nothing if no key is set for current channel
if not key:
return ''
# Get cipher used for current channel
cipher = weechat.config_string(ircrypt_config_option['sym_cipher'])
cipher = ircrypt_cipher.get(('%s/%s' % (server, channel)).lower(), cipher)
# Return marker, but replace {{cipher}}
marker = weechat.config_string(ircrypt_config_option['encrypted'])
return marker.replace('{{cipher}}', cipher)
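# A small illustration (option values hypothetical): with
# ircrypt.marker.encrypted set to 'ENC:{{cipher}}' and TWOFISH in use for the
# current channel, the bar item above renders as 'ENC:TWOFISH'.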
def ircrypt_find_gpg_binary(names=('gpg2', 'gpg')):
'''Check for GnuPG binary to use
:returns: Tuple with binary name and version.
'''
for binary in names:
p = subprocess.Popen([binary, '--version'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
version = p.communicate()[0].decode('utf-8').split('\n', 1)[0]
if not p.returncode:
return binary, version
return None, None
def ircrypt_check_binary():
'''If binary is not set, try to determine it automatically
'''
cfg_option = weechat.config_get('ircrypt.general.binary')
gnupg = weechat.config_string(cfg_option)
if not gnupg:
(gnupg, version) = ircrypt_find_gpg_binary(('gpg', 'gpg2'))
if not gnupg:
ircrypt_error('Automatic detection of the GnuPG binary failed and '
'nothing is set manually. You won\'t be able to use IRCrypt like '
'this. Please install GnuPG or set the path to the binary to '
'use.', '')
else:
ircrypt_info('Found %s' % version, '')
weechat.config_option_set(cfg_option, gnupg, 1)
# register script
if __name__ == '__main__' and weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR,
SCRIPT_VERSION, SCRIPT_LICENSE, SCRIPT_DESC, 'ircrypt_unload_script',
'UTF-8'):
# register the modifiers
ircrypt_config_init()
ircrypt_config_read()
ircrypt_check_binary()
weechat.hook_modifier('irc_in_privmsg', 'ircrypt_decrypt_hook', '')
weechat.hook_modifier('irc_out_privmsg', 'ircrypt_encrypt_hook', '')
weechat.hook_command('ircrypt', 'Commands to manage IRCrypt options and execute IRCrypt commands',
'[list]'
'| set-key [-server <server>] <target> <key> '
'| remove-key [-server <server>] <target> '
'| set-cipher [-server <server>] <target> <cipher> '
'| remove-cipher [-server <server>] <target> '
'| plain [-server <server>] [-channel <channel>] <message>',
SCRIPT_HELP_TEXT,
'list || set-key %(irc_channel)|%(nicks)|-server %(irc_servers) %- '
'|| remove-key %(irc_channel)|%(nicks)|-server %(irc_servers) %- '
'|| set-cipher %(irc_channel)|-server %(irc_servers) %- '
'|| remove-cipher |%(irc_channel)|-server %(irc_servers) %- '
'|| plain |-channel %(irc_channel)|-server %(irc_servers) %-',
'ircrypt_command', '')
weechat.bar_item_new('ircrypt', 'ircrypt_encryption_statusbar', '')
weechat.hook_signal('ircrypt_buffer_opened', 'update_encryption_status', '')
def ircrypt_unload_script():
'''Hook to ensure the configuration is properly written to disk when the
script is unloaded.
'''
ircrypt_config_write()
return weechat.WEECHAT_RC_OK
|
IRCrypt/ircrypt-weechat
|
ircrypt.py
|
Python
|
gpl-3.0
| 23,201
|
[
"VisIt"
] |
48252e61cfea28412c4935f20eead0a40052239fc80493ea5345ca7924535e96
|
"""
Define common steps for instructor dashboard acceptance tests.
"""
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from __future__ import absolute_import
from lettuce import world, step
from mock import patch
from nose.tools import assert_in # pylint: disable=no-name-in-module
from courseware.tests.factories import StaffFactory, InstructorFactory
@step(u'Given I am "([^"]*)" for a very large course')
def make_staff_or_instructor_for_large_course(step, role):
make_large_course(step, role)
@patch.dict('courseware.access.settings.FEATURES', {"MAX_ENROLLMENT_INSTR_BUTTONS": 0})
def make_large_course(step, role):
i_am_staff_or_instructor(step, role)
@step(u'Given I am "([^"]*)" for a course')
def i_am_staff_or_instructor(step, role): # pylint: disable=unused-argument
## In summary: makes a test course, makes a new Staff or Instructor user
## (depending on `role`), and logs that user in to the course
# Store the role
assert_in(role, ['instructor', 'staff'])
# Clear existing courses to avoid conflicts
world.clear_courses()
# Create a new course
course = world.CourseFactory.create(
org='edx',
number='999',
display_name='Test Course'
)
world.course_key = course.id
world.role = 'instructor'
# Log in as an instructor or staff member for the course
if role == 'instructor':
# Make & register an instructor for the course
world.instructor = InstructorFactory(course_key=world.course_key)
world.enroll_user(world.instructor, world.course_key)
world.log_in(
username=world.instructor.username,
password='test',
email=world.instructor.email,
name=world.instructor.profile.name
)
else:
world.role = 'staff'
# Make & register a staff member
world.staff = StaffFactory(course_key=world.course_key)
world.enroll_user(world.staff, world.course_key)
world.log_in(
username=world.staff.username,
password='test',
email=world.staff.email,
name=world.staff.profile.name
)
def go_to_section(section_name):
# section name should be one of
# course_info, membership, student_admin, data_download, analytics, send_email
world.visit(u'/courses/{}'.format(world.course_key))
world.css_click(u'a[href="/courses/{}/instructor"]'.format(world.course_key))
world.css_click('a[data-section="{0}"]'.format(section_name))
@step(u'I click "([^"]*)"')
def click_a_button(step, button): # pylint: disable=unused-argument
if button == "Generate Grade Report":
# Go to the data download section of the instructor dash
go_to_section("data_download")
# Click generate grade report button
world.css_click('input[name="calculate-grades-csv"]')
# Expect to see a message that grade report is being generated
expected_msg = "Your grade report is being generated!" \
" You can view the status of the generation" \
" task in the 'Pending Tasks' section."
world.wait_for_visible('#report-request-response')
assert_in(
expected_msg, world.css_text('#report-request-response'),
msg="Could not find grade report generation success message."
)
elif button == "Grading Configuration":
# Go to the data download section of the instructor dash
go_to_section("data_download")
world.css_click('input[name="dump-gradeconf"]')
elif button == "List enrolled students' profile information":
# Go to the data download section of the instructor dash
go_to_section("data_download")
world.css_click('input[name="list-profiles"]')
elif button == "Download profile information as a CSV":
# Go to the data download section of the instructor dash
go_to_section("data_download")
world.css_click('input[name="list-profiles-csv"]')
else:
raise ValueError("Unrecognized button option " + button)
@step(u'I visit the "([^"]*)" tab')
def visit_a_tab(step, tab_name):  # pylint: disable=unused-argument
# course_info, membership, student_admin, data_download, analytics, send_email
tab_name_dict = {
'Course Info': 'course_info',
'Membership': 'membership',
'Student Admin': 'student_admin',
'Data Download': 'data_download',
'Analytics': 'analytics',
'Email': 'send_email',
}
go_to_section(tab_name_dict[tab_name])
| zadgroup/edx-platform | lms/djangoapps/instructor/features/common.py | Python | agpl-3.0 | 4,621 | ["VisIt"] | dc591d92814d2a5b818db76643cf3ff32fb74daed347356038871410f43d9aba |
'''
Demonstrates optimizing the shape parameter for the RBFInterpolant when using
an RBF other than a polyharmonic spline
'''
import numpy as np
import matplotlib.pyplot as plt
from rbf.interpolate import RBFInterpolant
np.random.seed(0)
def frankes_test_function(x):
x1, x2 = x[:, 0], x[:, 1]
term1 = 0.75 * np.exp(-(9*x1-2)**2/4 - (9*x2-2)**2/4)
term2 = 0.75 * np.exp(-(9*x1+1)**2/49 - (9*x2+1)/10)
term3 = 0.5 * np.exp(-(9*x1-7)**2/4 - (9*x2-3)**2/4)
term4 = -0.2 * np.exp(-(9*x1-4)**2 - (9*x2-7)**2)
y = term1 + term2 + term3 + term4
return y
phi = 'ga' # use a gaussian RBF
degree = 0 # degree of the added polynomial
y = np.random.random((100, 2)) # observation points
d = frankes_test_function(y) # observed values at y
x = np.mgrid[0:1:200j, 0:1:200j].reshape(2, -1).T # interpolation points
k = 5 # number of subgroups used for k-fold cross validation
def cross_validation(epsilon):
groups = [range(i, len(y), k) for i in range(k)]
error = 0.0
for i in range(k):
train = np.hstack([groups[j] for j in range(k) if j != i])
test = groups[i]
interp = RBFInterpolant(
y[train],
d[train],
phi=phi,
order=degree,
eps=epsilon
)
error += ((interp(y[test]) - d[test])**2).sum()
mse = error / len(y)
return mse
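# As an alternative to the grid search below (illustrative sketch, not part of
# this demo), a bounded scalar minimizer over log10(epsilon) would locate the
# optimum with far fewer evaluations of cross_validation:
#
#   from scipy.optimize import minimize_scalar
#   res = minimize_scalar(lambda log_eps: cross_validation(10**log_eps),
#                         bounds=(-0.5, 2.5), method='bounded')
#   best_epsilon = 10**res.x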
# range of epsilon values to test
test_epsilons = 10**np.linspace(-0.5, 2.5, 1000)
mses = [cross_validation(eps) for eps in test_epsilons]
best_mse = np.min(mses)
best_epsilon = test_epsilons[np.argmin(mses)]
print('best epsilon: %.2e (MSE=%.2e)' % (best_epsilon, best_mse))
interp = RBFInterpolant(y, d, phi=phi, order=degree, eps=best_epsilon)
fig, ax = plt.subplots()
ax.loglog(test_epsilons, mses, 'k-')
ax.grid(ls=':', color='k')
ax.set_xlabel(r'$\epsilon$')
ax.set_ylabel('cross validation MSE')
ax.plot(best_epsilon, best_mse, 'ko')
ax.text(
best_epsilon,
best_mse,
'(%.2e, %.2e)' % (best_epsilon, best_mse),
va='top'
)
fig.tight_layout()
fig, axs = plt.subplots(2, 1, figsize=(6, 8))
p = axs[0].tripcolor(x[:, 0], x[:, 1], interp(x))
axs[0].scatter(y[:, 0], y[:, 1], c='k', s=5)
axs[0].set_xlim(0, 1)
axs[0].set_ylim(0, 1)
axs[0].set_title(
r'RBF interpolant ($\phi$=%s, degree=%s, $\epsilon$=%.2f)'
% (phi, degree, best_epsilon)
)
axs[0].set_xlabel('$x_0$')
axs[0].set_ylabel('$x_1$')
axs[0].grid(ls=':', color='k')
axs[0].set_aspect('equal')
fig.colorbar(p, ax=axs[0])
error = np.abs(interp(x) - frankes_test_function(x))
p = axs[1].tripcolor(x[:, 0], x[:, 1], error)
axs[1].scatter(y[:, 0], y[:, 1], c='k', s=5)
axs[1].set_xlim(0, 1)
axs[1].set_ylim(0, 1)
axs[1].set_title('|error|')
axs[1].set_xlabel('$x_0$')
axs[1].set_ylabel('$x_1$')
axs[1].grid(ls=':', color='k')
axs[1].set_aspect('equal')
fig.colorbar(p, ax=axs[1])
fig.tight_layout()
plt.show()
| treverhines/RBF | docs/scripts/interpolate.d.py | Python | mit | 2,870 | ["Gaussian"] | 30ae533eff2b1a1d6b01e0d0bf8880c70a19b038a839c5172068d31312ee2b7d |
"""Tests for the Amber Electric Data Coordinator."""
from __future__ import annotations
from collections.abc import Generator
from unittest.mock import Mock, patch
from amberelectric import ApiException
from amberelectric.model.channel import Channel, ChannelType
from amberelectric.model.current_interval import CurrentInterval
from amberelectric.model.interval import SpikeStatus
from amberelectric.model.site import Site
from dateutil import parser
import pytest
from homeassistant.components.amberelectric.coordinator import AmberUpdateCoordinator
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import UpdateFailed
from tests.components.amberelectric.helpers import (
CONTROLLED_LOAD_CHANNEL,
FEED_IN_CHANNEL,
GENERAL_AND_CONTROLLED_SITE_ID,
GENERAL_AND_FEED_IN_SITE_ID,
GENERAL_CHANNEL,
GENERAL_ONLY_SITE_ID,
generate_current_interval,
)
@pytest.fixture(name="current_price_api")
def mock_api_current_price() -> Generator:
"""Return an authentication error."""
instance = Mock()
general_site = Site(
GENERAL_ONLY_SITE_ID,
"11111111111",
[Channel(identifier="E1", type=ChannelType.GENERAL)],
)
general_and_controlled_load = Site(
GENERAL_AND_CONTROLLED_SITE_ID,
"11111111112",
[
Channel(identifier="E1", type=ChannelType.GENERAL),
Channel(identifier="E2", type=ChannelType.CONTROLLED_LOAD),
],
)
general_and_feed_in = Site(
GENERAL_AND_FEED_IN_SITE_ID,
"11111111113",
[
Channel(identifier="E1", type=ChannelType.GENERAL),
Channel(identifier="E2", type=ChannelType.FEED_IN),
],
)
instance.get_sites.return_value = [
general_site,
general_and_controlled_load,
general_and_feed_in,
]
with patch("amberelectric.api.AmberApi.create", return_value=instance):
yield instance
async def test_fetch_general_site(hass: HomeAssistant, current_price_api: Mock) -> None:
"""Test fetching a site with only a general channel."""
current_price_api.get_current_price.return_value = GENERAL_CHANNEL
data_service = AmberUpdateCoordinator(hass, current_price_api, GENERAL_ONLY_SITE_ID)
result = await data_service._async_update_data()
current_price_api.get_current_price.assert_called_with(
GENERAL_ONLY_SITE_ID, next=48
)
assert result["current"].get("general") == GENERAL_CHANNEL[0]
assert result["forecasts"].get("general") == [
GENERAL_CHANNEL[1],
GENERAL_CHANNEL[2],
GENERAL_CHANNEL[3],
]
assert result["current"].get("controlled_load") is None
assert result["forecasts"].get("controlled_load") is None
assert result["current"].get("feed_in") is None
assert result["forecasts"].get("feed_in") is None
assert result["grid"]["renewables"] == round(GENERAL_CHANNEL[0].renewables)
assert result["grid"]["price_spike"] == "none"
async def test_fetch_no_general_site(
hass: HomeAssistant, current_price_api: Mock
) -> None:
"""Test fetching a site with no general channel."""
current_price_api.get_current_price.return_value = CONTROLLED_LOAD_CHANNEL
data_service = AmberUpdateCoordinator(hass, current_price_api, GENERAL_ONLY_SITE_ID)
with pytest.raises(UpdateFailed):
await data_service._async_update_data()
current_price_api.get_current_price.assert_called_with(
GENERAL_ONLY_SITE_ID, next=48
)
async def test_fetch_api_error(hass: HomeAssistant, current_price_api: Mock) -> None:
"""Test that the old values are maintained if a second call fails."""
current_price_api.get_current_price.return_value = GENERAL_CHANNEL
data_service = AmberUpdateCoordinator(hass, current_price_api, GENERAL_ONLY_SITE_ID)
result = await data_service._async_update_data()
current_price_api.get_current_price.assert_called_with(
GENERAL_ONLY_SITE_ID, next=48
)
assert result["current"].get("general") == GENERAL_CHANNEL[0]
assert result["forecasts"].get("general") == [
GENERAL_CHANNEL[1],
GENERAL_CHANNEL[2],
GENERAL_CHANNEL[3],
]
assert result["current"].get("controlled_load") is None
assert result["forecasts"].get("controlled_load") is None
assert result["current"].get("feed_in") is None
assert result["forecasts"].get("feed_in") is None
assert result["grid"]["renewables"] == round(GENERAL_CHANNEL[0].renewables)
current_price_api.get_current_price.side_effect = ApiException(status=403)
with pytest.raises(UpdateFailed):
await data_service._async_update_data()
assert result["current"].get("general") == GENERAL_CHANNEL[0]
assert result["forecasts"].get("general") == [
GENERAL_CHANNEL[1],
GENERAL_CHANNEL[2],
GENERAL_CHANNEL[3],
]
assert result["current"].get("controlled_load") is None
assert result["forecasts"].get("controlled_load") is None
assert result["current"].get("feed_in") is None
assert result["forecasts"].get("feed_in") is None
assert result["grid"]["renewables"] == round(GENERAL_CHANNEL[0].renewables)
assert result["grid"]["price_spike"] == "none"
async def test_fetch_general_and_controlled_load_site(
hass: HomeAssistant, current_price_api: Mock
) -> None:
"""Test fetching a site with a general and controlled load channel."""
current_price_api.get_current_price.return_value = (
GENERAL_CHANNEL + CONTROLLED_LOAD_CHANNEL
)
data_service = AmberUpdateCoordinator(
hass, current_price_api, GENERAL_AND_CONTROLLED_SITE_ID
)
result = await data_service._async_update_data()
current_price_api.get_current_price.assert_called_with(
GENERAL_AND_CONTROLLED_SITE_ID, next=48
)
assert result["current"].get("general") == GENERAL_CHANNEL[0]
assert result["forecasts"].get("general") == [
GENERAL_CHANNEL[1],
GENERAL_CHANNEL[2],
GENERAL_CHANNEL[3],
]
assert result["current"].get("controlled_load") is CONTROLLED_LOAD_CHANNEL[0]
assert result["forecasts"].get("controlled_load") == [
CONTROLLED_LOAD_CHANNEL[1],
CONTROLLED_LOAD_CHANNEL[2],
CONTROLLED_LOAD_CHANNEL[3],
]
assert result["current"].get("feed_in") is None
assert result["forecasts"].get("feed_in") is None
assert result["grid"]["renewables"] == round(GENERAL_CHANNEL[0].renewables)
assert result["grid"]["price_spike"] == "none"
async def test_fetch_general_and_feed_in_site(
hass: HomeAssistant, current_price_api: Mock
) -> None:
"""Test fetching a site with a general and feed_in channel."""
current_price_api.get_current_price.return_value = GENERAL_CHANNEL + FEED_IN_CHANNEL
data_service = AmberUpdateCoordinator(
hass, current_price_api, GENERAL_AND_FEED_IN_SITE_ID
)
result = await data_service._async_update_data()
current_price_api.get_current_price.assert_called_with(
GENERAL_AND_FEED_IN_SITE_ID, next=48
)
assert result["current"].get("general") == GENERAL_CHANNEL[0]
assert result["forecasts"].get("general") == [
GENERAL_CHANNEL[1],
GENERAL_CHANNEL[2],
GENERAL_CHANNEL[3],
]
assert result["current"].get("controlled_load") is None
assert result["forecasts"].get("controlled_load") is None
assert result["current"].get("feed_in") is FEED_IN_CHANNEL[0]
assert result["forecasts"].get("feed_in") == [
FEED_IN_CHANNEL[1],
FEED_IN_CHANNEL[2],
FEED_IN_CHANNEL[3],
]
assert result["grid"]["renewables"] == round(GENERAL_CHANNEL[0].renewables)
assert result["grid"]["price_spike"] == "none"
async def test_fetch_potential_spike(
hass: HomeAssistant, current_price_api: Mock
) -> None:
"""Test fetching a site with only a general channel."""
general_channel: list[CurrentInterval] = [
generate_current_interval(
ChannelType.GENERAL, parser.parse("2021-09-21T08:30:00+10:00")
),
]
general_channel[0].spike_status = SpikeStatus.POTENTIAL
current_price_api.get_current_price.return_value = general_channel
data_service = AmberUpdateCoordinator(hass, current_price_api, GENERAL_ONLY_SITE_ID)
result = await data_service._async_update_data()
assert result["grid"]["price_spike"] == "potential"
async def test_fetch_spike(hass: HomeAssistant, current_price_api: Mock) -> None:
"""Test fetching a site with only a general channel."""
general_channel: list[CurrentInterval] = [
generate_current_interval(
ChannelType.GENERAL, parser.parse("2021-09-21T08:30:00+10:00")
),
]
general_channel[0].spike_status = SpikeStatus.SPIKE
current_price_api.get_current_price.return_value = general_channel
data_service = AmberUpdateCoordinator(hass, current_price_api, GENERAL_ONLY_SITE_ID)
result = await data_service._async_update_data()
assert result["grid"]["price_spike"] == "spike"
| rohitranjan1991/home-assistant | tests/components/amberelectric/test_coordinator.py | Python | mit | 9,047 | ["Amber"] | 933b418e24e3b9ebc8cdc11216c59fadea79146700dacc957c5a9b92251b7223 |
# numkit --- data fitting
# Copyright (c) 2010 Oliver Beckstein <orbeckst@gmail.com>
# Released under the "Modified BSD Licence" (see COPYING).
"""
:mod:`numkit.fitting` --- Fitting data
======================================
The module contains functions to do least square fits of functions of
one variable f(x) to data points (x,y).
Example
-------
For example, to fit an un-normalized Gaussian with :class:`FitGauss` to
data distributed with mean 5.0 and standard deviation 3.0::
from numkit.fitting import FitGauss
import numpy, numpy.random
# generate suitably noisy data
mu, sigma = 5.0, 3.0
Y,edges = numpy.histogram(sigma*numpy.random.randn(10000), bins=100, density=True)
X = 0.5*(edges[1:]+edges[:-1]) + mu
g = FitGauss(X, Y)
print(g.parameters)
# [ 4.98084541 3.00044102 1.00069061]
print(numpy.array([mu, sigma, 1]) - g.parameters)
# [ 0.01915459 -0.00044102 -0.00069061]
import matplotlib.pyplot as plt
plt.plot(X, Y, 'ko', label="data")
plt.plot(X, g.fit(X), 'r-', label="fit")
.. figure:: /numkit/FitGauss.png
:scale: 40 %
:alt: Gaussian fit with data points
A Gaussian (red) was fit to the data points (black circles) with
the :class:`numkit.fitting.FitGauss` class.
If the initial parameters for the least square optimization do not
lead to a solution then one can provide customized starting values in
the *parameters* keyword argument::
g = FitGauss(X, Y, parameters=[10, 1, 1])
The *parameters* have different meaning for the different fit
functions; the documentation for each function shows them in the
context of the fit function.
Creating new fit functions
--------------------------
New fit function classes can be derived from :class:`FitFunc`. The
documentation and the methods :meth:`FitFunc.f_factory` and
:meth:`FitFunc.initial_values` must be overridden. For example, the
class :class:`FitGauss` is implemented as ::
class FitGauss(FitFunc):
'''y = f(x) = p[2] * 1/sqrt(2*pi*p[1]**2) * exp(-(x-p[0])**2/(2*p[1]**2))'''
def f_factory(self):
def fitfunc(p,x):
return p[2] * 1.0/(p[1]*numpy.sqrt(2*numpy.pi)) * numpy.exp(-(x-p[0])**2/(2*p[1]**2))
return fitfunc
def initial_values(self):
return [0.0,1.0,0.0]
The function to be fitted is defined in :func:`fitfunc`. The
parameters are accessed as ``p[0]``, ``p[1]``, ... For each parameter,
a suitable initial value must be provided.
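As a further illustrative sketch (a hypothetical class, not part of
numkit), a quadratic fit could be written the same way ::

    class FitQuadratic(FitFunc):
        '''y = f(x) = p[0]*x**2 + p[1]*x + p[2]'''
        def f_factory(self):
            def fitfunc(p, x):
                return p[0]*x**2 + p[1]*x + p[2]
            return fitfunc
        def initial_values(self):
            return [1.0, 1.0, 0.0]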
Functions and classes
---------------------
.. autofunction:: Pearson_r
.. autofunction:: linfit
.. autoclass:: FitFunc
:members:
.. autoclass:: FitLin
.. autoclass:: FitExp
.. autoclass:: FitExp2
.. autoclass:: FitGauss
"""
import numpy
import logging
logger = logging.getLogger("numkit.fitting")
def Pearson_r(x,y):
"""Pearson's r (correlation coefficient).
Pearson(x,y) --> correlation coefficient
*x* and *y* are arrays of same length.
Historical note -- Naive implementation of Pearson's r ::
Ex = scipy.stats.mean(x)
Ey = scipy.stats.mean(y)
covxy = numpy.sum((x-Ex)*(y-Ey))
r = covxy/math.sqrt(numpy.sum((x-Ex)**2)*numpy.sum((y-Ey)**2))
"""
return numpy.corrcoef(x,y)[1,0]
def linfit(x,y,dy=None):
"""Fit a straight line y = a + bx to the data in *x* and *y*.
Errors on y should be provided in dy in order to assess the
goodness of the fit and derive errors on the parameters.
linfit(x,y[,dy]) --> result_dict
Fit y = a + bx to the data in x and y by analytically minimizing
chi^2. dy holds the standard deviations of the individual y_i. If
dy is not given, they are assumed to be constant (note that in
this case Q is set to 1 and it is meaningless and chi2 is
normalised to unit standard deviation on all points!).
Returns the parameters a and b, their uncertainties sigma_a and
sigma_b, and their correlation coefficient r_ab; it also returns
the chi-squared statistic and the goodness-of-fit probability Q
(that the fit would have chi^2 this large or larger; Q < 10^-2
indicates that the model is bad --- Q is the probability that a
value of chi-square as _poor_ as the calculated statistic chi2
should occur by chance.)
:Returns: result_dict with components
intercept, sigma_intercept
a +/- sigma_a
slope, sigma_slope
b +/- sigma_b
parameter_correlation
correlation coefficient r_ab between a and b
chi_square
chi^2 test statistic
Q_fit
goodness-of-fit probability
Based on 'Numerical Recipes in C', Ch 15.2.
"""
if dy is None:
dy = []
import scipy.stats
n = len(x)
m = len(y)
if n != m:
raise ValueError("lengths of x and y must match: %s != %s" % (n, m))
try:
have_dy = (len(dy) > 0)
except TypeError:
have_dy = False
if not have_dy:
dy = numpy.ones((n,), float)  # numpy.float was removed in NumPy 1.24
x = numpy.asarray(x)
y = numpy.asarray(y)
dy = numpy.asarray(dy)
s2 = dy*dy
S = numpy.add.reduce(1/s2)
Sx = numpy.add.reduce(x/s2)
Sy = numpy.add.reduce(y/s2)
Sxx = numpy.add.reduce(x*x/s2)
Sxy = numpy.add.reduce(x*y/s2)
t = (x - Sx/S)/dy
Stt = numpy.add.reduce(t*t)
b = numpy.add.reduce(t*y/dy)/Stt
a = (Sy - Sx*b)/S
sa = numpy.sqrt((1 + (Sx*Sx)/(S*Stt))/S)
sb = numpy.sqrt(1/Stt)
covab = -Sx/(S*Stt)
r = covab/(sa*sb)
chi2 = numpy.add.reduce(((y-a-b*x)/dy)**2)
if not have_dy:
# estimate error if none were provided
sigmadata = numpy.sqrt(chi2/(n-2))
sa *= sigmadata
sb *= sigmadata
Q = 1.0
else:
Q = scipy.stats.chi2.sf(chi2, n-2)  # chisqprob was removed from modern SciPy
return {"intercept":a,"slope":b,
"sigma_intercept":sa,"sigma_slope":sb,
"parameter_correlation":r, "chi_square":chi2, "Q":Q}
class FitFunc(object):
"""Fit a function f to data (x,y) using the method of least squares.
The function is fitted when the object is created, using
:func:`scipy.optimize.leastsq`. One must derive from the base class
:class:`FitFunc` and override the :meth:`FitFunc.f_factory` (including
the definition of an appropriate local :func:`fitfunc` function) and
:meth:`FitFunc.initial_values` appropriately. See the examples for a
linear fit :class:`FitLin`, a 1-parameter exponential fit :class:`FitExp`,
or a 3-parameter double exponential fit :class:`FitExp2`.
The object provides two attributes
:attr:`FitFunc.parameters`
list of parameters of the fit
:attr:`FitFunc.message`
message from :func:`scipy.optimize.leastsq`
After a successful fit, the fitted function can be applied to any data (a
1D-numpy array) with :meth:`FitFunc.fit`.
"""
def __init__(self,x,y,parameters=None):
import scipy.optimize
_x = numpy.asarray(x)
_y = numpy.asarray(y)
p0 = self._get_initial_values(parameters)
fitfunc = self.f_factory()
def errfunc(p,x,y):
return fitfunc(p,x) - y # residuals
p,msg = scipy.optimize.leastsq(errfunc,p0[:],args=(_x,_y))
try:
p[0]
self.parameters = p
except (TypeError,IndexError,):
# TypeError for int p, IndexError for numpy scalar (new scipy)
self.parameters = [p]
self.message = msg
def f_factory(self):
"""Stub for fit function factory, which returns the fit function.
Override for derived classes.
"""
def fitfunc(p,x):
# return f(p,x); should be a numpy ufunc
raise NotImplementedError("base class must be extended for each fit function")
return fitfunc
def _get_initial_values(self, parameters=None):
p0 = numpy.asarray(self.initial_values())
if parameters is not None:
try:
p0[:] = parameters
except ValueError:
raise ValueError("Wrong number of custom initital values %r, should be something like %r" % (parameters, p0))
return p0
def initial_values(self):
"""List of initital guesses for all parameters p[]"""
# return [1.0, 2.0, 0.5]
raise NotImplementedError("base class must be extended for each fit function")
def fit(self,x):
"""Applies the fit to all *x* values"""
fitfunc = self.f_factory()
return fitfunc(self.parameters,numpy.asarray(x))
class FitExp(FitFunc):
"""y = f(x) = exp(-p[0]*x)"""
def f_factory(self):
def fitfunc(p,x):
return numpy.exp(-p[0]*x) # exp(-B*x)
return fitfunc
def initial_values(self):
return [1.0]
def __repr__(self):
return "<FitExp "+str(self.parameters)+">"
class FitExp2(FitFunc):
"""y = f(x) = p[0]*exp(-p[1]*x) + (1-p[0])*exp(-p[2]*x)"""
def f_factory(self):
def fitfunc(p,x):
return p[0]*numpy.exp(-p[1]*x) + (1-p[0])*numpy.exp(-p[2]*x)
return fitfunc
def initial_values(self):
return [0.5,0.1,1e-4]
def __repr__(self):
return "<FitExp2"+str(self.parameters)+">"
class FitLin(FitFunc):
"""y = f(x) = p[0]*x + p[1]"""
def f_factory(self):
def fitfunc(p,x):
return p[0]*x + p[1]
return fitfunc
def initial_values(self):
return [1.0,0.0]
def __repr__(self):
return "<FitLin"+str(self.parameters)+">"
class FitGauss(FitFunc):
"""y = f(x) = p[2] * 1/sqrt(2*pi*p[1]**2) * exp(-(x-p[0])**2/(2*p[1]**2))
Fits an un-normalized gaussian (height scaled with parameter p[2]).
* p[0] == mean $\mu$
* p[1] == standard deviation $\sigma$
* p[2] == scale $a$
"""
def f_factory(self):
def fitfunc(p,x):
return p[2] * 1.0/(p[1]*numpy.sqrt(2*numpy.pi)) * numpy.exp(-(x-p[0])**2/(2*p[1]**2))
return fitfunc
def initial_values(self):
return [0.0,1.0,0.0]
def __repr__(self):
return "<FitGauss"+str(self.parameters)+">"
| pslacerda/GromacsWrapper | numkit/fitting.py | Python | gpl-3.0 | 10,062 | ["Gaussian"] | 44310661ba953ac4bfe9b6d68da12104e3bc89c3e505e16cc0ab14776d93f786 |
"""
RPN - Region Proposal Network
"""
import sonnet as snt
import tensorflow as tf
from sonnet.python.modules.conv import Conv2D
from .rpn_target import RPNTarget
from .rpn_proposal import RPNProposal
from luminoth.utils.losses import smooth_l1_loss
from luminoth.utils.vars import (
get_initializer, layer_summaries, variable_summaries,
get_activation_function
)
class RPN(snt.AbstractModule):
def __init__(self, num_anchors, config, debug=False, seed=None,
name='rpn'):
"""RPN - Region Proposal Network.
Given an image (as feature map) and a fixed set of anchors, the RPN
will learn weights to adjust those anchors so they better look like the
ground truth objects, as well as scoring them by "objectness" (ie. how
likely they are to be an object vs background).
The final result will be a set of rectangular boxes ("proposals"),
each associated with an objectness score.
Note: this module can be used independently of Faster R-CNN.
"""
super(RPN, self).__init__(name=name)
self._num_anchors = num_anchors
self._num_channels = config.num_channels
self._kernel_shape = config.kernel_shape
self._debug = debug
self._seed = seed
self._rpn_initializer = get_initializer(
config.rpn_initializer, seed=seed
)
# According to the Faster R-CNN paper, these layers should be initialized
# "from a zero-mean Gaussian distribution with standard deviation 0.01".
self._cls_initializer = get_initializer(
config.cls_initializer, seed=seed
)
self._bbox_initializer = get_initializer(
config.bbox_initializer, seed=seed
)
self._regularizer = tf.contrib.layers.l2_regularizer(
scale=config.l2_regularization_scale
)
self._l1_sigma = config.l1_sigma
# We could use normal relu without any problems.
self._rpn_activation = get_activation_function(
config.activation_function
)
self._config = config
def _instantiate_layers(self):
"""Instantiates all convolutional modules used in the RPN."""
self._rpn = Conv2D(
output_channels=self._num_channels,
kernel_shape=self._kernel_shape,
initializers={'w': self._rpn_initializer},
regularizers={'w': self._regularizer},
name='conv'
)
self._rpn_cls = Conv2D(
output_channels=self._num_anchors * 2, kernel_shape=[1, 1],
initializers={'w': self._cls_initializer},
regularizers={'w': self._regularizer},
padding='VALID', name='cls_conv'
)
# BBox prediction is 4 values * number of anchors.
self._rpn_bbox = Conv2D(
output_channels=self._num_anchors * 4, kernel_shape=[1, 1],
initializers={'w': self._bbox_initializer},
regularizers={'w': self._regularizer},
padding='VALID', name='bbox_conv'
)
def _build(self, conv_feature_map, im_shape, all_anchors,
gt_boxes=None, is_training=False):
"""Builds the RPN model subgraph.
Args:
conv_feature_map: A Tensor with the output of some pretrained
network. Its dimensions should be
`[1, feature_map_height, feature_map_width, depth]` where depth
is 512 for the default layer in VGG and 1024 for the default
layer in ResNet.
im_shape: A Tensor with the shape of the original image.
all_anchors: A Tensor with all the anchor bounding boxes. Its shape
should be
[feature_map_height * feature_map_width * total_anchors, 4]
gt_boxes: A Tensor with the ground-truth boxes for the image.
Its dimensions should be `[total_gt_boxes, 5]`, and it should
consist of [x1, y1, x2, y2, label], being (x1, y1) -> top left
point, and (x2, y2) -> bottom right point of the bounding box.
Returns:
prediction_dict: A dict with the following keys:
proposals: A Tensor with a variable number of proposals for
objects on the image.
scores: A Tensor with a "objectness" probability for each
proposal. The score should be the output of the softmax for
object.
If training is True, then some more Tensors are added to the
prediction dictionary to be used for calculating the loss.
rpn_cls_prob: A Tensor with the probability of being
background and foreground for each anchor.
rpn_cls_score: A Tensor with the cls score of being background
and foreground for each anchor (the input for the softmax).
rpn_bbox_pred: A Tensor with the bounding box regression for
each anchor.
rpn_cls_target: A Tensor with the target for each of the
anchors. The shape is [num_anchors,].
rpn_bbox_target: A Tensor with the bbox regression target for
each of the anchors. Anchors whose cls target is ignored
still have a bbox target entry, which is filled with zeroes
in that case.
"""
# We start with a common conv layer applied to the feature map.
self._instantiate_layers()
self._proposal = RPNProposal(
self._num_anchors, self._config.proposals, debug=self._debug
)
self._anchor_target = RPNTarget(
self._num_anchors, self._config.target, seed=self._seed
)
prediction_dict = {}
# Get the RPN feature using a simple conv net. Activation function
# can be set to empty.
rpn_conv_feature = self._rpn(conv_feature_map)
rpn_feature = self._rpn_activation(rpn_conv_feature)
# Then we apply separate conv layers for classification and regression.
rpn_cls_score_original = self._rpn_cls(rpn_feature)
rpn_bbox_pred_original = self._rpn_bbox(rpn_feature)
# rpn_cls_score_original has shape (1, H, W, num_anchors * 2)
# rpn_bbox_pred_original has shape (1, H, W, num_anchors * 4)
# where H, W are height and width of the pretrained feature map.
# Convert (flatten) `rpn_cls_score_original` which has two scalars per
# anchor per location to be able to apply softmax.
rpn_cls_score = tf.reshape(rpn_cls_score_original, [-1, 2])
# Now that `rpn_cls_score` has shape (H * W * num_anchors, 2), we apply
# softmax to the last dim.
rpn_cls_prob = tf.nn.softmax(rpn_cls_score)
prediction_dict['rpn_cls_prob'] = rpn_cls_prob
prediction_dict['rpn_cls_score'] = rpn_cls_score
# Flatten bounding box delta prediction for easy manipulation.
# We end up with `rpn_bbox_pred` having shape (H * W * num_anchors, 4).
rpn_bbox_pred = tf.reshape(rpn_bbox_pred_original, [-1, 4])
prediction_dict['rpn_bbox_pred'] = rpn_bbox_pred
# We have to convert bbox deltas to usable bounding boxes and remove
# redundant ones using Non Maximum Suppression (NMS).
proposal_prediction = self._proposal(
rpn_cls_prob, rpn_bbox_pred, all_anchors, im_shape)
prediction_dict['proposals'] = proposal_prediction['proposals']
prediction_dict['scores'] = proposal_prediction['scores']
if self._debug:
prediction_dict['proposal_prediction'] = proposal_prediction
if gt_boxes is not None:
# When training we use a separate module to calculate the target
# values we want to output.
(rpn_cls_target, rpn_bbox_target,
rpn_max_overlap) = self._anchor_target(
all_anchors, gt_boxes, im_shape
)
prediction_dict['rpn_cls_target'] = rpn_cls_target
prediction_dict['rpn_bbox_target'] = rpn_bbox_target
if self._debug:
prediction_dict['rpn_max_overlap'] = rpn_max_overlap
variable_summaries(rpn_bbox_target, 'rpn_bbox_target', 'full')
# Variables summaries.
variable_summaries(prediction_dict['scores'], 'rpn_scores', 'reduced')
variable_summaries(rpn_cls_prob, 'rpn_cls_prob', 'reduced')
variable_summaries(rpn_bbox_pred, 'rpn_bbox_pred', 'reduced')
if self._debug:
variable_summaries(rpn_feature, 'rpn_feature', 'full')
variable_summaries(
rpn_cls_score_original, 'rpn_cls_score_original', 'full')
variable_summaries(
rpn_bbox_pred_original, 'rpn_bbox_pred_original', 'full')
# Layer summaries.
layer_summaries(self._rpn, 'full')
layer_summaries(self._rpn_cls, 'full')
layer_summaries(self._rpn_bbox, 'full')
return prediction_dict
def loss(self, prediction_dict):
"""
Returns the cost for the Region Proposal Network based on:
Args:
rpn_cls_score: Score for being an object or not for each anchor
in the image. Shape: (num_anchors, 2)
rpn_cls_target: Ground truth labeling for each anchor. Should be
* 1: for positive labels
* 0: for negative labels
* -1: for labels we should ignore.
Shape: (num_anchors, )
rpn_bbox_target: Bounding box output delta target for rpn.
Shape: (num_anchors, 4)
rpn_bbox_pred: Bounding box output delta prediction for rpn.
Shape: (num_anchors, 4)
Returns:
Multiloss between cls probability and bbox target.
"""
rpn_cls_score = prediction_dict['rpn_cls_score']
rpn_cls_target = prediction_dict['rpn_cls_target']
rpn_bbox_target = prediction_dict['rpn_bbox_target']
rpn_bbox_pred = prediction_dict['rpn_bbox_pred']
with tf.variable_scope('RPNLoss'):
# Flatten already flat Tensor for usage as boolean mask filter.
rpn_cls_target = tf.cast(tf.reshape(
rpn_cls_target, [-1]), tf.int32, name='rpn_cls_target')
# Transform to boolean tensor mask for not ignored.
labels_not_ignored = tf.not_equal(
rpn_cls_target, -1, name='labels_not_ignored')
# Now we only have the labels we are going to compare with the
# cls probability.
labels = tf.boolean_mask(rpn_cls_target, labels_not_ignored)
cls_score = tf.boolean_mask(rpn_cls_score, labels_not_ignored)
# We need to transform `labels` to `cls_score` shape.
# convert [1, 0] to [[0, 1], [1, 0]] for ce with logits.
cls_target = tf.one_hot(labels, depth=2)
# Equivalent to log loss
ce_per_anchor = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=cls_target, logits=cls_score
)
prediction_dict['cross_entropy_per_anchor'] = ce_per_anchor
# Finally, we need to calculate the regression loss over
# `rpn_bbox_target` and `rpn_bbox_pred`.
# We use SmoothL1Loss.
rpn_bbox_target = tf.reshape(rpn_bbox_target, [-1, 4])
rpn_bbox_pred = tf.reshape(rpn_bbox_pred, [-1, 4])
# We only care for positive labels (we ignore backgrounds since
# we don't have any bounding box information for it).
positive_labels = tf.equal(rpn_cls_target, 1)
rpn_bbox_target = tf.boolean_mask(rpn_bbox_target, positive_labels)
rpn_bbox_pred = tf.boolean_mask(rpn_bbox_pred, positive_labels)
# We apply smooth l1 loss as described by the Fast R-CNN paper.
reg_loss_per_anchor = smooth_l1_loss(
rpn_bbox_pred, rpn_bbox_target, sigma=self._l1_sigma
)
prediction_dict['reg_loss_per_anchor'] = reg_loss_per_anchor
# Loss summaries.
tf.summary.scalar('batch_size', tf.shape(labels)[0], ['rpn'])
foreground_cls_loss = tf.boolean_mask(
ce_per_anchor, tf.equal(labels, 1))
background_cls_loss = tf.boolean_mask(
ce_per_anchor, tf.equal(labels, 0))
tf.summary.scalar(
'foreground_cls_loss',
tf.reduce_mean(foreground_cls_loss), ['rpn'])
tf.summary.histogram(
'foreground_cls_loss', foreground_cls_loss, ['rpn'])
tf.summary.scalar(
'background_cls_loss',
tf.reduce_mean(background_cls_loss), ['rpn'])
tf.summary.histogram(
'background_cls_loss', background_cls_loss, ['rpn'])
tf.summary.scalar(
'foreground_samples', tf.shape(rpn_bbox_target)[0], ['rpn'])
return {
'rpn_cls_loss': tf.reduce_mean(ce_per_anchor),
'rpn_reg_loss': tf.reduce_mean(reg_loss_per_anchor),
}
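# Illustrative usage sketch (tensor and config names are assumptions, not part
# of luminoth's public API). As a Sonnet module, calling the RPN instance
# invokes _build:
#
#   rpn = RPN(num_anchors=9, config=config)
#   prediction_dict = rpn(conv_feature_map, im_shape, all_anchors,
#                         gt_boxes=gt_boxes, is_training=True)
#   losses = rpn.loss(prediction_dict)
#   # losses == {'rpn_cls_loss': ..., 'rpn_reg_loss': ...}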
| tryolabs/luminoth | luminoth/models/fasterrcnn/rpn.py | Python | bsd-3-clause | 13,279 | ["Gaussian"] | c8c231ab033614bc45592e7d9703305de7b70efee42367e7ec3a90c30c33367c |
# Orca
#
# Copyright 2010-2011 Consorcio Fernando de los Rios.
# Author: Juanje Ojeda Croissier <jojeda@emergya.es>
# Author: Javier Hernandez Antunez <jhernandez@emergya.es>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""JSON backend for Orca settings"""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010-2011 Consorcio Fernando de los Rios."
__license__ = "LGPL"
from json import load, dump
import os
from orca import settings, acss
class Backend:
def __init__(self, prefsDir):
""" Initialize the JSON Backend.
"""
self.general = {}
self.pronunciations = {}
self.keybindings = {}
self.profiles = {}
self.settingsFile = os.path.join(prefsDir, "user-settings.conf")
self.appPrefsDir = os.path.join(prefsDir, "app-settings")
def saveDefaultSettings(self, general, pronunciations, keybindings):
""" Save default settings for all the properties from
orca.settings. """
defaultProfiles = {'default': { 'profile': settings.profile,
'pronunciations': {},
'keybindings': {}
}
}
prefs = {'general': general,
'profiles': defaultProfiles,
'pronunciations': pronunciations,
'keybindings': keybindings}
self.general = general
self.profiles = defaultProfiles
self.pronunciations = pronunciations
self.keybindings = keybindings
settingsFile = open(self.settingsFile, 'w')
dump(prefs, settingsFile, indent=4)
settingsFile.close()
def getAppSettings(self, appName):
fileName = os.path.join(self.appPrefsDir, "%s.conf" % appName)
if os.path.exists(fileName):
settingsFile = open(fileName, 'r')
prefs = load(settingsFile)
settingsFile.close()
else:
prefs = {}
return prefs
def saveAppSettings(self, appName, profile, general, pronunciations, keybindings):
prefs = self.getAppSettings(appName)
profiles = prefs.get('profiles', {})
profiles[profile] = {'general': general,
'pronunciations': pronunciations,
'keybindings': keybindings}
prefs['profiles'] = profiles
fileName = os.path.join(self.appPrefsDir, "%s.conf" % appName)
settingsFile = open(fileName, 'w')
dump(prefs, settingsFile, indent=4)
settingsFile.close()
def saveProfileSettings(self, profile, general,
pronunciations, keybindings):
""" Save minimal subset defined in the profile against current
defaults. """
if profile is None:
profile = 'default'
general['pronunciations'] = pronunciations
general['keybindings'] = keybindings
with open(self.settingsFile, 'r+') as settingsFile:
prefs = load(settingsFile)
prefs['profiles'][profile] = general
settingsFile.seek(0)
settingsFile.truncate()
dump(prefs, settingsFile, indent=4)
def _getSettings(self):
""" Load from config file all settings """
settingsFile = open(self.settingsFile)
try:
prefs = load(settingsFile)
except ValueError:
return
self.general = prefs['general'].copy()
self.pronunciations = prefs['pronunciations']
self.keybindings = prefs['keybindings']
self.profiles = prefs['profiles'].copy()
def getGeneral(self, profile='default'):
""" Get general settings from default settings and
override with profile values. """
self._getSettings()
generalSettings = self.general.copy()
profileSettings = self.profiles[profile].copy()
for key, value in list(profileSettings.items()):
if key == 'voices':
for voiceType, voiceDef in list(value.items()):
value[voiceType] = acss.ACSS(voiceDef)
if key not in ['startingProfile', 'activeProfile']:
generalSettings[key] = value
try:
generalSettings['activeProfile'] = profileSettings['profile']
except KeyError:
generalSettings['activeProfile'] = ["Default", "default"]
return generalSettings
def getPronunciations(self, profile='default'):
""" Get pronunciation settings from default settings and
override with profile values. """
self._getSettings()
pronunciations = self.pronunciations.copy()
profileSettings = self.profiles[profile].copy()
if 'pronunciations' in profileSettings:
pronunciations = profileSettings['pronunciations']
return pronunciations
def getKeybindings(self, profile='default'):
""" Get keybindings settings from default settings and
override with profile values. """
self._getSettings()
keybindings = self.keybindings.copy()
profileSettings = self.profiles[profile].copy()
if 'keybindings' in profileSettings:
keybindings = profileSettings['keybindings']
return keybindings
def isFirstStart(self):
""" Check if we're in first start. """
return not os.path.exists(self.settingsFile)
def _setProfileKey(self, key, value):
self.general[key] = value
with open(self.settingsFile, 'r+') as settingsFile:
prefs = load(settingsFile)
prefs['general'][key] = value
settingsFile.seek(0)
settingsFile.truncate()
dump(prefs, settingsFile, indent=4)
def setFirstStart(self, value=False):
"""Set firstStart. This user-configurable settting is primarily
intended to serve as an indication as to whether or not initial
configuration is needed."""
self.general['firstStart'] = value
self._setProfileKey('firstStart', value)
def availableProfiles(self):
""" List available profiles. """
self._getSettings()
profiles = []
for profileName in list(self.profiles.keys()):
profileDict = self.profiles[profileName].copy()
profiles.append(profileDict.get('profile'))
return profiles
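# Illustrative usage sketch (the preferences directory is hypothetical):
#
#   backend = Backend(os.path.expanduser("~/.local/share/orca"))
#   if not backend.isFirstStart():
#       general = backend.getGeneral(profile='default')
#       profiles = backend.availableProfiles()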
| ruibarreira/linuxtrail | usr/lib/python3/dist-packages/orca/backends/json_backend.py | Python | gpl-3.0 | 7,195 | ["ORCA"] | 2a8f7ecb53ffb368909a1aa15753eeefe01828cce6a04adc418d436309c91076 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Classes for reading/manipulating/writing VASP output files.
"""
import json
import glob
import itertools
import logging
import math
import os
import re
import warnings
from pathlib import Path
import xml.etree.cElementTree as ET
from collections import defaultdict
from io import StringIO
import collections
import numpy as np
from monty.io import zopen, reverse_readfile
from monty.json import MSONable
from monty.json import jsanitize
from monty.re import regrep
from monty.os.path import zpath
from monty.dev import deprecated
from pymatgen.core.composition import Composition
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure
from pymatgen.core.units import unitized
from pymatgen.electronic_structure.bandstructure import BandStructure, \
BandStructureSymmLine, get_reconstructed_band_structure
from pymatgen.electronic_structure.core import Spin, Orbital, OrbitalType, Magmom
from pymatgen.electronic_structure.dos import CompleteDos, Dos
from pymatgen.entries.computed_entries import \
ComputedEntry, ComputedStructureEntry
from pymatgen.io.vasp.inputs import Incar, Kpoints, Poscar, Potcar
from pymatgen.util.io_utils import clean_lines, micro_pyawk
from pymatgen.util.num import make_symmetric_matrix_from_upper_tri
__author__ = "Shyue Ping Ong, Geoffroy Hautier, Rickard Armiento, " + \
"Vincent L Chevrier, Ioannis Petousis, Stephen Dacek, Mark Turiansky"
__credits__ = "Anubhav Jain"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.2"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Nov 30, 2012"
logger = logging.getLogger(__name__)
def _parse_parameters(val_type, val):
"""
Helper function to convert a Vasprun parameter into the proper type.
Boolean, int and float types are converted.
Args:
val_type: Value type parsed from vasprun.xml.
val: Actual string value parsed for vasprun.xml.
"""
if val_type == "logical":
return val == "T"
elif val_type == "int":
return int(val)
elif val_type == "string":
return val.strip()
else:
return float(val)
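# Illustrative behaviour sketch for _parse_parameters (hypothetical values);
# any val_type other than "logical", "int" or "string" falls through to float:
#
#   _parse_parameters("logical", "T")    -> True
#   _parse_parameters("int", "42")       -> 42
#   _parse_parameters("string", " PE ")  -> "PE"
#   _parse_parameters("float", "1.5e-3") -> 0.0015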
def _parse_v_parameters(val_type, val, filename, param_name):
r"""
Helper function to convert a Vasprun array-type parameter into the proper
type. Boolean, int and float types are converted.
Args:
val_type: Value type parsed from vasprun.xml.
val: Actual string value parsed for vasprun.xml.
filename: Fullpath of vasprun.xml. Used for robust error handling.
E.g., if vasprun.xml contains *** for some Incar parameters,
the code will try to read from an INCAR file present in the same
directory.
param_name: Name of parameter.
Returns:
Parsed value.
"""
if val_type == "logical":
val = [i == "T" for i in val.split()]
elif val_type == "int":
try:
val = [int(i) for i in val.split()]
except ValueError:
# Fix for an error where vasprun sometimes displays
# LDAUL/J as 2****
val = _parse_from_incar(filename, param_name)
if val is None:
raise IOError("Error in parsing vasprun.xml")
elif val_type == "string":
val = val.split()
else:
try:
val = [float(i) for i in val.split()]
except ValueError:
# Fix for an error where vasprun sometimes displays
# MAGMOM as 2****
val = _parse_from_incar(filename, param_name)
if val is None:
raise IOError("Error in parsing vasprun.xml")
return val
def _parse_varray(elem):
if elem.get("type", None) == 'logical':
m = [[True if i == 'T' else False for i in v.text.split()] for v in elem]
else:
m = [[_vasprun_float(i) for i in v.text.split()] for v in elem]
return m
def _parse_from_incar(filename, key):
"""
Helper function to parse a parameter from the INCAR.
"""
dirname = os.path.dirname(filename)
for f in os.listdir(dirname):
if re.search(r"INCAR", f):
warnings.warn("INCAR found. Using " + key + " from INCAR.")
incar = Incar.from_file(os.path.join(dirname, f))
if key in incar:
return incar[key]
else:
return None
return None
def _vasprun_float(f):
"""
Large numbers are often represented as ********* in the vasprun.
This function parses these values as np.nan
"""
try:
return float(f)
except ValueError as e:
f = f.strip()
if f == '*' * len(f):
warnings.warn('Float overflow (*******) encountered in vasprun')
return np.nan
raise e
class Vasprun(MSONable):
"""
Vastly improved cElementTree-based parser for vasprun.xml files. Uses
iterparse to support incremental parsing of large files.
Speedup over Dom is at least 2x for smallish files (~1Mb) to orders of
magnitude for larger files (~10Mb).
**Vasp results**
.. attribute:: ionic_steps
All ionic steps in the run as a list of
{"structure": structure at end of run,
"electronic_steps": {All electronic step data in vasprun file},
"stresses": stress matrix}
.. attribute:: tdos
Total dos calculated at the end of run.
.. attribute:: idos
Integrated dos calculated at the end of run.
.. attribute:: pdos
List of list of PDos objects. Access as pdos[atomindex][orbitalindex]
.. attribute:: efermi
Fermi energy
.. attribute:: eigenvalues
Available only if parse_eigen=True. Final eigenvalues as a dict of
{(spin, kpoint index):[[eigenvalue, occu]]}.
This representation is based on actual ordering in VASP and is meant as
an intermediate representation to be converted into proper objects. The
kpoint index is 0-based (unlike the 1-based indexing in VASP).
.. attribute:: projected_eigenvalues
Final projected eigenvalues as a dict of {spin: nd-array}. To access
a particular value, you need to do
Vasprun.projected_eigenvalues[spin][kpoint index][band index][atom index][orbital_index]
This representation is based on actual ordering in VASP and is meant as
an intermediate representation to be converted into proper objects. The
kpoint, band and atom indices are 0-based (unlike the 1-based indexing
in VASP).
.. attribute:: other_dielectric
Dictionary, with the tag comment as key, containing other variants of
the real and imaginary part of the dielectric constant (e.g., computed
by RPA) in function of the energy (frequency). Optical properties (e.g.
absorption coefficient) can be obtained through this.
The data is given as a tuple of 3 values containing each of them
the energy, the real part tensor, and the imaginary part tensor
([energies],[[real_partxx,real_partyy,real_partzz,real_partxy,
real_partyz,real_partxz]],[[imag_partxx,imag_partyy,imag_partzz,
imag_partxy, imag_partyz, imag_partxz]])
.. attribute:: nionic_steps
The total number of ionic steps. This number is always equal
to the total number of steps in the actual run even if
ionic_step_skip is used.
.. attribute:: force_constants
Force constants computed in phonon DFPT run(IBRION = 8).
The data is a 4D numpy array of shape (natoms, natoms, 3, 3).
.. attribute:: normalmode_eigenvals
Normal mode frequencies.
1D numpy array of size 3*natoms.
.. attribute:: normalmode_eigenvecs
Normal mode eigen vectors.
3D numpy array of shape (3*natoms, natoms, 3).
**Vasp inputs**
.. attribute:: incar
Incar object for parameters specified in INCAR file.
.. attribute:: parameters
Incar object with parameters that vasp actually used, including all
defaults.
.. attribute:: kpoints
Kpoints object for KPOINTS specified in run.
.. attribute:: actual_kpoints
List of actual kpoints, e.g.,
[[0.25, 0.125, 0.08333333], [-0.25, 0.125, 0.08333333],
[0.25, 0.375, 0.08333333], ....]
.. attribute:: actual_kpoints_weights
List of kpoint weights, E.g.,
[0.04166667, 0.04166667, 0.04166667, 0.04166667, 0.04166667, ....]
.. attribute:: atomic_symbols
List of atomic symbols, e.g., ["Li", "Fe", "Fe", "P", "P", "P"]
.. attribute:: potcar_symbols
List of POTCAR symbols. e.g.,
["PAW_PBE Li 17Jan2003", "PAW_PBE Fe 06Sep2000", ..]
Author: Shyue Ping Ong
"""
def __init__(self, filename, ionic_step_skip=None,
ionic_step_offset=0, parse_dos=True,
parse_eigen=True, parse_projected_eigen=False,
parse_potcar_file=True, occu_tol=1e-8,
exception_on_bad_xml=True):
"""
Args:
filename (str): Filename to parse
ionic_step_skip (int): If ionic_step_skip is a number > 1,
only every ionic_step_skip ionic steps will be read for
structure and energies. This is very useful if you are parsing
very large vasprun.xml files and you are not interested in every
single ionic step. Note that the final energies may not be the
actual final energy in the vasprun.
ionic_step_offset (int): Used together with ionic_step_skip. If set,
the first ionic step read will be offset by the amount of
ionic_step_offset. For example, if you want to start reading
every 10th structure but only from the 3rd structure onwards,
set ionic_step_skip to 10 and ionic_step_offset to 3. Main use
case is when doing statistical structure analysis with
extremely long time scale multiple VASP calculations of
varying numbers of steps.
parse_dos (bool): Whether to parse the dos. Defaults to True. Set
to False to shave off significant time from the parsing if you
are not interested in getting those data.
parse_eigen (bool): Whether to parse the eigenvalues. Defaults to
True. Set to False to shave off significant time from the
parsing if you are not interested in getting those data.
parse_projected_eigen (bool): Whether to parse the projected
eigenvalues. Defaults to False. Set to True to obtain projected
eigenvalues. **Note that this can take an extreme amount of time
and memory.** So use this wisely.
parse_potcar_file (bool/str): Whether to parse the potcar file to read
the potcar hashes for the potcar_spec attribute. Defaults to True.
If False, no hashes will be determined and the potcar_spec
dictionaries will read {"symbol": ElSymbol, "hash": None}. By default,
looks in the same directory as the vasprun.xml, with the same
extensions as vasprun.xml. If a string is provided, looks at
that filepath.
occu_tol (float): Sets the minimum tol for the determination of the
vbm and cbm. Usually the default of 1e-8 works well enough,
but there may be pathological cases.
exception_on_bad_xml (bool): Whether to throw a ParseException if a
malformed XML is detected. Default to True, which ensures only
proper vasprun.xml are parsed. You can set to False if you want
partial results (e.g., if you are monitoring a calculation during a
run), but use the results with care. A warning is issued.
"""
self.filename = filename
self.ionic_step_skip = ionic_step_skip
self.ionic_step_offset = ionic_step_offset
self.occu_tol = occu_tol
self.exception_on_bad_xml = exception_on_bad_xml
with zopen(filename, "rt") as f:
if ionic_step_skip or ionic_step_offset:
# remove parts of the xml file and parse the string
run = f.read()
steps = run.split("<calculation>")
# The text before the first <calculation> is the preamble!
preamble = steps.pop(0)
self.nionic_steps = len(steps)
new_steps = steps[ionic_step_offset::int(ionic_step_skip)]
# add the trailing information from the last step in the run
to_parse = "<calculation>".join(new_steps)
if steps[-1] != new_steps[-1]:
to_parse = "{}<calculation>{}{}".format(
preamble, to_parse,
steps[-1].split("</calculation>")[-1])
else:
to_parse = "{}<calculation>{}".format(preamble, to_parse)
self._parse(StringIO(to_parse), parse_dos=parse_dos,
parse_eigen=parse_eigen,
parse_projected_eigen=parse_projected_eigen)
else:
self._parse(f, parse_dos=parse_dos, parse_eigen=parse_eigen,
parse_projected_eigen=parse_projected_eigen)
self.nionic_steps = len(self.ionic_steps)
if parse_potcar_file:
self.update_potcar_spec(parse_potcar_file)
self.update_charge_from_potcar(parse_potcar_file)
if self.incar.get("ALGO", "") != "BSE" and (not self.converged):
msg = "%s is an unconverged VASP run.\n" % filename
msg += "Electronic convergence reached: %s.\n" % \
self.converged_electronic
msg += "Ionic convergence reached: %s." % self.converged_ionic
warnings.warn(msg, UnconvergedVASPWarning)
def _parse(self, stream, parse_dos, parse_eigen, parse_projected_eigen):
self.efermi = None
self.eigenvalues = None
self.projected_eigenvalues = None
self.dielectric_data = {}
self.other_dielectric = {}
ionic_steps = []
parsed_header = False
try:
for event, elem in ET.iterparse(stream):
tag = elem.tag
if not parsed_header:
if tag == "generator":
self.generator = self._parse_params(elem)
elif tag == "incar":
self.incar = self._parse_params(elem)
elif tag == "kpoints":
if not hasattr(self, 'kpoints'):
self.kpoints, self.actual_kpoints, self.actual_kpoints_weights = self._parse_kpoints(elem)
elif tag == "parameters":
self.parameters = self._parse_params(elem)
elif tag == "structure" and elem.attrib.get("name") == "initialpos":
self.initial_structure = self._parse_structure(elem)
elif tag == "atominfo":
self.atomic_symbols, self.potcar_symbols = self._parse_atominfo(elem)
self.potcar_spec = [{"titel": p,
"hash": None} for
p in self.potcar_symbols]
if tag == "calculation":
parsed_header = True
if not self.parameters.get("LCHIMAG", False):
ionic_steps.append(self._parse_calculation(elem))
else:
ionic_steps.extend(self._parse_chemical_shielding_calculation(elem))
elif parse_dos and tag == "dos":
try:
self.tdos, self.idos, self.pdos = self._parse_dos(elem)
self.efermi = self.tdos.efermi
self.dos_has_errors = False
except Exception:
self.dos_has_errors = True
elif parse_eigen and tag == "eigenvalues":
self.eigenvalues = self._parse_eigen(elem)
elif parse_projected_eigen and tag == "projected":
self.projected_eigenvalues = self._parse_projected_eigen(
elem)
elif tag == "dielectricfunction":
if ("comment" not in elem.attrib or
elem.attrib["comment"] ==
"INVERSE MACROSCOPIC DIELECTRIC TENSOR (including "
"local field effects in RPA (Hartree))"):
if 'density' not in self.dielectric_data:
self.dielectric_data['density'] = self._parse_diel(
elem)
elif 'velocity' not in self.dielectric_data:
# "velocity-velocity" is also named
# "current-current" in OUTCAR
self.dielectric_data['velocity'] = self._parse_diel(
elem)
else:
raise NotImplementedError(
'This vasprun.xml has >2 unlabelled dielectric '
'functions')
else:
comment = elem.attrib["comment"]
# VASP 6+ has labels for the density and current
# derived dielectric constants
if comment == "density-density":
self.dielectric_data["density"] = self._parse_diel(
elem)
elif comment == "current-current":
self.dielectric_data["velocity"] = self._parse_diel(
elem)
else:
self.other_dielectric[comment] = self._parse_diel(
elem)
elif tag == "varray" and elem.attrib.get("name") == 'opticaltransitions':
self.optical_transition = np.array(_parse_varray(elem))
elif tag == "structure" and elem.attrib.get("name") == \
"finalpos":
self.final_structure = self._parse_structure(elem)
elif tag == "dynmat":
hessian, eigenvalues, eigenvectors = self._parse_dynmat(elem)
natoms = len(self.atomic_symbols)
hessian = np.array(hessian)
self.force_constants = np.zeros((natoms, natoms, 3, 3), dtype='double')
for i in range(natoms):
for j in range(natoms):
self.force_constants[i, j] = hessian[i * 3:(i + 1) * 3, j * 3:(j + 1) * 3]
phonon_eigenvectors = []
for ev in eigenvectors:
phonon_eigenvectors.append(np.array(ev).reshape(natoms, 3))
self.normalmode_eigenvals = np.array(eigenvalues)
self.normalmode_eigenvecs = np.array(phonon_eigenvectors)
except ET.ParseError as ex:
if self.exception_on_bad_xml:
raise ex
else:
warnings.warn(
"XML is malformed. Parsing has stopped but partial data"
"is available.", UserWarning)
self.ionic_steps = ionic_steps
self.vasp_version = self.generator["version"]
@property
def structures(self):
"""
Returns:
List of Structure objects for the structure at each ionic step.
"""
return [step["structure"] for step in self.ionic_steps]
@property
def epsilon_static(self):
"""
Property only available for DFPT calculations.
Returns:
The static part of the dielectric constant. Present when it's a DFPT run
(LEPSILON=TRUE)
"""
return self.ionic_steps[-1].get("epsilon", [])
@property
def epsilon_static_wolfe(self):
"""
Property only available for DFPT calculations.
Returns:
The static part of the dielectric constant without any local field
effects. Present when it's a DFPT run (LEPSILON=TRUE)
"""
return self.ionic_steps[-1].get("epsilon_rpa", [])
@property
def epsilon_ionic(self):
"""
Property only available for DFPT calculations and when IBRION=5, 6, 7 or 8.
Returns:
The ionic part of the static dielectric constant. Present when it's a
DFPT run (LEPSILON=TRUE) and IBRION=5, 6, 7 or 8
"""
return self.ionic_steps[-1].get("epsilon_ion", [])
@property
def dielectric(self):
"""
Returns:
The real and imaginary part of the dielectric constant (e.g., computed
by RPA) as a function of the energy (frequency). Optical properties (e.g.
absorption coefficient) can be obtained through this.
The data is given as a tuple of 3 values containing each of them
the energy, the real part tensor, and the imaginary part tensor
([energies],[[real_partxx,real_partyy,real_partzz,real_partxy,
real_partyz,real_partxz]],[[imag_partxx,imag_partyy,imag_partzz,
imag_partxy, imag_partyz, imag_partxz]])
"""
return self.dielectric_data['density']
@property
def optical_absorption_coeff(self):
"""
Calculate the optical absorption coefficient
from the dielectric constants. Note that this method is only
implemented for optical properties calculated with GGA and BSE.
Returns:
optical absorption coefficient in list
"""
if self.dielectric_data["density"]:
real_avg = [sum(self.dielectric_data["density"][1][i][0:3]) / 3
for i in range(len(self.dielectric_data["density"][0]))]
imag_avg = [sum(self.dielectric_data["density"][2][i][0:3]) / 3
for i in range(len(self.dielectric_data["density"][0]))]
def f(freq, real, imag):
"""
The optical absorption coefficient calculated from the real and
imaginary parts of the dielectric function at a given frequency.
"""
hbar = 6.582119514e-16  # reduced Planck constant in eV*s
coeff = np.sqrt(np.sqrt(real ** 2 + imag ** 2) - real) * np.sqrt(2) / hbar * freq
return coeff
absorption_coeff = [f(freq, real, imag) for freq, real, imag in
zip(self.dielectric_data["density"][0], real_avg, imag_avg)]
return absorption_coeff
@property
def converged_electronic(self):
"""
Returns:
True if electronic step convergence has been reached in the final
ionic step
"""
final_esteps = self.ionic_steps[-1]["electronic_steps"]
if 'LEPSILON' in self.incar and self.incar['LEPSILON']:
i = 1
to_check = set(['e_wo_entrp', 'e_fr_energy', 'e_0_energy'])
while set(final_esteps[i].keys()) == to_check:
i += 1
return i + 1 != self.parameters["NELM"]
return len(final_esteps) < self.parameters["NELM"]
@property
def converged_ionic(self):
"""
        Returns:
            True if ionic step convergence has been reached, i.e., VASP
            exited before reaching the max ionic steps for a relaxation run.
"""
nsw = self.parameters.get("NSW", 0)
return nsw <= 1 or len(self.ionic_steps) < nsw
@property
def converged(self):
"""
Returns:
True if a relaxation run is converged both ionically and
electronically.
"""
return self.converged_electronic and self.converged_ionic
@property # type: ignore
@unitized("eV")
def final_energy(self):
"""
Final energy from the vasp run.
"""
try:
final_istep = self.ionic_steps[-1]
if final_istep["e_wo_entrp"] != final_istep['electronic_steps'][-1]["e_0_energy"]:
warnings.warn("Final e_wo_entrp differs from the final "
"electronic step. VASP may have included some "
"corrections, e.g., vdw. Vasprun will return "
"the final e_wo_entrp, i.e., including "
"corrections in such instances.")
return final_istep["e_wo_entrp"]
return final_istep['electronic_steps'][-1]["e_0_energy"]
except (IndexError, KeyError):
warnings.warn("Calculation does not have a total energy. "
"Possibly a GW or similar kind of run. A value of "
"infinity is returned.")
return float('inf')
@property
def complete_dos(self):
"""
A complete dos object which incorporates the total dos and all
projected dos.
"""
final_struct = self.final_structure
pdoss = {final_struct[i]: pdos for i, pdos in enumerate(self.pdos)}
return CompleteDos(self.final_structure, self.tdos, pdoss)
@property
def hubbards(self):
"""
Hubbard U values used if a vasprun is a GGA+U run. {} otherwise.
"""
symbols = [s.split()[1] for s in self.potcar_symbols]
symbols = [re.split(r"_", s)[0] for s in symbols]
if not self.incar.get("LDAU", False):
return {}
us = self.incar.get("LDAUU", self.parameters.get("LDAUU"))
js = self.incar.get("LDAUJ", self.parameters.get("LDAUJ"))
if len(js) != len(us):
js = [0] * len(us)
if len(us) == len(symbols):
return {symbols[i]: us[i] - js[i] for i in range(len(symbols))}
elif sum(us) == 0 and sum(js) == 0:
return {}
else:
raise VaspParserError("Length of U value parameters and atomic "
"symbols are mismatched")
@property
def run_type(self):
"""
Returns the run type. Currently supports LDA, GGA, vdW-DF and HF calcs.
TODO: Fix for other functional types like PW91, other vdW types, etc.
"""
GGA_TYPES = {"RE": "revPBE", "PE": "PBE", "PS": "PBESol", "RP": "RevPBE+PADE", "AM": "AM05", "OR": "optPBE",
"BO": "optB88", "MK": "optB86b", "--": "GGA"}
METAGGA_TYPES = {"TPSS": "TPSS", "RTPSS": "revTPSS", "M06L": "M06-L", "MBJ": "modified Becke-Johnson",
"SCAN": "SCAN", "MS0": "MadeSimple0", "MS1": "MadeSimple1", "MS2": "MadeSimple2"}
if self.parameters.get("AEXX", 1.00) == 1.00:
rt = "HF"
elif self.parameters.get("HFSCREEN", 0.30) == 0.30:
rt = "HSE03"
elif self.parameters.get("HFSCREEN", 0.20) == 0.20:
rt = "HSE06"
elif self.parameters.get("AEXX", 0.20) == 0.20:
rt = "B3LYP"
elif self.parameters.get("LHFCALC", True):
rt = "PBEO or other Hybrid Functional"
elif self.parameters.get("LUSE_VDW", False):
if self.incar.get("METAGGA", "").strip().upper() in METAGGA_TYPES:
rt = METAGGA_TYPES[self.incar.get("METAGGA", "").strip().upper()] + "+rVV10"
else:
rt = GGA_TYPES[self.parameters.get("GGA", "").strip().upper()] + "+rVV10"
elif self.incar.get("METAGGA", "").strip().upper() in METAGGA_TYPES:
rt = METAGGA_TYPES[self.incar.get("METAGGA", "").strip().upper()]
if self.is_hubbard or self.parameters.get("LDAU", True):
rt += "+U"
elif self.potcar_symbols[0].split()[0] == 'PAW':
rt = "LDA"
elif self.parameters.get("GGA", "").strip().upper() in GGA_TYPES:
rt = GGA_TYPES[self.parameters.get("GGA", "").strip().upper()]
if self.is_hubbard or self.parameters.get("LDAU", True):
rt += "+U"
return rt
@property
def is_hubbard(self):
"""
True if run is a DFT+U run.
"""
if len(self.hubbards) == 0:
return False
return sum(self.hubbards.values()) > 1e-8
@property
def is_spin(self):
"""
True if run is spin-polarized.
"""
return self.parameters.get("ISPIN", 1) == 2
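    # Usage sketch (illustrative; assumes a completed relaxation whose
    # vasprun.xml lives in the working directory):
    #
    #     vr = Vasprun("vasprun.xml")
    #     if vr.converged:
    #         print(vr.run_type, vr.final_energy, vr.is_hubbard, vr.is_spin)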
def get_computed_entry(self, inc_structure=True, parameters=None,
data=None):
"""
Returns a ComputedStructureEntry from the vasprun.
Args:
inc_structure (bool): Set to True if you want
ComputedStructureEntries to be returned instead of
ComputedEntries.
parameters (list): Input parameters to include. It has to be one of
the properties supported by the Vasprun object. If
parameters is None, a default set of parameters that are
necessary for typical post-processing will be set.
data (list): Output data to include. Has to be one of the properties
supported by the Vasprun object.
Returns:
ComputedStructureEntry/ComputedEntry
"""
param_names = {"is_hubbard", "hubbards", "potcar_symbols",
"potcar_spec", "run_type"}
if parameters:
param_names.update(parameters)
params = {p: getattr(self, p) for p in param_names}
data = {p: getattr(self, p) for p in data} if data is not None else {}
if inc_structure:
return ComputedStructureEntry(self.final_structure,
self.final_energy, parameters=params,
data=data)
else:
return ComputedEntry(self.final_structure.composition,
self.final_energy, parameters=params,
data=data)
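    # Usage sketch (illustrative; the extra "data" keys are just examples of
    # Vasprun properties one might want to carry along):
    #
    #     entry = vr.get_computed_entry(
    #         inc_structure=True, data=["epsilon_static", "is_spin"])
    #     entry.data["is_spin"]  # -> True/False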
def get_band_structure(self, kpoints_filename=None, efermi=None,
line_mode=False, force_hybrid_mode=False):
"""
Returns the band structure as a BandStructure object
Args:
            kpoints_filename (str): Full path of the KPOINTS file from which
                the band structure is generated.
                If none is provided, the code will try to intelligently
                determine the appropriate KPOINTS file by substituting the
                filename of the vasprun.xml with KPOINTS. This is the
                default behavior.
            efermi (float): The Fermi energy. If None (the default), it is
                read from the vasprun.
line_mode (bool): Force the band structure to be considered as
a run along symmetry lines.
force_hybrid_mode (bool): Makes it possible to read in self-consistent band structure calculations for
every type of functional
Returns:
a BandStructure object (or more specifically a
BandStructureSymmLine object if the run is detected to be a run
along symmetry lines)
            Two types of runs along symmetry lines are accepted: non-SC with
            Line-Mode in the KPOINTS file, or hybrid/self-consistent with a
            uniform grid plus a few kpoints along symmetry lines (explicit
            KPOINTS file); it is not possible to run a non-SC band structure
            with hybrid functionals. The explicit KPOINTS file needs to have
            the kpoint labels as comments.
"""
if not kpoints_filename:
kpoints_filename = zpath(
os.path.join(os.path.dirname(self.filename), 'KPOINTS'))
if not os.path.exists(kpoints_filename) and line_mode is True:
raise VaspParserError('KPOINTS needed to obtain band structure '
'along symmetry lines.')
if efermi is None:
efermi = self.efermi
kpoint_file = None
if os.path.exists(kpoints_filename):
kpoint_file = Kpoints.from_file(kpoints_filename)
lattice_new = Lattice(self.final_structure.lattice.reciprocal_lattice.matrix)
kpoints = [np.array(self.actual_kpoints[i])
for i in range(len(self.actual_kpoints))]
p_eigenvals = defaultdict(list)
eigenvals = defaultdict(list)
nkpts = len(kpoints)
for spin, v in self.eigenvalues.items():
v = np.swapaxes(v, 0, 1)
eigenvals[spin] = v[:, :, 0]
if self.projected_eigenvalues:
peigen = self.projected_eigenvalues[spin]
# Original axes for self.projected_eigenvalues are kpoints,
# band, ion, orb.
# For BS input, we need band, kpoints, orb, ion.
peigen = np.swapaxes(peigen, 0, 1) # Swap kpoint and band axes
peigen = np.swapaxes(peigen, 2, 3) # Swap ion and orb axes
p_eigenvals[spin] = peigen
# for b in range(min_eigenvalues):
# p_eigenvals[spin].append(
# [{Orbital(orb): v for orb, v in enumerate(peigen[b, k])}
# for k in range(nkpts)])
        # check if we have a hybrid band structure computation
        # for this we look at the presence of the LHFCALC tag
hybrid_band = False
if self.parameters.get('LHFCALC', False) or \
0. in self.actual_kpoints_weights:
hybrid_band = True
if kpoint_file is not None:
if kpoint_file.style == Kpoints.supported_modes.Line_mode:
line_mode = True
if line_mode:
labels_dict = {}
if hybrid_band or force_hybrid_mode:
start_bs_index = 0
for i in range(len(self.actual_kpoints)):
if self.actual_kpoints_weights[i] == 0.0:
start_bs_index = i
break
for i in range(start_bs_index, len(kpoint_file.kpts)):
if kpoint_file.labels[i] is not None:
labels_dict[kpoint_file.labels[i]] = \
kpoint_file.kpts[i]
# remake the data only considering line band structure k-points
# (weight = 0.0 kpoints)
nbands = len(eigenvals[Spin.up])
kpoints = kpoints[start_bs_index:nkpts]
up_eigen = [eigenvals[Spin.up][i][start_bs_index:nkpts]
for i in range(nbands)]
if self.projected_eigenvalues:
p_eigenvals[Spin.up] = [p_eigenvals[Spin.up][i][
start_bs_index:nkpts]
for i in range(nbands)]
if self.is_spin:
down_eigen = [eigenvals[Spin.down][i][start_bs_index:nkpts]
for i in range(nbands)]
eigenvals = {Spin.up: up_eigen, Spin.down: down_eigen}
if self.projected_eigenvalues:
p_eigenvals[Spin.down] = [p_eigenvals[Spin.down][i][
start_bs_index:nkpts]
for i in range(nbands)]
else:
eigenvals = {Spin.up: up_eigen}
else:
if '' in kpoint_file.labels:
raise Exception("A band structure along symmetry lines "
"requires a label for each kpoint. "
"Check your KPOINTS file")
labels_dict = dict(zip(kpoint_file.labels, kpoint_file.kpts))
labels_dict.pop(None, None)
return BandStructureSymmLine(kpoints, eigenvals, lattice_new,
efermi, labels_dict,
structure=self.final_structure,
projections=p_eigenvals)
else:
return BandStructure(kpoints, eigenvals, lattice_new, efermi,
structure=self.final_structure,
projections=p_eigenvals)
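    # Usage sketch (illustrative; assumes a line-mode KPOINTS file sits next
    # to the vasprun.xml, and that BandStructureSymmLine.get_band_gap()
    # behaves as in current pymatgen):
    #
    #     bs = vr.get_band_structure(line_mode=True)
    #     gap_info = bs.get_band_gap()  # e.g. {"energy": ..., "direct": ...}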
@property
def eigenvalue_band_properties(self):
"""
Band properties from the eigenvalues as a tuple,
(band gap, cbm, vbm, is_band_gap_direct).
"""
vbm = -float("inf")
vbm_kpoint = None
cbm = float("inf")
cbm_kpoint = None
for spin, d in self.eigenvalues.items():
for k, val in enumerate(d):
for (eigenval, occu) in val:
if occu > self.occu_tol and eigenval > vbm:
vbm = eigenval
vbm_kpoint = k
elif occu <= self.occu_tol and eigenval < cbm:
cbm = eigenval
cbm_kpoint = k
return max(cbm - vbm, 0), cbm, vbm, vbm_kpoint == cbm_kpoint
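    # Usage sketch (illustrative): unpack the tuple documented above.
    #
    #     gap, cbm, vbm, is_direct = vr.eigenvalue_band_properties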
def get_potcars(self, path):
"""
:param path: Path to search for POTCARs
:return: Potcar from path.
"""
def get_potcar_in_path(p):
for fn in os.listdir(os.path.abspath(p)):
if fn.startswith('POTCAR'):
pc = Potcar.from_file(os.path.join(p, fn))
if {d.header for d in pc} == \
{sym for sym in self.potcar_symbols}:
return pc
warnings.warn("No POTCAR file with matching TITEL fields"
" was found in {}".format(os.path.abspath(p)))
if isinstance(path, (str, Path)):
path = str(path)
if "POTCAR" in path:
potcar = Potcar.from_file(path)
if {d.TITEL for d in potcar} != \
{sym for sym in self.potcar_symbols}:
raise ValueError("Potcar TITELs do not match Vasprun")
else:
potcar = get_potcar_in_path(path)
elif isinstance(path, bool) and path:
potcar = get_potcar_in_path(os.path.split(self.filename)[0])
else:
potcar = None
return potcar
def update_potcar_spec(self, path):
"""
:param path: Path to search for POTCARs
:return: Potcar spec from path.
"""
potcar = self.get_potcars(path)
if potcar:
self.potcar_spec = [{"titel": sym, "hash": ps.get_potcar_hash()}
for sym in self.potcar_symbols
for ps in potcar if
ps.symbol == sym.split()[1]]
def update_charge_from_potcar(self, path):
"""
Sets the charge of a structure based on the POTCARs found.
:param path: Path to search for POTCARs
"""
potcar = self.get_potcars(path)
if potcar and self.incar.get("ALGO", "") not in ["GW0", "G0W0", "GW", "BSE"]:
nelect = self.parameters["NELECT"]
if len(potcar) == len(self.initial_structure.composition.element_composition):
potcar_nelect = sum([
self.initial_structure.composition.element_composition[ps.element] * ps.ZVAL
for ps in potcar])
else:
nums = [len(list(g)) for _, g in
itertools.groupby(self.atomic_symbols)]
potcar_nelect = sum(ps.ZVAL * num for ps, num in
zip(potcar, nums))
charge = nelect - potcar_nelect
if charge:
for s in self.structures:
s._charge = charge
def as_dict(self):
"""
Json-serializable dict representation.
"""
d = {"vasp_version": self.vasp_version,
"has_vasp_completed": self.converged,
"nsites": len(self.final_structure)}
comp = self.final_structure.composition
d["unit_cell_formula"] = comp.as_dict()
d["reduced_cell_formula"] = Composition(comp.reduced_formula).as_dict()
d["pretty_formula"] = comp.reduced_formula
symbols = [s.split()[1] for s in self.potcar_symbols]
symbols = [re.split(r"_", s)[0] for s in symbols]
d["is_hubbard"] = self.is_hubbard
d["hubbards"] = self.hubbards
unique_symbols = sorted(list(set(self.atomic_symbols)))
d["elements"] = unique_symbols
d["nelements"] = len(unique_symbols)
d["run_type"] = self.run_type
vin = {"incar": {k: v for k, v in self.incar.items()},
"crystal": self.initial_structure.as_dict(),
"kpoints": self.kpoints.as_dict()}
actual_kpts = [{"abc": list(self.actual_kpoints[i]),
"weight": self.actual_kpoints_weights[i]}
for i in range(len(self.actual_kpoints))]
vin["kpoints"]["actual_points"] = actual_kpts
vin["nkpoints"] = len(actual_kpts)
vin["potcar"] = [s.split(" ")[1] for s in self.potcar_symbols]
vin["potcar_spec"] = self.potcar_spec
vin["potcar_type"] = [s.split(" ")[0] for s in self.potcar_symbols]
vin["parameters"] = {k: v for k, v in self.parameters.items()}
vin["lattice_rec"] = self.final_structure.lattice.reciprocal_lattice.as_dict()
d["input"] = vin
nsites = len(self.final_structure)
try:
vout = {"ionic_steps": self.ionic_steps,
"final_energy": self.final_energy,
"final_energy_per_atom": self.final_energy / nsites,
"crystal": self.final_structure.as_dict(),
"efermi": self.efermi}
except (ArithmeticError, TypeError):
vout = {"ionic_steps": self.ionic_steps,
"final_energy": self.final_energy,
"final_energy_per_atom": None,
"crystal": self.final_structure.as_dict(),
"efermi": self.efermi}
if self.eigenvalues:
eigen = {str(spin): v.tolist()
for spin, v in self.eigenvalues.items()}
vout["eigenvalues"] = eigen
(gap, cbm, vbm, is_direct) = self.eigenvalue_band_properties
vout.update(dict(bandgap=gap, cbm=cbm, vbm=vbm,
is_gap_direct=is_direct))
if self.projected_eigenvalues:
vout['projected_eigenvalues'] = {
str(spin): v.tolist()
for spin, v in self.projected_eigenvalues.items()}
vout['epsilon_static'] = self.epsilon_static
vout['epsilon_static_wolfe'] = self.epsilon_static_wolfe
vout['epsilon_ionic'] = self.epsilon_ionic
d['output'] = vout
return jsanitize(d, strict=True)
def _parse_params(self, elem):
params = {}
for c in elem:
name = c.attrib.get("name")
if c.tag not in ("i", "v"):
p = self._parse_params(c)
if name == "response functions":
                    # Delete duplicate fields from "response functions",
                    # whose values would otherwise override those already
                    # parsed into the root params.
params.update(p)
else:
ptype = c.attrib.get("type")
val = c.text.strip() if c.text else ""
if c.tag == "i":
params[name] = _parse_parameters(ptype, val)
else:
params[name] = _parse_v_parameters(ptype, val,
self.filename, name)
elem.clear()
return Incar(params)
def _parse_atominfo(self, elem):
for a in elem.findall("array"):
if a.attrib["name"] == "atoms":
atomic_symbols = [rc.find("c").text.strip()
for rc in a.find("set")]
elif a.attrib["name"] == "atomtypes":
potcar_symbols = [rc.findall("c")[4].text.strip()
for rc in a.find("set")]
# ensure atomic symbols are valid elements
def parse_atomic_symbol(symbol):
try:
return str(Element(symbol))
# vasprun.xml uses X instead of Xe for xenon
except ValueError as e:
if symbol == "X":
return "Xe"
elif symbol == "r":
return "Zr"
raise e
elem.clear()
return [parse_atomic_symbol(sym) for
sym in atomic_symbols], potcar_symbols
def _parse_kpoints(self, elem):
e = elem
if elem.find("generation"):
e = elem.find("generation")
k = Kpoints("Kpoints from vasprun.xml")
k.style = Kpoints.supported_modes.from_string(
e.attrib["param"] if "param" in e.attrib else "Reciprocal")
for v in e.findall("v"):
name = v.attrib.get("name")
toks = v.text.split()
if name == "divisions":
k.kpts = [[int(i) for i in toks]]
elif name == "usershift":
k.kpts_shift = [float(i) for i in toks]
elif name in {"genvec1", "genvec2", "genvec3", "shift"}:
setattr(k, name, [float(i) for i in toks])
for va in elem.findall("varray"):
name = va.attrib["name"]
if name == "kpointlist":
actual_kpoints = _parse_varray(va)
elif name == "weights":
weights = [i[0] for i in _parse_varray(va)]
elem.clear()
if k.style == Kpoints.supported_modes.Reciprocal:
k = Kpoints(comment="Kpoints from vasprun.xml",
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(k.kpts),
kpts=actual_kpoints, kpts_weights=weights)
return k, actual_kpoints, weights
def _parse_structure(self, elem):
latt = _parse_varray(elem.find("crystal").find("varray"))
pos = _parse_varray(elem.find("varray"))
struct = Structure(latt, self.atomic_symbols, pos)
sdyn = elem.find("varray/[@name='selective']")
if sdyn:
struct.add_site_property('selective_dynamics',
_parse_varray(sdyn))
return struct
def _parse_diel(self, elem):
imag = [[_vasprun_float(l) for l in r.text.split()]
for r in elem.find("imag").find("array").find("set").findall("r")]
real = [[_vasprun_float(l) for l in r.text.split()]
for r in elem.find("real").find("array").find("set").findall("r")]
elem.clear()
return [e[0] for e in imag], \
[e[1:] for e in real], [e[1:] for e in imag]
def _parse_optical_transition(self, elem):
for va in elem.findall("varray"):
if va.attrib.get("name") == "opticaltransitions":
# opticaltransitions array contains oscillator strength and probability of transition
oscillator_strength = np.array(_parse_varray(va))[0:, ]
probability_transition = np.array(_parse_varray(va))[0:, 1]
return oscillator_strength, probability_transition
def _parse_chemical_shielding_calculation(self, elem):
calculation = []
istep = {}
try:
s = self._parse_structure(elem.find("structure"))
except AttributeError: # not all calculations have a structure
s = None
for va in elem.findall("varray"):
istep[va.attrib["name"]] = _parse_varray(va)
istep["structure"] = s
istep["electronic_steps"] = []
calculation.append(istep)
for scstep in elem.findall("scstep"):
try:
d = {i.attrib["name"]: _vasprun_float(i.text)
for i in scstep.find("energy").findall("i")}
cur_ene = d['e_fr_energy']
min_steps = 1 if len(calculation) >= 1 else self.parameters.get("NELMIN", 5)
if len(calculation[-1]["electronic_steps"]) <= min_steps:
calculation[-1]["electronic_steps"].append(d)
else:
last_ene = calculation[-1]["electronic_steps"][-1]["e_fr_energy"]
if abs(cur_ene - last_ene) < 1.0:
calculation[-1]["electronic_steps"].append(d)
else:
calculation.append({"electronic_steps": [d]})
except AttributeError: # not all calculations have an energy
pass
calculation[-1].update(calculation[-1]["electronic_steps"][-1])
return calculation
def _parse_calculation(self, elem):
try:
istep = {i.attrib["name"]: float(i.text)
for i in elem.find("energy").findall("i")}
except AttributeError: # not all calculations have an energy
istep = {}
esteps = []
for scstep in elem.findall("scstep"):
try:
d = {i.attrib["name"]: _vasprun_float(i.text)
for i in scstep.find("energy").findall("i")}
esteps.append(d)
except AttributeError: # not all calculations have an energy
pass
try:
s = self._parse_structure(elem.find("structure"))
except AttributeError: # not all calculations have a structure
s = None
for va in elem.findall("varray"):
istep[va.attrib["name"]] = _parse_varray(va)
istep["electronic_steps"] = esteps
istep["structure"] = s
elem.clear()
return istep
def _parse_dos(self, elem):
efermi = float(elem.find("i").text)
energies = None
tdensities = {}
idensities = {}
for s in elem.find("total").find("array").find("set").findall("set"):
data = np.array(_parse_varray(s))
energies = data[:, 0]
spin = Spin.up if s.attrib["comment"] == "spin 1" else Spin.down
tdensities[spin] = data[:, 1]
idensities[spin] = data[:, 2]
pdoss = []
partial = elem.find("partial")
if partial is not None:
orbs = [ss.text for ss in partial.find("array").findall("field")]
orbs.pop(0)
lm = any(["x" in s for s in orbs])
for s in partial.find("array").find("set").findall("set"):
pdos = defaultdict(dict)
for ss in s.findall("set"):
spin = Spin.up if ss.attrib["comment"] == "spin 1" else \
Spin.down
data = np.array(_parse_varray(ss))
nrow, ncol = data.shape
for j in range(1, ncol):
if lm:
orb = Orbital(j - 1)
else:
orb = OrbitalType(j - 1)
pdos[orb][spin] = data[:, j]
pdoss.append(pdos)
elem.clear()
return Dos(efermi, energies, tdensities), Dos(efermi, energies, idensities), pdoss
def _parse_eigen(self, elem):
eigenvalues = defaultdict(list)
for s in elem.find("array").find("set").findall("set"):
spin = Spin.up if s.attrib["comment"] == "spin 1" else Spin.down
for ss in s.findall("set"):
eigenvalues[spin].append(_parse_varray(ss))
eigenvalues = {spin: np.array(v) for spin, v in eigenvalues.items()}
elem.clear()
return eigenvalues
def _parse_projected_eigen(self, elem):
root = elem.find("array").find("set")
proj_eigen = defaultdict(list)
for s in root.findall("set"):
spin = int(re.match(r"spin(\d+)", s.attrib["comment"]).group(1))
# Force spin to be +1 or -1
spin = Spin.up if spin == 1 else Spin.down
for kpt, ss in enumerate(s.findall("set")):
dk = []
for band, sss in enumerate(ss.findall("set")):
db = _parse_varray(sss)
dk.append(db)
proj_eigen[spin].append(dk)
proj_eigen = {spin: np.array(v) for spin, v in proj_eigen.items()}
elem.clear()
return proj_eigen
def _parse_dynmat(self, elem):
hessian = []
eigenvalues = []
eigenvectors = []
for v in elem.findall("v"):
if v.attrib["name"] == "eigenvalues":
eigenvalues = [float(i) for i in v.text.split()]
for va in elem.findall("varray"):
if va.attrib["name"] == "hessian":
for v in va.findall("v"):
hessian.append([float(i) for i in v.text.split()])
elif va.attrib["name"] == "eigenvectors":
for v in va.findall("v"):
eigenvectors.append([float(i) for i in v.text.split()])
return hessian, eigenvalues, eigenvectors
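    # Shape notes (as assembled from _parse_dynmat during parsing, for a
    # DFPT/finite-difference phonon run): self.force_constants has shape
    # (natoms, natoms, 3, 3), self.normalmode_eigenvals has one entry per
    # normal mode, and self.normalmode_eigenvecs has shape
    # (nmodes, natoms, 3).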
class BSVasprun(Vasprun):
"""
A highly optimized version of Vasprun that parses only eigenvalues for
bandstructures. All other properties like structures, parameters,
etc. are ignored.
"""
def __init__(self, filename, parse_projected_eigen=False,
parse_potcar_file=False, occu_tol=1e-8):
"""
Args:
filename (str): Filename to parse
parse_projected_eigen (bool): Whether to parse the projected
eigenvalues. Defaults to False. Set to True to obtain projected
eigenvalues. **Note that this can take an extreme amount of time
and memory.** So use this wisely.
parse_potcar_file (bool/str): Whether to parse the potcar file to read
                the potcar hashes for the potcar_spec attribute. Defaults to
                False, in which case no hashes will be determined and the
                potcar_spec dictionaries will read {"symbol": ElSymbol,
                "hash": None}. If True, looks in the same directory as the
                vasprun.xml, with the same extensions as the vasprun.xml. If
                a string is provided, looks at that filepath.
occu_tol (float): Sets the minimum tol for the determination of the
vbm and cbm. Usually the default of 1e-8 works well enough,
but there may be pathological cases.
"""
self.filename = filename
self.occu_tol = occu_tol
with zopen(filename, "rt") as f:
self.efermi = None
parsed_header = False
self.eigenvalues = None
self.projected_eigenvalues = None
for event, elem in ET.iterparse(f):
tag = elem.tag
if not parsed_header:
if tag == "generator":
self.generator = self._parse_params(elem)
elif tag == "incar":
self.incar = self._parse_params(elem)
elif tag == "kpoints":
self.kpoints, self.actual_kpoints, self.actual_kpoints_weights = self._parse_kpoints(elem)
elif tag == "parameters":
self.parameters = self._parse_params(elem)
elif tag == "atominfo":
self.atomic_symbols, self.potcar_symbols = self._parse_atominfo(elem)
self.potcar_spec = [{"titel": p, "hash": None} for p in self.potcar_symbols]
parsed_header = True
elif tag == "i" and elem.attrib.get("name") == "efermi":
self.efermi = float(elem.text)
elif tag == "eigenvalues":
self.eigenvalues = self._parse_eigen(elem)
elif parse_projected_eigen and tag == "projected":
self.projected_eigenvalues = self._parse_projected_eigen(
elem)
elif tag == "structure" and elem.attrib.get("name") == \
"finalpos":
self.final_structure = self._parse_structure(elem)
self.vasp_version = self.generator["version"]
if parse_potcar_file:
self.update_potcar_spec(parse_potcar_file)
def as_dict(self):
"""
Json-serializable dict representation.
"""
d = {"vasp_version": self.vasp_version,
"has_vasp_completed": True,
"nsites": len(self.final_structure)}
comp = self.final_structure.composition
d["unit_cell_formula"] = comp.as_dict()
d["reduced_cell_formula"] = Composition(comp.reduced_formula).as_dict()
d["pretty_formula"] = comp.reduced_formula
symbols = [s.split()[1] for s in self.potcar_symbols]
symbols = [re.split(r"_", s)[0] for s in symbols]
d["is_hubbard"] = self.is_hubbard
d["hubbards"] = self.hubbards
unique_symbols = sorted(list(set(self.atomic_symbols)))
d["elements"] = unique_symbols
d["nelements"] = len(unique_symbols)
d["run_type"] = self.run_type
vin = {"incar": {k: v for k, v in self.incar.items()},
"crystal": self.final_structure.as_dict(),
"kpoints": self.kpoints.as_dict()}
actual_kpts = [{"abc": list(self.actual_kpoints[i]),
"weight": self.actual_kpoints_weights[i]}
for i in range(len(self.actual_kpoints))]
vin["kpoints"]["actual_points"] = actual_kpts
vin["potcar"] = [s.split(" ")[1] for s in self.potcar_symbols]
vin["potcar_spec"] = self.potcar_spec
vin["potcar_type"] = [s.split(" ")[0] for s in self.potcar_symbols]
vin["parameters"] = {k: v for k, v in self.parameters.items()}
vin["lattice_rec"] = self.final_structure.lattice.reciprocal_lattice.as_dict()
d["input"] = vin
vout = {"crystal": self.final_structure.as_dict(),
"efermi": self.efermi}
if self.eigenvalues:
eigen = defaultdict(dict)
for spin, values in self.eigenvalues.items():
for i, v in enumerate(values):
eigen[i][str(spin)] = v
vout["eigenvalues"] = eigen
(gap, cbm, vbm, is_direct) = self.eigenvalue_band_properties
vout.update(dict(bandgap=gap, cbm=cbm, vbm=vbm,
is_gap_direct=is_direct))
if self.projected_eigenvalues:
peigen = []
for i in range(len(eigen)):
peigen.append({})
for spin, v in self.projected_eigenvalues.items():
for kpoint_index, vv in enumerate(v):
if str(spin) not in peigen[kpoint_index]:
peigen[kpoint_index][str(spin)] = vv
vout['projected_eigenvalues'] = peigen
d['output'] = vout
return jsanitize(d, strict=True)
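    # Usage sketch (illustrative; assumes a band-structure vasprun.xml).
    # get_band_structure is inherited from Vasprun:
    #
    #     bsv = BSVasprun("vasprun.xml", parse_projected_eigen=False)
    #     bs = bsv.get_band_structure(line_mode=True)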
class Outcar:
"""
Parser for data in OUTCAR that is not available in Vasprun.xml
    Note, this class works a bit differently than most of the other
    VaspObjects, since the OUTCAR can be very different depending on the
    type of run performed.
Creating the OUTCAR class with a filename reads "regular parameters" that
are always present.
.. attribute:: magnetization
Magnetization on each ion as a tuple of dict, e.g.,
({"d": 0.0, "p": 0.003, "s": 0.002, "tot": 0.005}, ... )
Note that this data is not always present. LORBIT must be set to some
other value than the default.
.. attribute:: chemical_shielding
chemical shielding on each ion as a dictionary with core and valence contributions
.. attribute:: unsym_cs_tensor
        Unsymmetrized chemical shielding tensor matrices on each ion as a list.
e.g.,
[[[sigma11, sigma12, sigma13],
[sigma21, sigma22, sigma23],
[sigma31, sigma32, sigma33]],
...
[[sigma11, sigma12, sigma13],
[sigma21, sigma22, sigma23],
[sigma31, sigma32, sigma33]]]
.. attribute:: cs_g0_contribution
        G=0 contribution to chemical shielding. 3x3 matrix.
.. attribute:: cs_core_contribution
Core contribution to chemical shielding. dict. e.g.,
{'Mg': -412.8, 'C': -200.5, 'O': -271.1}
.. attribute:: efg
        Electric Field Gradient (EFG) tensor on each ion as a tuple of dict, e.g.,
        ({"cq": 0.1, "eta": 0.2, "nuclear_quadrupole_moment": 0.3},
         {"cq": 0.7, "eta": 0.8, "nuclear_quadrupole_moment": 0.9},
         ...)
.. attribute:: charge
Charge on each ion as a tuple of dict, e.g.,
({"p": 0.154, "s": 0.078, "d": 0.0, "tot": 0.232}, ...)
Note that this data is not always present. LORBIT must be set to some
other value than the default.
.. attribute:: is_stopped
True if OUTCAR is from a stopped run (using STOPCAR, see Vasp Manual).
.. attribute:: run_stats
Various useful run stats as a dict including "System time (sec)",
"Total CPU time used (sec)", "Elapsed time (sec)",
"Maximum memory used (kb)", "Average memory used (kb)",
"User time (sec)".
.. attribute:: elastic_tensor
Total elastic moduli (Kbar) is given in a 6x6 array matrix.
.. attribute:: drift
Total drift for each step in eV/Atom
.. attribute:: ngf
        Dimensions for the Augmentation grid
    .. attribute:: sampling_radii
Size of the sampling radii in VASP for the test charges for
the electrostatic potential at each atom. Total array size is the number
of elements present in the calculation
    .. attribute:: electrostatic_potential
Average electrostatic potential at each atomic position in order
of the atoms in POSCAR.
    .. attribute:: final_energy_contribs
        Individual contributions to the total final energy as a dictionary.
        Includes contributions from keys, e.g.:
{'DENC': -505778.5184347, 'EATOM': 15561.06492564, 'EBANDS': -804.53201231,
'EENTRO': -0.08932659, 'EXHF': 0.0, 'Ediel_sol': 0.0,
'PAW double counting': 664.6726974100002, 'PSCENC': 742.48691646,
'TEWEN': 489742.86847338, 'XCENC': -169.64189814}
    One can then call a specific reader depending on the type of run being
    performed. These are currently: read_igpar(), read_lepsilon(),
    read_lcalcpol(), read_core_state_eigen() and read_avg_core_pot().
    See the documentation of those methods for more details.
Authors: Rickard Armiento, Shyue Ping Ong
"""
def __init__(self, filename):
"""
Args:
filename (str): OUTCAR filename to parse.
"""
self.filename = filename
self.is_stopped = False
# data from end of OUTCAR
charge = []
mag_x = []
mag_y = []
mag_z = []
header = []
run_stats = {}
total_mag = None
nelect = None
efermi = None
total_energy = None
time_patt = re.compile(r"\((sec|kb)\)")
efermi_patt = re.compile(r"E-fermi\s*:\s*(\S+)")
nelect_patt = re.compile(r"number of electron\s+(\S+)\s+magnetization")
mag_patt = re.compile(r"number of electron\s+\S+\s+magnetization\s+("
r"\S+)")
toten_pattern = re.compile(r"free energy TOTEN\s+=\s+([\d\-\.]+)")
all_lines = []
for line in reverse_readfile(self.filename):
clean = line.strip()
all_lines.append(clean)
if clean.find("soft stop encountered! aborting job") != -1:
self.is_stopped = True
else:
if time_patt.search(line):
tok = line.strip().split(":")
run_stats[tok[0].strip()] = float(tok[1].strip())
continue
m = efermi_patt.search(clean)
if m:
try:
# try-catch because VASP sometimes prints
# 'E-fermi: ******** XC(G=0): -6.1327
# alpha+bet : -1.8238'
efermi = float(m.group(1))
continue
except ValueError:
efermi = None
continue
m = nelect_patt.search(clean)
if m:
nelect = float(m.group(1))
m = mag_patt.search(clean)
if m:
total_mag = float(m.group(1))
if total_energy is None:
m = toten_pattern.search(clean)
if m:
total_energy = float(m.group(1))
if all([nelect, total_mag is not None, efermi is not None,
run_stats]):
break
# For single atom systems, VASP doesn't print a total line, so
# reverse parsing is very difficult
read_charge = False
read_mag_x = False
read_mag_y = False # for SOC calculations only
read_mag_z = False
all_lines.reverse()
for clean in all_lines:
if read_charge or read_mag_x or read_mag_y or read_mag_z:
if clean.startswith("# of ion"):
header = re.split(r"\s{2,}", clean.strip())
header.pop(0)
else:
m = re.match(r"\s*(\d+)\s+(([\d\.\-]+)\s+)+", clean)
if m:
toks = [float(i)
for i in re.findall(r"[\d\.\-]+", clean)]
toks.pop(0)
if read_charge:
charge.append(dict(zip(header, toks)))
elif read_mag_x:
mag_x.append(dict(zip(header, toks)))
elif read_mag_y:
mag_y.append(dict(zip(header, toks)))
elif read_mag_z:
mag_z.append(dict(zip(header, toks)))
elif clean.startswith('tot'):
read_charge = False
read_mag_x = False
read_mag_y = False
read_mag_z = False
if clean == "total charge":
charge = []
read_charge = True
read_mag_x, read_mag_y, read_mag_z = False, False, False
elif clean == "magnetization (x)":
mag_x = []
read_mag_x = True
read_charge, read_mag_y, read_mag_z = False, False, False
elif clean == "magnetization (y)":
mag_y = []
read_mag_y = True
read_charge, read_mag_x, read_mag_z = False, False, False
elif clean == "magnetization (z)":
mag_z = []
read_mag_z = True
read_charge, read_mag_x, read_mag_y = False, False, False
elif re.search("electrostatic", clean):
read_charge, read_mag_x, read_mag_y, read_mag_z = False, False, False, False
# merge x, y and z components of magmoms if present (SOC calculation)
if mag_y and mag_z:
# TODO: detect spin axis
mag = []
for idx in range(len(mag_x)):
mag.append({
key: Magmom([mag_x[idx][key], mag_y[idx][key], mag_z[idx][key]])
for key in mag_x[0].keys()
})
else:
mag = mag_x
# data from beginning of OUTCAR
run_stats['cores'] = 0
with zopen(filename, "rt") as f:
for line in f:
if "running" in line:
run_stats['cores'] = line.split()[2]
break
self.run_stats = run_stats
self.magnetization = tuple(mag)
self.charge = tuple(charge)
self.efermi = efermi
self.nelect = nelect
self.total_mag = total_mag
self.final_energy = total_energy
self.data = {}
# Read "total number of plane waves", NPLWV:
self.read_pattern(
{"nplwv": r"total plane-waves NPLWV =\s+(\*{6}|\d+)"},
terminate_on_match=True
)
try:
self.data["nplwv"] = [[int(self.data["nplwv"][0][0])]]
except ValueError:
self.data["nplwv"] = [[None]]
nplwvs_at_kpoints = [
n for [n] in self.read_table_pattern(
r"\n{3}-{104}\n{3}",
r".+plane waves:\s+(\*{6,}|\d+)",
r"maximum and minimum number of plane-waves"
)
]
self.data["nplwvs_at_kpoints"] = [None for n in nplwvs_at_kpoints]
for (n, nplwv) in enumerate(nplwvs_at_kpoints):
try:
self.data["nplwvs_at_kpoints"][n] = int(nplwv)
except ValueError:
pass
# Read the drift:
self.read_pattern({
"drift": r"total drift:\s+([\.\-\d]+)\s+([\.\-\d]+)\s+([\.\-\d]+)"},
terminate_on_match=False,
postprocess=float)
self.drift = self.data.get('drift', [])
# Check if calculation is spin polarized
self.spin = False
self.read_pattern({'spin': 'ISPIN = 2'})
if self.data.get('spin', []):
self.spin = True
# Check if calculation is noncollinear
self.noncollinear = False
self.read_pattern({'noncollinear': 'LNONCOLLINEAR = T'})
if self.data.get('noncollinear', []):
            self.noncollinear = True
# Check if the calculation type is DFPT
self.dfpt = False
self.read_pattern({'ibrion': r"IBRION =\s+([\-\d]+)"},
terminate_on_match=True,
postprocess=int)
if self.data.get("ibrion", [[0]])[0][0] > 6:
self.dfpt = True
self.read_internal_strain_tensor()
# Check to see if LEPSILON is true and read piezo data if so
self.lepsilon = False
self.read_pattern({'epsilon': 'LEPSILON= T'})
if self.data.get('epsilon', []):
self.lepsilon = True
self.read_lepsilon()
# only read ionic contribution if DFPT is turned on
if self.dfpt:
self.read_lepsilon_ionic()
# Check to see if LCALCPOL is true and read polarization data if so
self.lcalcpol = False
self.read_pattern({'calcpol': 'LCALCPOL = T'})
if self.data.get('calcpol', []):
self.lcalcpol = True
self.read_lcalcpol()
self.read_pseudo_zval()
# Read electrostatic potential
self.read_pattern({
'electrostatic': r"average \(electrostatic\) potential at core"})
if self.data.get('electrostatic', []):
self.read_electrostatic_potential()
self.nmr_cs = False
self.read_pattern({"nmr_cs": r"LCHIMAG = (T)"})
if self.data.get("nmr_cs", None):
self.nmr_cs = True
self.read_chemical_shielding()
self.read_cs_g0_contribution()
self.read_cs_core_contribution()
self.read_cs_raw_symmetrized_tensors()
self.nmr_efg = False
self.read_pattern({"nmr_efg": r"NMR quadrupolar parameters"})
if self.data.get("nmr_efg", None):
self.nmr_efg = True
self.read_nmr_efg()
self.read_nmr_efg_tensor()
self.has_onsite_density_matrices = False
self.read_pattern({"has_onsite_density_matrices": r"onsite density matrix"},
terminate_on_match=True)
if "has_onsite_density_matrices" in self.data:
self.has_onsite_density_matrices = True
self.read_onsite_density_matrices()
# Store the individual contributions to the final total energy
final_energy_contribs = {}
for k in ["PSCENC", "TEWEN", "DENC", "EXHF", "XCENC", "PAW double counting",
"EENTRO", "EBANDS", "EATOM", "Ediel_sol"]:
if k == "PAW double counting":
self.read_pattern({k: r"%s\s+=\s+([\.\-\d]+)\s+([\.\-\d]+)" % (k)})
else:
self.read_pattern({k: r"%s\s+=\s+([\d\-\.]+)" % (k)})
if not self.data[k]:
continue
final_energy_contribs[k] = sum([float(f) for f in self.data[k][-1]])
self.final_energy_contribs = final_energy_contribs
def read_pattern(self, patterns, reverse=False, terminate_on_match=False,
postprocess=str):
r"""
General pattern reading. Uses monty's regrep method. Takes the same
arguments.
Args:
patterns (dict): A dict of patterns, e.g.,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"}.
            reverse (bool): Read files in reverse. Defaults to False. Useful
                for large files, especially OUTCARs, when used with
                terminate_on_match.
terminate_on_match (bool): Whether to terminate when there is at
least one match in each key in pattern.
postprocess (callable): A post processing function to convert all
matches. Defaults to str, i.e., no change.
Renders accessible:
Any attribute in patterns. For example,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"} will set the
value of self.data["energy"] = [[-1234], [-3453], ...], to the
results from regex and postprocess. Note that the returned values
are lists of lists, because you can grep multiple items on one line.
"""
matches = regrep(self.filename, patterns, reverse=reverse,
terminate_on_match=terminate_on_match,
postprocess=postprocess)
for k in patterns.keys():
self.data[k] = [i[0] for i in matches.get(k, [])]
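    # Usage sketch (illustrative; the "energy" pattern is an example, not a
    # pre-registered key):
    #
    #     out = Outcar("OUTCAR")
    #     out.read_pattern({"energy": r"energy\(sigma->0\)\s+=\s+([\d\-.]+)"},
    #                      terminate_on_match=True, postprocess=float)
    #     out.data["energy"]  # -> list of lists, one inner list per match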
def read_table_pattern(self, header_pattern, row_pattern, footer_pattern,
postprocess=str, attribute_name=None,
last_one_only=True):
r"""
        Parse table-like data. A table is composed of three parts: header,
        main body, footer. All data in the main body that matches the row
        pattern will be returned.
Args:
            header_pattern (str): The regular expression pattern that matches
                the table header. This pattern should match all the text
                immediately before the main body of the table. For a table
                with multiple sections, match the text up to the section of
                interest. MULTILINE and DOTALL options are enforced; as a
                result, the "." meta-character will also match "\n" in this
                section.
row_pattern (str): The regular expression matches a single line in
the table. Capture interested field using regular expression
groups.
footer_pattern (str): The regular expression matches the end of the
table. E.g. a long dash line.
postprocess (callable): A post processing function to convert all
matches. Defaults to str, i.e., no change.
            attribute_name (str): Name of this table. If present, the parsed
                data will be attached to "data", e.g., self.data["efg"] = [...]
            last_one_only (bool): All tables will be parsed; if this option
                is set to True, only the last table will be returned and the
                enclosing list will be removed, i.e., only a single table
                will be returned. Defaults to True.
Returns:
            List of tables. 1) A table is a list of rows. 2) A row is either
            a list of attribute values, in case the capturing groups are
            defined without names in row_pattern, or a dict, in case named
            capturing groups are defined by row_pattern.
"""
with zopen(self.filename, 'rt') as f:
text = f.read()
table_pattern_text = header_pattern + r"\s*^(?P<table_body>(?:\s+" + row_pattern + r")+)\s+" + footer_pattern
table_pattern = re.compile(table_pattern_text, re.MULTILINE | re.DOTALL)
rp = re.compile(row_pattern)
tables = []
for mt in table_pattern.finditer(text):
table_body_text = mt.group("table_body")
table_contents = []
for line in table_body_text.split("\n"):
ml = rp.search(line)
# skip empty lines
if not ml:
continue
d = ml.groupdict()
if len(d) > 0:
processed_line = {k: postprocess(v) for k, v in d.items()}
else:
processed_line = [postprocess(v) for v in ml.groups()]
table_contents.append(processed_line)
tables.append(table_contents)
if last_one_only:
retained_data = tables[-1]
else:
retained_data = tables
if attribute_name is not None:
self.data[attribute_name] = retained_data
return retained_data
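    # Usage sketch (illustrative; the patterns below are hypothetical and
    # only show the header/row/footer contract):
    #
    #     rows = out.read_table_pattern(
    #         header_pattern=r"ion\s+charge\s*\n-+",
    #         row_pattern=r"\d+\s+([\d\-.]+)",
    #         footer_pattern=r"-+",
    #         postprocess=float)
    #     # rows -> [[0.12], [0.34], ...] for the last matching table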
def read_electrostatic_potential(self):
"""
        Parses the electrostatic potential for the last ionic step
"""
pattern = {"ngf": r"\s+dimension x,y,z NGXF=\s+([\.\-\d]+)\sNGYF=\s+([\.\-\d]+)\sNGZF=\s+([\.\-\d]+)"}
self.read_pattern(pattern, postprocess=int)
self.ngf = self.data.get("ngf", [[]])[0]
pattern = {"radii": r"the test charge radii are((?:\s+[\.\-\d]+)+)"}
self.read_pattern(pattern, reverse=True, terminate_on_match=True, postprocess=str)
self.sampling_radii = [float(f) for f in self.data["radii"][0][0].split()]
header_pattern = r"\(the norm of the test charge is\s+[\.\-\d]+\)"
table_pattern = r"((?:\s+\d+\s*[\.\-\d]+)+)"
footer_pattern = r"\s+E-fermi :"
pots = self.read_table_pattern(header_pattern, table_pattern, footer_pattern)
pots = "".join(itertools.chain.from_iterable(pots))
pots = re.findall(r"\s+\d+\s*([\.\-\d]+)+", pots)
self.electrostatic_potential = [float(f) for f in pots]
def read_freq_dielectric(self):
"""
Parses the frequency dependent dielectric function (obtained with
LOPTICS). Frequencies (in eV) are in self.frequencies, and dielectric
tensor function is given as self.dielectric_tensor_function.
"""
plasma_pattern = r"plasma frequency squared.*"
dielectric_pattern = r"frequency dependent\s+IMAGINARY " \
r"DIELECTRIC FUNCTION \(independent particle, " \
r"no local field effects\)(\sdensity-density)*$"
row_pattern = r"\s+".join([r"([\.\-\d]+)"] * 3)
plasma_frequencies = collections.defaultdict(list)
read_plasma = False
read_dielectric = False
energies = []
data = {"REAL": [], "IMAGINARY": []}
count = 0
component = "IMAGINARY"
with zopen(self.filename, "rt") as f:
for l in f:
l = l.strip()
if re.match(plasma_pattern, l):
read_plasma = "intraband" if "intraband" in l else "interband"
elif re.match(dielectric_pattern, l):
read_plasma = False
read_dielectric = True
row_pattern = r"\s+".join([r"([\.\-\d]+)"] * 7)
if read_plasma and re.match(row_pattern, l):
plasma_frequencies[read_plasma].append(
[float(t) for t in l.strip().split()])
elif read_dielectric:
if re.match(row_pattern, l.strip()):
toks = l.strip().split()
if component == "IMAGINARY":
energies.append(float(toks[0]))
xx, yy, zz, xy, yz, xz = [float(t) for t in toks[1:]]
matrix = [[xx, xy, xz], [xy, yy, yz], [xz, yz, zz]]
data[component].append(matrix)
elif re.match(r"\s*-+\s*", l):
count += 1
if count == 2:
component = "REAL"
elif count == 3:
break
self.plasma_frequencies = {k: np.array(v[:3])
for k, v in plasma_frequencies.items()}
self.dielectric_energies = np.array(energies)
self.dielectric_tensor_function = np.array(data["REAL"]) + 1j * np.array(data["IMAGINARY"])
@property # type: ignore
@deprecated(message="frequencies has been renamed to dielectric_energies.")
def frequencies(self):
"""
Renamed to dielectric energies.
"""
return self.dielectric_energies
def read_chemical_shielding(self):
"""
        Parse the NMR chemical shieldings data. Only the second part, "absolute, valence and core",
        will be parsed, and only the three rightmost fields (ISO_SHIFT, SPAN, SKEW) will be retrieved.
Returns:
List of chemical shieldings in the order of atoms from the OUTCAR. Maryland notation is adopted.
"""
header_pattern = r"\s+CSA tensor \(J\. Mason, Solid State Nucl\. Magn\. Reson\. 2, " \
r"285 \(1993\)\)\s+" \
r"\s+-{50,}\s+" \
r"\s+EXCLUDING G=0 CONTRIBUTION\s+INCLUDING G=0 CONTRIBUTION\s+" \
r"\s+-{20,}\s+-{20,}\s+" \
r"\s+ATOM\s+ISO_SHIFT\s+SPAN\s+SKEW\s+ISO_SHIFT\s+SPAN\s+SKEW\s+" \
r"-{50,}\s*$"
first_part_pattern = r"\s+\(absolute, valence only\)\s+$"
swallon_valence_body_pattern = r".+?\(absolute, valence and core\)\s+$"
row_pattern = r"\d+(?:\s+[-]?\d+\.\d+){3}\s+" + r'\s+'.join(
[r"([-]?\d+\.\d+)"] * 3)
footer_pattern = r"-{50,}\s*$"
h1 = header_pattern + first_part_pattern
cs_valence_only = self.read_table_pattern(
h1, row_pattern, footer_pattern, postprocess=float,
last_one_only=True)
h2 = header_pattern + swallon_valence_body_pattern
cs_valence_and_core = self.read_table_pattern(
h2, row_pattern, footer_pattern, postprocess=float,
last_one_only=True)
all_cs = {}
for name, cs_table in [["valence_only", cs_valence_only],
["valence_and_core", cs_valence_and_core]]:
all_cs[name] = cs_table
self.data["chemical_shielding"] = all_cs
def read_cs_g0_contribution(self):
"""
Parse the G0 contribution of NMR chemical shielding.
Returns:
G0 contribution matrix as list of list.
"""
header_pattern = r'^\s+G\=0 CONTRIBUTION TO CHEMICAL SHIFT \(field along BDIR\)\s+$\n' \
r'^\s+-{50,}$\n' \
r'^\s+BDIR\s+X\s+Y\s+Z\s*$\n' \
r'^\s+-{50,}\s*$\n'
row_pattern = r'(?:\d+)\s+' + r'\s+'.join([r'([-]?\d+\.\d+)'] * 3)
footer_pattern = r'\s+-{50,}\s*$'
self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float,
last_one_only=True, attribute_name="cs_g0_contribution")
def read_cs_core_contribution(self):
"""
Parse the core contribution of NMR chemical shielding.
Returns:
G0 contribution matrix as list of list.
"""
header_pattern = r'^\s+Core NMR properties\s*$\n' \
r'\n' \
r'^\s+typ\s+El\s+Core shift \(ppm\)\s*$\n' \
r'^\s+-{20,}$\n'
row_pattern = r'\d+\s+(?P<element>[A-Z][a-z]?\w?)\s+(?P<shift>[-]?\d+\.\d+)'
footer_pattern = r'\s+-{20,}\s*$'
self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=str,
last_one_only=True, attribute_name="cs_core_contribution")
core_contrib = {d['element']: float(d['shift'])
for d in self.data["cs_core_contribution"]}
self.data["cs_core_contribution"] = core_contrib
def read_cs_raw_symmetrized_tensors(self):
"""
        Parse the matrix form of the NMR tensors before they are corrected
        and tabulated.
        Returns:
            List of unsymmetrized tensors in the order of atoms.
"""
header_pattern = r"\s+-{50,}\s+" \
r"\s+Absolute Chemical Shift tensors\s+" \
r"\s+-{50,}$"
first_part_pattern = r"\s+UNSYMMETRIZED TENSORS\s+$"
row_pattern = r"\s+".join([r"([-]?\d+\.\d+)"] * 3)
unsym_footer_pattern = r"^\s+SYMMETRIZED TENSORS\s+$"
with zopen(self.filename, 'rt') as f:
text = f.read()
unsym_table_pattern_text = header_pattern + first_part_pattern + r"(?P<table_body>.+)" + unsym_footer_pattern
table_pattern = re.compile(unsym_table_pattern_text, re.MULTILINE | re.DOTALL)
rp = re.compile(row_pattern)
m = table_pattern.search(text)
if m:
table_text = m.group("table_body")
micro_header_pattern = r"ion\s+\d+"
micro_table_pattern_text = micro_header_pattern + r"\s*^(?P<table_body>(?:\s*" + row_pattern + r")+)\s+"
micro_table_pattern = re.compile(micro_table_pattern_text,
re.MULTILINE | re.DOTALL)
unsym_tensors = []
for mt in micro_table_pattern.finditer(table_text):
table_body_text = mt.group("table_body")
tensor_matrix = []
for line in table_body_text.rstrip().split("\n"):
ml = rp.search(line)
processed_line = [float(v) for v in ml.groups()]
tensor_matrix.append(processed_line)
unsym_tensors.append(tensor_matrix)
self.data["unsym_cs_tensor"] = unsym_tensors
else:
raise ValueError("NMR UNSYMMETRIZED TENSORS is not found")
def read_nmr_efg_tensor(self):
"""
Parses the NMR Electric Field Gradient Raw Tensors
Returns:
A list of Electric Field Gradient Tensors in the order of Atoms from OUTCAR
"""
header_pattern = r'Electric field gradients \(V/A\^2\)\n' \
r'-*\n' \
r' ion\s+V_xx\s+V_yy\s+V_zz\s+V_xy\s+V_xz\s+V_yz\n' \
r'-*\n'
row_pattern = r'\d+\s+([-\d\.]+)\s+([-\d\.]+)\s+([-\d\.]+)\s+([-\d\.]+)\s+([-\d\.]+)\s+([-\d\.]+)'
footer_pattern = r'-*\n'
data = self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float)
tensors = [make_symmetric_matrix_from_upper_tri(d) for d in data]
self.data["unsym_efg_tensor"] = tensors
return tensors
def read_nmr_efg(self):
"""
        Parse the NMR Electric Field Gradient interpreted values.
Returns:
Electric Field Gradient tensors as a list of dict in the order of atoms from OUTCAR.
Each dict key/value pair corresponds to a component of the tensors.
"""
header_pattern = r'^\s+NMR quadrupolar parameters\s+$\n' \
r'^\s+Cq : quadrupolar parameter\s+Cq=e[*]Q[*]V_zz/h$\n' \
r'^\s+eta: asymmetry parameters\s+\(V_yy - V_xx\)/ V_zz$\n' \
r'^\s+Q : nuclear electric quadrupole moment in mb \(millibarn\)$\n' \
r'^-{50,}$\n' \
r'^\s+ion\s+Cq\(MHz\)\s+eta\s+Q \(mb\)\s+$\n' \
r'^-{50,}\s*$\n'
row_pattern = r'\d+\s+(?P<cq>[-]?\d+\.\d+)\s+(?P<eta>[-]?\d+\.\d+)\s+' \
r'(?P<nuclear_quadrupole_moment>[-]?\d+\.\d+)'
footer_pattern = r'-{50,}\s*$'
self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float,
last_one_only=True, attribute_name="efg")
def read_elastic_tensor(self):
"""
Parse the elastic tensor data.
Returns:
6x6 array corresponding to the elastic tensor from the OUTCAR.
"""
header_pattern = r"TOTAL ELASTIC MODULI \(kBar\)\s+" \
r"Direction\s+([X-Z][X-Z]\s+)+" \
r"\-+"
row_pattern = r"[X-Z][X-Z]\s+" + r"\s+".join([r"(\-*[\.\d]+)"] * 6)
footer_pattern = r"\-+"
et_table = self.read_table_pattern(header_pattern, row_pattern,
footer_pattern, postprocess=float)
self.data["elastic_tensor"] = et_table
def read_piezo_tensor(self):
"""
Parse the piezo tensor data
"""
header_pattern = r"PIEZOELECTRIC TENSOR for field in x, y, " \
r"z\s+\(C/m\^2\)\s+([X-Z][X-Z]\s+)+\-+"
row_pattern = r"[x-z]\s+" + r"\s+".join([r"(\-*[\.\d]+)"] * 6)
footer_pattern = r"BORN EFFECTIVE"
pt_table = self.read_table_pattern(header_pattern, row_pattern,
footer_pattern, postprocess=float)
self.data["piezo_tensor"] = pt_table
def read_onsite_density_matrices(self):
"""
Parse the onsite density matrices, returns list with index corresponding
to atom index in Structure.
"""
# matrix size will vary depending on if d or f orbitals are present
# therefore regex assumes f, but filter out None values if d
header_pattern = r"spin component 1\n"
row_pattern = r'[^\S\r\n]*(?:([\d.-]+))' + r'(?:[^\S\r\n]*(-?[\d.]+)[^\S\r\n]*)?' * 6 + r'.*?'
footer_pattern = r"\nspin component 2"
spin1_component = self.read_table_pattern(header_pattern, row_pattern,
footer_pattern, postprocess=lambda x: float(x) if x else None,
last_one_only=False)
# filter out None values
spin1_component = [[[e for e in row if e is not None] for row in matrix] for matrix in spin1_component]
# and repeat for Spin.down
header_pattern = r"spin component 2\n"
row_pattern = r'[^\S\r\n]*(?:([\d.-]+))' + r'(?:[^\S\r\n]*(-?[\d.]+)[^\S\r\n]*)?' * 6 + r'.*?'
footer_pattern = r"\n occupancies and eigenvectors"
spin2_component = self.read_table_pattern(header_pattern, row_pattern,
footer_pattern, postprocess=lambda x: float(x) if x else None,
last_one_only=False)
spin2_component = [[[e for e in row if e is not None] for row in matrix] for matrix in spin2_component]
self.data["onsite_density_matrices"] = [
{
Spin.up: spin1_component[idx],
Spin.down: spin2_component[idx]
}
for idx in range(len(spin1_component))
]
def read_corrections(self, reverse=True, terminate_on_match=True):
"""
        Reads the dipole-quadrupole corrections into
        Outcar.data["dipol_quadrupol_correction"].
:param reverse: Whether to start from end of OUTCAR.
:param terminate_on_match: Whether to terminate once match is found.
"""
patterns = {
"dipol_quadrupol_correction": r"dipol\+quadrupol energy "
r"correction\s+([\d\-\.]+)"
}
self.read_pattern(patterns, reverse=reverse,
terminate_on_match=terminate_on_match,
postprocess=float)
self.data["dipol_quadrupol_correction"] = self.data["dipol_quadrupol_correction"][0][0]
def read_neb(self, reverse=True, terminate_on_match=True):
"""
        Reads NEB data. This works with OUTCARs from both normal
        VASP NEB calculations and the CI-NEB method implemented by
        Henkelman et al.
Args:
            reverse (bool): Read files in reverse. Useful for large files,
                especially OUTCARs, when used with terminate_on_match.
                Defaults to True here since we usually want only the final
                value.
terminate_on_match (bool): Whether to terminate when there is at
least one match in each key in pattern. Defaults to True here
since we usually want only the final value.
Renders accessible:
tangent_force - Final tangent force.
energy - Final energy.
These can be accessed under Outcar.data[key]
"""
patterns = {
"energy": r"energy\(sigma->0\)\s+=\s+([\d\-\.]+)",
"tangent_force": r"(NEB: projections on to tangent \(spring, REAL\)\s+\S+|tangential force \(eV/A\))\s+"
r"([\d\-\.]+)"
}
self.read_pattern(patterns, reverse=reverse,
terminate_on_match=terminate_on_match,
postprocess=str)
self.data["energy"] = float(self.data["energy"][0][0])
if self.data.get("tangent_force"):
self.data["tangent_force"] = float(
self.data["tangent_force"][0][1])
def read_igpar(self):
"""
Renders accessible:
er_ev = e<r>_ev (dictionary with Spin.up/Spin.down as keys)
er_bp = e<r>_bp (dictionary with Spin.up/Spin.down as keys)
er_ev_tot = spin up + spin down summed
er_bp_tot = spin up + spin down summed
p_elc = spin up + spin down summed
p_ion = spin up + spin down summed
(See VASP section "LBERRY, IGPAR, NPPSTR, DIPOL tags" for info on
what these are).
"""
# variables to be filled
self.er_ev = {} # will be dict (Spin.up/down) of array(3*float)
        self.er_bp = {}  # will be dict (Spin.up/down) of array(3*float)
self.er_ev_tot = None # will be array(3*float)
self.er_bp_tot = None # will be array(3*float)
self.p_elec = None
self.p_ion = None
try:
search = []
# Nonspin cases
def er_ev(results, match):
results.er_ev[Spin.up] = np.array(map(float,
match.groups()[1:4])) / 2
results.er_ev[Spin.down] = results.er_ev[Spin.up]
results.context = 2
search.append([r"^ *e<r>_ev=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
None, er_ev])
def er_bp(results, match):
results.er_bp[Spin.up] = np.array([float(match.group(i))
for i in range(1, 4)]) / 2
results.er_bp[Spin.down] = results.er_bp[Spin.up]
search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
lambda results, line: results.context == 2, er_bp])
# Spin cases
def er_ev_up(results, match):
results.er_ev[Spin.up] = np.array([float(match.group(i))
for i in range(1, 4)])
results.context = Spin.up
search.append([r"^.*Spin component 1 *e<r>_ev=\( *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
None, er_ev_up])
def er_bp_up(results, match):
results.er_bp[Spin.up] = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
lambda results, line: results.context == Spin.up, er_bp_up])
def er_ev_dn(results, match):
results.er_ev[Spin.down] = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
results.context = Spin.down
search.append([r"^.*Spin component 2 *e<r>_ev=\( *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
None, er_ev_dn])
def er_bp_dn(results, match):
results.er_bp[Spin.down] = np.array([float(match.group(i))
for i in range(1, 4)])
search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
lambda results, line: results.context == Spin.down, er_bp_dn])
# Always present spin/non-spin
def p_elc(results, match):
results.p_elc = np.array([float(match.group(i)) for i in range(1, 4)])
search.append([r"^.*Total electronic dipole moment: "
r"*p\[elc\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)", None, p_elc])
def p_ion(results, match):
results.p_ion = np.array([float(match.group(i)) for i in range(1, 4)])
search.append([r"^.*ionic dipole moment: "
r"*p\[ion\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)", None, p_ion])
self.context = None
self.er_ev = {Spin.up: None, Spin.down: None}
self.er_bp = {Spin.up: None, Spin.down: None}
micro_pyawk(self.filename, search, self)
if self.er_ev[Spin.up] is not None and \
self.er_ev[Spin.down] is not None:
self.er_ev_tot = self.er_ev[Spin.up] + self.er_ev[Spin.down]
if self.er_bp[Spin.up] is not None and \
self.er_bp[Spin.down] is not None:
self.er_bp_tot = self.er_bp[Spin.up] + self.er_bp[Spin.down]
except Exception:
self.er_ev_tot = None
self.er_bp_tot = None
raise Exception("IGPAR OUTCAR could not be parsed.")
def read_internal_strain_tensor(self):
"""
Reads the internal strain tensor and populates self.internal_strain_tensor with an array of voigt notation
tensors for each site.
"""
search = []
def internal_strain_start(results, match):
results.internal_strain_ion = int(match.group(1)) - 1
results.internal_strain_tensor.append(np.zeros((3, 6)))
search.append([r"INTERNAL STRAIN TENSOR FOR ION\s+(\d+)\s+for displacements in x,y,z \(eV/Angst\):",
None, internal_strain_start])
def internal_strain_data(results, match):
if match.group(1).lower() == "x":
index = 0
elif match.group(1).lower() == "y":
index = 1
elif match.group(1).lower() == "z":
index = 2
else:
raise Exception(
"Couldn't parse row index from symbol for internal strain tensor: {}".format(match.group(1)))
results.internal_strain_tensor[results.internal_strain_ion][index] = np.array([float(match.group(i))
for i in range(2, 8)])
if index == 2:
results.internal_strain_ion = None
search.append([r"^\s+([x,y,z])\s+" + r"([-]?\d+\.\d+)\s+" * 6,
lambda results, line: results.internal_strain_ion is not None,
internal_strain_data])
self.internal_strain_ion = None
self.internal_strain_tensor = []
micro_pyawk(self.filename, search, self)
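    # Usage sketch (hypothetical file name; assumes the enclosing OUTCAR parser
    # class is named Outcar and the run wrote the internal strain block):
    #
    #     outcar = Outcar("OUTCAR")
    #     outcar.read_internal_strain_tensor()
    #     first_site_tensor = outcar.internal_strain_tensor[0]   # (3, 6) Voigt-notation array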
def read_lepsilon(self):
"""
Reads an LEPSILON run.
# TODO: Document the actual variables.
"""
try:
search = []
def dielectric_section_start(results, match):
results.dielectric_index = -1
search.append([r"MACROSCOPIC STATIC DIELECTRIC TENSOR \(", None,
dielectric_section_start])
def dielectric_section_start2(results, match):
results.dielectric_index = 0
search.append(
[r"-------------------------------------",
lambda results, line: results.dielectric_index == -1,
dielectric_section_start2])
def dielectric_data(results, match):
results.dielectric_tensor[results.dielectric_index, :] = \
np.array([float(match.group(i)) for i in range(1, 4)])
results.dielectric_index += 1
search.append(
[r"^ *([-0-9.Ee+]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+) *$",
lambda results, line: results.dielectric_index >= 0
if results.dielectric_index is not None
else None,
dielectric_data])
def dielectric_section_stop(results, match):
results.dielectric_index = None
search.append(
[r"-------------------------------------",
lambda results, line: results.dielectric_index >= 1
if results.dielectric_index is not None
else None,
dielectric_section_stop])
self.dielectric_index = None
self.dielectric_tensor = np.zeros((3, 3))
def piezo_section_start(results, match):
results.piezo_index = 0
search.append([r"PIEZOELECTRIC TENSOR for field in x, y, z "
r"\(C/m\^2\)",
None, piezo_section_start])
def piezo_data(results, match):
results.piezo_tensor[results.piezo_index, :] = \
np.array([float(match.group(i)) for i in range(1, 7)])
results.piezo_index += 1
search.append(
[r"^ *[xyz] +([-0-9.Ee+]+) +([-0-9.Ee+]+)" +
r" +([-0-9.Ee+]+) *([-0-9.Ee+]+) +([-0-9.Ee+]+)" +
r" +([-0-9.Ee+]+)*$",
lambda results, line: results.piezo_index >= 0
if results.piezo_index is not None
else None,
piezo_data])
def piezo_section_stop(results, match):
results.piezo_index = None
search.append(
[r"-------------------------------------",
lambda results, line: results.piezo_index >= 1
if results.piezo_index is not None
else None,
piezo_section_stop])
self.piezo_index = None
self.piezo_tensor = np.zeros((3, 6))
def born_section_start(results, match):
results.born_ion = -1
search.append([r"BORN EFFECTIVE CHARGES ",
None, born_section_start])
def born_ion(results, match):
results.born_ion = int(match.group(1)) - 1
results.born.append(np.zeros((3, 3)))
search.append([r"ion +([0-9]+)", lambda results, line: results.born_ion is not None, born_ion])
def born_data(results, match):
results.born[results.born_ion][int(match.group(1)) - 1, :] = \
np.array([float(match.group(i)) for i in range(2, 5)])
search.append(
[r"^ *([1-3]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+)$",
lambda results, line: results.born_ion >= 0
if results.born_ion is not None
else results.born_ion,
born_data])
def born_section_stop(results, match):
results.born_ion = None
search.append(
[r"-------------------------------------",
lambda results, line: results.born_ion >= 1
if results.born_ion is not None
else results.born_ion,
born_section_stop])
self.born_ion = None
self.born = []
micro_pyawk(self.filename, search, self)
self.born = np.array(self.born)
self.dielectric_tensor = self.dielectric_tensor.tolist()
self.piezo_tensor = self.piezo_tensor.tolist()
except Exception:
raise Exception("LEPSILON OUTCAR could not be parsed.")
def read_lepsilon_ionic(self):
"""
Reads an LEPSILON run, the ionic component.
# TODO: Document the actual variables.
"""
try:
search = []
def dielectric_section_start(results, match):
results.dielectric_ionic_index = -1
search.append([r"MACROSCOPIC STATIC DIELECTRIC TENSOR IONIC", None,
dielectric_section_start])
def dielectric_section_start2(results, match):
results.dielectric_ionic_index = 0
search.append(
[r"-------------------------------------",
lambda results, line: results.dielectric_ionic_index == -1
if results.dielectric_ionic_index is not None
else results.dielectric_ionic_index,
dielectric_section_start2])
def dielectric_data(results, match):
results.dielectric_ionic_tensor[results.dielectric_ionic_index, :] = \
np.array([float(match.group(i)) for i in range(1, 4)])
results.dielectric_ionic_index += 1
search.append(
[r"^ *([-0-9.Ee+]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+) *$",
lambda results, line: results.dielectric_ionic_index >= 0
if results.dielectric_ionic_index is not None
else results.dielectric_ionic_index,
dielectric_data])
def dielectric_section_stop(results, match):
results.dielectric_ionic_index = None
search.append(
[r"-------------------------------------",
lambda results, line: results.dielectric_ionic_index >= 1
if results.dielectric_ionic_index is not None
else results.dielectric_ionic_index,
dielectric_section_stop])
self.dielectric_ionic_index = None
self.dielectric_ionic_tensor = np.zeros((3, 3))
def piezo_section_start(results, match):
results.piezo_ionic_index = 0
search.append([r"PIEZOELECTRIC TENSOR IONIC CONTR for field in "
r"x, y, z ",
None, piezo_section_start])
def piezo_data(results, match):
results.piezo_ionic_tensor[results.piezo_ionic_index, :] = \
np.array([float(match.group(i)) for i in range(1, 7)])
results.piezo_ionic_index += 1
search.append(
[r"^ *[xyz] +([-0-9.Ee+]+) +([-0-9.Ee+]+)" +
r" +([-0-9.Ee+]+) *([-0-9.Ee+]+) +([-0-9.Ee+]+)" +
r" +([-0-9.Ee+]+)*$",
lambda results, line: results.piezo_ionic_index >= 0
if results.piezo_ionic_index is not None
else results.piezo_ionic_index,
piezo_data])
def piezo_section_stop(results, match):
results.piezo_ionic_index = None
search.append(
["-------------------------------------",
lambda results, line: results.piezo_ionic_index >= 1
if results.piezo_ionic_index is not None
else results.piezo_ionic_index,
piezo_section_stop])
self.piezo_ionic_index = None
self.piezo_ionic_tensor = np.zeros((3, 6))
micro_pyawk(self.filename, search, self)
self.dielectric_ionic_tensor = self.dielectric_ionic_tensor.tolist()
self.piezo_ionic_tensor = self.piezo_ionic_tensor.tolist()
except Exception:
raise Exception(
"ionic part of LEPSILON OUTCAR could not be parsed.")
def read_lcalcpol(self):
"""
        Reads the LCALCPOL output.
# TODO: Document the actual variables.
"""
self.p_elec = None
self.p_sp1 = None
self.p_sp2 = None
self.p_ion = None
try:
search = []
# Always present spin/non-spin
def p_elec(results, match):
results.p_elec = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
search.append([r"^.*Total electronic dipole moment: "
r"*p\[elc\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
None, p_elec])
# If spin-polarized (and not noncollinear)
# save spin-polarized electronic values
if self.spin and not self.noncollinear:
def p_sp1(results, match):
results.p_sp1 = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
search.append([r"^.*p\[sp1\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
None, p_sp1])
def p_sp2(results, match):
results.p_sp2 = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
search.append([r"^.*p\[sp2\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
None, p_sp2])
def p_ion(results, match):
results.p_ion = np.array([float(match.group(1)),
float(match.group(2)),
float(match.group(3))])
search.append([r"^.*Ionic dipole moment: *p\[ion\]="
r"\( *([-0-9.Ee+]*)"
r" *([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
None, p_ion])
micro_pyawk(self.filename, search, self)
except Exception:
raise Exception("LCALCPOL OUTCAR could not be parsed.")
def read_pseudo_zval(self):
"""
Create pseudopotential ZVAL dictionary.
"""
try:
def atom_symbols(results, match):
element_symbol = match.group(1)
if not hasattr(results, 'atom_symbols'):
results.atom_symbols = []
results.atom_symbols.append(element_symbol.strip())
def zvals(results, match):
zvals = match.group(1)
results.zvals = map(float, re.findall(r'-?\d+\.\d*', zvals))
search = []
search.append([r'(?<=VRHFIN =)(.*)(?=:)', None, atom_symbols])
search.append([r'^\s+ZVAL.*=(.*)', None, zvals])
micro_pyawk(self.filename, search, self)
zval_dict = {}
for x, y in zip(self.atom_symbols, self.zvals):
zval_dict.update({x: y})
self.zval_dict = zval_dict
# Clean-up
del (self.atom_symbols)
del (self.zvals)
except Exception:
raise Exception("ZVAL dict could not be parsed.")
def read_core_state_eigen(self):
"""
Read the core state eigenenergies at each ionic step.
Returns:
            A list of dicts, one per atom, of the form [{"AO": [core state eig]}].
            The core state eigenenergy list for each AO spans all ionic
            steps.
        Example:
            The core state eigenenergy of the 2s AO of the 6th atom of the
            structure at the last ionic step is [5]["2s"][-1]
"""
with zopen(self.filename, "rt") as foutcar:
line = foutcar.readline()
while line != "":
line = foutcar.readline()
if "NIONS =" in line:
natom = int(line.split("NIONS =")[1])
cl = [defaultdict(list) for i in range(natom)]
if "the core state eigen" in line:
iat = -1
while line != "":
line = foutcar.readline()
# don't know number of lines to parse without knowing
# specific species, so stop parsing when we reach
# "E-fermi" instead
if "E-fermi" in line:
break
data = line.split()
# data will contain odd number of elements if it is
# the start of a new entry, or even number of elements
# if it continues the previous entry
if len(data) % 2 == 1:
iat += 1 # started parsing a new ion
data = data[1:] # remove element with ion number
for i in range(0, len(data), 2):
cl[iat][data[i]].append(float(data[i + 1]))
return cl
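    # Usage sketch (hypothetical): track how a core level shifts over the run.
    #
    #     cl = outcar.read_core_state_eigen()
    #     shifts = cl[5]["2s"]   # 2s eigenenergy of the 6th atom at every ionic step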
def read_avg_core_poten(self):
"""
Read the core potential at each ionic step.
Returns:
A list for each ionic step containing a list of the average core
potentials for each atom: [[avg core pot]].
Example:
The average core potential of the 2nd atom of the structure at the
last ionic step is: [-1][1]
"""
def pairwise(iterable):
"""s -> (s0,s1), (s1,s2), (s2, s3), ..."""
a = iter(iterable)
return zip(a, a)
with zopen(self.filename, "rt") as foutcar:
line = foutcar.readline()
aps = []
while line != "":
line = foutcar.readline()
if "the norm of the test charge is" in line:
ap = []
while line != "":
line = foutcar.readline()
# don't know number of lines to parse without knowing
# specific species, so stop parsing when we reach
# "E-fermi" instead
if "E-fermi" in line:
aps.append(ap)
break
data = line.split()
# the average core potentials of up to 5 elements are
# given per line
for i, pot in pairwise(data):
ap.append(float(pot))
return aps
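    # Usage sketch (hypothetical): the last entry corresponds to the final ionic step.
    #
    #     pots = outcar.read_avg_core_poten()
    #     final_pot_atom2 = pots[-1][1]   # average core potential of the 2nd atom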
def as_dict(self):
"""
        :return: MSONable dict.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__, "efermi": self.efermi,
"run_stats": self.run_stats, "magnetization": self.magnetization,
"charge": self.charge, "total_magnetization": self.total_mag,
"nelect": self.nelect, "is_stopped": self.is_stopped,
"drift": self.drift, "ngf": self.ngf,
"sampling_radii": self.sampling_radii,
"electrostatic_potential": self.electrostatic_potential}
if self.lepsilon:
d.update({"piezo_tensor": self.piezo_tensor,
"dielectric_tensor": self.dielectric_tensor,
"born": self.born})
if self.dfpt:
d.update({"internal_strain_tensor": self.internal_strain_tensor})
if self.dfpt and self.lepsilon:
d.update({"piezo_ionic_tensor": self.piezo_ionic_tensor,
"dielectric_ionic_tensor": self.dielectric_ionic_tensor})
if self.lcalcpol:
d.update({'p_elec': self.p_elec,
'p_ion': self.p_ion})
if self.spin and not self.noncollinear:
d.update({'p_sp1': self.p_sp1,
'p_sp2': self.p_sp2})
d.update({'zval_dict': self.zval_dict})
if self.nmr_cs:
d.update({"nmr_cs": {"valence and core": self.data["chemical_shielding"]["valence_and_core"],
"valence_only": self.data["chemical_shielding"]["valence_only"],
"g0": self.data["cs_g0_contribution"],
"core": self.data["cs_core_contribution"],
"raw": self.data["unsym_cs_tensor"]}})
if self.nmr_efg:
d.update({"nmr_efg": {"raw": self.data["unsym_efg_tensor"],
"parameters": self.data["efg"]}})
if self.has_onsite_density_matrices:
# cast Spin to str for consistency with electronic_structure
# TODO: improve handling of Enum (de)serialization in monty
onsite_density_matrices = [{str(k): v for k, v in d.items()}
for d in self.data["onsite_density_matrices"]]
d.update({"onsite_density_matrices": onsite_density_matrices})
return d
def read_fermi_contact_shift(self):
"""
output example:
Fermi contact (isotropic) hyperfine coupling parameter (MHz)
-------------------------------------------------------------
ion A_pw A_1PS A_1AE A_1c A_tot
-------------------------------------------------------------
1 -0.002 -0.002 -0.051 0.000 -0.052
2 -0.002 -0.002 -0.051 0.000 -0.052
3 0.056 0.056 0.321 -0.048 0.321
-------------------------------------------------------------
, which corresponds to
[[-0.002, -0.002, -0.051, 0.0, -0.052],
[-0.002, -0.002, -0.051, 0.0, -0.052],
[0.056, 0.056, 0.321, -0.048, 0.321]] from 'fch' data
"""
# Fermi contact (isotropic) hyperfine coupling parameter (MHz)
header_pattern1 = r"\s*Fermi contact \(isotropic\) hyperfine coupling parameter \(MHz\)\s+" \
r"\s*\-+" \
r"\s*ion\s+A_pw\s+A_1PS\s+A_1AE\s+A_1c\s+A_tot\s+" \
r"\s*\-+"
row_pattern1 = r'(?:\d+)\s+' + r'\s+'.join([r'([-]?\d+\.\d+)'] * 5)
footer_pattern = r"\-+"
fch_table = self.read_table_pattern(header_pattern1, row_pattern1,
footer_pattern, postprocess=float,
last_one_only=True)
# Dipolar hyperfine coupling parameters (MHz)
header_pattern2 = r"\s*Dipolar hyperfine coupling parameters \(MHz\)\s+" \
r"\s*\-+" \
r"\s*ion\s+A_xx\s+A_yy\s+A_zz\s+A_xy\s+A_xz\s+A_yz\s+" \
r"\s*\-+"
row_pattern2 = r'(?:\d+)\s+' + r'\s+'.join([r'([-]?\d+\.\d+)'] * 6)
dh_table = self.read_table_pattern(header_pattern2, row_pattern2,
footer_pattern, postprocess=float,
last_one_only=True)
# Total hyperfine coupling parameters after diagonalization (MHz)
header_pattern3 = r"\s*Total hyperfine coupling parameters after diagonalization \(MHz\)\s+" \
r"\s*\(convention: \|A_zz\| > \|A_xx\| > \|A_yy\|\)\s+" \
r"\s*\-+" \
r"\s*ion\s+A_xx\s+A_yy\s+A_zz\s+asymmetry \(A_yy - A_xx\)/ A_zz\s+" \
r"\s*\-+"
row_pattern3 = r'(?:\d+)\s+' + r'\s+'.join([r'([-]?\d+\.\d+)'] * 4)
th_table = self.read_table_pattern(header_pattern3, row_pattern3,
footer_pattern, postprocess=float,
last_one_only=True)
fc_shift_table = {'fch': fch_table, 'dh': dh_table, 'th': th_table}
self.data["fermi_contact_shift"] = fc_shift_table
class VolumetricData(MSONable):
"""
Simple volumetric object for reading LOCPOT and CHGCAR type files.
.. attribute:: structure
Structure associated with the Volumetric Data object
    .. attribute:: is_spin_polarized
        True if run is spin polarized
    .. attribute:: dim
        Tuple of dimensions of volumetric grid in each direction (nx, ny, nz).
    .. attribute:: data
        Actual data as a dict of {string: np.array}. The strings are "total"
and "diff", in accordance to the output format of vasp LOCPOT and
CHGCAR files where the total spin density is written first, followed
by the difference spin density.
.. attribute:: ngridpts
Total number of grid points in volumetric data.
"""
def __init__(self, structure, data, distance_matrix=None, data_aug=None):
"""
Typically, this constructor is not used directly and the static
from_file constructor is used. This constructor is designed to allow
summation and other operations between VolumetricData objects.
Args:
structure: Structure associated with the volumetric data
data: Actual volumetric data.
data_aug: Any extra information associated with volumetric data
(typically augmentation charges)
distance_matrix: A pre-computed distance matrix if available.
                Useful for passing distance_matrices between sums,
                short-circuiting an otherwise expensive operation.
"""
self.structure = structure
self.is_spin_polarized = len(data) >= 2
self.is_soc = len(data) >= 4
self.dim = data["total"].shape
self.data = data
self.data_aug = data_aug if data_aug else {}
self.ngridpts = self.dim[0] * self.dim[1] * self.dim[2]
# lazy init the spin data since this is not always needed.
self._spin_data = {}
self._distance_matrix = {} if not distance_matrix else distance_matrix
@property
def spin_data(self):
"""
The data decomposed into actual spin data as {spin: data}.
Essentially, this provides the actual Spin.up and Spin.down data
instead of the total and diff. Note that by definition, a
non-spin-polarized run would have Spin.up data == Spin.down data.
"""
if not self._spin_data:
spin_data = dict()
spin_data[Spin.up] = 0.5 * (self.data["total"] +
self.data.get("diff", 0))
spin_data[Spin.down] = 0.5 * (self.data["total"] -
self.data.get("diff", 0))
self._spin_data = spin_data
return self._spin_data
def get_axis_grid(self, ind):
"""
Returns the grid for a particular axis.
Args:
ind (int): Axis index.
"""
ng = self.dim
num_pts = ng[ind]
lengths = self.structure.lattice.abc
return [i / num_pts * lengths[ind] for i in range(num_pts)]
def __add__(self, other):
return self.linear_add(other, 1.0)
def __sub__(self, other):
return self.linear_add(other, -1.0)
def copy(self):
"""
:return: Copy of Volumetric object
"""
return VolumetricData(
self.structure,
{k: v.copy() for k, v in self.data.items()},
distance_matrix=self._distance_matrix,
data_aug=self.data_aug
)
def linear_add(self, other, scale_factor=1.0):
"""
Method to do a linear sum of volumetric objects. Used by + and -
operators as well. Returns a VolumetricData object containing the
linear sum.
Args:
other (VolumetricData): Another VolumetricData object
scale_factor (float): Factor to scale the other data by.
Returns:
VolumetricData corresponding to self + scale_factor * other.
"""
if self.structure != other.structure:
warnings.warn("Structures are different. Make sure you know what "
"you are doing...")
if self.data.keys() != other.data.keys():
raise ValueError("Data have different keys! Maybe one is spin-"
"polarized and the other is not?")
# To add checks
data = {}
for k in self.data.keys():
data[k] = self.data[k] + scale_factor * other.data[k]
return VolumetricData(self.structure, data, self._distance_matrix)
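    # Usage sketch (hypothetical file names): __add__ and __sub__ delegate to
    # linear_add, so densities on identical grids can be combined directly.
    #
    #     chg_defect = Chgcar.from_file("CHGCAR_defect")
    #     chg_bulk = Chgcar.from_file("CHGCAR_bulk")
    #     delta = chg_defect - chg_bulk   # VolumetricData holding the difference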
@staticmethod
def parse_file(filename):
"""
Convenience method to parse a generic volumetric data file in the vasp
like format. Used by subclasses for parsing file.
Args:
filename (str): Path of file to parse
Returns:
(poscar, data)
"""
poscar_read = False
poscar_string = []
dataset = []
all_dataset = []
# for holding any strings in input that are not Poscar
# or VolumetricData (typically augmentation charges)
all_dataset_aug = {}
dim = None
dimline = None
read_dataset = False
ngrid_pts = 0
data_count = 0
poscar = None
with zopen(filename, "rt") as f:
for line in f:
original_line = line
line = line.strip()
if read_dataset:
for tok in line.split():
if data_count < ngrid_pts:
# This complicated procedure is necessary because
# vasp outputs x as the fastest index, followed by y
# then z.
no_x = data_count // dim[0]
dataset[
data_count % dim[0],
no_x % dim[1],
no_x // dim[1]
] = float(tok)
data_count += 1
if data_count >= ngrid_pts:
read_dataset = False
data_count = 0
all_dataset.append(dataset)
elif not poscar_read:
if line != "" or len(poscar_string) == 0:
poscar_string.append(line)
elif line == "":
poscar = Poscar.from_string("\n".join(poscar_string))
poscar_read = True
elif not dim:
dim = [int(i) for i in line.split()]
ngrid_pts = dim[0] * dim[1] * dim[2]
dimline = line
read_dataset = True
dataset = np.zeros(dim)
elif line == dimline:
# when line == dimline, expect volumetric data to follow
# so set read_dataset to True
read_dataset = True
dataset = np.zeros(dim)
else:
# store any extra lines that were not part of the
# volumetric data so we know which set of data the extra
# lines are associated with
key = len(all_dataset) - 1
if key not in all_dataset_aug:
all_dataset_aug[key] = []
all_dataset_aug[key].append(original_line)
if len(all_dataset) == 4:
data = {"total": all_dataset[0], "diff_x": all_dataset[1],
"diff_y": all_dataset[2], "diff_z": all_dataset[3]}
data_aug = {"total": all_dataset_aug.get(0, None),
"diff_x": all_dataset_aug.get(1, None),
"diff_y": all_dataset_aug.get(2, None),
"diff_z": all_dataset_aug.get(3, None)}
# construct a "diff" dict for scalar-like magnetization density,
# referenced to an arbitrary direction (using same method as
# pymatgen.electronic_structure.core.Magmom, see
# Magmom documentation for justification for this)
# TODO: re-examine this, and also similar behavior in
# Magmom - @mkhorton
# TODO: does CHGCAR change with different SAXIS?
diff_xyz = np.array([data["diff_x"], data["diff_y"],
data["diff_z"]])
diff_xyz = diff_xyz.reshape((3, dim[0] * dim[1] * dim[2]))
ref_direction = np.array([1.01, 1.02, 1.03])
ref_sign = np.sign(np.dot(ref_direction, diff_xyz))
diff = np.multiply(np.linalg.norm(diff_xyz, axis=0), ref_sign)
data["diff"] = diff.reshape((dim[0], dim[1], dim[2]))
elif len(all_dataset) == 2:
data = {"total": all_dataset[0], "diff": all_dataset[1]}
data_aug = {"total": all_dataset_aug.get(0, None),
"diff": all_dataset_aug.get(1, None)}
else:
data = {"total": all_dataset[0]}
data_aug = {"total": all_dataset_aug.get(0, None)}
return poscar, data, data_aug
def write_file(self, file_name, vasp4_compatible=False):
"""
Write the VolumetricData object to a vasp compatible file.
Args:
file_name (str): Path to a file
vasp4_compatible (bool): True if the format is vasp4 compatible
"""
def _print_fortran_float(f):
"""
Fortran codes print floats with a leading zero in scientific
notation. When writing CHGCAR files, we adopt this convention
to ensure written CHGCAR files are byte-to-byte identical to
their input files as far as possible.
:param f: float
:return: str
"""
s = "{:.10E}".format(f)
if f >= 0:
return "0." + s[0] + s[2:12] + 'E' + "{:+03}".format(int(s[13:]) + 1)
else:
return "-." + s[1] + s[3:13] + 'E' + "{:+03}".format(int(s[14:]) + 1)
with zopen(file_name, "wt") as f:
p = Poscar(self.structure)
# use original name if it's been set (e.g. from Chgcar)
comment = getattr(self, 'name', p.comment)
lines = comment + "\n"
lines += " 1.00000000000000\n"
latt = self.structure.lattice.matrix
lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[0, :])
lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[1, :])
lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[2, :])
if not vasp4_compatible:
lines += "".join(["%5s" % s for s in p.site_symbols]) + "\n"
lines += "".join(["%6d" % x for x in p.natoms]) + "\n"
lines += "Direct\n"
for site in self.structure:
lines += "%10.6f%10.6f%10.6f\n" % tuple(site.frac_coords)
lines += " \n"
f.write(lines)
a = self.dim
def write_spin(data_type):
lines = []
count = 0
f.write(" {} {} {}\n".format(a[0], a[1], a[2]))
for (k, j, i) in itertools.product(list(range(a[2])),
list(range(a[1])),
list(range(a[0]))):
lines.append(_print_fortran_float(self.data[data_type][i, j, k]))
count += 1
if count % 5 == 0:
f.write(" " + "".join(lines) + "\n")
lines = []
else:
lines.append(" ")
if count % 5 != 0:
f.write(" " + "".join(lines) + " \n")
f.write("".join(self.data_aug.get(data_type, [])))
write_spin("total")
if self.is_spin_polarized and self.is_soc:
write_spin("diff_x")
write_spin("diff_y")
write_spin("diff_z")
elif self.is_spin_polarized:
write_spin("diff")
def get_integrated_diff(self, ind, radius, nbins=1):
"""
Get integrated difference of atom index ind up to radius. This can be
an extremely computationally intensive process, depending on how many
grid points are in the VolumetricData.
Args:
ind (int): Index of atom.
radius (float): Radius of integration.
nbins (int): Number of bins. Defaults to 1. This allows one to
obtain the charge integration up to a list of the cumulative
charge integration values for radii for [radius/nbins,
2 * radius/nbins, ....].
Returns:
Differential integrated charge as a np array of [[radius, value],
...]. Format is for ease of plotting. E.g., plt.plot(data[:,0],
data[:,1])
"""
# For non-spin-polarized runs, this is zero by definition.
if not self.is_spin_polarized:
radii = [radius / nbins * (i + 1) for i in range(nbins)]
data = np.zeros((nbins, 2))
data[:, 0] = radii
return data
struct = self.structure
a = self.dim
if ind not in self._distance_matrix or \
self._distance_matrix[ind]["max_radius"] < radius:
coords = []
for (x, y, z) in itertools.product(*[list(range(i)) for i in a]):
coords.append([x / a[0], y / a[1], z / a[2]])
sites_dist = struct.lattice.get_points_in_sphere(
coords, struct[ind].coords, radius)
self._distance_matrix[ind] = {"max_radius": radius,
"data": np.array(sites_dist)}
data = self._distance_matrix[ind]["data"]
# Use boolean indexing to find all charges within the desired distance.
inds = data[:, 1] <= radius
dists = data[inds, 1]
data_inds = np.rint(np.mod(list(data[inds, 0]), 1) *
np.tile(a, (len(dists), 1))).astype(int)
vals = [self.data["diff"][x, y, z] for x, y, z in data_inds]
hist, edges = np.histogram(dists, bins=nbins,
range=[0, radius],
weights=vals)
data = np.zeros((nbins, 2))
data[:, 0] = edges[1:]
data[:, 1] = [sum(hist[0:i + 1]) / self.ngridpts
for i in range(nbins)]
return data
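    # Usage sketch (hypothetical; plt stands for a matplotlib.pyplot import):
    # cumulative spin-density difference around site 0.
    #
    #     data = chgcar.get_integrated_diff(0, radius=2.0, nbins=10)
    #     plt.plot(data[:, 0], data[:, 1])   # bin radii vs. cumulative integrated values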
def get_average_along_axis(self, ind):
"""
        Get the averaged total of the volumetric data along a certain axis direction.
For example, useful for visualizing Hartree Potentials from a LOCPOT
file.
Args:
ind (int): Index of axis.
Returns:
Average total along axis
"""
m = self.data["total"]
ng = self.dim
if ind == 0:
total = np.sum(np.sum(m, axis=1), 1)
elif ind == 1:
total = np.sum(np.sum(m, axis=0), 1)
else:
total = np.sum(np.sum(m, axis=0), 0)
return total / ng[(ind + 1) % 3] / ng[(ind + 2) % 3]
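    # Usage sketch (hypothetical file name): planar-averaged potential along z
    # from a LOCPOT, e.g. for estimating the vacuum level.
    #
    #     locpot = Locpot.from_file("LOCPOT")
    #     z = locpot.get_axis_grid(2)
    #     v_avg = locpot.get_average_along_axis(2)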
def to_hdf5(self, filename):
"""
        Writes the VolumetricData to an HDF5 file, a highly optimized format
        for reading and storing large data. The mapping of the VolumetricData
to this file format is as follows:
VolumetricData.data -> f["vdata"]
VolumetricData.structure ->
f["Z"]: Sequence of atomic numbers
f["fcoords"]: Fractional coords
f["lattice"]: Lattice in the pymatgen.core.lattice.Lattice matrix
format
f.attrs["structure_json"]: String of json representation
Args:
filename (str): Filename to output to.
"""
import h5py
with h5py.File(filename, "w") as f:
ds = f.create_dataset("lattice", (3, 3), dtype='float')
ds[...] = self.structure.lattice.matrix
ds = f.create_dataset("Z", (len(self.structure.species),),
dtype="i")
ds[...] = np.array([sp.Z for sp in self.structure.species])
ds = f.create_dataset("fcoords", self.structure.frac_coords.shape,
dtype='float')
ds[...] = self.structure.frac_coords
dt = h5py.special_dtype(vlen=str)
ds = f.create_dataset("species", (len(self.structure.species),),
dtype=dt)
ds[...] = [str(sp) for sp in self.structure.species]
grp = f.create_group("vdata")
for k, v in self.data.items():
ds = grp.create_dataset(k, self.data[k].shape, dtype='float')
ds[...] = self.data[k]
f.attrs["name"] = self.name
f.attrs["structure_json"] = json.dumps(self.structure.as_dict())
@classmethod
def from_hdf5(cls, filename, **kwargs):
"""
Reads VolumetricData from HDF5 file.
:param filename: Filename
:return: VolumetricData
"""
import h5py
with h5py.File(filename, "r") as f:
data = {k: np.array(v) for k, v in f["vdata"].items()}
data_aug = None
if 'vdata_aug' in f:
data_aug = {k: np.array(v) for k, v in f["vdata_aug"].items()}
structure = Structure.from_dict(json.loads(f.attrs["structure_json"]))
return cls(structure, data=data, data_aug=data_aug, **kwargs)
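    # Usage sketch (hypothetical file names): round-tripping through HDF5.
    #
    #     chgcar.to_hdf5("chgcar.h5")
    #     vd = VolumetricData.from_hdf5("chgcar.h5")   # structure and data keys restored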
class Locpot(VolumetricData):
"""
Simple object for reading a LOCPOT file.
"""
def __init__(self, poscar, data):
"""
Args:
poscar (Poscar): Poscar object containing structure.
data: Actual data.
"""
super().__init__(poscar.structure, data)
self.name = poscar.comment
@classmethod
def from_file(cls, filename, **kwargs):
"""
Reads a LOCPOT file.
:param filename: Filename
:return: Locpot
"""
(poscar, data, data_aug) = VolumetricData.parse_file(filename)
return cls(poscar, data, **kwargs)
class Chgcar(VolumetricData):
"""
Simple object for reading a CHGCAR file.
"""
def __init__(self, poscar, data, data_aug=None):
"""
Args:
poscar (Poscar): Poscar object containing structure.
data: Actual data.
data_aug: Augmentation charge data
"""
# allow for poscar or structure files to be passed
if isinstance(poscar, Poscar):
tmp_struct = poscar.structure
self.poscar = poscar
self.name = poscar.comment
elif isinstance(poscar, Structure):
tmp_struct = poscar
self.poscar = Poscar(poscar)
self.name = None
super().__init__(tmp_struct, data, data_aug=data_aug)
self._distance_matrix = {}
@staticmethod
def from_file(filename):
"""
Reads a CHGCAR file.
:param filename: Filename
:return: Chgcar
"""
(poscar, data, data_aug) = VolumetricData.parse_file(filename)
return Chgcar(poscar, data, data_aug=data_aug)
@property
def net_magnetization(self):
"""
:return: Net magnetization from Chgcar
"""
if self.is_spin_polarized:
return np.sum(self.data['diff'])
else:
return None
class Elfcar(VolumetricData):
"""
Read an ELFCAR file which contains the Electron Localization Function (ELF)
as calculated by VASP.
For ELF, "total" key refers to Spin.up, and "diff" refers to Spin.down.
This also contains information on the kinetic energy density.
"""
def __init__(self, poscar, data):
"""
Args:
poscar (Poscar): Poscar object containing structure.
data: Actual data.
"""
super().__init__(poscar.structure, data)
# TODO: modify VolumetricData so that the correct keys can be used.
# for ELF, instead of "total" and "diff" keys we have
# "Spin.up" and "Spin.down" keys
# I believe this is correct, but there's not much documentation -mkhorton
self.data = data
@classmethod
def from_file(cls, filename):
"""
Reads a ELFCAR file.
:param filename: Filename
:return: Elfcar
"""
(poscar, data, data_aug) = VolumetricData.parse_file(filename)
return cls(poscar, data)
def get_alpha(self):
"""
Get the parameter alpha where ELF = 1/(1+alpha^2).
"""
alpha_data = {}
for k, v in self.data.items():
alpha = 1 / v
alpha = alpha - 1
alpha = np.sqrt(alpha)
alpha_data[k] = alpha
return VolumetricData(self.structure, alpha_data)
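    # Worked relation behind get_alpha (a sketch): since ELF = 1 / (1 + alpha^2),
    # alpha = sqrt(1/ELF - 1); e.g. ELF = 0.5 gives alpha = 1.0, and ELF -> 1
    # gives alpha -> 0.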
class Procar:
"""
Object for reading a PROCAR file.
.. attribute:: data
        The PROCAR data of the form below. Note that VASP uses 1-based indexing,
        but all indices are converted to 0-based here.::
{
spin: nd.array accessed with (k-point index, band index,
ion index, orbital index)
}
.. attribute:: weights
        The weights associated with each k-point as an nd.array of length
        nkpoints.
    .. attribute:: phase_factors
Phase factors, where present (e.g. LORBIT = 12). A dict of the form:
{
spin: complex nd.array accessed with (k-point index, band index,
ion index, orbital index)
}
    .. attribute:: nbands
Number of bands
    .. attribute:: nkpoints
Number of k-points
    .. attribute:: nions
Number of ions
"""
def __init__(self, filename):
"""
Args:
filename: Name of file containing PROCAR.
"""
headers = None
with zopen(filename, "rt") as f:
preambleexpr = re.compile(
r"# of k-points:\s*(\d+)\s+# of bands:\s*(\d+)\s+# of "
r"ions:\s*(\d+)")
kpointexpr = re.compile(r"^k-point\s+(\d+).*weight = ([0-9\.]+)")
bandexpr = re.compile(r"^band\s+(\d+)")
ionexpr = re.compile(r"^ion.*")
expr = re.compile(r"^([0-9]+)\s+")
current_kpoint = 0
current_band = 0
done = False
spin = Spin.down
weights = None
for l in f:
l = l.strip()
if bandexpr.match(l):
m = bandexpr.match(l)
current_band = int(m.group(1)) - 1
done = False
elif kpointexpr.match(l):
m = kpointexpr.match(l)
current_kpoint = int(m.group(1)) - 1
weights[current_kpoint] = float(m.group(2))
if current_kpoint == 0:
spin = Spin.up if spin == Spin.down else Spin.down
done = False
elif headers is None and ionexpr.match(l):
headers = l.split()
headers.pop(0)
headers.pop(-1)
def f():
return np.zeros((nkpoints, nbands, nions, len(headers)))
data = defaultdict(f)
def f2():
return np.full((nkpoints, nbands, nions, len(headers)),
np.NaN, dtype=np.complex128)
phase_factors = defaultdict(f2)
elif expr.match(l):
toks = l.split()
index = int(toks.pop(0)) - 1
num_data = np.array([float(t) for t in toks[:len(headers)]])
if not done:
data[spin][current_kpoint, current_band, index, :] = num_data
else:
if len(toks) > len(headers):
# new format of PROCAR (vasp 5.4.4)
num_data = np.array([float(t)
for t in toks[:2 * len(headers)]])
for orb in range(len(headers)):
phase_factors[spin][current_kpoint, current_band,
index, orb] = complex(num_data[2 * orb], num_data[2 * orb + 1])
else:
# old format of PROCAR (vasp 5.4.1 and before)
if np.isnan(phase_factors[spin][current_kpoint, current_band, index, 0]):
phase_factors[spin][current_kpoint, current_band, index, :] = num_data
else:
phase_factors[spin][current_kpoint, current_band, index, :] += 1j * num_data
elif l.startswith("tot"):
done = True
elif preambleexpr.match(l):
m = preambleexpr.match(l)
nkpoints = int(m.group(1))
nbands = int(m.group(2))
nions = int(m.group(3))
weights = np.zeros(nkpoints)
self.nkpoints = nkpoints
self.nbands = nbands
self.nions = nions
self.weights = weights
self.orbitals = headers
self.data = data
self.phase_factors = phase_factors
def get_projection_on_elements(self, structure):
"""
Method returning a dictionary of projections on elements.
Args:
structure (Structure): Input structure.
Returns:
            a dictionary of the form {Spin.up: [band index][kpoint index]{Element: value}}
"""
dico = {}
for spin in self.data.keys():
dico[spin] = [[defaultdict(float)
for i in range(self.nkpoints)]
for j in range(self.nbands)]
for iat in range(self.nions):
name = structure.species[iat].symbol
for spin, d in self.data.items():
for k, b in itertools.product(range(self.nkpoints),
range(self.nbands)):
dico[spin][b][k][name] = np.sum(d[k, b, iat, :])
return dico
def get_occupation(self, atom_index, orbital):
"""
Returns the occupation for a particular orbital of a particular atom.
Args:
            atom_index (int): Index of atom in the PROCAR. It should be noted
that VASP uses 1-based indexing for atoms, but this is
converted to 0-based indexing in this parser to be
consistent with representation of structures in pymatgen.
orbital (str): An orbital. If it is a single character, e.g., s,
p, d or f, the sum of all s-type, p-type, d-type or f-type
orbitals occupations are returned respectively. If it is a
specific orbital, e.g., px, dxy, etc., only the occupation
of that orbital is returned.
Returns:
Sum occupation of orbital of atom.
"""
orbital_index = self.orbitals.index(orbital)
return {spin: np.sum(d[:, :, atom_index, orbital_index] * self.weights[:, None])
for spin, d in self.data.items()}
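    # Usage sketch (hypothetical file name; structure stands for a pymatgen
    # Structure matching the PROCAR run):
    #
    #     procar = Procar("PROCAR")
    #     dxy_occ = procar.get_occupation(0, "dxy")                # single orbital on atom 0
    #     proj = procar.get_projection_on_elements(structure)      # per-element projections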
class Oszicar:
"""
A basic parser for an OSZICAR output from VASP. In general, while the
OSZICAR is useful for a quick look at the output from a VASP run, we
recommend that you use the Vasprun parser instead, which gives far richer
information about a run.
.. attribute:: electronic_steps
All electronic steps as a list of list of dict. e.g.,
[[{"rms": 160.0, "E": 4507.24605593, "dE": 4507.2, "N": 1,
"deps": -17777.0, "ncg": 16576}, ...], [....]
where electronic_steps[index] refers the list of electronic steps
in one ionic_step, electronic_steps[index][subindex] refers to a
particular electronic step at subindex in ionic step at index. The
dict of properties depends on the type of VASP run, but in general,
"E", "dE" and "rms" should be present in almost all runs.
.. attribute:: ionic_steps:
All ionic_steps as a list of dict, e.g.,
[{"dE": -526.36, "E0": -526.36024, "mag": 0.0, "F": -526.36024},
...]
This is the typical output from VASP at the end of each ionic step.
"""
def __init__(self, filename):
"""
Args:
filename (str): Filename of file to parse
"""
electronic_steps = []
ionic_steps = []
ionic_pattern = re.compile(r"(\d+)\s+F=\s*([\d\-\.E\+]+)\s+"
r"E0=\s*([\d\-\.E\+]+)\s+"
r"d\s*E\s*=\s*([\d\-\.E\+]+)$")
ionic_mag_pattern = re.compile(r"(\d+)\s+F=\s*([\d\-\.E\+]+)\s+"
r"E0=\s*([\d\-\.E\+]+)\s+"
r"d\s*E\s*=\s*([\d\-\.E\+]+)\s+"
r"mag=\s*([\d\-\.E\+]+)")
ionic_MD_pattern = re.compile(r"(\d+)\s+T=\s*([\d\-\.E\+]+)\s+"
r"E=\s*([\d\-\.E\+]+)\s+"
r"F=\s*([\d\-\.E\+]+)\s+"
r"E0=\s*([\d\-\.E\+]+)\s+"
r"EK=\s*([\d\-\.E\+]+)\s+"
r"SP=\s*([\d\-\.E\+]+)\s+"
r"SK=\s*([\d\-\.E\+]+)")
electronic_pattern = re.compile(r"\s*\w+\s*:(.*)")
def smart_convert(header, num):
try:
if header == "N" or header == "ncg":
v = int(num)
return v
v = float(num)
return v
except ValueError:
return "--"
header = []
with zopen(filename, "rt") as fid:
for line in fid:
line = line.strip()
m = electronic_pattern.match(line)
if m:
toks = m.group(1).split()
data = {header[i]: smart_convert(header[i], toks[i])
for i in range(len(toks))}
if toks[0] == "1":
electronic_steps.append([data])
else:
electronic_steps[-1].append(data)
elif ionic_pattern.match(line.strip()):
m = ionic_pattern.match(line.strip())
ionic_steps.append({"F": float(m.group(2)),
"E0": float(m.group(3)),
"dE": float(m.group(4))})
elif ionic_mag_pattern.match(line.strip()):
m = ionic_mag_pattern.match(line.strip())
ionic_steps.append({"F": float(m.group(2)),
"E0": float(m.group(3)),
"dE": float(m.group(4)),
"mag": float(m.group(5))})
elif ionic_MD_pattern.match(line.strip()):
m = ionic_MD_pattern.match(line.strip())
ionic_steps.append({"T": float(m.group(2)),
"E": float(m.group(3)),
"F": float(m.group(4)),
"E0": float(m.group(5)),
"EK": float(m.group(6)),
"SP": float(m.group(7)),
"SK": float(m.group(8))})
elif re.match(r"^\s*N\s+E\s*", line):
header = line.strip().replace("d eps", "deps").split()
self.electronic_steps = electronic_steps
self.ionic_steps = ionic_steps
@property
def all_energies(self):
"""
Compilation of all energies from all electronic steps and ionic steps
as a tuple of list of energies, e.g.,
((4507.24605593, 143.824705755, -512.073149912, ...), ...)
"""
all_energies = []
for i in range(len(self.electronic_steps)):
energies = [step["E"] for step in self.electronic_steps[i]]
energies.append(self.ionic_steps[i]["F"])
all_energies.append(tuple(energies))
return tuple(all_energies)
@property # type: ignore
@unitized("eV")
def final_energy(self):
"""
Final energy from run.
"""
return self.ionic_steps[-1]["E0"]
def as_dict(self):
"""
:return: MSONable dict
"""
return {"electronic_steps": self.electronic_steps,
"ionic_steps": self.ionic_steps}
class VaspParserError(Exception):
"""
Exception class for VASP parsing.
"""
pass
def get_band_structure_from_vasp_multiple_branches(dir_name, efermi=None,
projections=False):
"""
This method is used to get band structure info from a VASP directory. It
    takes into account that the run can be divided into several branches named
    "branch_x". If the run has not been divided into branches, the method will
    fall back to parsing vasprun.xml directly.
    The method returns None if there is a parsing error.
Args:
dir_name: Directory containing all bandstructure runs.
efermi: Efermi for bandstructure.
projections: True if you want to get the data on site projections if
any. Note that this is sometimes very large
Returns:
A BandStructure Object
"""
# TODO: Add better error handling!!!
if os.path.exists(os.path.join(dir_name, "branch_0")):
# get all branch dir names
branch_dir_names = [os.path.abspath(d)
for d in glob.glob("{i}/branch_*"
.format(i=dir_name))
if os.path.isdir(d)]
        # sort by the directory name (e.g., branch_10)
sorted_branch_dir_names = sorted(branch_dir_names, key=lambda x: int(x.split("_")[-1]))
# populate branches with Bandstructure instances
branches = []
for dir_name in sorted_branch_dir_names:
xml_file = os.path.join(dir_name, "vasprun.xml")
if os.path.exists(xml_file):
run = Vasprun(xml_file, parse_projected_eigen=projections)
branches.append(run.get_band_structure(efermi=efermi))
else:
# It might be better to throw an exception
warnings.warn("Skipping {}. Unable to find {}".format(dir_name, xml_file))
return get_reconstructed_band_structure(branches, efermi)
else:
xml_file = os.path.join(dir_name, "vasprun.xml")
# Better handling of Errors
if os.path.exists(xml_file):
return Vasprun(xml_file, parse_projected_eigen=projections) \
.get_band_structure(kpoints_filename=None, efermi=efermi)
else:
return None
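# Usage sketch (hypothetical directory layout): a line-mode run split into
# branch_0, branch_1, ... is stitched back into a single band structure.
#
#     bs = get_band_structure_from_vasp_multiple_branches("bandstructure_run",
#                                                         projections=False)
#     if bs is not None:
#         gap = bs.get_band_gap()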
class Xdatcar:
"""
Class representing an XDATCAR file. Only tested with VASP 5.x files.
.. attribute:: structures
List of structures parsed from XDATCAR.
.. attribute:: comment
Optional comment string.
Authors: Ram Balachandran
"""
def __init__(self, filename, ionicstep_start=1,
ionicstep_end=None, comment=None):
"""
Init a Xdatcar.
Args:
filename (str): Filename of input XDATCAR file.
ionicstep_start (int): Starting number of ionic step.
ionicstep_end (int): Ending number of ionic step.
"""
preamble = None
coords_str = []
structures = []
preamble_done = False
if (ionicstep_start < 1):
raise Exception('Start ionic step cannot be less than 1')
        if (ionicstep_end is not None and
                ionicstep_end < 1):
raise Exception('End ionic step cannot be less than 1')
ionicstep_cnt = 1
with zopen(filename, "rt") as f:
for l in f:
l = l.strip()
if preamble is None:
preamble = [l]
elif not preamble_done:
if l == "" or "Direct configuration=" in l:
preamble_done = True
tmp_preamble = [preamble[0]]
for i in range(1, len(preamble)):
if preamble[0] != preamble[i]:
tmp_preamble.append(preamble[i])
else:
break
preamble = tmp_preamble
else:
preamble.append(l)
elif l == "" or "Direct configuration=" in l:
p = Poscar.from_string("\n".join(preamble +
["Direct"] + coords_str))
if ionicstep_end is None:
if (ionicstep_cnt >= ionicstep_start):
structures.append(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.append(p.structure)
if ionicstep_cnt >= ionicstep_end:
break
ionicstep_cnt += 1
coords_str = []
else:
coords_str.append(l)
p = Poscar.from_string("\n".join(preamble +
["Direct"] + coords_str))
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
structures.append(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.append(p.structure)
self.structures = structures
self.comment = comment or self.structures[0].formula
@property
def site_symbols(self):
"""
Sequence of symbols associated with the Xdatcar. Similar to 6th line in
vasp 5+ Xdatcar.
"""
syms = [site.specie.symbol for site in self.structures[0]]
return [a[0] for a in itertools.groupby(syms)]
@property
def natoms(self):
"""
Sequence of number of sites of each type associated with the Poscar.
Similar to 7th line in vasp 5+ Xdatcar.
"""
syms = [site.specie.symbol for site in self.structures[0]]
return [len(tuple(a[1])) for a in itertools.groupby(syms)]
def concatenate(self, filename, ionicstep_start=1,
ionicstep_end=None):
"""
Concatenate structures in file to Xdatcar.
Args:
filename (str): Filename of XDATCAR file to be concatenated.
ionicstep_start (int): Starting number of ionic step.
ionicstep_end (int): Ending number of ionic step.
TODO(rambalachandran):
Requires a check to ensure if the new concatenating file has the
same lattice structure and atoms as the Xdatcar class.
"""
preamble = None
coords_str = []
structures = self.structures
preamble_done = False
if ionicstep_start < 1:
raise Exception('Start ionic step cannot be less than 1')
        if (ionicstep_end is not None and
                ionicstep_end < 1):
raise Exception('End ionic step cannot be less than 1')
ionicstep_cnt = 1
with zopen(filename, "rt") as f:
for l in f:
l = l.strip()
if preamble is None:
preamble = [l]
elif not preamble_done:
if l == "" or "Direct configuration=" in l:
preamble_done = True
tmp_preamble = [preamble[0]]
for i in range(1, len(preamble)):
if preamble[0] != preamble[i]:
tmp_preamble.append(preamble[i])
else:
break
preamble = tmp_preamble
else:
preamble.append(l)
elif l == "" or "Direct configuration=" in l:
p = Poscar.from_string("\n".join(preamble +
["Direct"] + coords_str))
if ionicstep_end is None:
if (ionicstep_cnt >= ionicstep_start):
structures.append(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.append(p.structure)
ionicstep_cnt += 1
coords_str = []
else:
coords_str.append(l)
p = Poscar.from_string("\n".join(preamble +
["Direct"] + coords_str))
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
structures.append(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.append(p.structure)
self.structures = structures
def get_string(self, ionicstep_start=1,
ionicstep_end=None,
significant_figures=8):
"""
Write Xdatcar class to a string.
Args:
ionicstep_start (int): Starting number of ionic step.
ionicstep_end (int): Ending number of ionic step.
significant_figures (int): Number of significant figures.
"""
if ionicstep_start < 1:
raise Exception('Start ionic step cannot be less than 1')
if ionicstep_end is not None and ionicstep_end < 1:
raise Exception('End ionic step cannot be less than 1')
latt = self.structures[0].lattice
if np.linalg.det(latt.matrix) < 0:
latt = Lattice(-latt.matrix)
lines = [self.comment, "1.0", str(latt)]
lines.append(" ".join(self.site_symbols))
lines.append(" ".join([str(x) for x in self.natoms]))
format_str = "{{:.{0}f}}".format(significant_figures)
ionicstep_cnt = 1
output_cnt = 1
for cnt, structure in enumerate(self.structures):
ionicstep_cnt = cnt + 1
if ionicstep_end is None:
if (ionicstep_cnt >= ionicstep_start):
lines.append("Direct configuration=" +
' ' * (7 - len(str(output_cnt))) + str(output_cnt))
for (i, site) in enumerate(structure):
coords = site.frac_coords
line = " ".join([format_str.format(c) for c in coords])
lines.append(line)
output_cnt += 1
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
lines.append("Direct configuration=" +
' ' * (7 - len(str(output_cnt))) + str(output_cnt))
for (i, site) in enumerate(structure):
coords = site.frac_coords
line = " ".join([format_str.format(c) for c in coords])
lines.append(line)
output_cnt += 1
return "\n".join(lines) + "\n"
def write_file(self, filename, **kwargs):
"""
Write Xdatcar class into a file.
Args:
filename (str): Filename of output XDATCAR file.
The supported kwargs are the same as those for the
Xdatcar.get_string method and are passed through directly.
"""
with zopen(filename, "wt") as f:
f.write(self.get_string(**kwargs))
def __str__(self):
return self.get_string()
class Dynmat:
"""
Object for reading a DYNMAT file.
.. attribute:: data
A nested dict containing the DYNMAT data of the form::
[atom <int>][disp <int>]['dispvec'] =
displacement vector (part of first line in dynmat block, e.g. "0.01 0 0")
[atom <int>][disp <int>]['dynmat'] =
<list> list of dynmat lines for this atom and this displacement
Authors: Patrick Huck
"""
def __init__(self, filename):
"""
Args:
filename: Name of file containing DYNMAT
"""
with zopen(filename, "rt") as f:
lines = list(clean_lines(f.readlines()))
self._nspecs, self._natoms, self._ndisps = map(int, lines[
0].split())
            self._masses = list(map(float, lines[1].split()))
self.data = defaultdict(dict)
atom, disp = None, None
for i, l in enumerate(lines[2:]):
v = list(map(float, l.split()))
if not i % (self._natoms + 1):
atom, disp = map(int, v[:2])
if atom not in self.data:
self.data[atom] = {}
if disp not in self.data[atom]:
self.data[atom][disp] = {}
self.data[atom][disp]['dispvec'] = v[2:]
else:
if 'dynmat' not in self.data[atom][disp]:
self.data[atom][disp]['dynmat'] = []
self.data[atom][disp]['dynmat'].append(v)
def get_phonon_frequencies(self):
"""calculate phonon frequencies"""
# TODO: the following is most likely not correct or suboptimal
# hence for demonstration purposes only
frequencies = []
        for k, v0 in self.data.items():
            for v1 in v0.values():
vec = map(abs, v1['dynmat'][k - 1])
frequency = math.sqrt(sum(vec)) * 2. * math.pi * 15.633302 # THz
frequencies.append(frequency)
return frequencies
@property
def nspecs(self):
"""returns the number of species"""
return self._nspecs
@property
def natoms(self):
"""returns the number of atoms"""
return self._natoms
@property
def ndisps(self):
"""returns the number of displacements"""
return self._ndisps
@property
def masses(self):
"""returns the list of atomic masses"""
return list(self._masses)
def get_adjusted_fermi_level(efermi, cbm, band_structure):
"""
    When running a band structure computation, the Fermi level needs to be
    taken from the static run that gave the charge density used for the
    non-self-consistent band structure run. Sometimes this Fermi level is,
    however, a little too low because of the mismatch between the uniform grid
    used in the static run and the band structure k-points (e.g., the VBM is at
    Gamma and the Gamma point is not in the uniform mesh). Here we use a
    procedure that scans energy levels above the static Fermi level (but below
    the LUMO); if any of these levels makes the band structure appear
    insulating rather than metallic, we keep that adjusted Fermi level. This
    procedure has been shown to detect most insulators correctly.
Args:
efermi (float): Fermi energy of the static run
cbm (float): Conduction band minimum of the static run
        band_structure: a BandStructureSymmLine object
Returns:
a new adjusted fermi level
"""
# make a working copy of band_structure
bs_working = BandStructureSymmLine.from_dict(band_structure.as_dict())
if bs_working.is_metal():
e = efermi
while e < cbm:
e += 0.01
bs_working._efermi = e
if not bs_working.is_metal():
return e
return efermi
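# Usage sketch (hypothetical variable names): correct the Fermi level of a
# line-mode band structure using values taken from the static run.
#
#     efermi_new = get_adjusted_fermi_level(static_efermi, static_cbm, line_bs)
#     # static_efermi, static_cbm and line_bs are placeholders for the static-run
#     # Fermi level, static-run CBM and the line-mode band structure, respectively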
class Wavecar:
"""
This is a class that contains the (pseudo-) wavefunctions from VASP.
Coefficients are read from the given WAVECAR file and the corresponding
G-vectors are generated using the algorithm developed in WaveTrans (see
acknowledgments below). To understand how the wavefunctions are evaluated,
please see the evaluate_wavefunc docstring.
It should be noted that the pseudopotential augmentation is not included in
the WAVECAR file. As a result, some caution should be exercised when
    deriving values from this information.
The usefulness of this class is to allow the user to do projections or band
unfolding style manipulations of the wavefunction. An example of this can
be seen in the work of Shen et al. 2017
(https://doi.org/10.1103/PhysRevMaterials.1.065001).
.. attribute:: filename
String of the input file (usually WAVECAR)
.. attribute:: nk
Number of k-points from the WAVECAR
.. attribute:: nb
Number of bands per k-point
.. attribute:: encut
Energy cutoff (used to define G_{cut})
.. attribute:: efermi
Fermi energy
.. attribute:: a
Primitive lattice vectors of the cell (e.g. a_1 = self.a[0, :])
.. attribute:: b
Reciprocal lattice vectors of the cell (e.g. b_1 = self.b[0, :])
.. attribute:: vol
The volume of the unit cell in real space
.. attribute:: kpoints
The list of k-points read from the WAVECAR file
.. attribute:: band_energy
The list of band eigenenergies (and corresponding occupancies) for
each kpoint, where the first index corresponds to the index of the
k-point (e.g. self.band_energy[kp])
.. attribute:: Gpoints
The list of generated G-points for each k-point (a double list), which
are used with the coefficients for each k-point and band to recreate
the wavefunction (e.g. self.Gpoints[kp] is the list of G-points for
k-point kp). The G-points depend on the k-point and reciprocal lattice
and therefore are identical for each band at the same k-point. Each
G-point is represented by integer multipliers (e.g. assuming
Gpoints[kp][n] == [n_1, n_2, n_3], then
G_n = n_1*b_1 + n_2*b_2 + n_3*b_3)
.. attribute:: coeffs
The list of coefficients for each k-point and band for reconstructing
the wavefunction. For non-spin-polarized, the first index corresponds
to the kpoint and the second corresponds to the band (e.g.
self.coeffs[kp][b] corresponds to k-point kp and band b). For
spin-polarized calculations, the first index is for the spin.
Acknowledgments:
This code is based upon the Fortran program, WaveTrans, written by
R. M. Feenstra and M. Widom from the Dept. of Physics at Carnegie
Mellon University. To see the original work, please visit:
https://www.andrew.cmu.edu/user/feenstra/wavetrans/
Author: Mark Turiansky
"""
def __init__(self, filename='WAVECAR', verbose=False, precision='normal', gamma=None):
"""
Information is extracted from the given WAVECAR
Args:
filename (str): input file (default: WAVECAR)
verbose (bool): determines whether processing information is shown
precision (str): determines how fine the fft mesh is (normal or
accurate), only the first letter matters
gamma (bool): determines if WAVECAR is assumed to have been generated
by gamma-point only executable
"""
self.filename = filename
# c = 0.26246582250210965422
# 2m/hbar^2 in agreement with VASP
self._C = 0.262465831
with open(self.filename, 'rb') as f:
# read the header information
recl, spin, rtag = np.fromfile(f, dtype=np.float64, count=3) \
.astype(np.int)
if verbose:
print('recl={}, spin={}, rtag={}'.format(recl, spin, rtag))
recl8 = int(recl / 8)
self.spin = spin
# check that ISPIN wasn't set to 2
# if spin == 2:
# raise ValueError('spin polarization not currently supported')
# check to make sure we have precision correct
if rtag != 45200 and rtag != 45210 and rtag != 53300 and rtag != 53310:
# note that rtag=45200 and 45210 may not work if file was actually
# generated by old version of VASP, since that would write eigenvalues
# and occupations in way that does not span FORTRAN records, but
# reader below appears to assume that record boundaries can be ignored
# (see OUTWAV vs. OUTWAV_4 in vasp fileio.F)
raise ValueError('invalid rtag of {}'.format(rtag))
# padding to end of fortran REC=1
np.fromfile(f, dtype=np.float64, count=(recl8 - 3))
# extract kpoint, bands, energy, and lattice information
self.nk, self.nb, self.encut = np.fromfile(f, dtype=np.float64,
count=3).astype(np.int)
self.a = np.fromfile(f, dtype=np.float64, count=9).reshape((3, 3))
self.efermi = np.fromfile(f, dtype=np.float64, count=1)[0]
if verbose:
print('kpoints = {}, bands = {}, energy cutoff = {}, fermi '
'energy= {:.04f}\n'.format(self.nk, self.nb, self.encut,
self.efermi))
print('primitive lattice vectors = \n{}'.format(self.a))
self.vol = np.dot(self.a[0, :],
np.cross(self.a[1, :], self.a[2, :]))
if verbose:
print('volume = {}\n'.format(self.vol))
# calculate reciprocal lattice
b = np.array([np.cross(self.a[1, :], self.a[2, :]),
np.cross(self.a[2, :], self.a[0, :]),
np.cross(self.a[0, :], self.a[1, :])])
b = 2 * np.pi * b / self.vol
self.b = b
if verbose:
print('reciprocal lattice vectors = \n{}'.format(b))
print('reciprocal lattice vector magnitudes = \n{}\n'
.format(np.linalg.norm(b, axis=1)))
# calculate maximum number of b vectors in each direction
self._generate_nbmax()
if verbose:
print('max number of G values = {}\n\n'.format(self._nbmax))
self.ng = self._nbmax * 3 if precision.lower()[0] == 'n' else \
self._nbmax * 4
# padding to end of fortran REC=2
np.fromfile(f, dtype=np.float64, count=recl8 - 13)
# reading records
# np.set_printoptions(precision=7, suppress=True)
self.Gpoints = [None for _ in range(self.nk)]
self.kpoints = []
if spin == 2:
self.coeffs = [[[None for i in range(self.nb)]
for j in range(self.nk)] for _ in range(spin)]
self.band_energy = [[] for _ in range(spin)]
else:
self.coeffs = [[None for i in range(self.nb)]
for j in range(self.nk)]
self.band_energy = []
for ispin in range(spin):
if verbose:
print('reading spin {}'.format(ispin))
for ink in range(self.nk):
# information for this kpoint
nplane = int(np.fromfile(f, dtype=np.float64, count=1)[0])
kpoint = np.fromfile(f, dtype=np.float64, count=3)
if ispin == 0:
self.kpoints.append(kpoint)
else:
assert np.allclose(self.kpoints[ink], kpoint)
if verbose:
print('kpoint {: 4} with {: 5} plane waves at {}'
.format(ink, nplane, kpoint))
# energy and occupation information
enocc = np.fromfile(f, dtype=np.float64,
count=3 * self.nb).reshape((self.nb, 3))
if spin == 2:
self.band_energy[ispin].append(enocc)
else:
self.band_energy.append(enocc)
if verbose:
print("enocc", enocc[:, [0, 2]])
# padding to end of record that contains nplane, kpoints, evals and occs
np.fromfile(f, dtype=np.float64, count=(recl8 - 4 - 3 * self.nb) % recl8)
# generate G integers
if gamma is not None:
# use it
self.gamma = gamma
(self.Gpoints[ink], extra_gpoints, extra_coeff_inds) = self._generate_G_points(kpoint, gamma)
else:
# try assuming a conventional (non-gamma) calculation
self.gamma = False
(self.Gpoints[ink], extra_gpoints, extra_coeff_inds) = self._generate_G_points(kpoint, False)
initial_generated = len(self.Gpoints[ink])
if gamma is None and len(self.Gpoints[ink]) != nplane:
# failed with conventional, retry with gamma-only format
self.gamma = True
(self.Gpoints[ink], extra_gpoints, extra_coeff_inds) = self._generate_G_points(kpoint, True)
if len(self.Gpoints[ink]) != nplane:
# failed to match number of plane waves for either gamma or non-gamma
if gamma is None:
raise ValueError('failed to generate the correct number of '
'G points generated non-gamma {} gamma-only {}, read in {}'.format(
initial_generated, len(self.Gpoints[ink]), nplane))
else:
raise ValueError('failed to generate the correct '
'number of G points for {} executable '
'generated {} read in {}'.format(
'gamma' if gamma else 'k-points', len(self.Gpoints[ink]), nplane))
if verbose:
print("gamma-only input", gamma, "final", self.gamma)
self.Gpoints[ink] = np.array(self.Gpoints[ink] + extra_gpoints, dtype=np.float64)
# extract coefficients
for inb in range(self.nb):
if rtag == 45200 or rtag == 53300:
data = np.fromfile(f, dtype=np.complex64, count=nplane)
np.fromfile(f, dtype=np.float64, count=recl8 - nplane)
elif rtag == 45210 or rtag == 53310:
# this should handle double precision coefficients
# but I don't have a WAVECAR to test it with
data = np.fromfile(f, dtype=np.complex128, count=nplane)
np.fromfile(f, dtype=np.float64, count=recl8 - 2 * nplane)
extra_coeffs = []
if len(extra_coeff_inds) > 0:
# reconstruct extra coefficients missing from gamma-only executable WAVECAR
for G_ind in extra_coeff_inds:
                            # the gamma-only executable appears to store c(G) scaled
                            # by sqrt(2) for G != 0 (the c(-G) = conj(c(G)) partner is
                            # folded in), so undo that scaling before duplicating
data[G_ind] /= np.sqrt(2)
extra_coeffs.append(np.conj(data[G_ind]))
if spin == 2:
self.coeffs[ispin][ink][inb] = np.array(list(data) + extra_coeffs, dtype=np.complex64)
else:
self.coeffs[ink][inb] = np.array(list(data) + extra_coeffs, dtype=np.complex128)
def _generate_nbmax(self):
"""
Helper function that determines maximum number of b vectors for
each direction.
This algorithm is adapted from WaveTrans (see Class docstring). There
should be no reason for this function to be called outside of
initialization.
"""
bmag = np.linalg.norm(self.b, axis=1)
b = self.b
# calculate maximum integers in each direction for G
phi12 = np.arccos(np.dot(b[0, :], b[1, :]) / (bmag[0] * bmag[1]))
sphi123 = np.dot(b[2, :], np.cross(b[0, :], b[1, :])) / (bmag[2] * np.linalg.norm(np.cross(b[0, :], b[1, :])))
nbmaxA = np.sqrt(self.encut * self._C) / bmag
nbmaxA[0] /= np.abs(np.sin(phi12))
nbmaxA[1] /= np.abs(np.sin(phi12))
nbmaxA[2] /= np.abs(sphi123)
nbmaxA += 1
phi13 = np.arccos(np.dot(b[0, :], b[2, :]) / (bmag[0] * bmag[2]))
sphi123 = np.dot(b[1, :], np.cross(b[0, :], b[2, :])) / (bmag[1] * np.linalg.norm(np.cross(b[0, :], b[2, :])))
nbmaxB = np.sqrt(self.encut * self._C) / bmag
nbmaxB[0] /= np.abs(np.sin(phi13))
nbmaxB[1] /= np.abs(sphi123)
nbmaxB[2] /= np.abs(np.sin(phi13))
nbmaxB += 1
phi23 = np.arccos(np.dot(b[1, :], b[2, :]) / (bmag[1] * bmag[2]))
sphi123 = np.dot(b[0, :], np.cross(b[1, :], b[2, :])) / (bmag[0] * np.linalg.norm(np.cross(b[1, :], b[2, :])))
nbmaxC = np.sqrt(self.encut * self._C) / bmag
nbmaxC[0] /= np.abs(sphi123)
nbmaxC[1] /= np.abs(np.sin(phi23))
nbmaxC[2] /= np.abs(np.sin(phi23))
nbmaxC += 1
        self._nbmax = np.max([nbmaxA, nbmaxB, nbmaxC], axis=0).astype(int)
def _generate_G_points(self, kpoint, gamma=False):
"""
Helper function to generate G-points based on nbmax.
This function iterates over possible G-point values and determines
if the energy is less than G_{cut}. Valid values are appended to
the output array. This function should not be called outside of
initialization.
Args:
kpoint (np.array): the array containing the current k-point value
gamma (bool): determines if G points for gamma-point only executable
should be generated
Returns:
a list containing valid G-points
"""
if gamma:
kmax = self._nbmax[0] + 1
else:
kmax = 2 * self._nbmax[0] + 1
gpoints = []
extra_gpoints = []
extra_coeff_inds = []
G_ind = 0
for i in range(2 * self._nbmax[2] + 1):
i3 = i - 2 * self._nbmax[2] - 1 if i > self._nbmax[2] else i
for j in range(2 * self._nbmax[1] + 1):
j2 = j - 2 * self._nbmax[1] - 1 if j > self._nbmax[1] else j
for k in range(kmax):
k1 = k - 2 * self._nbmax[0] - 1 if k > self._nbmax[0] else k
if gamma and ((k1 == 0 and j2 < 0) or (k1 == 0 and j2 == 0 and i3 < 0)):
continue
G = np.array([k1, j2, i3])
v = kpoint + G
g = np.linalg.norm(np.dot(v, self.b))
E = g ** 2 / self._C
if E < self.encut:
gpoints.append(G)
if gamma and (k1, j2, i3) != (0, 0, 0):
extra_gpoints.append(-G)
extra_coeff_inds.append(G_ind)
G_ind += 1
return (gpoints, extra_gpoints, extra_coeff_inds)
def evaluate_wavefunc(self, kpoint, band, r, spin=0):
r"""
Evaluates the wavefunction for a given position, r.
The wavefunction is given by the k-point and band. It is evaluated
at the given position by summing over the components. Formally,
\psi_n^k (r) = \sum_{i=1}^N c_i^{n,k} \exp (i (k + G_i^{n,k}) \cdot r)
where \psi_n^k is the wavefunction for the nth band at k-point k, N is
the number of plane waves, c_i^{n,k} is the ith coefficient that
corresponds to the nth band and k-point k, and G_i^{n,k} is the ith
G-point corresponding to k-point k.
NOTE: This function is very slow; a discrete fourier transform is the
preferred method of evaluation (see Wavecar.fft_mesh).
Args:
kpoint (int): the index of the kpoint where the wavefunction
will be evaluated
band (int): the index of the band where the wavefunction will be
evaluated
r (np.array): the position where the wavefunction will be evaluated
spin (int): spin index for the desired wavefunction (only for
ISPIN = 2, default = 0)
Returns:
a complex value corresponding to the evaluation of the wavefunction
"""
v = self.Gpoints[kpoint] + self.kpoints[kpoint]
u = np.dot(np.dot(v, self.b), r)
c = self.coeffs[spin][kpoint][band] if self.spin == 2 else \
self.coeffs[kpoint][band]
return np.sum(np.dot(c, np.exp(1j * u, dtype=np.complex64))) / np.sqrt(self.vol)
def fft_mesh(self, kpoint, band, spin=0, shift=True):
"""
Places the coefficients of a wavefunction onto an fft mesh.
Once the mesh has been obtained, a discrete fourier transform can be
used to obtain real-space evaluation of the wavefunction. The output
of this function can be passed directly to numpy's fft function. For
example:
mesh = Wavecar('WAVECAR').fft_mesh(kpoint, band)
evals = np.fft.ifftn(mesh)
Args:
kpoint (int): the index of the kpoint where the wavefunction
will be evaluated
band (int): the index of the band where the wavefunction will be
evaluated
spin (int): the spin of the wavefunction for the desired
wavefunction (only for ISPIN = 2, default = 0)
shift (bool): determines if the zero frequency coefficient is
placed at index (0, 0, 0) or centered
Returns:
a numpy ndarray representing the 3D mesh of coefficients
"""
        mesh = np.zeros(tuple(self.ng), dtype=np.complex128)
tcoeffs = self.coeffs[spin][kpoint][band] if self.spin == 2 else \
self.coeffs[kpoint][band]
for gp, coeff in zip(self.Gpoints[kpoint], tcoeffs):
            t = tuple(gp.astype(int) + (self.ng / 2).astype(int))
mesh[t] = coeff
if shift:
return np.fft.ifftshift(mesh)
else:
return mesh
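    # A minimal usage sketch (left as comments; the 'WAVECAR' path and the
    # kpoint/band indices below are assumptions, not part of this module):
    #
    #     wv = Wavecar('WAVECAR')
    #     mesh = wv.fft_mesh(kpoint=0, band=0)       # reciprocal-space mesh
    #     psi_r = np.fft.ifftn(mesh)                 # real-space wavefunction
    #     density = np.abs(np.conj(psi_r) * psi_r)   # un-normalized |psi|^2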
def get_parchg(self, poscar, kpoint, band, spin=None, phase=False,
scale=2):
"""
Generates a Chgcar object, which is the charge density of the specified
wavefunction.
This function generates a Chgcar object with the charge density of the
wavefunction specified by band and kpoint (and spin, if the WAVECAR
corresponds to a spin-polarized calculation). The phase tag is a
feature that is not present in VASP. For a real wavefunction, the phase
tag being turned on means that the charge density is multiplied by the
sign of the wavefunction at that point in space. A warning is generated
if the phase tag is on and the chosen kpoint is not Gamma.
Note: Augmentation from the PAWs is NOT included in this function. The
maximal charge density will differ from the PARCHG from VASP, but the
qualitative shape of the charge density will match.
Args:
poscar (pymatgen.io.vasp.inputs.Poscar): Poscar object that has the
structure associated with the WAVECAR file
kpoint (int): the index of the kpoint for the wavefunction
band (int): the index of the band for the wavefunction
spin (int): optional argument to specify the spin. If the
Wavecar has ISPIN = 2, spin is None generates a
Chgcar with total spin and magnetization, and
spin == {0, 1} specifies just the spin up or
down component.
phase (bool): flag to determine if the charge density is
multiplied by the sign of the wavefunction.
Only valid for real wavefunctions.
scale (int): scaling for the FFT grid. The default value of 2 is
at least as fine as the VASP default.
Returns:
a pymatgen.io.vasp.outputs.Chgcar object
"""
if phase and not np.all(self.kpoints[kpoint] == 0.):
warnings.warn('phase == True should only be used for the Gamma '
'kpoint! I hope you know what you\'re doing!')
# scaling of ng for the fft grid, need to restore value at the end
temp_ng = self.ng
self.ng = self.ng * scale
N = np.prod(self.ng)
data = {}
if self.spin == 2:
if spin is not None:
wfr = np.fft.ifftn(self.fft_mesh(kpoint, band, spin=spin)) * N
den = np.abs(np.conj(wfr) * wfr)
if phase:
den = np.sign(np.real(wfr)) * den
data['total'] = den
else:
wfr = np.fft.ifftn(self.fft_mesh(kpoint, band, spin=0)) * N
denup = np.abs(np.conj(wfr) * wfr)
wfr = np.fft.ifftn(self.fft_mesh(kpoint, band, spin=1)) * N
dendn = np.abs(np.conj(wfr) * wfr)
data['total'] = denup + dendn
data['diff'] = denup - dendn
else:
wfr = np.fft.ifftn(self.fft_mesh(kpoint, band)) * N
den = np.abs(np.conj(wfr) * wfr)
if phase:
den = np.sign(np.real(wfr)) * den
data['total'] = den
self.ng = temp_ng
return Chgcar(poscar, data)
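# A minimal usage sketch (left as comments; the 'WAVECAR'/'POSCAR' paths and
# the indices are assumptions; Poscar.from_file is the standard pymatgen
# reader):
#
#     from pymatgen.io.vasp.inputs import Poscar
#     wv = Wavecar('WAVECAR')
#     parchg = wv.get_parchg(Poscar.from_file('POSCAR'), kpoint=0, band=0)
#     parchg.write_file('PARCHG.0.0')   # Chgcar supports write_file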
class Eigenval:
"""
Object for reading EIGENVAL file.
.. attribute:: filename
string containing input filename
.. attribute:: occu_tol
tolerance for determining occupation in band properties
.. attribute:: ispin
spin polarization tag (int)
.. attribute:: nelect
number of electrons
.. attribute:: nkpt
number of kpoints
.. attribute:: nbands
number of bands
.. attribute:: kpoints
list of kpoints
.. attribute:: kpoints_weights
weights of each kpoint in the BZ, should sum to 1.
.. attribute:: eigenvalues
Eigenvalues as a dict of {(spin): np.ndarray(shape=(nkpt, nbands, 2))}.
This representation is based on actual ordering in VASP and is meant as
an intermediate representation to be converted into proper objects. The
kpoint index is 0-based (unlike the 1-based indexing in VASP).
"""
def __init__(self, filename, occu_tol=1e-8):
"""
Reads input from filename to construct Eigenval object
Args:
filename (str): filename of EIGENVAL to read in
occu_tol (float): tolerance for determining band gap
Returns:
a pymatgen.io.vasp.outputs.Eigenval object
"""
self.filename = filename
self.occu_tol = occu_tol
with zopen(filename, 'r') as f:
self.ispin = int(f.readline().split()[-1])
# useless header information
for _ in range(4):
f.readline()
self.nelect, self.nkpt, self.nbands = \
list(map(int, f.readline().split()))
self.kpoints = []
self.kpoints_weights = []
if self.ispin == 2:
self.eigenvalues = \
{Spin.up: np.zeros((self.nkpt, self.nbands, 2)),
Spin.down: np.zeros((self.nkpt, self.nbands, 2))}
else:
self.eigenvalues = \
{Spin.up: np.zeros((self.nkpt, self.nbands, 2))}
ikpt = -1
for line in f:
if re.search(r'(\s+[\-+0-9eE.]+){4}', str(line)):
ikpt += 1
kpt = list(map(float, line.split()))
self.kpoints.append(kpt[:-1])
self.kpoints_weights.append(kpt[-1])
for i in range(self.nbands):
sl = list(map(float, f.readline().split()))
if len(sl) == 3:
self.eigenvalues[Spin.up][ikpt, i, 0] = sl[1]
self.eigenvalues[Spin.up][ikpt, i, 1] = sl[2]
elif len(sl) == 5:
self.eigenvalues[Spin.up][ikpt, i, 0] = sl[1]
self.eigenvalues[Spin.up][ikpt, i, 1] = sl[3]
self.eigenvalues[Spin.down][ikpt, i, 0] = sl[2]
self.eigenvalues[Spin.down][ikpt, i, 1] = sl[4]
@property
def eigenvalue_band_properties(self):
"""
Band properties from the eigenvalues as a tuple,
(band gap, cbm, vbm, is_band_gap_direct).
"""
vbm = -float("inf")
vbm_kpoint = None
cbm = float("inf")
cbm_kpoint = None
for spin, d in self.eigenvalues.items():
for k, val in enumerate(d):
for (eigenval, occu) in val:
if occu > self.occu_tol and eigenval > vbm:
vbm = eigenval
vbm_kpoint = k
elif occu <= self.occu_tol and eigenval < cbm:
cbm = eigenval
cbm_kpoint = k
return max(cbm - vbm, 0), cbm, vbm, vbm_kpoint == cbm_kpoint
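# A minimal usage sketch (left as comments; the 'EIGENVAL' filename is an
# assumption):
#
#     ev = Eigenval('EIGENVAL')
#     gap, cbm, vbm, is_direct = ev.eigenvalue_band_properties
#     # gap is clipped at 0 for metals; is_direct compares the vbm/cbm kpoints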
class Wavederf:
"""
Object for reading a WAVEDERF file.
Note: This file is only produced when LOPTICS is true AND vasp has been
recompiled after uncommenting the line that calls
WRT_CDER_BETWEEN_STATES_FORMATTED in linear_optics.F
.. attribute:: data
A numpy array containing the WAVEDERF data of the form below. It should
be noted that VASP uses 1-based indexing for bands, but this is
converted to 0-based numpy array indexing.
For each kpoint (in the same order as in IBZKPT), and for each pair of
bands:
[ #kpoint index
[ #band 1 index
[ #band 2 index
[cdum_x_real, cdum_x_imag, cdum_y_real, cdum_y_imag, cdum_z_real, cdum_z_imag]
]
]
]
This structure follows the file format. Numpy array methods can be used
to fetch data in a more useful way (e.g., get matrix elements between
    two specific bands at each kpoint, fetch x/y/z components,
    real/imaginary parts, abs/phase, etc.)
Author: Miguel Dias Costa
"""
def __init__(self, filename):
"""
Args:
filename: Name of file containing WAVEDERF.
"""
with zopen(filename, "rt") as f:
header = f.readline().split()
nb_kpoints = int(header[1])
nb_bands = int(header[2])
data = np.zeros((nb_kpoints, nb_bands, nb_bands, 6))
for ik in range(nb_kpoints):
for ib1 in range(nb_bands):
for ib2 in range(nb_bands):
                    # besides the 6 matrix elements, each line in the file
                    # includes the band indexes (redundant) and each band's
                    # energy and occupation (already available elsewhere),
                    # so we store only the 6 matrix elements that follow
                    # these 6 redundant values
data[ik][ib1][ib2] = [float(element)
for element in f.readline().split()[6:]]
self.data = data
self._nb_kpoints = nb_kpoints
self._nb_bands = nb_bands
@property
def nb_bands(self):
"""
returns the number of bands in the band structure
"""
return self._nb_bands
@property
def nb_kpoints(self):
"""
Returns the number of k-points in the band structure calculation
"""
return self._nb_kpoints
def get_elements_between_bands(self, band_i, band_j):
"""
Method returning a numpy array with elements
[cdum_x_real, cdum_x_imag, cdum_y_real, cdum_y_imag, cdum_z_real, cdum_z_imag]
between bands band_i and band_j (vasp 1-based indexing) for all kpoints.
Args:
band_i (Integer): Index of band i
band_j (Integer): Index of band j
Returns:
            a numpy array of elements for each kpoint
"""
if band_i < 1 or band_i > self.nb_bands or band_j < 1 or band_j > self.nb_bands:
raise ValueError("Band index out of bounds")
return self.data[:, band_i - 1, band_j - 1, :]
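# A minimal usage sketch (left as comments; the 'WAVEDERF' filename and band
# indices are assumptions; note the VASP-style 1-based band indexing):
#
#     wdf = Wavederf('WAVEDERF')
#     elems = wdf.get_elements_between_bands(1, 2)   # shape (nb_kpoints, 6)
#     cdum_x = elems[:, 0] + 1j * elems[:, 1]        # complex x component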
class Waveder:
"""
Class for reading a WAVEDER file.
The LOPTICS tag produces a WAVEDER file.
The WAVEDER contains the derivative of the orbitals with respect to k.
Author: Kamal Choudhary, NIST
"""
def __init__(self, filename, gamma_only=False):
"""
Args:
filename: Name of file containing WAVEDER.
"""
with open(filename, 'rb') as fp:
def readData(dtype):
""" Read records from Fortran binary file and convert to
np.array of given dtype. """
data = b''
                while True:
                    prefix = np.fromfile(fp, dtype=np.int32, count=1)[0]
                    data += fp.read(abs(prefix))
                    suffix = np.fromfile(fp, dtype=np.int32, count=1)[0]
                    if abs(prefix) != abs(suffix):
raise RuntimeError("Read wrong amount of bytes.\n"
"Expected: %d, read: %d, suffix: %d." % (prefix, len(data), suffix))
if prefix > 0:
break
return np.frombuffer(data, dtype=dtype)
nbands, nelect, nk, ispin = readData(np.int32)
            _ = readData(np.float64)  # nodes_in_dielectric_function
            _ = readData(np.float64)  # wplasmon
            if gamma_only:
                cder = readData(np.float64)
else:
cder = readData(np.complex64)
cder_data = cder.reshape((3, ispin, nk, nelect, nbands)).T
self._cder_data = cder_data
self._nkpoints = nk
self._ispin = ispin
self._nelect = nelect
self._nbands = nbands
@property
def cder_data(self):
"""
Returns the orbital derivative between states
"""
return self._cder_data
@property
def nbands(self):
"""
Returns the number of bands in the calculation
"""
return self._nbands
@property
def nkpoints(self):
"""
Returns the number of k-points in the calculation
"""
return self._nkpoints
@property
def nelect(self):
"""
Returns the number of electrons in the calculation
"""
return self._nelect
def get_orbital_derivative_between_states(self, band_i, band_j, kpoint, spin, cart_dir):
"""
        Returns the orbital derivative matrix element between bands band_i
        and band_j for the given k-point index, spin channel and Cartesian
        direction.
Args:
band_i (Integer): Index of band i
band_j (Integer): Index of band j
kpoint (Integer): Index of k-point
spin (Integer): Index of spin-channel (0 or 1)
cart_dir (Integer): Index of cartesian direction (0,1,2)
Returns:
a float value
"""
if band_i < 0 or band_i > self.nbands - 1 or band_j < 0 or band_j > self.nelect - 1:
raise ValueError("Band index out of bounds")
        if kpoint < 0 or kpoint >= self.nkpoints:
raise ValueError("K-point index out of bounds")
if cart_dir > 2 or cart_dir < 0:
raise ValueError("cart_dir index out of bounds")
return self._cder_data[band_i, band_j, kpoint, spin, cart_dir]
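# A minimal usage sketch (left as comments; the 'WAVEDER' filename and the
# 0-based indices are assumptions):
#
#     wd = Waveder('WAVEDER')
#     d = wd.get_orbital_derivative_between_states(band_i=0, band_j=1,
#                                                  kpoint=0, spin=0, cart_dir=2)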
class UnconvergedVASPWarning(Warning):
"""
Warning for unconverged vasp run.
"""
pass
|
gVallverdu/pymatgen
|
pymatgen/io/vasp/outputs.py
|
Python
|
mit
| 204,441
|
[
"CRYSTAL",
"VASP",
"VisIt",
"pymatgen"
] |
847238231a248fc5783ba7d9a42aa84f7f9e35edd8e29e6548de0f4ba655b284
|
import os
from .util import OS_NAME
def get_simulated_epw_path():
"""
    Returns
    -------
    str or None
        The expected epw path on Windows; None if the epw file may live
        anywhere (Linux or OSX).
"""
from oplus import CONF # touchy imports
if OS_NAME == "windows":
return os.path.join(CONF.eplus_base_dir_path, "WeatherData", "%s.epw" % CONF.default_model_name)
# on linux or osx, epw may remain in current directory
|
Openergy/oplus
|
oplus/compatibility/epw.py
|
Python
|
mpl-2.0
| 382
|
[
"EPW"
] |
7be99966771822e8cec5beaf9e0609d8a8e87cd6de98e4901a5d66cfa6f98df3
|
# python imports
import datetime
# django imports
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.template import RequestContext
from django.utils.translation import ugettext as _
# lfs imports
import lfs.core.utils
def send_contact_mail(request, form, template="lfs/mail/contact_mail.html"):
"""Sends an internal mail after a customer as submit the standard contact
form.
"""
shop = lfs.core.utils.get_default_shop()
subject = form.cleaned_data.get('subject', '').strip()
if not subject:
subject = _('New message')
subject = _(u"[%(shop)s contact form] %(subject)s") % {"shop": shop.name, "subject": subject}
from_email = request.POST.get("email")
to = shop.get_notification_emails()
bcc = []
fields = []
for field_name, field in form.fields.items():
if field_name == 'subject':
continue
fields.append({
"label": _(field.label.title()),
"value": form.cleaned_data.get(field_name)
})
text = render_to_string(template, RequestContext(request, {
"date": datetime.datetime.now(),
"shop": shop,
"fields": fields,
}))
    # Add a Sender header, because e.g. Gmail doesn't like a From header set to
    # @gmail.com when the email was not actually sent by Gmail servers.
# If email is sent From: @gmail.com it generates the following message in our postfix
# host gmail-smtp-in.l.google.com[74.125.136.26] said: 421-4.7.0 [xx.xx.xx.xx 15]
# Our system has detected an unusual rate of 421-4.7.0 unsolicited mail
# originating from your IP address. To protect our 421-4.7.0 users from
# spam, mail sent from your IP address has been temporarily 421-4.7.0 rate
# limited. Please visit 421-4.7.0 http://www.google.com/mail/help/bulk_mail.html
# to review our Bulk 421 4.7.0 Email Senders Guidelines.
headers = {
'Sender': shop.from_email
}
mail = EmailMultiAlternatives(subject=subject, body="", from_email=from_email, to=to, bcc=bcc, headers=headers)
mail.attach_alternative(text, "text/html")
mail.send()
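# A minimal usage sketch (left as comments; 'ContactForm' and the view wiring
# are assumptions, not part of this module):
#
#     def contact_view(request):
#         form = ContactForm(request.POST or None)
#         if request.method == 'POST' and form.is_valid():
#             send_contact_mail(request, form)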
|
pigletto/lfs-contact
|
lfs_contact/utils.py
|
Python
|
bsd-3-clause
| 2,156
|
[
"VisIt"
] |
8f869a9862a76db8d993817664845a73598f69f05a894894f6ff7f7d677b1fa0
|
# Orca
#
# Copyright 2004-2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
""" Custom script for Thunderbird 3."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2004-2008 Sun Microsystems Inc."
__license__ = "LGPL"
import pyatspi
import orca.orca as orca
import orca.cmdnames as cmdnames
import orca.debug as debug
import orca.input_event as input_event
import orca.scripts.default as default
import orca.settings_manager as settings_manager
import orca.orca_state as orca_state
import orca.speech as speech
import orca.scripts.toolkits.Gecko as Gecko
from .speech_generator import SpeechGenerator
from .spellcheck import SpellCheck
_settingsManager = settings_manager.getManager()
########################################################################
# #
# The Thunderbird script class. #
# #
########################################################################
class Script(Gecko.Script):
"""The script for Thunderbird."""
def __init__(self, app):
""" Creates a new script for the given application.
Arguments:
- app: the application to create a script for.
"""
# Store the last autocompleted string for the address fields
# so that we're not too 'chatty'. See bug #533042.
#
self._lastAutoComplete = ""
        if _settingsManager.getSetting('sayAllOnLoad') is None:
_settingsManager.setSetting('sayAllOnLoad', False)
Gecko.Script.__init__(self, app)
def setupInputEventHandlers(self):
Gecko.Script.setupInputEventHandlers(self)
self.inputEventHandlers["togglePresentationModeHandler"] = \
input_event.InputEventHandler(
Script.togglePresentationMode,
cmdnames.TOGGLE_PRESENTATION_MODE)
def getSpeechGenerator(self):
"""Returns the speech generator for this script."""
return SpeechGenerator(self)
def getSpellCheck(self):
"""Returns the spellcheck support for this script."""
return SpellCheck(self)
def getAppPreferencesGUI(self):
"""Return a GtkGrid containing the application unique configuration
GUI items for the current application."""
grid = Gecko.Script.getAppPreferencesGUI(self)
self._sayAllOnLoadCheckButton.set_active(
_settingsManager.getSetting('sayAllOnLoad'))
spellcheck = self.spellcheck.getAppPreferencesGUI()
grid.attach(spellcheck, 0, len(grid.get_children()), 1, 1)
grid.show_all()
return grid
def getPreferencesFromGUI(self):
"""Returns a dictionary with the app-specific preferences."""
prefs = Gecko.Script.getPreferencesFromGUI(self)
prefs['sayAllOnLoad'] = self._sayAllOnLoadCheckButton.get_active()
prefs.update(self.spellcheck.getPreferencesFromGUI())
return prefs
def doWhereAmI(self, inputEvent, basicOnly):
"""Performs the whereAmI operation."""
if self.spellcheck.isActive():
self.spellcheck.presentErrorDetails(not basicOnly)
super().doWhereAmI(inputEvent, basicOnly)
def locusOfFocusChanged(self, event, oldFocus, newFocus):
"""Handles changes of focus of interest to the script."""
if self.spellcheck.isSuggestionsItem(newFocus):
includeLabel = not self.spellcheck.isSuggestionsItem(oldFocus)
self.updateBraille(newFocus)
self.spellcheck.presentSuggestionListItem(includeLabel=includeLabel)
return
super().locusOfFocusChanged(event, oldFocus, newFocus)
def _useFocusMode(self, obj):
if self.isEditableMessage(obj):
return True
return Gecko.Script._useFocusMode(self, obj)
def togglePresentationMode(self, inputEvent):
if self._inFocusMode and self.isEditableMessage(orca_state.locusOfFocus):
return
Gecko.Script.togglePresentationMode(self, inputEvent)
def useStructuralNavigationModel(self):
"""Returns True if structural navigation should be enabled here."""
if self.isEditableMessage(orca_state.locusOfFocus):
return False
return Gecko.Script.useStructuralNavigationModel(self)
def onFocusedChanged(self, event):
"""Callback for object:state-changed:focused accessibility events."""
if not event.detail1:
return
self._lastAutoComplete = ""
self.pointOfReference['lastAutoComplete'] = None
obj = event.source
if self.spellcheck.isAutoFocusEvent(event):
orca.setLocusOfFocus(event, event.source, False)
self.updateBraille(orca_state.locusOfFocus)
if not self.utilities.inDocumentContent(obj):
default.Script.onFocusedChanged(self, event)
return
if self.isEditableMessage(obj):
default.Script.onFocusedChanged(self, event)
return
Gecko.Script.onFocusedChanged(self, event)
def onBusyChanged(self, event):
"""Callback for object:state-changed:busy accessibility events."""
obj = event.source
if obj.getRole() == pyatspi.ROLE_DOCUMENT_FRAME and not event.detail1:
try:
role = orca_state.locusOfFocus.getRole()
except:
pass
else:
if role in [pyatspi.ROLE_FRAME, pyatspi.ROLE_PAGE_TAB]:
orca.setLocusOfFocus(event, event.source, False)
if self.utilities.inDocumentContent():
self.speakMessage(obj.name)
self._presentMessage(obj)
def onCaretMoved(self, event):
"""Callback for object:text-caret-moved accessibility events."""
if self.isEditableMessage(event.source):
if event.detail1 == -1:
return
self.spellcheck.setDocumentPosition(event.source, event.detail1)
if self.spellcheck.isActive():
return
Gecko.Script.onCaretMoved(self, event)
def onChildrenChanged(self, event):
"""Callback for object:children-changed accessibility events."""
default.Script.onChildrenChanged(self, event)
def onSelectionChanged(self, event):
"""Callback for object:state-changed:showing accessibility events."""
# We present changes when the list has focus via focus-changed events.
if event.source == self.spellcheck.getSuggestionsList():
return
parent = event.source.parent
if parent and parent.getRole() == pyatspi.ROLE_COMBO_BOX \
and not parent.getState().contains(pyatspi.STATE_FOCUSED):
return
Gecko.Script.onSelectionChanged(self, event)
def onSensitiveChanged(self, event):
"""Callback for object:state-changed:sensitive accessibility events."""
if event.source == self.spellcheck.getChangeToEntry() \
and self.spellcheck.presentCompletionMessage():
return
Gecko.Script.onSensitiveChanged(self, event)
def onShowingChanged(self, event):
"""Callback for object:state-changed:showing accessibility events."""
# TODO - JD: Once there are separate scripts for the Gecko toolkit
# and the Firefox browser, this method can be deleted. It's here
# right now just to prevent the Gecko script from presenting non-
# existent browsery autocompletes for Thunderbird.
default.Script.onShowingChanged(self, event)
def onTextDeleted(self, event):
"""Called whenever text is from an an object.
Arguments:
- event: the Event
"""
obj = event.source
parent = obj.parent
try:
role = event.source.getRole()
parentRole = parent.getRole()
except:
return
if role == pyatspi.ROLE_LABEL and parentRole == pyatspi.ROLE_STATUS_BAR:
return
Gecko.Script.onTextDeleted(self, event)
def onTextInserted(self, event):
"""Callback for object:text-changed:insert accessibility events."""
obj = event.source
try:
role = obj.getRole()
parentRole = obj.parent.getRole()
except:
return
if role == pyatspi.ROLE_LABEL and parentRole == pyatspi.ROLE_STATUS_BAR:
return
if len(event.any_data) > 1 and obj == self.spellcheck.getChangeToEntry():
return
isSystemEvent = event.type.endswith("system")
# Try to stop unwanted chatter when a message is being replied to.
# See bgo#618484.
if isSystemEvent and self.isEditableMessage(obj):
return
        # Speak the autocompleted text, but only if it is a different
        # address so that we're not too "chatty." See bug #533042.
if parentRole == pyatspi.ROLE_AUTOCOMPLETE:
if len(event.any_data) == 1:
default.Script.onTextInserted(self, event)
return
if self._lastAutoComplete and self._lastAutoComplete in event.any_data:
return
# Mozilla cannot seem to get their ":system" suffix right
# to save their lives, so we'll add yet another sad hack.
try:
text = event.source.queryText()
except:
hasSelection = False
else:
hasSelection = text.getNSelections() > 0
if hasSelection or isSystemEvent:
speech.speak(event.any_data)
self._lastAutoComplete = event.any_data
self.pointOfReference['lastAutoComplete'] = hash(obj)
return
Gecko.Script.onTextInserted(self, event)
def onTextSelectionChanged(self, event):
"""Callback for object:text-selection-changed accessibility events."""
obj = event.source
spellCheckEntry = self.spellcheck.getChangeToEntry()
if obj == spellCheckEntry:
return
if self.isEditableMessage(obj) and self.spellcheck.isActive():
text = obj.queryText()
selStart, selEnd = text.getSelection(0)
self.spellcheck.setDocumentPosition(obj, selStart)
return
default.Script.onTextSelectionChanged(self, event)
def onNameChanged(self, event):
"""Callback for object:property-change:accessible-name events."""
if event.source.name == self.spellcheck.getMisspelledWord():
self.spellcheck.presentErrorDetails()
return
obj = event.source
# If the user has just deleted an open mail message, then we want to
# try to speak the new name of the open mail message frame and also
# present the first line of that message to be consistent with what
# we do when a new message window is opened. See bug #540039 for more
# details.
#
rolesList = [pyatspi.ROLE_DOCUMENT_FRAME,
pyatspi.ROLE_INTERNAL_FRAME,
pyatspi.ROLE_FRAME,
pyatspi.ROLE_APPLICATION]
if self.utilities.hasMatchingHierarchy(event.source, rolesList):
lastKey, mods = self.utilities.lastKeyAndModifiers()
if lastKey == "Delete":
speech.speak(obj.name)
[obj, offset] = self.utilities.findFirstCaretContext(obj, 0)
self.utilities.setCaretPosition(obj, offset)
return
def _presentMessage(self, documentFrame):
"""Presents the first line of the message, or the entire message,
depending on the user's sayAllOnLoad setting."""
[obj, offset] = self.utilities.findFirstCaretContext(documentFrame, 0)
self.utilities.setCaretPosition(obj, offset)
self.updateBraille(obj)
if not _settingsManager.getSetting('sayAllOnLoad'):
contents = self.utilities.getLineContentsAtOffset(obj, offset)
self.speakContents(contents)
elif _settingsManager.getSetting('enableSpeech'):
self.sayAll(None)
def sayCharacter(self, obj):
"""Speaks the character at the current caret position."""
if self.isEditableMessage(obj):
text = self.utilities.queryNonEmptyText(obj)
if text and text.caretOffset + 1 >= text.characterCount:
default.Script.sayCharacter(self, obj)
return
Gecko.Script.sayCharacter(self, obj)
def toggleFlatReviewMode(self, inputEvent=None):
"""Toggles between flat review mode and focus tracking mode."""
# If we're leaving flat review dump the cache. See bug 568658.
#
if self.flatReviewContext:
pyatspi.clearCache()
return default.Script.toggleFlatReviewMode(self, inputEvent)
def isNonHTMLEntry(self, obj):
"""Checks for ROLE_ENTRY areas that are not part of an HTML
document. See bug #607414.
        Returns True if this is something like the Subject: entry.
"""
result = obj and obj.getRole() == pyatspi.ROLE_ENTRY \
and not self.utilities.ancestorWithRole(
obj, [pyatspi.ROLE_DOCUMENT_FRAME], [pyatspi.ROLE_FRAME])
return result
def isEditableMessage(self, obj):
"""Returns True if this is a editable message."""
if not obj:
return False
if not obj.getState().contains(pyatspi.STATE_EDITABLE):
return False
if self.isNonHTMLEntry(obj):
return False
return True
def onWindowActivated(self, event):
"""Callback for window:activate accessibility events."""
Gecko.Script.onWindowActivated(self, event)
if not self.spellcheck.isCheckWindow(event.source):
self.spellcheck.deactivate()
return
self.spellcheck.presentErrorDetails()
orca.setLocusOfFocus(None, self.spellcheck.getChangeToEntry(), False)
self.updateBraille(orca_state.locusOfFocus)
def onWindowDeactivated(self, event):
"""Callback for window:deactivate accessibility events."""
Gecko.Script.onWindowDeactivated(self, event)
self.spellcheck.deactivate()
|
pvagner/orca
|
src/orca/scripts/apps/Thunderbird/script.py
|
Python
|
lgpl-2.1
| 15,166
|
[
"ORCA"
] |
a61df4958f36c9d871d34201391a254305c19dd3d803cbb18ed1682ad224b4b9
|
import os
import unittest
import numpy as np
import xarray as xr
import xcube.core.store as xcube_store
from cate.core.ds import DataAccessError
from cate.core.ds import NetworkError
from cate.core.ds import find_data_store
from cate.core.ds import get_data_descriptor
from cate.core.ds import get_ext_chunk_sizes
from cate.core.ds import get_metadata_from_descriptor
from cate.core.ds import get_spatial_ext_chunk_sizes
from cate.core.ds import open_dataset
from cate.core.types import ValidationError
from xcube.core.store import DataStoreError
from ..storetest import StoreTest
_TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'test_data')
class IOTest(StoreTest):
def test_find_data_store(self):
aerosol_store_id, aerosol_store = find_data_store(
'20000302-ESACCI-L3C_AEROSOL-AER_PRODUCTS-ATSR2-ERS2-ADV_DAILY-v2.30.nc')
self.assertEqual('local_test_store_1', aerosol_store_id)
self.assertIsNotNone(aerosol_store)
ozone_store_id, ozone_store = \
find_data_store('ESACCI-OZONE-L3S-TC-MERGED-DLR_1M-20050501-fv0100.nc')
self.assertEqual('local_test_store_2', ozone_store_id)
self.assertIsNotNone(ozone_store)
permafrost_store_id, permafrost_store = find_data_store('permafrost')
self.assertIsNone(permafrost_store_id)
self.assertIsNone(permafrost_store)
with self.assertRaises(ValidationError):
find_data_store('ESACCI-OC-L3S-IOP-MERGED-1M_MONTHLY_4km_GEO_PML_OCx_QAA-200505'
'-fv4.2.nc')
def test_get_data_descriptor(self):
aerosol_descriptor = get_data_descriptor(
'20000302-ESACCI-L3C_AEROSOL-AER_PRODUCTS-ATSR2-ERS2-ADV_DAILY-v2.30.nc')
self.assertIsInstance(aerosol_descriptor, xcube_store.DatasetDescriptor)
self.assertEqual('20000302-ESACCI-L3C_AEROSOL-AER_PRODUCTS-ATSR2-ERS2-ADV_DAILY-v2.30.nc',
aerosol_descriptor.data_id)
sst_descriptor = get_data_descriptor(
'19910916120000-ESACCI-L3C_GHRSST-SSTskin-AVHRR12_G-CDR2.1_night-v02.0-fv01.0.nc')
self.assertIsInstance(sst_descriptor, xcube_store.DatasetDescriptor)
self.assertEqual(
'19910916120000-ESACCI-L3C_GHRSST-SSTskin-AVHRR12_G-CDR2.1_night-v02.0-fv01.0.nc',
sst_descriptor.data_id)
permafrost_descriptor = get_data_descriptor('permafrost')
self.assertIsNone(permafrost_descriptor)
with self.assertRaises(ValidationError):
get_data_descriptor(
'ESACCI-OC-L3S-IOP-MERGED-1M_MONTHLY_4km_GEO_PML_OCx_QAA-200505-fv4.2.nc')
def test_get_metadata_from_descriptor(self):
descriptor = xcube_store.DatasetDescriptor(
data_id='xyz',
crs='EPSG:9346',
bbox=(10., 20., 30., 40.),
spatial_res=20.,
time_range=('2017-06-05', '2017-06-27'),
time_period='daily',
coords={
'lon': xcube_store.VariableDescriptor(
name='lon',
dtype='float32',
dims=('lon',),
attrs=dict(units='degrees',
long_name='longitude',
standard_name='longitude')),
'lat': xcube_store.VariableDescriptor(
name='lat',
dtype='float32',
dims=('lat',),
attrs=dict(units='degrees',
long_name='latitude',
standard_name='latitude')),
'time': xcube_store.VariableDescriptor(
name='time',
dtype='datetime64[ms]',
dims=('time',),
attrs=dict(units='milliseconds since 1970-01-01T00:00:00',
long_name='time',
standard_name='time'))
},
data_vars={
'surface_pressure': xcube_store.VariableDescriptor(
name='surface_pressure',
dtype='float32',
dims=('time', 'lat', 'lon'),
attrs=dict(units='hPa',
long_name='surface_pressure',
standard_name='surface_pressure'))
},
attrs=dict(
title='ESA Ozone Climate Change Initiative (Ozone CCI): '
'Level 3 Nadir Ozone Profile Merged Data Product, version 2',
institution='Royal Netherlands Meteorological Institute, KNMI',
source='This dataset contains L2 profiles from GOME, SCIAMACHY, OMI and GOME-2 '
'gridded onto a global grid.',
history='L2 data gridded to global grid.',
references='http://www.esa-ozone-cci.org/',
tracking_id='32CF0EE6-1F21-4FAE-B0BE-A8C6FD88A775',
Conventions='CF-1.6',
product_version='fv0002',
summary='This dataset contains L2 profiles from GOME, SCIAMACHY, OMI and GOME-2 '
'gridded onto a global grid.',
keywords='satellite, observation, atmosphere, ozone',
id='32CF0EE6-1F21-4FAE-B0BE-A8C6FD88A775',
naming_authority='KNMI, http://www.knmi.nl/',
comment='These data were produced at KNMI as part of the ESA OZONE CCI project.',
date_created='2014-01-08T12:50:21.908',
creator_name='J.C.A. van Peet',
creator_url='KNMI, http://www.knmi.nl/',
creator_email='peet@knmi.nl',
project='Climate Change Initiative - European Space Agency',
geospatial_lat_min=-90.0,
geospatial_lat_max=90.0,
geospatial_lat_units='degree_north',
geospatial_lat_resolution=1.0,
geospatial_lon_min=-180.0,
geospatial_lon_max=180.0,
geospatial_lon_units='degree_east',
geospatial_lon_resolution=1.0,
geospatial_vertical_min=0.01,
geospatial_vertical_max=1013.0,
time_coverage_start='19970104T102333Z',
time_coverage_end='19970131T233849Z',
time_coverage_duration='P1M',
time_coverage_resolution='P1M',
standard_name_vocabulary='NetCDF Climate and Forecast(CF) Metadata Convention '
'version 20, 11 September 2012',
license='data use is free and open',
platform='merged: ERS-2, ENVISAT, EOS-AURA, METOP-A',
sensor='merged: GOME, SCIAMACHY, OMI and GOME-2.',
spatial_resolution='see geospatial_lat_resolution and geospatial_lat_resolution',
Note='netCDF compression applied.',
ecv='OZONE',
time_frequency='month',
institute='Royal Netherlands Meteorological Institute',
processing_level='L3',
product_string='MERGED',
data_type='NP',
file_formats=['.nc', '.txt']
)
)
descriptor_metadata = get_metadata_from_descriptor(descriptor)
expected_metadata = dict(
data_id='xyz',
type_specifier='dataset',
crs='EPSG:9346',
bbox=(10., 20., 30., 40.),
spatial_res=20.,
time_range=('2017-06-05', '2017-06-27'),
time_period='daily',
title='ESA Ozone Climate Change Initiative (Ozone CCI): '
'Level 3 Nadir Ozone Profile Merged Data Product, version 2',
product_version='fv0002',
ecv='OZONE',
time_frequency='month',
institute='Royal Netherlands Meteorological Institute',
processing_level='L3',
product_string='MERGED',
data_type='NP',
file_formats=['.nc', '.txt'],
data_vars=[
{'name': 'surface_pressure',
'dtype': 'float32',
'dims': ('time', 'lat', 'lon'),
'long_name': 'surface_pressure',
'standard_name': 'surface_pressure',
'units': 'hPa'}
],
coords=[
{'name': 'lon',
'dtype': 'float32',
'dims': ('lon',),
'long_name': 'longitude',
'standard_name': 'longitude',
'units': 'degrees'},
{'name': 'lat',
'dtype': 'float32',
'dims': ('lat',),
'long_name': 'latitude',
'standard_name': 'latitude',
'units': 'degrees'},
{'name': 'time',
'dtype': 'datetime64[ms]',
'dims': ('time',),
'long_name': 'time',
'standard_name': 'time',
'units': 'milliseconds since 1970-01-01T00:00:00'}
],
)
self.assertEqual(expected_metadata, descriptor_metadata)
def test_open_dataset(self):
with self.assertRaises(ValueError) as cm:
# noinspection PyTypeChecker
open_dataset(None)
self.assertTupleEqual(('No data source given',), cm.exception.args)
with self.assertRaises(ValueError) as cm:
open_dataset('foo')
self.assertEqual(("No data store found that contains the ID 'foo'",), cm.exception.args)
aerosol_dataset, aerosol_dataset_name = \
open_dataset('20000302-ESACCI-L3C_AEROSOL-AER_PRODUCTS-ATSR2-ERS2-ADV_DAILY-v2.30.nc')
self.assertIsNotNone(aerosol_dataset)
self.assertEqual('20000302-ESACCI-L3C_AEROSOL-AER_PRODUCTS-ATSR2-ERS2-ADV_DAILY-v2.30.nc',
aerosol_dataset_name)
self.assertIsInstance(aerosol_dataset, xr.Dataset)
self.assertEqual({'ANG550-670_mean', 'AOD550_uncertainty_mean'},
set(aerosol_dataset.data_vars))
with self.assertRaises(DataStoreError) as cm:
open_dataset('20000302-ESACCI-L3C_AEROSOL-AER_PRODUCTS-ATSR2-ERS2-ADV_DAILY-v2.30.nc',
data_store_id='unknown_store')
self.assertEqual(('Configured data store instance "unknown_store" not found.',),
cm.exception.args)
aerosol_dataset, aerosol_dataset_name = \
open_dataset('20000302-ESACCI-L3C_AEROSOL-AER_PRODUCTS-ATSR2-ERS2-ADV_DAILY-v2.30.nc',
data_store_id='local_test_store_1')
self.assertIsNotNone(aerosol_dataset)
self.assertEqual('20000302-ESACCI-L3C_AEROSOL-AER_PRODUCTS-ATSR2-ERS2-ADV_DAILY-v2.30.nc',
aerosol_dataset_name)
self.assertIsInstance(aerosol_dataset, xr.Dataset)
self.assertEqual({'ANG550-670_mean', 'AOD550_uncertainty_mean'},
set(aerosol_dataset.data_vars))
class ChunkUtilsTest(unittest.TestCase):
def test_get_spatial_ext_chunk_sizes(self):
ds = xr.Dataset({
'v1': (['lat', 'lon'], np.zeros([45, 90])),
'v2': (['lat', 'lon'], np.zeros([45, 90])),
'v3': (['lon'], np.zeros(90)),
'lon': (['lon'], np.linspace(-178, 178, 90)),
'lat': (['lat'], np.linspace(-88, 88, 45))})
ds.v1.encoding['chunksizes'] = (5, 10)
ds.v2.encoding['chunksizes'] = (15, 30)
chunk_sizes = get_spatial_ext_chunk_sizes(ds)
self.assertIsNotNone(chunk_sizes)
self.assertEqual(chunk_sizes, dict(lat=15, lon=30))
def test_get_spatial_ext_chunk_sizes_wo_lat_lon(self):
ds = xr.Dataset({
'v1': (['lon'], np.zeros([90])),
'v2': (['lat'], np.zeros([45]))})
ds.v1.encoding['chunksizes'] = (90,)
ds.v2.encoding['chunksizes'] = (45,)
chunk_sizes = get_spatial_ext_chunk_sizes(ds)
self.assertIsNone(chunk_sizes)
def test_get_spatial_ext_chunk_sizes_with_time(self):
ds = xr.Dataset({
'v1': (['time', 'lat', 'lon'], np.zeros([12, 45, 90])),
'v2': (['time', 'lat', 'lon'], np.zeros([12, 45, 90])),
'v3': (['lon'], np.zeros(90)),
'lon': (['lon'], np.linspace(-178, 178, 90)),
'lat': (['lat'], np.linspace(-88, 88, 45)),
'time': (['time'], np.linspace(0, 1, 12))})
ds.v1.encoding['chunksizes'] = (1, 5, 10)
ds.v2.encoding['chunksizes'] = (1, 15, 30)
ds.v3.encoding['chunksizes'] = (90,)
chunk_sizes = get_spatial_ext_chunk_sizes(ds)
self.assertIsNotNone(chunk_sizes)
self.assertEqual(chunk_sizes, dict(lat=15, lon=30))
def test_get_ext_chunk_sizes(self):
ds = xr.Dataset({
'v1': (['time', 'lat', 'lon'], np.zeros([12, 45, 90])),
'v2': (['time', 'lat', 'lon'], np.zeros([12, 45, 90])),
'v3': (['time', 'lat', 'lon'], np.zeros([12, 45, 90])),
'v4': (['lat', 'lon'], np.zeros([45, 90])),
'v5': (['time'], np.zeros(12)),
'lon': (['lon'], np.linspace(-178, 178, 90)),
'lat': (['lat'], np.linspace(-88, 88, 45)),
'time': (['time'], np.linspace(0, 1, 12))})
ds.v1.encoding['chunksizes'] = (12, 5, 10)
ds.v2.encoding['chunksizes'] = (12, 15, 30)
ds.v3.encoding['chunksizes'] = (12, 5, 10)
ds.v4.encoding['chunksizes'] = (5, 10)
ds.v5.encoding['chunksizes'] = (1,)
def map_fn(size, prev_value):
"""Collect all sizes."""
return [size] + prev_value
def reduce_fn(values):
"""Median."""
values = sorted(values)
return values[len(values) // 2]
chunk_sizes = get_ext_chunk_sizes(ds, init_value=[], map_fn=map_fn, reduce_fn=reduce_fn)
self.assertIsNotNone(chunk_sizes)
self.assertEqual(chunk_sizes, dict(time=12, lat=5, lon=10))
class DataAccessErrorTest(unittest.TestCase):
def test_plain(self):
try:
raise DataAccessError("haha")
except DataAccessError as e:
self.assertEqual(str(e), "haha")
self.assertIsInstance(e, Exception)
class NetworkErrorTest(unittest.TestCase):
def test_plain(self):
try:
raise NetworkError("hoho")
except NetworkError as e:
self.assertEqual(str(e), "hoho")
self.assertIsInstance(e, ConnectionError)
|
CCI-Tools/cate-core
|
tests/core/test_ds.py
|
Python
|
mit
| 14,649
|
[
"NetCDF"
] |
3c051320e14733c14ab572cd6ecaa0a6d57e676c39b74f0c4768bd3ccfe7ee8b
|
"""
Tools for the instructor dashboard
"""
import json
import operator
import dateutil
import six
from django.contrib.auth.models import User
from django.http import HttpResponseBadRequest
from django.utils.translation import ugettext as _
from edx_when import api
from opaque_keys.edx.keys import UsageKey
from pytz import UTC
from six import string_types, text_type
from six.moves import zip
from student.models import get_user_by_username_or_email, CourseEnrollment
class DashboardError(Exception):
"""
Errors arising from use of the instructor dashboard.
"""
def response(self):
"""
Generate an instance of HttpResponseBadRequest for this error.
"""
error = six.text_type(self)
return HttpResponseBadRequest(json.dumps({'error': error}))
def handle_dashboard_error(view):
"""
Decorator which adds seamless DashboardError handling to a view. If a
DashboardError is raised during view processing, an HttpResponseBadRequest
is sent back to the client with JSON data about the error.
"""
def wrapper(request, course_id):
"""
Wrap the view.
"""
try:
return view(request, course_id=course_id)
except DashboardError as error:
return error.response()
return wrapper
def strip_if_string(value):
    """Strip whitespace if value is a string; otherwise return it unchanged."""
if isinstance(value, string_types):
return value.strip()
return value
def get_student_from_identifier(unique_student_identifier):
"""
Gets a student object using either an email address or username.
Returns the student object associated with `unique_student_identifier`
Raises User.DoesNotExist if no user object can be found, the user was
retired, or the user is in the process of being retired.
DEPRECATED: use student.models.get_user_by_username_or_email instead.
"""
return get_user_by_username_or_email(unique_student_identifier)
def require_student_from_identifier(unique_student_identifier):
"""
Same as get_student_from_identifier() but will raise a DashboardError if
the student does not exist.
"""
try:
return get_student_from_identifier(unique_student_identifier)
except User.DoesNotExist:
raise DashboardError(
_(u"Could not find student matching identifier: {student_identifier}").format(
student_identifier=unique_student_identifier
)
)
def parse_datetime(datestr):
"""
Convert user input date string into an instance of `datetime.datetime` in
UTC.
"""
try:
return dateutil.parser.parse(datestr).replace(tzinfo=UTC)
except ValueError:
raise DashboardError(_("Unable to parse date: ") + datestr)
def find_unit(course, url):
"""
Finds the unit (block, module, whatever the terminology is) with the given
url in the course tree and returns the unit. Raises DashboardError if no
unit is found.
"""
def find(node, url):
"""
Find node in course tree for url.
"""
if text_type(node.location) == url:
return node
for child in node.get_children():
found = find(child, url)
if found:
return found
return None
unit = find(course, url)
if unit is None:
raise DashboardError(_(u"Couldn't find module for url: {0}").format(url))
return unit
def get_units_with_due_date(course):
"""
Returns all top level units which have due dates. Does not return
descendents of those nodes.
"""
units = []
def visit(node):
"""
Visit a node. Checks to see if node has a due date and appends to
`units` if it does. Otherwise recurses into children to search for
nodes with due dates.
"""
if getattr(node, 'due', None):
units.append(node)
else:
for child in node.get_children():
visit(child)
visit(course)
#units.sort(key=_title_or_url)
return units
def title_or_url(node):
"""
Returns the `display_name` attribute of the passed in node of the course
tree, if it has one. Otherwise returns the node's url.
"""
title = getattr(node, 'display_name', None)
if not title:
title = text_type(node.location)
return title
def set_due_date_extension(course, unit, student, due_date, actor=None, reason=''):
"""
Sets a due date extension.
Raises:
        DashboardError if the unit or the extended due date is invalid, or the
        user is not enrolled in the course.
"""
mode, __ = CourseEnrollment.enrollment_mode_for_user(user=student, course_id=six.text_type(course.id))
if not mode:
raise DashboardError(_("Could not find student enrollment in the course."))
if due_date:
try:
api.set_date_for_block(course.id, unit.location, 'due', due_date, user=student, reason=reason, actor=actor)
except api.MissingDateError:
raise DashboardError(_(u"Unit {0} has no due date to extend.").format(unit.location))
except api.InvalidDateError:
raise DashboardError(_("An extended due date must be later than the original due date."))
else:
api.set_date_for_block(course.id, unit.location, 'due', None, user=student, reason=reason, actor=actor)
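# A minimal usage sketch (left as comments; the course/unit/student objects
# come from the LMS runtime and are assumptions here):
#
#     due = parse_datetime('2019-06-01 23:59')
#     set_due_date_extension(course, unit, student, due,
#                            actor=request.user, reason='illness')
#     # passing due_date=None clears a previously granted extension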
def dump_module_extensions(course, unit):
"""
Dumps data about students with due date extensions for a particular module,
specified by 'url', in a particular course.
"""
header = [_("Username"), _("Full Name"), _("Extended Due Date")]
data = []
for username, fullname, due_date in api.get_overrides_for_block(course.id, unit.location):
due_date = due_date.strftime(u'%Y-%m-%d %H:%M')
data.append(dict(list(zip(header, (username, fullname, due_date)))))
data.sort(key=operator.itemgetter(_("Username")))
return {
"header": header,
"title": _(u"Users with due date extensions for {0}").format(
title_or_url(unit)),
"data": data
}
def dump_student_extensions(course, student):
"""
Dumps data about the due date extensions granted for a particular student
in a particular course.
"""
data = []
header = [_("Unit"), _("Extended Due Date")]
units = get_units_with_due_date(course)
units = {u.location: u for u in units}
query = api.get_overrides_for_user(course.id, student)
for override in query:
location = override['location'].replace(course_key=course.id)
if location not in units:
continue
due = override['actual_date']
due = due.strftime(u"%Y-%m-%d %H:%M")
title = title_or_url(units[location])
data.append(dict(list(zip(header, (title, due)))))
data.sort(key=operator.itemgetter(_("Unit")))
return {
"header": header,
"title": _(u"Due date extensions for {0} {1} ({2})").format(
student.first_name, student.last_name, student.username),
"data": data}
def add_block_ids(payload):
"""
rather than manually parsing block_ids from module_ids on the client, pass the block_ids explicitly in the payload
"""
if 'data' in payload:
for ele in payload['data']:
if 'module_id' in ele:
ele['block_id'] = UsageKey.from_string(ele['module_id']).block_id
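# A minimal example (left as comments; the module_id string is illustrative):
#
#     payload = {'data': [{'module_id':
#         'block-v1:edX+DemoX+Demo+type@problem+block@abc123'}]}
#     add_block_ids(payload)
#     # each element gains 'block_id': 'abc123'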
|
edx-solutions/edx-platform
|
lms/djangoapps/instructor/views/tools.py
|
Python
|
agpl-3.0
| 7,408
|
[
"VisIt"
] |
7714dc2c82ecd0c355aac98e91dd11289e47ee7f8231702e316ab3a28c213469
|
#!/usr/bin/env python
""" Defines the synthetic_image class to add image realism to the idealized sunrise images.
The synthetic_image class defines a number of routines to take the original image and
convolve it with some psf function, add sky noise, rebin to an appropriate pixel scale
(based on the telescope), scale to an appropriate image size (based on a petrosian radius
calculation), and add a background image (SDSS only supported at the moment).
The majority of the code in this file was developed by Greg Snyder and can be found in
Snyder et al., (2015), http://arxiv.org/abs/1502.07747
"""
import numpy as np
import os
import sys
import math
import gc
try:
import astropy.io.fits as fits
print "loaded astropy.io.fits"
except:
try:
import pyfits as fits
print "loaded pyfits"
except:
print "Error: Unable to access PyFITS or AstroPy modules."
print "Add PyFITS to your site-packages with:"
print "% pip install pyfits\n"
print " or "
print "% easy_install pyfits\n"
print " or "
print "download at: www.stsci.edu/institute/software_hardware/pyfits/Download\n"
import cosmocalc
import scipy as sp
import scipy.ndimage
import scipy.signal
import scipy.interpolate
try:
import astropy.convolution.convolve as convolve ; CONVOLVE_TYPE='astropy'
print "loaded astropy.convolution.convolve"
from astropy.convolution import *
except:
try:
from scipy.signal import convolve2d as convolve ; CONVOLVE_TYPE='scipy'
print "loaded scipy.signal.convolve2d; note that the astropy.convolution.convolve() function is preferred. There may be unexpected sub-pixel or off-by-one behavior with this scipy function."
except:
print "Error: Unable to access SciPy or AstroPy convolution modules."
import sunpy.sunpy__load
import time
import wget
import warnings
__author__ = "Paul Torrey and Greg Snyder"
__copyright__ = "Copyright 2014, The Authors"
__credits__ = ["Paul Torrey", "Greg Snyder"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Paul Torrey"
__email__ = "ptorrey@mit.harvard.edu"
__status__ = "Production"
if __name__ == '__main__': #code to execute if called from command-line
pass #do nothing
verbose=False
abs_dist = 0.01
erg_per_joule = 1e7
speedoflight_m = 2.99e8
m2_to_cm2 = 1.0e-4
n_arcsec_per_str = 4.255e10 # (radian per arc second)^2
n_pixels_galaxy_zoo = 424
###########################################################
# SDSS background images created by Greg Snyder on 6/18/14#
# SDSS background obtained from: data.sdss3.org/mosaics #
# Ra = 175.0
# Dec = 30.0
# Size (deg) = 0.5
# Pixel Scale = 0.24 "/pixel
#
# HST backgrounds provided by Erica Nelson and Pascal Oesch
# and integrated here by P. Torrey
# #########################################################
dl_base="http://www.illustris-project.org/files/backgrounds"
bg_base='./data/'
backgrounds = [ [], [], # GALEX 0 1
[bg_base+'/SDSS_backgrounds/J113959.99+300000.0-u.fits'], # 2 SDSS-u
[bg_base+'/SDSS_backgrounds/J113959.99+300000.0-g.fits'], # 3 SDSS-g
[bg_base+'/SDSS_backgrounds/J113959.99+300000.0-r.fits'], # 4 SDSS-r
[bg_base+'/SDSS_backgrounds/J113959.99+300000.0-i.fits'], # 5 SDSS-i
[bg_base+'/SDSS_backgrounds/J113959.99+300000.0-z.fits'], # 6 SDSS-z
[], [], [], [], # 7-8-9-10 IRAC
[], [], [], [], [], [], [], [], [], [], # 11-12-13-14-15-16-17-18 JOHNSON/COUSINS + 2 mass
[bg_base+'/HST_backgrounds/xdf_noise_F775W_30mas.fits'], #21 ACS-435
[bg_base+'/HST_backgrounds/GOODSN_F606W.fits'], #22 ACS-606
[bg_base+'/HST_backgrounds/xdf_noise_F775W_30mas.fits'], #23 ACS-775
[bg_base+'/HST_backgrounds/xdf_noise_F775W_30mas.fits'], #24 ACS-850
[bg_base+'/HST_backgrounds/GOODSN_F125W.fits'], #25 f105w
[bg_base+'/HST_backgrounds/GOODSN_F125W.fits'], #26 f125w
[bg_base+'/HST_backgrounds/GOODSN_F160W.fits'], #27 f160w
[], [], [], [], [], [], [], [] # NIRCAM
]
bg_zpt = { "u_SDSS.res":[22.5],
"g_SDSS.res":[22.5],
"r_SDSS.res":[22.5],
"i_SDSS.res":[22.5],
"z_SDSS.res":[22.5]}
#bg_zpt = [ [], [], # GALEX
# [22.5],
# [22.5],
# [22.5],
# [22.5],
# [22.5],
# [], [], [], [], # 7-8-9-10 IRAC
# [], [], [], [], [], [], [], [], [], [], # 11-12-13-14-15-16-17-18 JOHNSON/COUSINS + 2 mass
# [25.69],
# [25.69],
# [25.69],
# [25.69],
# [25.69],
# [25.69],
# [25.69],
# [], [], [], [], [], [], [], [] # NIRCAM
# ]
def build_synthetic_image(filename, band, r_petro_kpc=None, openlist=None, **kwargs):
""" build a synthetic image from a SUNRISE fits file and return the image to the user """
obj = synthetic_image(filename, band=band, r_petro_kpc=r_petro_kpc, openlist=openlist, **kwargs)
image = obj.bg_image.return_image()
rp = obj.r_petro_kpc
seed = obj.seed
failed= obj.bg_failed
fitsfn = obj.fitsfn
openlist = obj.openlist
del obj
gc.collect()
return image, rp, seed, failed, fitsfn, openlist
def load_resolved_broadband_apparent_magnitudes(filename, redshift, camera=0, seed=12345, n_bands=36, **kwargs):
""" loads n_band x n_pix x n_pix image array with apparent mags for synthetic images """
mags = sunpy.sunpy__load.load_all_broadband_photometry(filename, camera=0)
for band in np.arange(n_bands):
obj = synthetic_image(filename, band=int(band), seed=seed, redshift=redshift, **kwargs)
img = obj.bg_image.return_image() # muJy / str
if band==0:
n_pixels = img.shape[0]
all_images = np.zeros( (n_bands, n_pixels, n_pixels ) )
all_images[band, :, :] = img # muJy / str
pixel_in_sr = (1e3*obj.bg_image.pixel_in_kpc /10.0)**2
all_images *= pixel_in_sr / 1e6 # in Jy
for band in np.arange(n_bands):
tot_img_in_Jy = np.sum(all_images[band,:,:]) # total image flux in Jy
abmag = -2.5 * np.log10(tot_img_in_Jy / 3631 )
if verbose:
print "the ab magnitude of band "+str(band)+" is :"+str(abmag)+" "+str(mags[band])
print abmag/mags[band], abmag - mags[band]
print " "
all_images = -2.5 * np.log10( all_images / 3631 ) # abmag in each pixel
dist = (cosmocalc.cosmocalc(redshift, H0=70.4, WM=0.2726, WV=0.7274))['DL_Mpc'] * 1e6
dist_modulus = 5.0 * ( np.log10(dist) - 1.0 )
apparent_magnitudes = dist_modulus + all_images
del mags, obj, img, n_pixels, all_images, pixel_in_sr, tot_img_in_Jy, abmag, dist, dist_modulus
gc.collect()
return apparent_magnitudes
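# A worked check of the distance-modulus step above (numbers illustrative):
# for z = 0.05 with the cosmology used here, the luminosity distance is
# roughly 220 Mpc, i.e. dist ~ 2.2e8 pc, so
#
#     dist_modulus = 5.0 * (np.log10(2.2e8) - 1.0)   # ~ 36.7 mag
#
# which is added to the per-pixel AB magnitudes to give apparent magnitudes.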
class synthetic_image:
""" main class for loading and manipulating SUNRISE data into real data format """
def __init__(self,
filename, band=0, camera=0,
redshift=0.05,
psf_fwhm_arcsec=1.0, pixelsize_arcsec=0.24,
r_petro_kpc=None, save_fits=False,
seed=None,
add_background=True,
add_psf=True,
add_noise=True,
rebin_phys=True,
rebin_gz=False,
n_target_pixels=n_pixels_galaxy_zoo,
resize_rp=True,
sn_limit=25.0,
sky_sig=None,
verbose=False,
fix_seed=True,
bg_tag=None,
bb_label='broadband_',
output_label='',
psf_fits=None,
psf_pixsize_arcsec=None,
psf_truncate_pixels=None,
psf_hdu_num = 0,
custom_fitsfile=None,
bb_header=None,
openlist=None,
**kwargs):
if (not os.path.exists(filename)):
print "file not found:", filename
sys.exit()
start_time = time.time()
self.filename = filename
self.cosmology = cosmology(redshift)
self.telescope = telescope(psf_fwhm_arcsec, pixelsize_arcsec, psf_fits, psf_pixsize_arcsec, rebin_phys, add_psf, psf_truncate_pixels,psf_hdu_num)
band_names = sunpy.sunpy__load.load_broadband_names(filename)
hdulist = fits.open(filename)
if type(band) is not int:
band = int( np.where([this_band == band for this_band in band_names])[0][0] )
self.camera = camera
self.band = band
self.band_name = band_names[band]
self.image_header = hdulist['CAMERA'+str(camera)+'-BROADBAND-NONSCATTER'].header
bb_header = self.image_header
self.broadband_header = hdulist['BROADBAND'].header
self.param_header = hdulist['CAMERA'+str(camera)+'-PARAMETERS'].header
self.int_quant_data = hdulist['INTEGRATED_QUANTITIES'].data
self.filter_data = hdulist['FILTERS'].data
self.lambda_eff = (self.filter_data['lambda_eff'])[band]
self.ewidth_lambda = (self.filter_data['ewidth_lambda'])[band]
self.ewidth_nu = (self.filter_data['ewidth_nu'])[band]
self.sunrise_absolute_mag = (self.filter_data['AB_mag_nonscatter'+str(self.camera)])[band]
hdulist.close()
#============= DECLARE ALL IMAGES HERE =================#
self.sunrise_image = single_image() # orig sunrise image
self.psf_image = single_image() # supersampled image + psf convolution
self.rebinned_image = single_image() # rebinned by appropriate pixel scale
self.noisy_image = single_image() # noise added via gaussian draw
self.nmag_image = single_image() # converted to nanomaggies units
self.rp_image = single_image() # scale image based on rp radius criteria (for GZ)
self.bg_image = single_image() # add backgrounds (only possible for 5 SDSS bands at the moment)
#============ SET ORIGINAL IMAGE ======================#
all_images,self.openlist = sunpy.sunpy__load.load_all_broadband_images(filename,camera=camera,openlist=openlist)
#to_nu = ((self.lambda_eff**2 ) / (speedoflight_m)) #* pixel_area_in_str
to_nu = (self.ewidth_lambda/self.ewidth_nu)
to_microjanskies = (1.0e6) * to_nu * (1.0e26) # 1 muJy/str (1Jy = 1e-26 W/m^2/Hz)
this_image = all_images[band,:,:]
this_image = this_image * to_microjanskies # to microjanskies / str
        if verbose:
            print "SUNRISE calculated the abmag for this system to be: {:.2f}".format(self.filter_data.AB_mag_nonscatter0[band])
self.sunrise_image.init_image(this_image, self, comoving_to_phys_fov=False)
# assume now that all images are in micro-Janskies per str
self.convolve_with_psf(add_psf=add_psf)
#self.add_gaussian_psf(add_psf=add_psf) add_gaussian_psf now called in convolve_with_psf, if appropriate
self.rebin_to_physical_scale(rebin_phys=rebin_phys)
self.add_noise(add_noise=add_noise, sn_limit=sn_limit, sky_sig=sky_sig)
self.calc_r_petro(r_petro_kpc=r_petro_kpc, resize_rp=resize_rp)
self.resize_image_from_rp(resize_rp=resize_rp)
self.seed = seed
self.bg_failed= False
self.seed = self.add_background(seed=self.seed, add_background=add_background, rebin_gz=rebin_gz, n_target_pixels=n_target_pixels, fix_seed=fix_seed)
end_time = time.time()
#print "init images + adding realism took "+str(end_time - start_time)+" seconds"
num_label = len(bb_label)
if verbose:
print "preparing to save "+filename[:filename.index(bb_label)]+'synthetic_image_'+filename[filename.index(bb_label)+num_label:filename.index('.fits')]+'_band_'+str(self.band)+'_camera_'+str(camera)+'_'+str(int(self.seed))+'.fits'
if save_fits:
if custom_fitsfile != None:
self.save_bgimage_fits_mujyas(custom_fitsfile,add_noise=add_noise,add_background=add_background)
self.fitsfn = custom_fitsfile
else:
orig_dir=filename[:filename.index('broadband')]
if bg_tag!=None:
outputfitsfile = orig_dir+output_label+'synthetic_image_'+filename[filename.index(bb_label)+num_label:filename.index('.fits')]+'_band_'+str(self.band)+'_camera_'+str(camera)+'_bg_'+str(int(bg_tag))+'.fits'
else:
outputfitsfile = orig_dir+output_label+'synthetic_image_'+filename[filename.index(bb_label)+num_label:filename.index('.fits')]+'_band_'+str(self.band)+'_camera_'+str(camera)+'_bg_'+str(int(self.seed))+'.fits'
self.save_bgimage_fits(outputfitsfile)
self.fitsfn=outputfitsfile
del self.sunrise_image, self.psf_image, self.rebinned_image, self.noisy_image, self.nmag_image, self.rp_image
gc.collect()
def convolve_with_psf(self, add_psf=True):
if add_psf:
if self.telescope.psf_fits_file != None:
#first, rebin to psf pixel scale
n_pixel_orig = self.sunrise_image.n_pixels
n_pixel_new = self.sunrise_image.n_pixels*self.sunrise_image.pixel_in_arcsec/self.telescope.psf_pixsize_arcsec
#print np.sum(self.sunrise_image.image)
new_image = congrid(self.sunrise_image.image, (n_pixel_new,n_pixel_new))
#print np.sum(new_image)
#second, convolve with PSF
if CONVOLVE_TYPE=='astropy':
#astropy.convolution.convolve()
print "convolving with astropy"
conv_im = convolve_fft(new_image,self.telescope.psf_kernel,boundary='fill',fill_value=0.0,normalize_kernel=True) #boundary option?
#print np.sum(conv_im)
else:
#scipy.signal.convolve2d()
conv_im = convolve(new_image,self.telescope.psf_kernel/np.sum(self.telescope.psf_kernel),boundary='fill',fillvalue=0.0,mode='same') #boundary option?
self.psf_image.init_image(conv_im,self)
del new_image, conv_im
else:
self.add_gaussian_psf(add_psf=add_psf)
else:
self.psf_image.init_image(self.sunrise_image.image, self)
gc.collect()
def add_gaussian_psf(self, add_psf=True, sample_factor=1.0): # operates on sunrise_image -> creates psf_image
if add_psf:
current_psf_sigma_pixels = self.telescope.psf_fwhm_arcsec * (1.0/2.355) / self.sunrise_image.pixel_in_arcsec
if current_psf_sigma_pixels<8: # want the psf sigma to be resolved with (at least) 8 pixels...
target_psf_sigma_pixels = 8.0
n_pixel_new = np.floor(self.sunrise_image.n_pixels * target_psf_sigma_pixels / current_psf_sigma_pixels )
if n_pixel_new > 1500: # for speed, beyond this, the PSF is already very small...
n_pixel_new = 1500
target_psf_sigma_pixels = n_pixel_new * current_psf_sigma_pixels / self.sunrise_image.n_pixels
new_image = congrid(self.sunrise_image.image, (n_pixel_new, n_pixel_new) )
current_psf_sigma_pixels = target_psf_sigma_pixels * (
(self.sunrise_image.n_pixels * target_psf_sigma_pixels
/ current_psf_sigma_pixels) / n_pixel_new )
else:
new_image = self.sunrise_image.image
psf_image = np.zeros_like( new_image ) * 1.0
dummy = sp.ndimage.filters.gaussian_filter(new_image,
current_psf_sigma_pixels, output=psf_image, mode='constant')
self.psf_image.init_image(psf_image, self)
del new_image, psf_image, dummy
else:
self.psf_image.init_image(self.sunrise_image.image, self)
gc.collect()
def rebin_to_physical_scale(self, rebin_phys=True):
if rebin_phys:
n_pixel_new = np.floor( ( self.psf_image.pixel_in_arcsec / self.telescope.pixelsize_arcsec ) * self.psf_image.n_pixels )
rebinned_image = congrid(self.psf_image.image, (n_pixel_new, n_pixel_new) )
self.rebinned_image.init_image(rebinned_image, self)
del n_pixel_new, rebinned_image
gc.collect()
else:
self.rebinned_image.init_image(self.psf_image.image, self)
    def add_noise(self, add_noise=True, sky_sig=None, sn_limit=25.0):
        if add_noise:
            if sky_sig is None:
                total_flux = np.sum( self.rebinned_image.image )
                area = 1.0 * self.rebinned_image.n_pixels * self.rebinned_image.n_pixels
                sky_sig = np.sqrt( (total_flux / sn_limit)**2 / (area**2 ) )  # equivalent to total_flux / (sn_limit * area)
            self.sky_sig = sky_sig  # stored so save_bgimage_fits_mujyas can record SKYSIG in the header
            noise_image = sky_sig * np.random.randn( self.rebinned_image.n_pixels, self.rebinned_image.n_pixels )
            new_image = self.rebinned_image.image + noise_image
            self.noisy_image.init_image(new_image, self)
            del noise_image, new_image
            gc.collect()
        else:
            self.sky_sig = None
            self.noisy_image.init_image(self.rebinned_image.image, self)
def calc_r_petro(self, r_petro_kpc=None, resize_rp=True): # rename to "set_r_petro"
" this routine is not working well. Must manually set r_p until this is fixed..."
if ( resize_rp==False ):
r_petro_kpc = 1.0
        elif(r_petro_kpc==None):
            RadiusObject = RadialInfo(self.noisy_image.n_pixels, self.noisy_image.image)
            r_petro_kpc = RadiusObject.PetroRadius * self.noisy_image.pixel_in_kpc  # convert from pixels to kpc outside of the RadialInfo class
if verbose:
print " we've calculated a r_p of "+str(r_petro_kpc)
del RadiusObject
gc.collect()
        # otherwise keep the user-supplied r_petro_kpc
if r_petro_kpc < 3.0:
r_petro_kpc = 3.0
if r_petro_kpc > 100.0:
r_petro_kpc = 100.0
r_petro_pixels = r_petro_kpc / self.noisy_image.pixel_in_kpc
self.r_petro_pixels = r_petro_pixels
self.r_petro_kpc = r_petro_kpc
def resize_image_from_rp(self, resize_rp=True, resize_factor=0.016, max_rp=100.0):
if resize_rp:
if self.r_petro_kpc < max_rp:
rp_pixel_in_kpc = resize_factor * self.r_petro_kpc # The target scale; was 0.008, upping to 0.016 for GZ based on feedback
else:
self.r_petro_kpc = max_rp
rp_pixel_in_kpc = resize_factor * self.r_petro_kpc
Ntotal_new = int( (self.noisy_image.pixel_in_kpc / rp_pixel_in_kpc ) * self.noisy_image.n_pixels )
rebinned_image = congrid(self.noisy_image.image , (Ntotal_new, Ntotal_new) )
diff = n_pixels_galaxy_zoo - Ntotal_new #
if diff >= 0:
shift = int(np.floor(1.0*diff/2.0))
lp = shift
up = shift + Ntotal_new
tmp_image = np.zeros( (n_pixels_galaxy_zoo, n_pixels_galaxy_zoo) )
tmp_image[lp:up,lp:up] = rebinned_image[0:Ntotal_new, 0:Ntotal_new]
rp_image = tmp_image
else:
shift = int( np.floor(-1.0*diff/2.0) )
lp = int(shift)
up = int(shift+n_pixels_galaxy_zoo)
rp_image = rebinned_image[lp:up, lp:up]
self.rp_image.init_image(rp_image, self, fov = (1.0*n_pixels_galaxy_zoo)*(resize_factor * self.r_petro_kpc) )
del rebinned_image, rp_image
gc.collect()
else:
self.rp_image.init_image(self.noisy_image.image, self, fov=self.noisy_image.pixel_in_kpc*self.noisy_image.n_pixels)
def add_background(self, seed=1, add_background=True, rebin_gz=False, n_target_pixels=n_pixels_galaxy_zoo, fix_seed=True):
if add_background and (len(backgrounds[self.band]) > 0):
bg_image = 10.0*self.rp_image.image # dummy values for while loop condition
tot_bg = np.sum(bg_image)
tot_img= np.sum(self.rp_image.image)
tol_fac = 1.0
while(tot_bg > tol_fac*tot_img):
#=== load *full* bg image, and its properties ===#
bg_filename = (backgrounds[self.band])[0]
if not (os.path.isfile(bg_filename)):
print " Background files were not found... "
print " The standard files used in Torrey al. (2015), Snyder et al., (2015) and Genel et al., (2014) ..."
print " can be downloaded using the download_backgrounds routine or manually from: "
print " http://illustris.rc.fas.harvard.edu/data/illustris_images_aux/backgrounds/SDSS_backgrounds/J113959.99+300000.0-u.fits "
print " http://illustris.rc.fas.harvard.edu/data/illustris_images_aux/backgrounds/SDSS_backgrounds/J113959.99+300000.0-g.fits "
print " "
file = fits.open(bg_filename) ; # was pyfits.open(bg_filename) ;
header = file[0].header ;
pixsize = get_pixelsize_arcsec(header) ;
Nx = header.get('NAXIS2') ; Ny = header.get('NAXIS1')
#=== figure out how much of the image to extract ===#
Npix_get = np.floor(self.rp_image.n_pixels * self.rp_image.pixel_in_arcsec / pixsize)
im = file[0].data # this is in some native units (nmaggies, for SDSS )
halfval_i = np.floor(np.float(Nx)/1.3)
halfval_j = np.floor(np.float(Ny)/1.3)
np.random.seed(seed=int(seed))
starti = np.random.random_integers(5,halfval_i)
startj = np.random.random_integers(5,halfval_j)
bg_image_raw = im[starti:starti+Npix_get,startj:startj+Npix_get] # the extracted patch...
#=== need to convert to microJy / str ===#
bg_image_muJy = bg_image_raw * 10.0**(-0.4*(bg_zpt[self.band_name][0]- 23.9 )) # if you got your zero points right, this is now in muJy
pixel_area_in_str = pixsize**2 / n_arcsec_per_str
bg_image = bg_image_muJy / pixel_area_in_str
#=== need to rebin bg_image ===#
bg_image = congrid(bg_image, (self.rp_image.n_pixels, self.rp_image.n_pixels))
#=== compare sum(bg_image) to sum(self.rp_image.image) ===#
if (fix_seed):
tot_bg = 0 # if seed is fixed, no need for brightness check...
else:
tot_bg = np.sum(bg_image)
tot_img= np.sum(self.rp_image.image)
if(tot_bg > tol_fac*tot_img):
seed+=1
new_image = bg_image + self.rp_image.image
new_image[ new_image < self.rp_image.image.min() ] = self.rp_image.image.min()
if (new_image.mean() > (5*self.rp_image.image.mean()) ):
self.bg_failed=True
del im, bg_image_raw, bg_image_muJy
else:
new_image = self.rp_image.image
if rebin_gz:
new_image = congrid( new_image, (n_target_pixels, n_target_pixels) )
self.bg_image.init_image(new_image, self, fov = self.rp_image.pixel_in_kpc * self.rp_image.n_pixels)
del new_image
gc.collect()
return seed
def save_bgimage_fits(self,outputfitsfile, save_img_in_muJy=False):
""" Written by G. Snyder 8/4/2014 to output FITS files from Sunpy module """
theobj = self.bg_image
image = np.copy( theobj.return_image() ) # in muJy / str
pixel_area_in_str = theobj.pixel_in_arcsec**2 / n_arcsec_per_str
image *= pixel_area_in_str # in muJy
print np.sum(image)
if save_img_in_muJy == False:
print bg_zpt[self.band_name]
if len(bg_zpt[self.band_name]) > 0:
image = image / ( 10.0**(-0.4*(bg_zpt[self.band_name][0]- 23.9 )) )
print ( 10.0**(-0.4*(bg_zpt[self.band_name][0]- 23.9 )) )
else:
print 'saving image in muJy!!!!!'
print " "
print " "
print image.shape
print np.sum(image)
# print 22.5 - 2.5*np.log10( np.sum(image) )
# print -2.5*np.log10( np.sum(image) )
print " "
primhdu = fits.PrimaryHDU(image) ; primhdu.header.update('IMUNIT','NMAGGIE',comment='approx 3.63e-6 Jy')
primhdu.header.update('ABABSZP',22.5,'For Final Image') #THIS SHOULD BE CORRECT FOR NANOMAGGIE IMAGES ONLY
# primhdu.header.update('ORIGZP',theobj.ab_abs_zeropoint,'For Original Image')
primhdu.header.update('PIXSCALE',theobj.pixel_in_arcsec,'For Final Image, arcsec')
primhdu.header.update('PIXORIG', theobj.camera_pixel_in_arcsec, 'For Original Image, arcsec')
primhdu.header.update('PIXKPC',theobj.pixel_in_kpc, 'KPC')
primhdu.header.update('ORIGKPC',self.sunrise_image.pixel_in_kpc,'For Original Image, KPC')
primhdu.header.update('NPIX',theobj.n_pixels)
primhdu.header.update('NPIXORIG',self.sunrise_image.n_pixels)
primhdu.header.update('REDSHIFT',self.cosmology.redshift)
primhdu.header.update('LUMDIST' ,self.cosmology.lum_dist, 'MPC')
primhdu.header.update('ANGDIST' ,self.cosmology.ang_diam_dist, 'MPC')
primhdu.header.update('PSCALE' ,self.cosmology.kpc_per_arcsec,'KPC')
primhdu.header.update('H0',self.cosmology.H0)
primhdu.header.update('WM',self.cosmology.WM)
primhdu.header.update('WV',self.cosmology.WV)
primhdu.header.update('PSFFWHM',self.telescope.psf_fwhm_arcsec,'arcsec')
primhdu.header.update('TPIX',self.telescope.pixelsize_arcsec,'arcsec')
primhdu.header.update('FILTER', self.band_name)
primhdu.header.update('FILE',self.filename)
        # primhdu.update_ext_name('SYNTHETIC_IMAGE')  # older pyfits API
        primhdu.name = "SYNTHETIC_IMAGE"
# primhdu.header[keyword] = value
#Optionally, we can save additional images alongside these final ones
#e.g., the raw sunrise image below
#simhdu = pyfits.ImageHDU(self.sunriseimage, header=self.image_header) ; zhdu.update_ext_name('SIMULATED_IMAGE')
#newlist = pyfits.HDUList([primhdu, simhdu])
#create HDU List container
newlist = fits.HDUList([primhdu])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
#save container to file, overwriting as needed
newlist.writeto(outputfitsfile,clobber=True)
    def save_bgimage_fits_mujyas(self,outputfitsfile, save_img_in_muJy=False,add_noise=False, add_background=False):
        """ Written by G. Snyder 8/4/2014 to output FITS files from Sunpy module.
        Updated 9/24/2015. """
theobj = self.bg_image
image = np.copy( theobj.return_image() ) # in muJy / str
image *= 1.0/n_arcsec_per_str # in muJy/Arcsec**2
sunobj = self.sunrise_image
sunimage = np.copy(sunobj.return_image() )
sunimage *= 1.0/n_arcsec_per_str
#print theobj.pixel_in_arcsec
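        # Descriptive note on the zeropoint below (added commentary): the image is
        # in muJy/arcsec^2, so the -2.5*log10(pixel_in_arcsec**2) term folds the
        # pixel area into the zeropoint (summing the image then yields a total
        # flux), -2.5*(-6.0) converts muJy to Jy, and 2.5*log10(3631.0) references
        # the AB zero flux of 3631 Jy.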
AB_zeropoint = -2.5*np.log10(theobj.pixel_in_arcsec**2) - 2.5*(-6.0) + 2.5*np.log10(3631.0) #for image in muJy/Arcsec**2
total_apparent_mag = -2.5*np.log10(np.sum(image)) + AB_zeropoint
total_absolute_mag = -2.5*np.log10(np.sum(image)) + AB_zeropoint - self.cosmology.distance_modulus
sunrise_absolute_mag = self.sunrise_absolute_mag
sun_AB_app_zp = -2.5*np.log10(sunobj.pixel_in_arcsec**2) - 2.5*(-6.0) + 2.5*np.log10(3631.0)
sun_AB_cam_zp = -2.5*np.log10(sunobj.camera_pixel_in_arcsec**2) - 2.5*(-6.0) + 2.5*np.log10(3631.0)
sun_AB_abs_zp = sun_AB_app_zp - self.cosmology.distance_modulus
sunrise_image_camera_mag = -2.5*np.log10(np.sum(sunimage)) + sun_AB_cam_zp
sunrise_image_apparent_mag = -2.5*np.log10(np.sum(sunimage)) + sun_AB_app_zp
sunrise_image_absolute_mag = -2.5*np.log10(np.sum(sunimage)) + sun_AB_abs_zp
primhdu = fits.PrimaryHDU(np.float32(image)) ; primhdu.header.update('IMUNIT','muJy/SqArcsec',comment='microjanskies per square arcsecond')
primhdu.header.update('ABZP',round(AB_zeropoint,6),'For Final Image')
primhdu.header.update('PIXSCALE',round(theobj.pixel_in_arcsec,6),'For Final Image, arcsec')
primhdu.header.update('PIXORIG', round(theobj.camera_pixel_in_arcsec,6), 'For Original Image, arcsec')
primhdu.header.update('PIXKPC',round(theobj.pixel_in_kpc,6), 'KPC')
primhdu.header.update('ORIGKPC',round(self.sunrise_image.pixel_in_kpc,6),'For Original Image, KPC')
primhdu.header.update('NPIX',theobj.n_pixels)
primhdu.header.update('NPIXORIG',self.sunrise_image.n_pixels)
primhdu.header.update('REDSHIFT',self.cosmology.redshift)
primhdu.header.update('LUMDIST' ,round(self.cosmology.lum_dist,6), 'MPC')
primhdu.header.update('ANGDIST' ,round(self.cosmology.ang_diam_dist,6), 'MPC')
primhdu.header.update('PSCALE' ,round(self.cosmology.kpc_per_arcsec,6),'KPC')
primhdu.header.update('DISTMOD' ,round(self.cosmology.distance_modulus,6),'Mag')
primhdu.header.update('H0',round(self.cosmology.H0,6))
primhdu.header.update('WM',round(self.cosmology.WM,6))
primhdu.header.update('WV',round(self.cosmology.WV,6))
if self.telescope.psf_fits_file==None:
primhdu.header.update('PSFFWHM',round(self.telescope.psf_fwhm_arcsec,6),'arcsec')
else:
primhdu.header.update('PSFFILE',os.path.join(os.path.basename(os.path.dirname(self.telescope.psf_fits_file)),os.path.basename(self.telescope.psf_fits_file)))
primhdu.header.update('TPIX',round(self.telescope.pixelsize_arcsec,6),'arcsec')
primhdu.header.update('FILTER', self.band_name)
primhdu.header.update('FILE',self.filename)
primhdu.header.update('EFLAMBDA',round(self.lambda_eff*1.0e6,6),'filter effective wavelength [microns]')
primhdu.header.update('MAG', round(total_apparent_mag,6), 'AB system')
primhdu.header.update('ABSMAG', round(total_absolute_mag,6), 'AB system')
primhdu.header.update('SUNMAG', round(sunrise_absolute_mag,6), 'from spectrum, Note: excludes Lyman absorption')
primhdu.header.update('SUNCMAG', round(sunrise_image_camera_mag,6), 'from image, camera mag')
primhdu.header.update('SUNAPMAG', round(sunrise_image_apparent_mag,6), 'from image, apparent mag')
primhdu.header.update('SUABSMAG', round(sunrise_image_absolute_mag,6), 'from image, absolute mag')
        if add_noise==False and add_background==False:
            primhdu.header.update('SKYSIG', 0.0, 'image units')
        elif getattr(self, 'sky_sig', None) is not None:
            primhdu.header.update('SKYSIG', round(self.sky_sig,6), 'image units')
        if add_background==True:
            primhdu.header.update('BGFILE', os.path.basename(backgrounds[self.band][0]))
camera_param_cards = self.param_header.cards[13:]
for card in camera_param_cards:
#print card
primhdu.header.append(card)
primhdu.update_ext_name('SYNTHETIC_IMAGE')
#Optionally, we can save additional images alongside these final ones
#e.g., the raw sunrise image below
#simhdu = pyfits.ImageHDU(self.sunriseimage, header=self.image_header) ; zhdu.update_ext_name('SIMULATED_IMAGE')
#newlist = pyfits.HDUList([primhdu, simhdu])
#create HDU List container
newlist = fits.HDUList([primhdu])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
#save container to file, overwriting as needed
newlist.writeto(outputfitsfile,clobber=True)
def get_pixelsize_arcsec(header):
cd1_1 = header.get('CD1_1') # come in degrees
cd1_2 = header.get('CD1_2')
if cd1_2==None:
cd1_2 = header.get('CD2_2')
try:
pix_arcsec = 3600.0*(cd1_1**2 + cd1_2**2)**0.5
except:
print "WARNING!!! SETTING PIXEL SCALE MANUALLY!"
pix_arcsec = 0.05
return pix_arcsec
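# Illustrative note (values assumed for the example, not from the source): for
# a header with CD1_1 = -1.1111e-5 deg/pixel and CD1_2 = 0.0, the routine above
# returns 3600 * sqrt(CD1_1**2 + CD1_2**2) ~= 0.04 arcsec/pixel.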
from scipy.optimize import curve_fit
def my_fit(r, a, b, c):
return a * np.exp(-r / b) + c
class RadialInfo:
""" Class for giving radial profile info for rp calcultions """
def __init__(self,N, image, num_pts=100, max_pixels_for_fit=100000):
self.Npix = N
self.RadiusGrid = np.linspace(0.01,1.0*N,num=num_pts)
self.PetroRatio = np.ones_like( self.RadiusGrid)
xgrid = np.linspace(float(-self.Npix)/2.0 + 0.5,float(self.Npix)/2.0 - 0.5,num=self.Npix)
xsquare = np.zeros((self.Npix,self.Npix))
ysquare = np.zeros_like(xsquare)
ones = np.ones((self.Npix,self.Npix))
for j in range(self.Npix):
xsquare[j,:] = xgrid
ysquare[:,j] = xgrid
self.rsquare = (xsquare**2 + ysquare**2)**0.5
x0 = np.array(self.rsquare).flatten()
y0 = np.array(image).flatten()
#print x0.shape
x0 = x0[ y0 > 0 ]
y0 = y0[ y0 > 0 ]
if x0.shape[0] > max_pixels_for_fit:
index_list = np.arange( x0.shape[0] )
index_list = np.random.choice( index_list, max_pixels_for_fit)
x0 = x0[index_list]
y0 = y0[index_list]
popt, pcov = curve_fit(my_fit, x0, np.log10(y0))
y1 = 10.0**(my_fit(self.RadiusGrid, *popt))
fake_image1 = 10.0**(my_fit(self.rsquare, *popt))
y2 = 10.0**(my_fit(self.RadiusGrid, *popt)) - 10.0**popt[2]
fake_image2 = 10.0**(my_fit(self.rsquare, *popt)) - 10.0**popt[2]
y1sum = np.zeros_like(y1)
y2sum = np.zeros_like(y2)
for index,val in enumerate(y1[:-1]):
if index==0:
this_r = 0.5*(self.RadiusGrid[index]+self.RadiusGrid[index+1])
y1sum[index] = 3.14159 * this_r**2 * y1[index]
y2sum[index] = 3.14159 * this_r**2 * y2[index]
else:
y1sum[index] = y1sum[index-1] + 3.14159 * (self.RadiusGrid[index+1]**2 - self.RadiusGrid[index]**2 ) * (y1[index] + y1[index+1])/2.0
y2sum[index] = y2sum[index-1] + 3.14159 * (self.RadiusGrid[index+1]**2 - self.RadiusGrid[index]**2 ) * (y2[index] + y2[index+1])/2.0
for index,val in enumerate(y1sum[:-1]):
this_r = (0.5*(self.RadiusGrid[index]+self.RadiusGrid[index+1]))
y1sum[index] = val / (3.14159 * this_r **2 )
y2sum[index] = y2sum[index] / (3.14159 * this_r **2 )
self.PetroRatio = np.array(y2/y2sum)
self.PetroRatio[np.isnan(self.PetroRatio)] = 0.0
self.PetroRatio[np.isinf(self.PetroRatio)] = 0.0
self.Pind = np.argmin( np.absolute( np.flipud(self.PetroRatio) - 0.2) )
self.PetroRadius = np.flipud(self.RadiusGrid)[self.Pind]
if verbose:
print y2
print " Saving Figure ..."
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(22,5))
ax = fig.add_subplot(1,5,1)
print x0.shape
ax.plot(x0, y0, 'ro', ms=5)
ax.plot(self.RadiusGrid, y1, 'g', lw=3, ls='-')
ax.plot(self.RadiusGrid, y2, 'b', lw=3, ls='-')
ax.plot(self.RadiusGrid, y1sum, 'g', lw=3, ls='-.')
ax.plot(self.RadiusGrid, y2sum, 'b', lw=3, ls='-.')
ax.plot([self.PetroRadius, self.PetroRadius], [1,1e20], 'k', lw=1, ls='-')
ax.set_yscale('log')
ax.set_ylim([1e7,1e13])
ax = fig.add_subplot(1,5,2)
ax.plot(self.RadiusGrid, y1/y1sum, 'g', lw=3)
ax.plot(self.RadiusGrid, y2/y2sum, 'b', lw=3)
ax.plot([self.PetroRadius, self.PetroRadius], [-10,10], 'k', lw=1, ls='-')
ax.plot(self.RadiusGrid, np.ones_like(self.RadiusGrid) * 0.2 )
ax.set_ylim([0,1])
ax = fig.add_subplot(1,5,3)
ax.imshow( np.log10(image), vmin=7, vmax=13 )
ax = fig.add_subplot(1,5,4)
ax.imshow( np.log10(fake_image1), vmin=7, vmax=13 )
ax = fig.add_subplot(1,5,5)
ax.imshow( np.log10(fake_image2), vmin=7, vmax=13 )
fig.savefig('temp1.png')
fig.clf()
plt.close()
# print " Figure has been saved ... "
# for i,rad in enumerate(self.RadiusGrid):
# tf_annulus = np.logical_and( self.rsquare < 1.25*rad, self.rsquare > 0.80*rad )
# tf_annulus = np.logical_and( tf_annulus, image > min_img_thresh )
# self.annulus_indices.append( np.where(tf_annulus) ) #np.logical_and( self.rsquare < 1.25*rad, self.rsquare > 0.80*rad )) )#
# tf_int = np.logical_and( self.rsquare < rad, image > min_img_thresh )
# self.interior_indices.append( np.where(tf_int) )
# self.annulus_sums.append( np.sum(ones[self.annulus_indices[i]] ) )
# self.interior_sums.append( np.sum(ones[self.interior_indices[i]]) )
# this_sum = np.sum( image[self.interior_indices[i]])
# for radius in RadiusObject.RadiusGrid:
# pflux_annulus = image[ self.annulus_indices[i] ]
# pflux_interior = image[ self.interior_indices[i] ]
# self.sumI_r[i] = np.sum(pflux_interior)
# if(self.annulus_sums[i]*self.interior_sums[i] != 0.0):
# self.AnnulusSB[i] = (np.sum(pflux_annulus)/self.annulus_sums[i])
# self.IntSB[i] = (np.sum(pflux_interior)/self.interior_sums[i])
# self.PetroRatio[i] = (np.sum(pflux_annulus)/self.annulus_sums[i])/(np.sum(pflux_interior)/self.interior_sums[i])
class fits_header:
def __init__(self, filename):
if (not os.path.exists(filename)):
print "file not found:", filename
sys.exit()
hdulist = fits.open(filename)
self.info = hdulist.info()
def my_fits_open(filename):
if (not os.path.exists(filename)):
print "file not found:", filename
sys.exit()
return fits.open(filename)
#============ COSMOLOGY PARAMETERS =====================#
# cosmology class:
#
# used to track (i) the cosmological parameters and
# (ii) image properties set by our adopted cosmology
#
# This class is used to distinguish features of the telescope
# (e.g., pixel size in arcseconds) from features of our
# adopted cosmology (e.g.,image kpc per arcsec)
#
#=======================================================#
class cosmology:
def __init__(self, redshift, H0=70.4, WM=0.2726, WV=0.7274):
self.H0=H0
self.WM=WM
self.WV=WV
self.redshift = redshift
self.lum_dist = (cosmocalc.cosmocalc(self.redshift, H0=self.H0, WM=self.WM, WV=self.WV))['DL_Mpc'] ## luminosity dist in mpc
self.ang_diam_dist = (cosmocalc.cosmocalc(self.redshift, H0=self.H0, WM=self.WM, WV=self.WV))['DA_Mpc'] ##
self.kpc_per_arcsec = (cosmocalc.cosmocalc(self.redshift, H0=self.H0, WM=self.WM, WV=self.WV))['PS_kpc']
self.distance_modulus = 5.0 * ( np.log10(self.lum_dist*1.0e6) - 1.0 )
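# Minimal usage sketch (illustration; quoted numbers are approximate for the
# default cosmology at z = 0.05):
#   cosmo = cosmology(0.05)
#   cosmo.lum_dist          # luminosity distance, ~220 Mpc
#   cosmo.kpc_per_arcsec    # plate scale, ~1 kpc per arcsec
#   cosmo.distance_modulus  # 5*log10(D_L / 10 pc), ~36.7 mag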
#============ TELESCOPE PARAMETERS =====================#
# telescope class:
#
# used to track the psf size in arcsec and pixelsize in arcsec
#=======================================================#
class telescope:
def __init__(self, psf_fwhm_arcsec, pixelsize_arcsec, psf_fits, psf_pixsize_arcsec,rebin_phys,add_psf,psf_truncate_pixels,psf_hdu_num):
self.psf_fwhm_arcsec = psf_fwhm_arcsec
self.pixelsize_arcsec = pixelsize_arcsec
self.psf_truncate_pixels = psf_truncate_pixels
self.psf_hdu_num = psf_hdu_num
self.psf_fits_file = None
self.psf_kernel = None
self.psf_pixsize_arcsec = None
#future upgrade: pass kernel directly and pixel scale instead?
if psf_fits != None:
self.psf_fits_file = psf_fits
orig_psf_hdu = fits.open(psf_fits,ignore_missing_end=True)[psf_hdu_num]
orig_psf_kernel = orig_psf_hdu.data
#some psfs come in cubes... what does this parameter mean? for STDPSF: fiducial detector positions... want near center
if orig_psf_kernel.ndim==3:
npsf = orig_psf_kernel.shape[0]
orig_psf_kernel = orig_psf_kernel[npsf/2,:,:]
if psf_truncate_pixels != None:
psfc = orig_psf_kernel.shape[0]/2
st = self.psf_truncate_pixels
orig_psf_kernel = orig_psf_kernel[psfc-st:psfc+st,psfc-st:psfc+st]
#psf kernel shape must be odd for astropy.convolve??
if orig_psf_kernel.shape[0] % 2 == 0:
new_psf_shape = orig_psf_kernel.shape[0]-1
self.psf_kernel = congrid(orig_psf_kernel,(new_psf_shape,new_psf_shape))
else:
self.psf_kernel = orig_psf_kernel
assert( self.psf_kernel.shape[0] % 2 != 0)
assert (psf_pixsize_arcsec != None)
self.psf_pixsize_arcsec = psf_pixsize_arcsec
if (self.psf_pixsize_arcsec > self.pixelsize_arcsec) and (rebin_phys==True) and (add_psf==True):
print "WARNING: you are requesting to rebin an image to a higher resolution than the requested PSF file supports. OK if this is desired behavior."
#=====================================================#
# single_image class:
#
# This class is used to host and track the properties for
# a single image (one galaxy, one band, one level of realism).
# This class tracks important image traits, such as the
# image array itself, the field of view, number of pixels,
# ab_zeropoint, pixel scale, etc.
#
# When new images are created (e.g., when psf bluring is
# done on the original image) a new "single_image" instance
# is created.
#
# The synthetic_image class (defined below) contains
# several instances of this single_image class
#
#=====================================================#
class single_image:
def __init__(self):
self.image_exists = False
def init_image(self, image, parent_obj, fov=None, comoving_to_phys_fov=False):
self.image = image
self.n_pixels = image.shape[0]
if fov==None:
if comoving_to_phys_fov:
self.pixel_in_kpc = parent_obj.param_header.get('linear_fov') / self.n_pixels / (parent_obj.cosmology.redshift+1)
else:
self.pixel_in_kpc = parent_obj.param_header.get('linear_fov') / self.n_pixels
else:
self.pixel_in_kpc = fov / self.n_pixels
self.pixel_in_arcsec = self.pixel_in_kpc / parent_obj.cosmology.kpc_per_arcsec
self.image_exists = True
self.camera_pixel_in_arcsec = (self.pixel_in_kpc / parent_obj.param_header.get('cameradist') ) * 2.06e5
pixel_in_sr = (1e3*self.pixel_in_kpc /10.0)**2
image_in_muJy = self.image * pixel_in_sr # should now have muJy
tot_img_in_Jy = np.sum(image_in_muJy) / 1e6 # now have total image flux in Jy
abmag = -2.5 * np.log10(tot_img_in_Jy / 3631 )
# print "the ab magnitude of this image is :"+str(abmag)
def calc_ab_abs_zero(self, parent_obj):
lambda_eff_in_m = parent_obj.lambda_eff
pixel_area_in_str = self.camera_pixel_in_arcsec**2 / n_arcsec_per_str
cameradist_in_kpc = parent_obj.param_header.get('cameradist')
to_nu = ((lambda_eff_in_m**2 ) / (speedoflight_m))* pixel_area_in_str
to_microjanskies = (1.0e6) * to_nu * (1.0e26) # 1 Jy = 1e-26 W/m^2/Hz
to_microjanskies_at_10pc = to_microjanskies * (cameradist_in_kpc / abs_dist)**2
ab_abs_zeropoint = 23.90 - (2.5*np.log10(to_microjanskies_at_10pc))
self.ab_abs_zeropoint = ab_abs_zeropoint
def convert_orig_to_nanomaggies(self, parent_obj):
distance_factor = (10.0 / (parent_obj.cosmology.lum_dist * 1.0e6))**2
orig_to_nmaggies = distance_factor * 10.0**(0.4*(22.5 - self.ab_abs_zeropoint) )
self.image_in_nmaggies = self.image * orig_to_nmaggies
def return_image(self):
return self.image
def return_img_nanomaggies_to_orig(image_nm, lum_dist, ab_abs_zeropoint):
distance_factor = (10.0 / (lum_dist * 1.0e6))**2
orig_to_nmaggies = distance_factor * 10.0**(0.4*(22.5 - ab_abs_zeropoint) )
return image_nm / orig_to_nmaggies
def congrid(a, newdims, centre=False, minusone=False):
''' Slimmed down version of congrid as originally obtained from:
http://wiki.scipy.org/Cookbook/Rebinning
'''
if not a.dtype in [np.float64, np.float32]:
a = np.cast[float](a)
m1 = np.cast[int](minusone)
ofs = np.cast[int](centre) * 0.5
old = np.array( a.shape )
ndims = len( a.shape )
if len( newdims ) != ndims:
print "[congrid] dimensions error. " \
"This routine currently only support " \
"rebinning to the same number of dimensions."
return None
newdims = np.asarray( newdims, dtype=float )
dimlist = []
for i in range( ndims ):
base = np.arange( newdims[i] )
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
# specify old dims
olddims = [np.arange(i, dtype = np.float) for i in list( a.shape )]
# first interpolation - for ndims = any
mint = scipy.interpolate.interp1d( olddims[-1], a, kind='linear', bounds_error=False, fill_value=0.0 )
newa = mint( dimlist[-1] )
trorder = [ndims - 1] + range( ndims - 1 )
for i in range( ndims - 2, -1, -1 ):
newa = newa.transpose( trorder )
mint = scipy.interpolate.interp1d( olddims[i], newa, kind='linear', bounds_error=False, fill_value=0.0 )
newa = mint( dimlist[i] )
if ndims > 1:
# need one more transpose to return to original dimensions
newa = newa.transpose( trorder )
return newa
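# Usage sketch (illustration, not part of the original cookbook routine):
# congrid linearly interpolates an array onto a new pixel grid, which is how
# every rebinning step above changes pixel scales:
#   img = np.arange(16.0).reshape(4, 4)
#   congrid(img, (8, 8)).shape   # -> (8, 8)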
def download_backgrounds():
if not os.path.exists('./data'):
os.makedirs('./data')
if not os.path.exists('./data/SDSS_backgrounds'):
os.makedirs('./data/SDSS_backgrounds')
if not os.path.exists('./data/HST_backgrounds'):
os.makedirs('./data/HST_backgrounds')
for this_background in backgrounds:
if len(this_background) > 0:
if not (os.path.isfile(this_background[0])):
url=dl_base+this_background[0][len(bg_base):]
this_file = wget.download(url)
os.rename(this_file, this_background[0])
print url
|
ptorrey/sunpy
|
sunpy__synthetic_image.py
|
Python
|
mit
| 46,931
|
[
"Galaxy",
"Gaussian"
] |
3cbafea6b2437304f167ad81d1f7bf5de66b203e2f47c408abf173fded4740d3
|
from setuptools import setup
import versioneer
commands = versioneer.get_cmdclass()
setup(name="magic-wormhole",
version=versioneer.get_version(),
description="Securely transfer data between computers",
author="Brian Warner",
author_email="warner-magic-wormhole@lothar.com",
license="MIT",
url="https://github.com/warner/magic-wormhole",
package_dir={"": "src"},
packages=["wormhole",
"wormhole.blocking", "wormhole.twisted",
"wormhole.scripts", "wormhole.test", "wormhole.util",
"wormhole.servers"],
package_data={"wormhole": ["db-schemas/*.sql"]},
entry_points={"console_scripts":
["wormhole = wormhole.scripts.runner:entry"]},
install_requires=["spake2==0.2", "pynacl", "requests", "argparse"],
test_suite="wormhole.test",
cmdclass=commands,
)
|
shaunstanislaus/magic-wormhole
|
setup.py
|
Python
|
mit
| 903
|
[
"Brian"
] |
04ab5b13cc972092cec25a7c2d01366dbac935a34e8acfaf71e7fdba7ce773ca
|
from __future__ import unicode_literals
from __future__ import absolute_import
from functools import reduce
import logging
from docker.errors import APIError
from .config import get_service_name_from_net, ConfigurationError
from .const import DEFAULT_TIMEOUT, LABEL_PROJECT, LABEL_SERVICE, LABEL_ONE_OFF
from .container import Container
from .legacy import check_for_legacy_containers
from .service import Service
from .utils import parallel_execute
log = logging.getLogger(__name__)
def sort_service_dicts(services):
# Topological sort (Cormen/Tarjan algorithm).
unmarked = services[:]
temporary_marked = set()
sorted_services = []
def get_service_names(links):
return [link.split(':')[0] for link in links]
def get_service_dependents(service_dict, services):
name = service_dict['name']
return [
service for service in services
if (name in get_service_names(service.get('links', [])) or
name in service.get('volumes_from', []) or
name == get_service_name_from_net(service.get('net')))
]
def visit(n):
if n['name'] in temporary_marked:
if n['name'] in get_service_names(n.get('links', [])):
raise DependencyError('A service can not link to itself: %s' % n['name'])
if n['name'] in n.get('volumes_from', []):
raise DependencyError('A service can not mount itself as volume: %s' % n['name'])
else:
raise DependencyError('Circular import between %s' % ' and '.join(temporary_marked))
if n in unmarked:
temporary_marked.add(n['name'])
for m in get_service_dependents(n, services):
visit(m)
temporary_marked.remove(n['name'])
unmarked.remove(n)
sorted_services.insert(0, n)
while unmarked:
visit(unmarked[-1])
return sorted_services
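# Illustrative example (not part of the original source): with
#   web = {'name': 'web', 'links': ['db:database']}
#   db = {'name': 'db'}
# sort_service_dicts([web, db]) returns [db, web]: 'web' links to 'db', so
# 'db' must come first in dependency order.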
class Project(object):
"""
A collection of services.
"""
def __init__(self, name, services, client):
self.name = name
self.services = services
self.client = client
def labels(self, one_off=False):
return [
'{0}={1}'.format(LABEL_PROJECT, self.name),
'{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False"),
]
@classmethod
def from_dicts(cls, name, service_dicts, client):
"""
Construct a ServiceCollection from a list of dicts representing services.
"""
project = cls(name, [], client)
for service_dict in sort_service_dicts(service_dicts):
links = project.get_links(service_dict)
volumes_from = project.get_volumes_from(service_dict)
net = project.get_net(service_dict)
project.services.append(Service(client=client, project=name, links=links, net=net,
volumes_from=volumes_from, **service_dict))
return project
@property
def service_names(self):
return [service.name for service in self.services]
def get_service(self, name):
"""
Retrieve a service by name. Raises NoSuchService
if the named service does not exist.
"""
for service in self.services:
if service.name == name:
return service
raise NoSuchService(name)
def validate_service_names(self, service_names):
"""
Validate that the given list of service names only contains valid
services. Raises NoSuchService if one of the names is invalid.
"""
valid_names = self.service_names
for name in service_names:
if name not in valid_names:
raise NoSuchService(name)
def get_services(self, service_names=None, include_deps=False):
"""
Returns a list of this project's services filtered
by the provided list of names, or all services if service_names is None
or [].
If include_deps is specified, returns a list including the dependencies for
service_names, in order of dependency.
Preserves the original order of self.services where possible,
reordering as needed to resolve dependencies.
Raises NoSuchService if any of the named services do not exist.
"""
if service_names is None or len(service_names) == 0:
return self.get_services(
service_names=self.service_names,
include_deps=include_deps
)
else:
unsorted = [self.get_service(name) for name in service_names]
services = [s for s in self.services if s in unsorted]
if include_deps:
services = reduce(self._inject_deps, services, [])
uniques = []
[uniques.append(s) for s in services if s not in uniques]
return uniques
def get_links(self, service_dict):
links = []
if 'links' in service_dict:
for link in service_dict.get('links', []):
if ':' in link:
service_name, link_name = link.split(':', 1)
else:
service_name, link_name = link, None
try:
links.append((self.get_service(service_name), link_name))
except NoSuchService:
raise ConfigurationError('Service "%s" has a link to service "%s" which does not exist.' % (service_dict['name'], service_name))
del service_dict['links']
return links
def get_volumes_from(self, service_dict):
volumes_from = []
if 'volumes_from' in service_dict:
for volume_name in service_dict.get('volumes_from', []):
try:
service = self.get_service(volume_name)
volumes_from.append(service)
except NoSuchService:
try:
container = Container.from_id(self.client, volume_name)
volumes_from.append(container)
except APIError:
raise ConfigurationError('Service "%s" mounts volumes from "%s", which is not the name of a service or container.' % (service_dict['name'], volume_name))
del service_dict['volumes_from']
return volumes_from
def get_net(self, service_dict):
if 'net' in service_dict:
net_name = get_service_name_from_net(service_dict.get('net'))
if net_name:
try:
net = self.get_service(net_name)
except NoSuchService:
try:
net = Container.from_id(self.client, net_name)
except APIError:
raise ConfigurationError('Service "%s" is trying to use the network of "%s", which is not the name of a service or container.' % (service_dict['name'], net_name))
else:
net = service_dict['net']
del service_dict['net']
else:
net = None
return net
def start(self, service_names=None, **options):
for service in self.get_services(service_names):
service.start(**options)
def stop(self, service_names=None, **options):
parallel_execute(
objects=self.containers(service_names),
obj_callable=lambda c: c.stop(**options),
msg_index=lambda c: c.name,
msg="Stopping"
)
def kill(self, service_names=None, **options):
parallel_execute(
objects=self.containers(service_names),
obj_callable=lambda c: c.kill(**options),
msg_index=lambda c: c.name,
msg="Killing"
)
def remove_stopped(self, service_names=None, **options):
all_containers = self.containers(service_names, stopped=True)
stopped_containers = [c for c in all_containers if not c.is_running]
parallel_execute(
objects=stopped_containers,
obj_callable=lambda c: c.remove(**options),
msg_index=lambda c: c.name,
msg="Removing"
)
def restart(self, service_names=None, **options):
for service in self.get_services(service_names):
service.restart(**options)
def build(self, service_names=None, no_cache=False):
for service in self.get_services(service_names):
if service.can_be_built():
service.build(no_cache)
else:
log.info('%s uses an image, skipping' % service.name)
def up(self,
service_names=None,
start_deps=True,
allow_recreate=True,
force_recreate=False,
insecure_registry=False,
do_build=True,
timeout=DEFAULT_TIMEOUT):
if force_recreate and not allow_recreate:
raise ValueError("force_recreate and allow_recreate are in conflict")
services = self.get_services(service_names, include_deps=start_deps)
for service in services:
service.remove_duplicate_containers()
plans = self._get_convergence_plans(
services,
allow_recreate=allow_recreate,
force_recreate=force_recreate,
)
return [
container
for service in services
for container in service.execute_convergence_plan(
plans[service.name],
insecure_registry=insecure_registry,
do_build=do_build,
timeout=timeout
)
]
def _get_convergence_plans(self,
services,
allow_recreate=True,
force_recreate=False):
plans = {}
for service in services:
updated_dependencies = [
name
for name in service.get_dependency_names()
if name in plans
and plans[name].action == 'recreate'
]
if updated_dependencies and allow_recreate:
log.debug(
'%s has upstream changes (%s)',
service.name, ", ".join(updated_dependencies),
)
plan = service.convergence_plan(
allow_recreate=allow_recreate,
force_recreate=True,
)
else:
plan = service.convergence_plan(
allow_recreate=allow_recreate,
force_recreate=force_recreate,
)
plans[service.name] = plan
return plans
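# Illustrative note (added commentary, not in the original source): because the
# services list arrives in dependency order, a dependency whose plan is
# 'recreate' forces force_recreate=True on the services that depend on it,
# which is how container recreation propagates downstream in `up()`.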
def pull(self, service_names=None, insecure_registry=False):
for service in self.get_services(service_names, include_deps=True):
service.pull(insecure_registry=insecure_registry)
def containers(self, service_names=None, stopped=False, one_off=False):
if service_names:
self.validate_service_names(service_names)
else:
service_names = self.service_names
containers = [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters={'label': self.labels(one_off=one_off)})]
def matches_service_names(container):
return container.labels.get(LABEL_SERVICE) in service_names
if not containers:
check_for_legacy_containers(
self.client,
self.name,
self.service_names,
)
return filter(matches_service_names, containers)
def _inject_deps(self, acc, service):
dep_names = service.get_dependency_names()
if len(dep_names) > 0:
dep_services = self.get_services(
service_names=list(set(dep_names)),
include_deps=True
)
else:
dep_services = []
dep_services.append(service)
return acc + dep_services
class NoSuchService(Exception):
def __init__(self, name):
self.name = name
self.msg = "No such service: %s" % self.name
def __str__(self):
return self.msg
class DependencyError(ConfigurationError):
pass
|
feelobot/compose
|
compose/project.py
|
Python
|
apache-2.0
| 12,425
|
[
"VisIt"
] |
83392efc9ae10c7f0d11e61b593fb3a9151e7e5f94cd54dc64d3d2ee70ce263d
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2007 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Ali Afshar <aafshar@gmail.com>
## Johan Dahlin <jdahlin@async.com.br>
##
"""
Storm integration for Kiwi
"""
from storm.expr import And, Or, Like
from kiwi.db.query import NumberQueryState, StringQueryState, \
DateQueryState, DateIntervalQueryState, QueryExecuter, \
NumberIntervalQueryState
class StormQueryExecuter(QueryExecuter):
"""Execute queries from a storm database"""
def __init__(self, store=None):
QueryExecuter.__init__(self)
self.store = store
self.table = None
def search(self, states):
"""
Build and execute a query for the search states
"""
queries = []
for state in states:
search_filter = state.filter
assert state.filter
if search_filter in self._columns:
query = self._construct_state_query(
self.table, state, self._columns[search_filter])
if query:
queries.append(query)
return self.store.find(self.table, queries)
def set_table(self, table):
"""
Sets the Storm table/object for this executer
@param table: a Storm table class
"""
self.table = table
# Basically stolen from sqlobject integration
def _construct_state_query(self, table, state, columns):
queries = []
for column in columns:
query = None
table_field = getattr(table, column)
if isinstance(state, NumberQueryState):
query = self._parse_number_state(state, table_field)
elif isinstance(state, NumberIntervalQueryState):
query = self._parse_number_interval_state(state, table_field)
elif isinstance(state, StringQueryState):
query = self._parse_string_state(state, table_field)
elif isinstance(state, DateQueryState):
query = self._parse_date_state(state, table_field)
elif isinstance(state, DateIntervalQueryState):
query = self._parse_date_interval_state(state, table_field)
else:
raise NotImplementedError(state.__class__.__name__)
if query:
queries.append(query)
if queries:
return Or(*queries)
def _parse_number_state(self, state, table_field):
if state.value is not None:
return table_field == state.value
def _parse_number_interval_state(self, state, table_field):
queries = []
if state.start:
queries.append(table_field >= state.start)
if state.end:
queries.append(table_field <= state.end)
if queries:
return And(*queries)
def _parse_string_state(self, state, table_field):
if not state.text:
return
text = '%%%s%%' % state.text.lower()
return Like(table_field, text)
def _parse_date_state(self, state, table_field):
if state.date:
return table_field == state.date
def _parse_date_interval_state(self, state, table_field):
queries = []
if state.start:
queries.append(table_field >= state.start)
if state.end:
queries.append(table_field <= state.end)
if queries:
return And(*queries)
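# Hedged usage sketch (assumed call pattern, not from the original source):
#   executer = StormQueryExecuter(store=my_store)  # my_store: hypothetical Storm store
#   executer.set_table(MyTable)                    # MyTable: hypothetical model class
#   results = executer.search(states)
# Each query state above maps to a Storm expression (==, Like, or And() of
# range bounds), and the per-column expressions are combined with Or().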
|
fboender/miniorganizer
|
src/lib/kiwi/db/stormintegration.py
|
Python
|
gpl-3.0
| 4,195
|
[
"VisIt"
] |
66e994418ffc47a2303f48a4f0bce2f43c7c3f1e0ea190344cb000d1f703dc4b
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
""" Application specific tags. """
import django_tables2 as tables
from django import template
from karaage.people.tables import PersonTable
from ..views.base import get_state_machine
register = template.Library()
@register.simple_tag(takes_context=True)
def application_state(context, application):
""" Render current state of application, verbose. """
new_context = {
'roles': context['roles'],
'org_name': context['org_name'],
'application': application,
}
nodelist = template.loader.get_template(
'kgapplications/%s_common_state.html' % application.type)
output = nodelist.render(new_context)
return output
@register.simple_tag(takes_context=True)
def application_request(context, application):
""" Render current detail of application, verbose. """
new_context = {
'roles': context['roles'],
'org_name': context['org_name'],
'application': application,
}
nodelist = template.loader.get_template(
'kgapplications/%s_common_request.html' % application.type)
output = nodelist.render(new_context)
return output
@register.simple_tag(takes_context=True)
def application_simple_state(context, application):
""" Render current state of application, verbose. """
state_machine = get_state_machine(application)
state = state_machine.get_state(application)
return state.name
@register.inclusion_tag(
'kgapplications/common_actions.html', takes_context=True)
def application_actions(context):
""" Render actions available. """
return {
'roles': context['roles'],
'actions': context['actions'],
'extra': "",
}
@register.tag(name="application_actions_plus")
def do_application_actions_plus(parser, token):
""" Render actions available with extra text. """
nodelist = parser.parse(('end_application_actions',))
parser.delete_first_token()
return ApplicationActionsPlus(nodelist)
class ApplicationActionsPlus(template.Node):
""" Node for rendering actions available with extra text. """
def __init__(self, nodelist):
super(ApplicationActionsPlus, self).__init__()
self.nodelist = nodelist
def render(self, context):
extra = self.nodelist.render(context)
nodelist = template.loader.get_template(
'kgapplications/common_actions.html')
new_context = {
'roles': context['roles'],
'extra': extra,
'actions': context['actions'],
}
output = nodelist.render(new_context)
return output
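# Usage sketch (illustration, not from the original source): in a template,
#   {% application_actions_plus %} ...extra markup... {% end_application_actions %}
# renders common_actions.html with the enclosed markup supplied as `extra`.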
@register.simple_tag(takes_context=True)
def get_similar_people_table(context, applicant):
queryset = applicant.similar_people()
table = PersonTable(
queryset,
empty_text="(No potential duplicates found, please check manually)")
config = tables.RequestConfig(context['request'], paginate={"per_page": 5})
config.configure(table)
return table
|
brianmay/karaage
|
karaage/plugins/kgapplications/templatetags/applications.py
|
Python
|
gpl-3.0
| 3,713
|
[
"Brian"
] |
1ea8aeff59b38ed3c2dd4599376a456d86980aac041ff7b3e04a436b11ea81f4
|
# Parsing congress.gov/members/
import requests
from lxml import html
from lxml.html import fromstring
import sqlite3
import os
def find(name, path):
    # return True if a file called `name` exists in the current directory
    # (the `path` argument is kept for call-site compatibility but unused)
    files = [f for f in os.listdir('.') if os.path.isfile(f)]
    return name in files
# FIRST: Create a database, or connect if it already exists.
if find('congresspeople.db', os.path.relpath('parsing.py')):
conn = sqlite3.connect('congresspeople.db')
c = conn.cursor()
    c.execute('DELETE FROM congress')
else:
conn = sqlite3.connect('congresspeople.db')
c = conn.cursor()
# Create table
c.execute('''CREATE TABLE congress
(name text, state text, district text, party text, time text, phone text, website text, address text)''')
# SECOND: Get Senators and Reps.
# placeholder list (unused below); despite the name, 'takeadict' is a list
takeadict = []
# get info from website
page = requests.get('https://www.congress.gov/members?pageSize=250&q={"congress":"115"}')
tree = html.fromstring(page.content)
# get senator urls
hrefs = tree.xpath('//select[@id="members-senators"]/option/@value')
del hrefs[0]
# get representative urls
representativehrefs = tree.xpath('//select[@id="members-representatives"]/option/@value')
del representativehrefs[0]
# append the representative urls after the senator urls
for rep_href in representativehrefs:
    hrefs.append(rep_href)
# THIRD: visit each member page and extract the data.
for href in hrefs:
#for i in range(1):
hpage = requests.get( href )
htree = html.fromstring( hpage.content )
# get names
names = htree.xpath('//h1[@class="legDetail"]/text()')
# get state, district and time in congress
statedistrictcon = htree.xpath('//table[@class="standard01 lateral01"]/tbody/tr/td/text()')
# put those in separate lists
states = []
districts = []
times =[]
for cnt in range( 3 ):
if cnt == 0:
states.append(statedistrictcon[cnt])
elif cnt == 1:
districts.append(statedistrictcon[cnt])
elif cnt == 2:
times.append(statedistrictcon[cnt])
# get website
websites = htree.xpath('//table[@class="standard01 nomargin"]/tr/td/a/@href')
if len(websites) == 0:
websites.append('--')
# get address, phone number and party
contacts = htree.xpath('//table[@class="standard01 nomargin"]/tr/td/text()')
# put those in separate lists
addresses = []
phones = []
parties = []
inc = 0
if len(contacts) == 5 :
for cnt in range(5):
if cnt == 2:
addresses.append(contacts[cnt])
elif cnt == 3:
phones.append(contacts[cnt])
elif cnt == 4:
parties.append(contacts[cnt])
elif len(contacts) == 3 :
for cnt in range(3):
if cnt == 0:
addresses.append(contacts[cnt])
elif cnt == 1:
phones.append(contacts[cnt])
elif cnt == 2:
parties.append(contacts[cnt])
else:
addresses.append('--')
phones.append('--')
parties.append(contacts[0])
    # store the extracted fields in the database before the loop moves on
for cnt in range(1):
temp = [names[cnt], states[cnt], districts[cnt], parties[cnt], times[cnt], phones[cnt], websites[cnt], addresses[cnt]]
c.execute('INSERT INTO congress VALUES (?,?,?,?,?,?,?,?)', temp)
conn.commit()
conn.close()
|
walke469/spartahack-17
|
parsingfinal.py
|
Python
|
bsd-2-clause
| 3,536
|
[
"VisIt"
] |
5c1fadd3d62b44347dd598f96d01e4f766476b99a951a378a5c2445e72af16f6
|
"""
Page objects for interacting with the test site.
"""
import os
import time
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from bok_choy.javascript import js_defined, requirejs, wait_for_js
class SitePage(PageObject):
"""
Base class for all pages in the test site.
"""
# Get the server port from the environment
# (set by the test runner script)
SERVER_PORT = os.environ.get("SERVER_PORT", 8003)
def is_browser_on_page(self):
title = self.name.lower().replace('_', ' ')
return title in self.browser.title.lower()
@property
def url(self):
return "http://localhost:{0}/{1}".format(self.SERVER_PORT, self.name + ".html")
@property
def output(self):
"""
Return the contents of the "#output" div on the page.
The fixtures are configured to update this div when the user
interacts with the page.
"""
text_list = self.q(css='#output').text
if len(text_list) < 1:
return None
else:
return text_list[0]
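# Usage sketch (illustration, not in the original suite): page objects are
# driven with a selenium browser instance, e.g.
#   page = ButtonPage(browser)   # `browser` is a hypothetical WebDriver
#   page.visit()
#   page.click_button()
#   page.output                  # text of the #output div after the click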
class ButtonPage(SitePage):
"""
Page for testing button interactions.
"""
name = "button"
def click_button(self):
"""
Click the button on the page, which should cause the JavaScript
to update the #output div.
"""
self.q(css='div#fixture input').first.click()
class TextFieldPage(SitePage):
"""
Page for testing text field interactions.
"""
name = "text_field"
def enter_text(self, text):
"""
Input `text` into the text field on the page.
"""
self.q(css='#fixture input').fill(text)
class SelectPage(SitePage):
"""
Page for testing select input interactions.
"""
name = "select"
def select_car(self, car_value):
"""
Select the car with `value` in the drop-down list.
"""
self.q(css='select[name="cars"] option[value="{}"]'.format(car_value)).first.click()
def is_car_selected(self, car):
return self.q(css='select[name="cars"] option[value="{}"]'.format(car)).selected
class CheckboxPage(SitePage):
"""
Page for testing checkbox interactions.
"""
name = "checkbox"
def toggle_pill(self, pill_name):
"""
Toggle the box for the pill with `pill_name` (red or blue).
"""
self.q(css="#fixture input#{}".format(pill_name)).first.click()
class AlertPage(SitePage):
"""
Page for testing alert handling.
"""
name = "alert"
def confirm(self):
with self.handle_alert(confirm=True):
self.q(css='button#confirm').first.click()
def cancel(self):
with self.handle_alert(confirm=False):
self.q(css='button#confirm').first.click()
def dismiss(self):
with self.handle_alert():
self.q(css='button#alert').first.click()
class SelectorPage(SitePage):
"""
Page for testing retrieval of information by CSS selectors.
"""
name = "selector"
@property
def num_divs(self):
"""
Count the number of div.test elements.
"""
return len(self.q(css='div.test').results)
@property
def div_text_list(self):
"""
Return list of text for each div.test element.
"""
return self.q(css='div.test').text
@property
def div_value_list(self):
"""
Return list of values for each div.test element.
"""
return self.q(css='div.test').attrs('value')
@property
def div_html_list(self):
"""
Return list of html for each div.test element.
"""
return self.q(css='div.test').html
def ids_of_outer_divs_with_inner_text(self, child_text):
"""
Return a list of the ids of outer divs with
the specified text in a child element.
"""
return self.q(css='div.outer').filter(
lambda el:
child_text in [inner.text for inner in el.find_elements_by_css_selector('div.inner')]
).attrs('id')
class DelayPage(SitePage):
"""
Page for testing elements that appear after a delay.
"""
name = "delay"
def trigger_output(self):
"""
Wait for click handlers to be installed,
then click a button and retrieve the output that appears
after a delay.
"""
EmptyPromise(self.q(css='div#ready').is_present, "Click ready").fulfill()
self.q(css='div#fixture button').first.click()
EmptyPromise(self.q(css='div#output').is_present, "Output available").fulfill()
def make_broken_promise(self):
"""
Make a promise that will not be fulfilled.
Should raise a `BrokenPromise` exception.
"""
return EmptyPromise(
self.q(css='div#not_present').is_present, "Invalid div appeared",
try_limit=3, try_interval=0.01
).fulfill()
class SlowPage(SitePage):
"""
Page that loads its elements slowly.
"""
name = "slow"
def is_browser_on_page(self):
return self.q(css='div#ready').is_present()
class NextPage(SitePage):
"""
Page that loads another page after a delay.
"""
name = "next_page"
def is_browser_on_page(self):
return self.q(css='#next').is_present()
def load_next(self, page, delay_sec):
"""
Load the page named `page_name` after waiting for `delay_sec`.
"""
time.sleep(delay_sec)
page.visit()
class VisiblePage(SitePage):
"""
Page that has some elements visible and others invisible.
"""
name = "visible"
def is_visible(self, name):
"""
Return a boolean indicating whether the given item is visible.
"""
return self.q(css="div.{}".format(name)).first.visible
def is_invisible(self, name):
"""
Return a boolean indicating whether the given element is present, but not visible.
"""
return self.q(css="div.{}".format(name)).first.invisible
@js_defined('test_var1', 'test_var2')
class JavaScriptPage(SitePage):
"""
Page for testing asynchronous JavaScript.
"""
name = "javascript"
@wait_for_js
def trigger_output(self):
"""
Click a button which will only work once RequireJS finishes loading.
"""
self.q(css='div#fixture button').first.click()
@wait_for_js
def reload_and_trigger_output(self):
"""
Reload the page, wait for JS, then trigger the output.
"""
self.browser.refresh()
self.wait_for_js()
self.q(css='div#fixture button').first.click()
@js_defined('something.SomethingThatDoesntExist')
class JavaScriptUndefinedPage(SitePage):
"""
Page for testing asynchronous JavaScript, where the
javascript that we wait for is never defined.
"""
name = "javascript"
@wait_for_js
def trigger_output(self):
"""
Click a button which will only work once RequireJS finishes loading.
"""
self.q(css='div#fixture button').first.click()
@requirejs('main')
class RequireJSPage(SitePage):
"""
Page for testing asynchronous JavaScript loaded with RequireJS.
"""
name = "requirejs"
@property
@wait_for_js
def output(self):
return super(RequireJSPage, self).output
class AjaxNoJQueryPage(SitePage):
"""
Page for testing an ajax call.
"""
name = "ajax_no_jquery"
class AjaxPage(SitePage):
"""
Page for testing an ajax call.
"""
name = "ajax"
def click_button(self):
"""
Click the button on the page, which triggers an ajax
call that updates the #output div.
"""
self.q(css='div#fixture button').first.click()
class WaitsPage(SitePage):
"""
Page for testing wait helpers.
"""
name = "wait"
def is_button_output_present(self):
"""
        Click the button and wait until the output element appears in the DOM.
"""
self.wait_for_element_presence('div#ready', 'Page is Ready')
self.q(css='div#fixture button').first.click()
self.wait_for_element_presence('div#output', 'Button Output is Available')
def is_class_absent(self):
"""
        Click the button and wait until the 'playing' class disappears from the DOM.
"""
self.q(css='#spinner').first.click()
self.wait_for_element_absence('.playing', 'Animation Stopped')
def is_button_output_visible(self):
"""
Click button and wait until output is displayed.
"""
self.wait_for_element_presence('div#ready', 'Page is Ready')
self.q(css='div#fixture button').first.click()
self.wait_for_element_visibility('div#output', 'Button Output is Visible')
def is_spinner_invisible(self):
"""
        Click the button and wait until the spinner disappears.
"""
self.q(css='#spinner').first.click()
self.wait_for_element_invisibility('#anim', 'Button Output is Visible')
class AccessibilityPage(SitePage):
"""
Page for testing accessibility auditing.
"""
name = "accessibility"
class ImagePage(SitePage):
"""
Page for testing image capture and comparison.
"""
name = "image"
|
drptbl/bok-choy
|
tests/pages.py
|
Python
|
apache-2.0
| 9,347
|
[
"VisIt"
] |
500881dbb7bec945706a8285a1408db89b65fd6eb8457b09dce96a2363fb56d4
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 28 18:16:14 2017
@author: kasperipalkama
"""
from random import uniform, seed
from math import exp
def initialize(n_input, n_hidden_neuron, n_hidden_layer, n_output):
seed(1)
network = list()
left_bound = -0.5 / n_input # common way to initialize weights
    right_bound = 0.5 / n_input
for i in range(n_hidden_layer):
if i == 0:
            hidden_layer = [{'weights': [uniform(left_bound, right_bound)
for _ in range(n_input + 1)]}
for _ in range(n_hidden_neuron)]
else:
left_bound = -0.5 / n_hidden_neuron
            right_bound = 0.5 / n_hidden_neuron
            hidden_layer = [{'weights': [uniform(left_bound, right_bound)
for _ in range(n_hidden_neuron+1)]}
for _ in range(n_hidden_neuron)]
network.append(hidden_layer)
    output_layer = [{'weights': [uniform(left_bound, right_bound)
for _ in range(n_hidden_neuron + 1)]}
for _ in range(n_output)]
network.append(output_layer)
return network
def activation(x, function_type):
    # Return the activation value of x for the chosen function type
logistic = 1.0 / (1.0 + exp(-x))
tanh = (exp(2 * x) - 1) / (exp(2 * x) + 1)
rluf = max(0, x)
functions = {'logistic': logistic, 'tanh': tanh, 'rluf': rluf}
try:
return functions[function_type]
except KeyError:
print('Current activation function is not supported, logistic is used')
return functions['logistic']
def summing_junction(neuron, inputs):
bias = neuron['weights'][-1]
neuron_count = len(neuron['weights'])-1
return sum([inputs[i] * neuron['weights'][i] for i in range(neuron_count)]) + bias
def neuron_output(layer, inputs, activation_func, regression, output_layer):
# Compute the output from each neuron. If regression, output layer is linear
# Parameters:
# layer: layer of the network as list.
# inputs: list of inputs for the layer.
# activation_func: activation function as string
# Returns:
# output of the layer as list.
outputs = list()
for i, neuron in enumerate(layer):
linear_response = summing_junction(neuron, inputs)
if regression and output_layer:
output = linear_response
else:
output = activation(linear_response, activation_func)
neuron['output'] = output
outputs.append(output)
return outputs
def feed_forward(network, inputs, activation_func, regression=False):
# Feed forwarding
# Parameters:
    #     network: network weights as list of dictionaries.
# inputs: list of inputs for the layer.
# activation_func: activation function as string
# Returns:
# output of the whole network.
new_inputs = inputs
is_output_layer = False
for i, layer in enumerate(network):
if i + 1 == len(network):
is_output_layer = True
new_inputs = neuron_output(layer, new_inputs, activation_func, regression, is_output_layer)
if regression:
return new_inputs[0]
else:
return new_inputs
def derivative_activation(x, function_type):
    # Return the derivative of the activation function, expressed in terms of
    # the neuron's *output* x (which is how backpropagate() calls it)
    logistic_derivative = x * (1.0 - x)
    tanh_derivative = 1.0 - x ** 2
functions = {'logistic': logistic_derivative, 'tanh': tanh_derivative}
return functions[function_type]
def backpropagate(network, desired_outputs, activation_func, regression):
# Backpropagates through network and stores backpropagation error for each neuron.
# Parameters:
# network: forward propagated network.
    #     desired_outputs: learning target as list.
# activation_func: activation function as string
n_layers = len(network)
for i in reversed(range(n_layers)):
layer = network[i]
costs = list()
output_layer_ix = n_layers - 1
if i != output_layer_ix:
for j in range(len(layer)):
outer_layer = network[i + 1]
cost = sum([neuron['weights'][j] * neuron['delta'] for neuron in outer_layer])
costs.append(cost)
else:
for desired_output, neuron in zip(desired_outputs, layer):
cost = desired_output - neuron['output']
costs.append(cost)
for neuron, cost in zip(layer, costs):
if regression and i == output_layer_ix:
neuron['delta'] = cost
else:
neuron['delta'] = cost*derivative_activation(neuron['output'], activation_func)
def update_weights(network, rate, input_list):
    # Update weights according to: weight = weight + learning_rate * delta * input
# Parameters:
# network: back-propagated network.
# rate: learning rate.
# input_list: training data instance as list
for i, layer in enumerate(network):
if i == 0:
inputs = input_list[:-1]
else:
inputs = [neuron['output'] for neuron in network[i - 1]]
for neuron in layer:
# inputs = [inputs[0] for i in range(len(neuron['weights'])-1)]
for j in range(len(inputs)):
neuron['weights'][j] += rate * neuron['delta'] * inputs[j]
neuron['weights'][-1] += rate * neuron['delta']
def error_calc(desired_outputs, outputs, regression=False):
# Compute training error for single training sample
if regression:
outputs = [outputs]
return sum([(desired_outputs[i] - outputs[i]) ** 2 for i in range(len(desired_outputs))])
def train_predictor(network, data, learning_rate_init, n_epochs, n_output, activation_func='logistic',
learning_rate='constant', print_learning=False):
# Training of the network
# Parameters:
# network: initialized network.
# data: learning data where targets are in the last column.
# learning_rate_init: initial learning rate.
# n_epochs: the number of epochs.
    #     n_output: the number of outputs.
    #     activation_func: chosen activation function.
# learning_rate: learning rate adjusting method
# print_learning: is learning printed
regression = False
if n_output == 1:
regression = True
iter_count = 1
for epoch in range(n_epochs):
error_sum = 0
for row in data:
iter_count += 1
if regression:
desired_response = [row[-1]]
else:
desired_response = [0 for _ in range(n_output)]
desired_response[int(row[-1])] = 1
outputs = feed_forward(network, row, activation_func, regression=regression)
error_sum += error_calc(desired_response, outputs, regression)
backpropagate(network, desired_response, activation_func, regression)
if learning_rate == 'decreasive':
rate = learning_rate_init / (1 + iter_count / 100)
else:
rate = learning_rate_init
update_weights(network, rate, row)
if print_learning:
print('epoch=%d, error=%.3f' % (epoch, error_sum))
return network, True
def predict_values(trained, network, data, n_output, activation_func, pred_prob=False):
# Predict targets
# Parameters:
# trained: boolean value
# network: trained network.
# data: features.
    #     n_output: the number of outputs.
    #     activation_func: activation function used.
    #     pred_prob: whether to predict crisp classes or class probabilities.
# Returns:
# list of predictions.
if trained:
predictions = list()
predictions_prob = list()
for row in data:
output = feed_forward(network, row, activation_func)
if n_output > 1:
                if activation_func == 'logistic':
predictions_prob.append([output[i] / sum(output) for i in range(n_output)])
                if activation_func == 'tanh':
predictions_prob.append([abs(output[i]) / sum(map(abs, output)) for i in range(n_output)])
predictions.append(output.index(max(map(abs, output))))
if not pred_prob:
return predictions
else:
return predictions_prob
else:
raise Exception('Network is not trained!')
class MlpClassifier:
def __init__(self, n_input, n_hidden_neuron, n_hidden_layer=2, n_output=2, activation_func='logistic'):
# Initialize the network
# Parameters:
# n_input: the number of inputs.
        #     n_hidden_neuron: the number of neurons in each hidden layer.
        #     n_hidden_layer: the number of hidden layers.
        #     n_output: the number of outputs.
        #     activation_func: activation function on each neuron.
seed(1)
self.trained = False
self.predicted = None
self.predicted_prob = None
self.predictions = None
self.n_input = n_input
self.n_output = n_output
self.n_hidden_neuron = n_hidden_neuron
self.n_hidden_layer = n_hidden_layer
self.activation = activation_func
        self.network = initialize(n_input, n_hidden_neuron, n_hidden_layer, n_output)
def train(self, data, learning_rate_init, n_epochs, learning_rate='constant', print_learning=False):
# Training of the network
# Parameters:
# data: learning data where targets are in the last column.
# learning_rate_init: initial learning rate.
# n_epochs: the number of epochs.
# learning_rate: learning rate adjusting method
# print_learning: is learning printed
self.network, self.trained = train_predictor(self.network, data, learning_rate_init, n_epochs, self.n_output,
self.activation, learning_rate, print_learning)
def predict(self, data, pred_prob=False):
        # Predict targets
        # Parameters:
        #     data: features.
        #     pred_prob: whether to predict crisp classes or class probabilities
        # Return:
        #     list of predictions (class indices, or per-class probabilities when pred_prob is True).
        if pred_prob:
            self.predicted_prob = predict_values(self.trained, self.network, data, self.n_output, self.activation,
                                                 pred_prob)
            return self.predicted_prob
        else:
            self.predicted = predict_values(self.trained, self.network, data, self.n_output, self.activation,
                                            pred_prob)
            return self.predicted
def score(self, true_classes):
n_equal = 0
for true, pred in zip(true_classes, self.predicted):
if true == pred:
n_equal += 1
return n_equal / len(true_classes)
class MlpRegressor:
def __init__(self, n_input, n_hidden_neuron, n_hidden_layer, activation_func):
# Initialize the network
# Parameters:
# n_input: the number of inputs.
        #     n_hidden_neuron: the number of neurons in each hidden layer.
        #     n_hidden_layer: the number of hidden layers.
        #     activation_func: activation function on each neuron.
seed(1)
self.trained = False
self.predictions = None
self.n_input = n_input
self.n_hidden_neuron = n_hidden_neuron
self.n_hidden_layer = n_hidden_layer
self.n_output = 1
self.activation = activation_func
        self.network = initialize(n_input, n_hidden_neuron, n_hidden_layer, 1)
self.predictions = None
def train(self, data, learning_rate_init, n_epochs, learning_rate='constant', print_learning=False):
# Training of the network
# Parameters:
        #     data: learning data where targets are in the last column.
        #     learning_rate_init: initial learning rate.
# n_epochs: the number of epochs.
self.network, self.trained = train_predictor(self.network, data, learning_rate_init, n_epochs, self.n_output,
self.activation, learning_rate, print_learning)
def predict(self, data):
# Predict targets
# Parameters:
# network: trained network.
# data: features.
# Return:
# list of predictions.
self.predictions = predict_values(self.trained, self.network, data, self.n_output, self.activation,
pred_prob=False)
return self.predictions
def score_mse(self, true_values):
# Return prediction performance as mean squared error
n_samples = len(true_values)
total_squared_error = 0
for true, predicted in zip(true_values, self.predictions):
total_squared_error += (true - predicted) ** 2
return 1 / n_samples * total_squared_error
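# Illustrative usage sketch (an addition, not part of the original module):
# train the classifier on a tiny XOR data set.  Each row holds the input
# features followed by the integer class label in its last column, which is
# the layout train_predictor() expects.
if __name__ == '__main__':
    xor_data = [
        [0.0, 0.0, 0],
        [0.0, 1.0, 1],
        [1.0, 0.0, 1],
        [1.0, 1.0, 0],
    ]
    clf = MlpClassifier(n_input=2, n_hidden_neuron=4, n_hidden_layer=1,
                        n_output=2, activation_func='logistic')
    clf.train(xor_data, learning_rate_init=0.5, n_epochs=2000)
    print('predicted classes:', clf.predict(xor_data))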
|
kapalk/feedforward-multilayer-perceptron
|
neural_network/multilayer_perceptron.py
|
Python
|
mit
| 13,097
|
[
"NEURON"
] |
f38b4c639528e739c983b8476c57d1c4fd109bd270803771aebc68a18af54752
|
# -*- coding: utf-8 -*-
u"""elegant execution template.
:copyright: Copyright (c) 2015 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkio
from pykern import pkresource
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdlog, pkdp
from sirepo import simulation_db
from sirepo.template import code_variable
from sirepo.template import elegant_command_importer
from sirepo.template import elegant_common
from sirepo.template import elegant_lattice_importer
from sirepo.template import lattice
from sirepo.template import sdds_util
from sirepo.template import template_common
from sirepo.template.lattice import LatticeUtil
import ast
import copy
import glob
import math
import os
import os.path
import py.path
import re
import sdds
import sirepo.sim_data
import stat
_SIM_DATA, SIM_TYPE, _SCHEMA = sirepo.sim_data.template_globals()
ELEGANT_LOG_FILE = 'elegant.log'
WANT_BROWSER_FRAME_CACHE = True
_ELEGANT_SEMAPHORE_FILE = 'run_setup.semaphore'
_FIELD_LABEL = PKDict(
x='x [m]',
xp="x' [rad]",
y='y [m]',
yp="y' [rad]",
t='t [s]',
p='p (mₑc)',
s='s [m]',
LinearDensity='Linear Density (C/s)',
LinearDensityDeriv='LinearDensityDeriv (C/s²)',
GammaDeriv='GammaDeriv (1/m)',
)
_FILE_ID_SEP = '-'
_OUTPUT_INFO_FILE = 'outputInfo.json'
_OUTPUT_INFO_VERSION = '2'
_PLOT_TITLE = PKDict({
'x-xp': 'Horizontal',
'y-yp': 'Vertical',
'x-y': 'Cross-section',
't-p': 'Longitudinal',
})
_SDDS_INDEX = 0
_s = sdds.SDDS(_SDDS_INDEX)
_x = getattr(_s, 'SDDS_LONGDOUBLE', None)
_SDDS_DOUBLE_TYPES = [_s.SDDS_DOUBLE, _s.SDDS_FLOAT] + ([_x] if _x else [])
_SDDS_STRING_TYPE = _s.SDDS_STRING
_SIMPLE_UNITS = ['m', 's', 'C', 'rad', 'eV']
_X_FIELD = 's'
class CommandIterator(lattice.ElementIterator):
def start(self, model):
super(CommandIterator, self).start(model)
if model._type == 'run_setup':
self.fields.append(['semaphore_file', _ELEGANT_SEMAPHORE_FILE])
class OutputFileIterator(lattice.ModelIterator):
def __init__(self):
self.result = PKDict(
keys_in_order=[],
)
self.model_index = PKDict()
def field(self, model, field_schema, field):
self.field_index += 1
if field_schema[1] == 'OutputFile':
if LatticeUtil.is_command(model):
suffix = _command_file_extension(model)
filename = '{}{}.{}.{}'.format(
model._type,
self.model_index[self.model_name] if self.model_index[self.model_name] > 1 else '',
field,
suffix)
else:
filename = '{}.{}.sdds'.format(model.name, field)
k = _file_id(model._id, self.field_index)
self.result[k] = filename
self.result.keys_in_order.append(k)
def start(self, model):
self.field_index = 0
self.model_name = LatticeUtil.model_name_for_data(model)
if self.model_name in self.model_index:
self.model_index[self.model_name] += 1
else:
self.model_index[self.model_name] = 1
def background_percent_complete(report, run_dir, is_running):
#TODO(robnagler) remove duplication in run_dir.exists() (outer level?)
errors, last_element = parse_elegant_log(run_dir)
res = PKDict(
percentComplete=100,
frameCount=0,
errors=errors,
)
if is_running:
data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
res.percentComplete = _compute_percent_complete(data, last_element)
return res
if not run_dir.join(_ELEGANT_SEMAPHORE_FILE).exists():
return res
output_info = _output_info(run_dir)
return PKDict(
percentComplete=100,
frameCount=1,
outputInfo=output_info,
lastUpdateTime=output_info[0]['lastUpdateTime'],
errors=errors,
)
def copy_related_files(data, source_path, target_path):
# copy results and log for the long-running simulations
for m in ('animation',):
# copy any simulation output
s = pkio.py_path(source_path).join(m)
if not s.exists():
continue
t = pkio.py_path(target_path).join(m)
pkio.mkdir_parent(str(t))
        for f in pkio.sorted_glob(s.join('*')):
f.copy(t)
def generate_parameters_file(data, is_parallel=False):
_validate_data(data, _SCHEMA)
res, v = template_common.generate_parameters_file(data)
v.rpn_variables = _generate_variables(data)
if is_parallel:
return res + _generate_full_simulation(data, v)
if data.get('report', '') == 'twissReport':
return res + _generate_twiss_simulation(data, v)
return res + _generate_bunch_simulation(data, v)
def get_application_data(data, **kwargs):
if data.method == 'get_beam_input_type':
if data.input_file:
data.input_type = _sdds_beam_type_from_file(data.input_file)
return data
if data.method == 'rpn_value':
v, err = _code_var(data.variables).eval_var(data.value)
if err:
data.error = err
else:
data.result = v
return data
if data.method == 'recompute_rpn_cache_values':
_code_var(data.variables).recompute_cache(data.cache)
return data
if data.method == 'validate_rpn_delete':
model_data = simulation_db.read_json(
simulation_db.sim_data_file(SIM_TYPE, data.simulationId))
data.error = _code_var(data.variables).validate_var_delete(data.name, model_data, _SCHEMA)
return data
raise RuntimeError('unknown application data method: {}'.format(data.method))
def _code_var(variables):
return elegant_lattice_importer.elegant_code_var(variables)
def _file_id(model_id, field_index):
return '{}{}{}'.format(model_id, _FILE_ID_SEP, field_index)
def _file_name_from_id(file_id, model_data, run_dir):
return str(run_dir.join(
_get_filename_for_element_id(file_id.split(_FILE_ID_SEP), model_data)))
def get_data_file(run_dir, model, frame, options=None, **kwargs):
def _sdds(filename):
path = run_dir.join(filename)
assert path.check(file=True, exists=True), \
'{}: not found'.format(path)
if not options.suffix:
with open(str(path)) as f:
return path.basename, f.read(), 'application/octet-stream'
if options.suffix == 'csv':
out = elegant_common.subprocess_output(['sddsprintout', '-columns', '-spreadsheet=csv', str(path)])
assert out, \
'{}: invalid or empty output from sddsprintout'.format(path)
return path.purebasename + '.csv', out, 'text/csv'
raise AssertionError('{}: invalid suffix for download path={}'.format(options.suffix, path))
if frame >= 0:
data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
# ex. elementAnimation17-55
i = re.sub(r'elementAnimation', '', model).split(_FILE_ID_SEP)
return _sdds(_get_filename_for_element_id(i, data))
if model == _SIM_DATA.compute_model(None):
path = run_dir.join(ELEGANT_LOG_FILE)
if not path.exists():
return 'elegant-output.txt', '', 'text/plain'
with open(str(path)) as f:
return 'elegant-output.txt', f.read(), 'text/plain'
if model == 'beamlineReport':
data = simulation_db.read_json(str(run_dir.join('..', simulation_db.SIMULATION_DATA_FILE)))
source = generate_parameters_file(data, is_parallel=True)
return 'python-source.py', source, 'text/plain'
return _sdds(_report_output_filename('bunchReport'))
def import_file(req, test_data=None, **kwargs):
# input_data is passed by test cases only
input_data = test_data
if 'simulationId' in req.req_data:
input_data = simulation_db.read_simulation_json(elegant_common.SIM_TYPE, sid=req.req_data.simulationId)
if re.search(r'.ele$', req.filename, re.IGNORECASE):
data = elegant_command_importer.import_file(req.file_stream.read())
elif re.search(r'.lte$', req.filename, re.IGNORECASE):
data = elegant_lattice_importer.import_file(req.file_stream.read(), input_data)
if input_data:
_map_commands_to_lattice(data)
else:
raise IOError('invalid file extension, expecting .ele or .lte')
data.models.simulation.name = re.sub(r'\.(lte|ele)$', '', req.filename, flags=re.IGNORECASE)
if input_data and not test_data:
simulation_db.delete_simulation(elegant_common.SIM_TYPE, input_data.models.simulation.simulationId)
return data
def parse_elegant_log(run_dir):
path = run_dir.join(ELEGANT_LOG_FILE)
if not path.exists():
return '', 0
res = ''
last_element = None
text = pkio.read_text(str(path))
want_next_line = False
prev_line = ''
prev_err = ''
for line in text.split('\n'):
if line == prev_line:
continue
match = re.search('^Starting (\S+) at s\=', line)
if match:
name = match.group(1)
if not re.search('^M\d+\#', name):
last_element = name
if want_next_line:
res += line + '\n'
want_next_line = False
elif _is_ignore_error_text(line):
pass
elif _is_error_text(line):
if len(line) < 10:
want_next_line = True
else:
if line != prev_err:
res += line + '\n'
prev_err = line
prev_line = line
return res, last_element
def prepare_for_client(data):
if 'models' not in data:
return data
data.models.rpnCache = _code_var(data.models.rpnVariables).compute_cache(data, _SCHEMA)
return data
def prepare_output_file(run_dir, data):
if data.report == 'twissReport' or 'bunchReport' in data.report:
fn = simulation_db.json_filename(template_common.OUTPUT_BASE_NAME, run_dir)
if fn.exists():
fn.remove()
output_file = run_dir.join(_report_output_filename(data.report))
if output_file.exists():
save_report_data(data, run_dir)
def python_source_for_model(data, model):
return generate_parameters_file(data, is_parallel=True) + '''
with open('elegant.lte', 'w') as f:
f.write(lattice_file)
with open('elegant.ele', 'w') as f:
f.write(elegant_file)
import os
os.system('elegant elegant.ele')
'''
def remove_last_frame(run_dir):
pass
def save_report_data(data, run_dir):
a = copy.deepcopy(data.models[data.report])
a.frameReport = data.report
if a.frameReport == 'twissReport':
a.x = 's'
a.y = a.y1
a.frameIndex = 0
simulation_db.write_result(
_extract_report_data(str(run_dir.join(_report_output_filename(a.frameReport))), a),
run_dir=run_dir,
)
def sim_frame(frame_args):
r = frame_args.frameReport
page_count = 0
for info in _output_info(frame_args.run_dir):
if info.modelKey == r:
page_count = info.pageCount
frame_args.fieldRange = info.fieldRange
frame_args.y = frame_args.y1
return _extract_report_data(
_file_name_from_id(
frame_args.xFileId,
frame_args.sim_in,
frame_args.run_dir,
),
frame_args,
page_count=page_count,
)
def validate_file(file_type, path):
err = None
if file_type == 'bunchFile-sourceFile':
err = 'expecting sdds file with (x, xp, y, yp, t, p) or (r, pr, pz, t, pphi) columns'
if sdds.sddsdata.InitializeInput(_SDDS_INDEX, str(path)) == 1:
beam_type = _sdds_beam_type(sdds.sddsdata.GetColumnNames(_SDDS_INDEX))
if beam_type in ('elegant', 'spiffe'):
sdds.sddsdata.ReadPage(_SDDS_INDEX)
if len(sdds.sddsdata.GetColumn(_SDDS_INDEX, 0)) > 0:
err = None
else:
err = 'sdds file contains no rows'
sdds.sddsdata.Terminate(_SDDS_INDEX)
return err
def webcon_generate_lattice(data):
# Used by Webcon
util = LatticeUtil(data, _SCHEMA)
return _generate_lattice(_build_filename_map_from_util(util), util)
def write_parameters(data, run_dir, is_parallel):
"""Write the parameters file
Args:
data (dict): input
run_dir (py.path): where to write
is_parallel (bool): run in background?
"""
pkio.write_text(
run_dir.join(template_common.PARAMETERS_PYTHON_FILE),
generate_parameters_file(
data,
is_parallel,
),
)
for b in _SIM_DATA.lib_file_basenames(data):
if re.search(r'SCRIPT-commandFile', b):
os.chmod(str(run_dir.join(b)), stat.S_IRUSR | stat.S_IXUSR)
def _ast_dump(node, annotate_fields=True, include_attributes=False, indent=' '):
"""
Taken from:
https://bitbucket.org/takluyver/greentreesnakes/src/587ad72894bc7595bc30e33affaa238ac32f0740/astpp.py?at=default&fileviewer=file-view-default
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation is
wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
"""
def _format(node, level=0):
if isinstance(node, ast.AST):
fields = [(a, _format(b, level)) for a, b in ast.iter_fields(node)]
if include_attributes and node._attributes:
fields.extend(
[(a, _format(getattr(node, a), level))
for a in node._attributes],
)
return ''.join([
node.__class__.__name__,
'(',
', '.join(('%s=%s' % field for field in fields)
if annotate_fields else
(b for a, b in fields)),
')',
])
elif isinstance(node, list):
lines = ['[']
lines.extend(
(indent * (level + 2) + _format(x, level + 2) + ','
for x in node),
)
if len(lines) > 1:
lines.append(indent * (level + 1) + ']')
else:
lines[-1] += ']'
return '\n'.join(lines)
return repr(node)
if not isinstance(node, ast.AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
def _build_filename_map(data):
return _build_filename_map_from_util(LatticeUtil(data, _SCHEMA))
def _build_filename_map_from_util(util):
return util.iterate_models(OutputFileIterator()).result
def _command_file_extension(model):
if model._type == 'save_lattice':
return 'lte'
if model._type == 'global_settings':
return 'txt'
return 'sdds'
def _compute_percent_complete(data, last_element):
if not last_element:
return 0
elements = PKDict()
for e in data.models.elements:
elements[e._id] = e
beamlines = PKDict()
for b in data.models.beamlines:
beamlines[b.id] = b
id = data.models.simulation.visualizationBeamlineId
beamline_map = PKDict()
count = _walk_beamline(beamlines[id], 1, elements, beamlines, beamline_map)
index = beamline_map[last_element] if last_element in beamline_map else 0
res = index * 100 / count
if res > 100:
return 100
return res
def _contains_columns(column_names, search):
for col in search:
if col not in column_names:
return False
return True
def _correct_halo_gaussian_distribution_type(m):
# the halo(gaussian) value will get validated/escaped to halogaussian, change it back
if 'distribution_type' in m and 'halogaussian' in m.distribution_type:
m.distribution_type = m.distribution_type.replace('halogaussian', 'halo(gaussian)')
def _extract_report_data(xFilename, frame_args, page_count=0):
page_index = frame_args.frameIndex
xfield = frame_args.x if 'x' in frame_args else frame_args[_X_FIELD]
# x, column_names, x_def, err
x_col = sdds_util.extract_sdds_column(xFilename, xfield, page_index)
if x_col['err']:
return x_col['err']
x = x_col['values']
if not _is_histogram_file(xFilename, x_col['column_names']):
# parameter plot
plots = []
filename = PKDict(
y1=xFilename,
#TODO(pjm): y2Filename, y3Filename are not currently used. Would require rescaling x value across files.
y2=xFilename,
y3=xFilename,
)
for f in ('y1', 'y2', 'y3'):
if re.search(r'^none$', frame_args[f], re.IGNORECASE) or frame_args[f] == ' ':
continue
yfield = frame_args[f]
y_col = sdds_util.extract_sdds_column(filename[f], yfield, page_index)
if y_col['err']:
return y_col['err']
y = y_col['values']
plots.append(PKDict(
field=yfield,
points=y,
label=_field_label(yfield, y_col['column_def'][1]),
))
title = ''
if page_count > 1:
title = 'Plot {} of {}'.format(page_index + 1, page_count)
return template_common.parameter_plot(x, plots, frame_args, PKDict(
title=title,
y_label='',
x_label=_field_label(xfield, x_col['column_def'][1]),
))
yfield = frame_args['y1'] if 'y1' in frame_args else frame_args['y']
y_col = sdds_util.extract_sdds_column(xFilename, yfield, page_index)
if y_col['err']:
return y_col['err']
return template_common.heatmap([x, y_col['values']], frame_args, PKDict(
x_label=_field_label(xfield, x_col['column_def'][1]),
y_label=_field_label(yfield, y_col['column_def'][1]),
title=_plot_title(xfield, yfield, page_index, page_count),
))
def _field_label(field, units):
if field in _FIELD_LABEL:
return _FIELD_LABEL[field]
if units in _SIMPLE_UNITS:
return '{} [{}]'.format(field, units)
return field
def _file_info(filename, run_dir, id, output_index):
file_path = run_dir.join(filename)
if not re.search(r'.sdds$', filename, re.IGNORECASE):
if file_path.exists():
return PKDict(
isAuxFile=True,
filename=filename,
id=_file_id(id, output_index),
lastUpdateTime=int(os.path.getmtime(str(file_path))),
)
return None
try:
if sdds.sddsdata.InitializeInput(_SDDS_INDEX, str(file_path)) != 1:
return None
column_names = sdds.sddsdata.GetColumnNames(_SDDS_INDEX)
plottable_columns = []
double_column_count = 0
field_range = PKDict()
for col in column_names:
col_type = sdds.sddsdata.GetColumnDefinition(_SDDS_INDEX, col)[4]
if col_type < _SDDS_STRING_TYPE:
plottable_columns.append(col)
if col_type in _SDDS_DOUBLE_TYPES:
double_column_count += 1
field_range[col] = []
parameter_names = sdds.sddsdata.GetParameterNames(_SDDS_INDEX)
parameters = dict([(p, []) for p in parameter_names])
page_count = 0
row_counts = []
while True:
if sdds.sddsdata.ReadPage(_SDDS_INDEX) <= 0:
break
row_counts.append(sdds.sddsdata.RowCount(_SDDS_INDEX))
page_count += 1
for i, p in enumerate(parameter_names):
parameters[p].append(_safe_sdds_value(sdds.sddsdata.GetParameter(_SDDS_INDEX, i)))
for col in column_names:
try:
values = sdds.sddsdata.GetColumn(
_SDDS_INDEX,
column_names.index(col),
)
except SystemError:
# incorrectly generated sdds file
break
if not len(values):
pass
elif len(field_range[col]):
field_range[col][0] = min(_safe_sdds_value(min(values)), field_range[col][0])
field_range[col][1] = max(_safe_sdds_value(max(values)), field_range[col][1])
else:
field_range[col] = [_safe_sdds_value(min(values)), _safe_sdds_value(max(values))]
return PKDict(
isAuxFile=False if double_column_count > 1 else True,
filename=filename,
id=_file_id(id, output_index),
rowCounts=row_counts,
pageCount=page_count,
columns=column_names,
parameters=parameters,
parameterDefinitions=_parameter_definitions(parameters),
plottableColumns=plottable_columns,
lastUpdateTime=int(os.path.getmtime(str(file_path))),
isHistogram=_is_histogram_file(filename, column_names),
fieldRange=field_range,
)
finally:
try:
sdds.sddsdata.Terminate(_SDDS_INDEX)
except Exception:
pass
def _find_first_command(data, command_type):
for m in data.models.commands:
if m._type == command_type:
return m
return None
def _format_field_value(state, model, field, el_type):
value = model[field]
if el_type.endswith('StringArray'):
return ['{}[0]'.format(field), value]
if el_type == 'RPNValue':
value = _format_rpn_value(value, is_command=LatticeUtil.is_command(model))
elif el_type == 'OutputFile':
value = state.filename_map[_file_id(model._id, state.field_index)]
elif el_type.startswith('InputFile'):
value = _SIM_DATA.lib_file_name_with_model_field(LatticeUtil.model_name_for_data(model), field, value)
if el_type == 'InputFileXY':
value += '={}+{}'.format(model[field + 'X'], model[field + 'Y'])
elif el_type == 'BeamInputFile':
value = 'bunchFile-sourceFile.{}'.format(value)
elif el_type == 'LatticeBeamlineList':
value = state.id_map[int(value)].name
elif el_type == 'ElegantLatticeList':
if value and value == 'Lattice':
value = 'elegant.lte'
else:
value = value + '.filename.lte'
elif field == 'command' and LatticeUtil.model_name_for_data(model) == 'SCRIPT':
for f in ('commandFile', 'commandInputFile'):
if f in model and model[f]:
fn = _SIM_DATA.lib_file_name_with_model_field(model.type, f, model[f])
value = re.sub(r'\b' + re.escape(model[f]) + r'\b', fn, value)
if model.commandFile:
value = './' + value
if not _is_numeric(el_type, value):
value = '"{}"'.format(value)
return [field, value]
def _format_rpn_value(value, is_command=False):
if code_variable.CodeVar.is_var_value(value):
value = code_variable.CodeVar.infix_to_postfix(value)
if is_command:
return '({})'.format(value)
return value
def _generate_bunch_simulation(data, v):
for f in _SCHEMA.model.bunch:
info = _SCHEMA.model.bunch[f]
if info[1] == 'RPNValue':
field = 'bunch_{}'.format(f)
v[field] = _format_rpn_value(v[field], is_command=True)
longitudinal_method = int(data.models.bunch.longitudinalMethod)
# sigma s, sigma dp, dp s coupling
if longitudinal_method == 1:
v.bunch_emit_z = 0
v.bunch_beta_z = 0
v.bunch_alpha_z = 0
# sigma s, sigma dp, alpha z
elif longitudinal_method == 2:
v.bunch_emit_z = 0
v.bunch_beta_z = 0
v.bunch_dp_s_coupling = 0
# emit z, beta z, alpha z
elif longitudinal_method == 3:
v.bunch_sigma_dp = 0
v.bunch_sigma_s = 0
v.bunch_dp_s_coupling = 0
if data.models.bunchSource.inputSource == 'sdds_beam':
v.bunch_beta_x = 5
v.bunch_beta_y = 5
v.bunch_alpha_x = 0
        v.bunch_alpha_y = 0
if v.bunchFile_sourceFile and v.bunchFile_sourceFile != 'None':
v.bunchInputFile = _SIM_DATA.lib_file_name_with_model_field('bunchFile', 'sourceFile', v.bunchFile_sourceFile)
v.bunchFileType = _sdds_beam_type_from_file(v.bunchInputFile)
if str(data.models.bunch.p_central_mev) == '0':
run_setup = _find_first_command(data, 'run_setup')
if run_setup and run_setup.expand_for:
v.bunchExpandForFile = 'expand_for = "{}",'.format(
_SIM_DATA.lib_file_name_with_model_field('command_run_setup', 'expand_for', run_setup.expand_for))
v.bunchOutputFile = _report_output_filename('bunchReport')
return template_common.render_jinja(SIM_TYPE, v, 'bunch.py')
def _generate_commands(filename_map, util):
commands = util.iterate_models(
CommandIterator(filename_map, _format_field_value),
'commands').result
res = ''
for c in commands:
res += '\n' + '&{}'.format(c[0]._type) + '\n'
for f in c[1]:
res += ' {} = {},'.format(f[0], f[1]) + '\n'
res += '&end' + '\n'
return res
def _generate_full_simulation(data, v):
util = LatticeUtil(data, _SCHEMA)
if data.models.simulation.backtracking == '1':
_setup_backtracking(util)
filename_map = _build_filename_map_from_util(util)
v.update(dict(
commands=_generate_commands(filename_map, util),
lattice=_generate_lattice(filename_map, util),
simulationMode=data.models.simulation.simulationMode,
))
return template_common.render_jinja(SIM_TYPE, v)
def _generate_lattice(filename_map, util):
return util.render_lattice_and_beamline(
lattice.LatticeIterator(filename_map, _format_field_value),
quote_name=True)
def _generate_twiss_simulation(data, v):
max_id = _SIM_DATA.elegant_max_id(data)
sim = data.models.simulation
sim.simulationMode = 'serial'
run_setup = _find_first_command(data, 'run_setup') or PKDict(
_id=max_id + 1,
_type='run_setup',
lattice='Lattice',
p_central_mev=data.models.bunch.p_central_mev,
)
run_setup.use_beamline = sim.activeBeamlineId
twiss_output = _find_first_command(data, 'twiss_output') or PKDict(
_id=max_id + 2,
_type='twiss_output',
filename='1',
)
twiss_output.final_values_only = '0'
twiss_output.output_at_each_step = '0'
data.models.commands = [
run_setup,
twiss_output,
]
return _generate_full_simulation(data, v)
def _generate_variable(name, variables, visited):
res = ''
if name not in visited:
res += '% ' + '{} sto {}'.format(_format_rpn_value(variables[name]), name) + '\n'
visited[name] = True
return res
def _generate_variables(data):
res = ''
visited = PKDict()
code_var = _code_var(data.models.rpnVariables)
for name in sorted(code_var.postfix_variables):
for dependency in code_var.get_expr_dependencies(code_var.postfix_variables[name]):
res += _generate_variable(dependency, code_var.postfix_variables, visited)
res += _generate_variable(name, code_var.postfix_variables, visited)
return res
def _get_filename_for_element_id(id, data):
return _build_filename_map(data)['{}{}{}'.format(id[0], _FILE_ID_SEP, id[1])]
def _is_error_text(text):
return re.search(r'^warn|^error|wrong units|^fatal |no expansion for entity|unable to|warning\:|^0 particles left|^unknown token|^terminated by sig|no such file or directory|no parameter name found|Problem opening |Terminated by SIG|No filename given|^MPI_ERR', text, re.IGNORECASE)
def _is_histogram_file(filename, columns):
filename = os.path.basename(filename)
if re.search(r'^closed_orbit.output', filename):
return False
if 'xFrequency' in columns and 'yFrequency' in columns:
return False
if ('x' in columns and 'xp' in columns) \
or ('y' in columns and 'yp' in columns) \
or ('t' in columns and 'p' in columns):
return True
return False
def _is_ignore_error_text(text):
return re.search(r'^warn.* does not have a parameter', text, re.IGNORECASE)
def _is_numeric(el_type, value):
return el_type in ('RPNValue', 'RPNBoolean', 'Integer', 'Float') \
and re.search(r'^[\-\+0-9eE\.]+$', str(value))
def _map_commands_to_lattice(data):
for cmd in data.models.commands:
if cmd._type == 'run_setup':
cmd.lattice = 'Lattice'
break
for cmd in data.models.commands:
if cmd._type == 'run_setup':
name = cmd.use_beamline.upper()
for bl in data.models.beamlines:
if bl.name.upper() == name:
cmd.use_beamline = bl.id
break
def _output_info(run_dir):
# cache outputInfo to file, used later for report frames
info_file = run_dir.join(_OUTPUT_INFO_FILE)
if os.path.isfile(str(info_file)):
try:
res = simulation_db.read_json(info_file)
if len(res) == 0 or res[0].get('_version', '') == _OUTPUT_INFO_VERSION:
return res
except ValueError as e:
pass
data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
res = []
filename_map = _build_filename_map(data)
for k in filename_map.keys_in_order:
filename = filename_map[k]
id = k.split(_FILE_ID_SEP)
info = _file_info(filename, run_dir, id[0], id[1])
if info:
info.modelKey = 'elementAnimation{}'.format(info.id)
res.append(info)
if len(res):
res[0]['_version'] = _OUTPUT_INFO_VERSION
simulation_db.write_json(info_file, res)
return res
def _parameter_definitions(parameters):
"""Convert parameters to useful definitions"""
res = PKDict()
for p in parameters:
res[p] = dict(zip(
['symbol', 'units', 'description', 'format_string', 'type', 'fixed_value'],
sdds.sddsdata.GetParameterDefinition(_SDDS_INDEX, p),
))
return res
def _plot_title(xfield, yfield, page_index, page_count):
title_key = xfield + '-' + yfield
title = ''
if title_key in _PLOT_TITLE:
title = _PLOT_TITLE[title_key]
else:
title = '{} / {}'.format(xfield, yfield)
if page_count > 1:
title += ', Plot {} of {}'.format(page_index + 1, page_count)
return title
def _report_output_filename(report):
if report == 'twissReport':
return 'twiss_output.filename.sdds'
return 'elegant.bun'
def _safe_sdds_value(v):
if isinstance(v, float) and (math.isinf(v) or math.isnan(v)):
return 0
return v
def _setup_backtracking(util):
def _negative(el, fields):
for f in fields:
if f in el and el[f]:
v = str(el[f])
if re.search(r'^-', v):
v = v[1:]
else:
v = '-' + v
el[f] = v
break
util.data = copy.deepcopy(util.data)
types = PKDict(
bend=[
'BRAT', 'BUMPER', 'CSBEND', 'CSRCSBEND', 'FMULT', 'FTABLE', 'KPOLY', 'KSBEND',
'KQUSE', 'MBUMPER', 'MULT', 'NIBEND', 'NISEPT', 'RBEN', 'SBEN', 'TUBEND'],
mirror=['LMIRROR'],
)
for el in util.data.models.elements:
# change signs on length and angle fields
_negative(el, ('l', 'xmax'))
_negative(el, ('volt', 'voltage', 'initial_v', 'static_voltage'))
if el.type in types.bend:
_negative(el, ('angle', 'kick', 'hkick'))
if el.type in types.mirror:
_negative(el, ('theta', ))
util.select_beamline()['items'].reverse()
def _sdds_beam_type(column_names):
if _contains_columns(column_names, ['x', 'xp', 'y', 'yp', 't', 'p']):
return 'elegant'
if _contains_columns(column_names, ['r', 'pr', 'pz', 't', 'pphi']):
return 'spiffe'
return ''
def _sdds_beam_type_from_file(filename):
res = ''
path = str(_SIM_DATA.lib_file_abspath(filename))
if sdds.sddsdata.InitializeInput(_SDDS_INDEX, path) == 1:
res = _sdds_beam_type(sdds.sddsdata.GetColumnNames(_SDDS_INDEX))
sdds.sddsdata.Terminate(_SDDS_INDEX)
return res
def _validate_data(data, schema):
# ensure enums match, convert ints/floats, apply scaling
enum_info = template_common.validate_models(data, schema)
_correct_halo_gaussian_distribution_type(data.models.bunch)
for model_type in ['elements', 'commands']:
for m in data.models[model_type]:
template_common.validate_model(m, schema.model[LatticeUtil.model_name_for_data(m)], enum_info)
_correct_halo_gaussian_distribution_type(m)
def _walk_beamline(beamline, index, elements, beamlines, beamline_map):
# walk beamline in order, adding (<name>#<count> => index) to beamline_map
for id in beamline['items']:
if id in elements:
name = elements[id].name
if name not in beamline_map:
beamline_map[name] = 0
beamline_map[name] += 1
beamline_map['{}#{}'.format(name.upper(), beamline_map[name])] = index
index += 1
else:
index = _walk_beamline(beamlines[abs(id)], index, elements, beamlines, beamline_map)
return index
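# Illustrative sketch (an addition, not part of the original template): the
# beam-type detection above keys purely off which columns an SDDS file
# declares, so it can be exercised with plain lists of column names.
def _sdds_beam_type_examples():
    """Return example classifications for the supported column layouts."""
    return [
        _sdds_beam_type(['x', 'xp', 'y', 'yp', 't', 'p']),  # -> 'elegant'
        _sdds_beam_type(['r', 'pr', 'pz', 't', 'pphi']),     # -> 'spiffe'
        _sdds_beam_type(['x', 'y']),                         # -> ''
    ]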
|
mrakitin/sirepo
|
sirepo/template/elegant.py
|
Python
|
apache-2.0
| 33,800
|
[
"Gaussian"
] |
da42c96e9be8aee52c2a143516fac86d42371ed612655ed90e62b91653ab1401
|
# -*- coding: utf-8 -*-
"""
functions.py - Miscellaneous functions with no other home
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
"""
from __future__ import division
from .python2_3 import asUnicode
from .Qt import QtGui, QtCore, USE_PYSIDE
Colors = {
'b': QtGui.QColor(0,0,255,255),
'g': QtGui.QColor(0,255,0,255),
'r': QtGui.QColor(255,0,0,255),
'c': QtGui.QColor(0,255,255,255),
'm': QtGui.QColor(255,0,255,255),
'y': QtGui.QColor(255,255,0,255),
'k': QtGui.QColor(0,0,0,255),
'w': QtGui.QColor(255,255,255,255),
'd': QtGui.QColor(150,150,150,255),
'l': QtGui.QColor(200,200,200,255),
's': QtGui.QColor(100,100,150,255),
}
SI_PREFIXES = asUnicode('yzafpnµm kMGTPEZY')
SI_PREFIXES_ASCII = 'yzafpnum kMGTPEZY'
from .Qt import QtGui, QtCore, USE_PYSIDE
from . import getConfigOption, setConfigOptions
import numpy as np
import decimal, re
import ctypes
import sys, struct
from . import debug
def siScale(x, minVal=1e-25, allowUnicode=True):
"""
Return the recommended scale factor and SI prefix string for x.
Example::
siScale(0.0001) # returns (1e6, 'μ')
# This indicates that the number 0.0001 is best represented as 0.0001 * 1e6 = 100 μUnits
"""
if isinstance(x, decimal.Decimal):
x = float(x)
try:
if np.isnan(x) or np.isinf(x):
return(1, '')
except:
print(x, type(x))
raise
if abs(x) < minVal:
m = 0
x = 0
else:
m = int(np.clip(np.floor(np.log(abs(x))/np.log(1000)), -9.0, 9.0))
if m == 0:
pref = ''
elif m < -8 or m > 8:
pref = 'e%d' % (m*3)
else:
if allowUnicode:
pref = SI_PREFIXES[m+8]
else:
pref = SI_PREFIXES_ASCII[m+8]
p = .001**m
return (p, pref)
def siFormat(x, precision=3, suffix='', space=True, error=None, minVal=1e-25, allowUnicode=True):
"""
Return the number x formatted in engineering notation with SI prefix.
Example::
siFormat(0.0001, suffix='V') # returns "100 μV"
"""
if space is True:
space = ' '
if space is False:
space = ''
(p, pref) = siScale(x, minVal, allowUnicode)
if not (len(pref) > 0 and pref[0] == 'e'):
pref = space + pref
if error is None:
fmt = "%." + str(precision) + "g%s%s"
return fmt % (x*p, pref, suffix)
else:
if allowUnicode:
plusminus = space + asUnicode("±") + space
else:
plusminus = " +/- "
fmt = "%." + str(precision) + "g%s%s%s%s"
return fmt % (x*p, pref, suffix, plusminus, siFormat(error, precision=precision, suffix=suffix, space=space, minVal=minVal))
def siEval(s):
"""
Convert a value written in SI notation to its equivalent prefixless value
Example::
siEval("100 μV") # returns 0.0001
"""
s = asUnicode(s)
m = re.match(r'(-?((\d+(\.\d*)?)|(\.\d+))([eE]-?\d+)?)\s*([u' + SI_PREFIXES + r']?).*$', s)
if m is None:
raise Exception("Can't convert string '%s' to number." % s)
v = float(m.groups()[0])
p = m.groups()[6]
#if p not in SI_PREFIXES:
#raise Exception("Can't convert string '%s' to number--unknown prefix." % s)
if p == '':
n = 0
elif p == 'u':
n = -2
else:
n = SI_PREFIXES.index(p) - 8
return v * 1000**n
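# Illustrative sketch (an addition, not part of the original module):
# siFormat() and siEval() are approximately inverse operations, up to the
# requested display precision.
def _si_round_trip_demo(value=0.0001):
    """Return (formatted string, recovered value), e.g. ('100 μV', 0.0001)."""
    formatted = siFormat(value, suffix='V')
    return formatted, siEval(formatted)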
class Color(QtGui.QColor):
def __init__(self, *args):
QtGui.QColor.__init__(self, mkColor(*args))
def glColor(self):
"""Return (r,g,b,a) normalized for use in opengl"""
return (self.red()/255., self.green()/255., self.blue()/255., self.alpha()/255.)
def __getitem__(self, ind):
return (self.red, self.green, self.blue, self.alpha)[ind]()
def mkColor(*args):
"""
Convenience function for constructing QColor from a variety of argument types. Accepted arguments are:
================ ================================================
'c' one of: r, g, b, c, m, y, k, w
R, G, B, [A] integers 0-255
(R, G, B, [A]) tuple of integers 0-255
float greyscale, 0.0-1.0
int see :func:`intColor() <pyqtgraph.intColor>`
(int, hues) see :func:`intColor() <pyqtgraph.intColor>`
"RGB" hexadecimal strings; may begin with '#'
"RGBA"
"RRGGBB"
"RRGGBBAA"
QColor QColor instance; makes a copy.
================ ================================================
"""
err = 'Not sure how to make a color from "%s"' % str(args)
if len(args) == 1:
if isinstance(args[0], basestring):
c = args[0]
if c[0] == '#':
c = c[1:]
if len(c) == 1:
try:
return Colors[c]
except KeyError:
raise Exception('No color named "%s"' % c)
if len(c) == 3:
r = int(c[0]*2, 16)
g = int(c[1]*2, 16)
b = int(c[2]*2, 16)
a = 255
elif len(c) == 4:
r = int(c[0]*2, 16)
g = int(c[1]*2, 16)
b = int(c[2]*2, 16)
a = int(c[3]*2, 16)
elif len(c) == 6:
r = int(c[0:2], 16)
g = int(c[2:4], 16)
b = int(c[4:6], 16)
a = 255
elif len(c) == 8:
r = int(c[0:2], 16)
g = int(c[2:4], 16)
b = int(c[4:6], 16)
a = int(c[6:8], 16)
elif isinstance(args[0], QtGui.QColor):
return QtGui.QColor(args[0])
elif isinstance(args[0], float):
r = g = b = int(args[0] * 255)
a = 255
elif hasattr(args[0], '__len__'):
if len(args[0]) == 3:
(r, g, b) = args[0]
a = 255
elif len(args[0]) == 4:
(r, g, b, a) = args[0]
elif len(args[0]) == 2:
return intColor(*args[0])
else:
raise Exception(err)
elif type(args[0]) == int:
return intColor(args[0])
else:
raise Exception(err)
elif len(args) == 3:
(r, g, b) = args
a = 255
elif len(args) == 4:
(r, g, b, a) = args
else:
raise Exception(err)
args = [r,g,b,a]
args = [0 if np.isnan(a) or np.isinf(a) else a for a in args]
args = list(map(int, args))
return QtGui.QColor(*args)
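# Illustrative sketch (an addition, not part of the original module): a few of
# the argument forms accepted by mkColor(), taken from the table above.  All
# four calls below build the same opaque red QColor.
def _mkColor_examples():
    """Return equivalent QColor objects built from different argument forms."""
    return [
        mkColor('r'),             # single-letter color name
        mkColor('#FF0000'),       # hex string; a leading '#' is allowed
        mkColor((255, 0, 0)),     # (R, G, B) tuple
        mkColor(255, 0, 0, 255),  # separate R, G, B, A integers
    ]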
def mkBrush(*args, **kwds):
"""
| Convenience function for constructing Brush.
| This function always constructs a solid brush and accepts the same arguments as :func:`mkColor() <pyqtgraph.mkColor>`
| Calling mkBrush(None) returns an invisible brush.
"""
if 'color' in kwds:
color = kwds['color']
elif len(args) == 1:
arg = args[0]
if arg is None:
return QtGui.QBrush(QtCore.Qt.NoBrush)
elif isinstance(arg, QtGui.QBrush):
return QtGui.QBrush(arg)
else:
color = arg
elif len(args) > 1:
color = args
return QtGui.QBrush(mkColor(color))
def mkPen(*args, **kargs):
"""
Convenience function for constructing QPen.
Examples::
mkPen(color)
mkPen(color, width=2)
mkPen(cosmetic=False, width=4.5, color='r')
        mkPen({'color': "FF0", 'width': 2})
mkPen(None) # (no pen)
In these examples, *color* may be replaced with any arguments accepted by :func:`mkColor() <pyqtgraph.mkColor>` """
color = kargs.get('color', None)
width = kargs.get('width', 1)
style = kargs.get('style', None)
dash = kargs.get('dash', None)
cosmetic = kargs.get('cosmetic', True)
hsv = kargs.get('hsv', None)
if len(args) == 1:
arg = args[0]
if isinstance(arg, dict):
return mkPen(**arg)
if isinstance(arg, QtGui.QPen):
return QtGui.QPen(arg) ## return a copy of this pen
elif arg is None:
style = QtCore.Qt.NoPen
else:
color = arg
if len(args) > 1:
color = args
if color is None:
color = mkColor('l')
if hsv is not None:
color = hsvColor(*hsv)
else:
color = mkColor(color)
pen = QtGui.QPen(QtGui.QBrush(color), width)
pen.setCosmetic(cosmetic)
if style is not None:
pen.setStyle(style)
if dash is not None:
pen.setDashPattern(dash)
return pen
def hsvColor(hue, sat=1.0, val=1.0, alpha=1.0):
"""Generate a QColor from HSVa values. (all arguments are float 0.0-1.0)"""
c = QtGui.QColor()
c.setHsvF(hue, sat, val, alpha)
return c
def colorTuple(c):
"""Return a tuple (R,G,B,A) from a QColor"""
return (c.red(), c.green(), c.blue(), c.alpha())
def colorStr(c):
"""Generate a hex string code from a QColor"""
return ('%02x'*4) % colorTuple(c)
def intColor(index, hues=9, values=1, maxValue=255, minValue=150, maxHue=360, minHue=0, sat=255, alpha=255, **kargs):
"""
Creates a QColor from a single index. Useful for stepping through a predefined list of colors.
The argument *index* determines which color from the set will be returned. All other arguments determine what the set of predefined colors will be
Colors are chosen by cycling across hues while varying the value (brightness).
By default, this selects from a list of 9 hues."""
hues = int(hues)
values = int(values)
ind = int(index) % (hues * values)
indh = ind % hues
indv = ind / hues
if values > 1:
v = minValue + indv * ((maxValue-minValue) / (values-1))
else:
v = maxValue
h = minHue + (indh * (maxHue-minHue)) / hues
c = QtGui.QColor()
c.setHsv(h, sat, v)
c.setAlpha(alpha)
return c
def glColor(*args, **kargs):
"""
Convert a color to OpenGL color format (r,g,b,a) floats 0.0-1.0
Accepts same arguments as :func:`mkColor <pyqtgraph.mkColor>`.
"""
c = mkColor(*args, **kargs)
return (c.red()/255., c.green()/255., c.blue()/255., c.alpha()/255.)
def makeArrowPath(headLen=20, tipAngle=20, tailLen=20, tailWidth=3, baseAngle=0):
"""
Construct a path outlining an arrow with the given dimensions.
The arrow points in the -x direction with tip positioned at 0,0.
    The *tipAngle* (in degrees) determines the width of the arrow head.
If *tailLen* is None, no tail will be drawn.
"""
headWidth = headLen * np.tan(tipAngle * 0.5 * np.pi/180.)
path = QtGui.QPainterPath()
path.moveTo(0,0)
path.lineTo(headLen, -headWidth)
if tailLen is None:
innerY = headLen - headWidth * np.tan(baseAngle*np.pi/180.)
path.lineTo(innerY, 0)
else:
tailWidth *= 0.5
innerY = headLen - (headWidth-tailWidth) * np.tan(baseAngle*np.pi/180.)
path.lineTo(innerY, -tailWidth)
path.lineTo(headLen + tailLen, -tailWidth)
path.lineTo(headLen + tailLen, tailWidth)
path.lineTo(innerY, tailWidth)
path.lineTo(headLen, headWidth)
path.lineTo(0,0)
return path
def affineSlice(data, shape, origin, vectors, axes, order=1, returnCoords=False, **kargs):
"""
Take a slice of any orientation through an array. This is useful for extracting sections of multi-dimensional arrays such as MRI images for viewing as 1D or 2D data.
The slicing axes are aribtrary; they do not need to be orthogonal to the original data or even to each other. It is possible to use this function to extract arbitrary linear, rectangular, or parallelepiped shapes from within larger datasets. The original data is interpolated onto a new array of coordinates using scipy.ndimage.map_coordinates if it is available (see the scipy documentation for more information about this). If scipy is not available, then a slower implementation of map_coordinates is used.
For a graphical interface to this function, see :func:`ROI.getArrayRegion <pyqtgraph.ROI.getArrayRegion>`
============== ====================================================================================================
**Arguments:**
*data* (ndarray) the original dataset
*shape* the shape of the slice to take (Note the return value may have more dimensions than len(shape))
*origin* the location in the original dataset that will become the origin of the sliced data.
*vectors* list of unit vectors which point in the direction of the slice axes. Each vector must have the same
length as *axes*. If the vectors are not unit length, the result will be scaled relative to the
original data. If the vectors are not orthogonal, the result will be sheared relative to the
original data.
*axes* The axes in the original dataset which correspond to the slice *vectors*
*order* The order of spline interpolation. Default is 1 (linear). See scipy.ndimage.map_coordinates
for more information.
*returnCoords* If True, return a tuple (result, coords) where coords is the array of coordinates used to select
values from the original dataset.
*All extra keyword arguments are passed to scipy.ndimage.map_coordinates.*
--------------------------------------------------------------------------------------------------------------------
============== ====================================================================================================
Note the following must be true:
| len(shape) == len(vectors)
| len(origin) == len(axes) == len(vectors[i])
Example: start with a 4D fMRI data set, take a diagonal-planar slice out of the last 3 axes
* data = array with dims (time, x, y, z) = (100, 40, 40, 40)
* The plane to pull out is perpendicular to the vector (x,y,z) = (1,1,1)
* The origin of the slice will be at (x,y,z) = (40, 0, 0)
* We will slice a 20x20 plane from each timepoint, giving a final shape (100, 20, 20)
The call for this example would look like::
affineSlice(data, shape=(20,20), origin=(40,0,0), vectors=((-1, 1, 0), (-1, 0, 1)), axes=(1,2,3))
"""
try:
import scipy.ndimage
have_scipy = True
except ImportError:
have_scipy = False
have_scipy = False
# sanity check
if len(shape) != len(vectors):
raise Exception("shape and vectors must have same length.")
if len(origin) != len(axes):
raise Exception("origin and axes must have same length.")
for v in vectors:
if len(v) != len(axes):
raise Exception("each vector must be same length as axes.")
shape = list(map(np.ceil, shape))
## transpose data so slice axes come first
trAx = list(range(data.ndim))
for x in axes:
trAx.remove(x)
tr1 = tuple(axes) + tuple(trAx)
data = data.transpose(tr1)
#print "tr1:", tr1
## dims are now [(slice axes), (other axes)]
## make sure vectors are arrays
if not isinstance(vectors, np.ndarray):
vectors = np.array(vectors)
if not isinstance(origin, np.ndarray):
origin = np.array(origin)
origin.shape = (len(axes),) + (1,)*len(shape)
## Build array of sample locations.
grid = np.mgrid[tuple([slice(0,x) for x in shape])] ## mesh grid of indexes
#print shape, grid.shape
x = (grid[np.newaxis,...] * vectors.transpose()[(Ellipsis,) + (np.newaxis,)*len(shape)]).sum(axis=1) ## magic
x += origin
#print "X values:"
#print x
## iterate manually over unused axes since map_coordinates won't do it for us
if have_scipy:
extraShape = data.shape[len(axes):]
output = np.empty(tuple(shape) + extraShape, dtype=data.dtype)
for inds in np.ndindex(*extraShape):
ind = (Ellipsis,) + inds
output[ind] = scipy.ndimage.map_coordinates(data[ind], x, order=order, **kargs)
else:
# map_coordinates expects the indexes as the first axis, whereas
# interpolateArray expects indexes at the last axis.
tr = tuple(range(1,x.ndim)) + (0,)
output = interpolateArray(data, x.transpose(tr))
tr = list(range(output.ndim))
trb = []
for i in range(min(axes)):
ind = tr1.index(i) + (len(shape)-len(axes))
tr.remove(ind)
trb.append(ind)
tr2 = tuple(trb+tr)
## Untranspose array before returning
output = output.transpose(tr2)
if returnCoords:
return (output, x)
else:
return output
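# Illustrative sketch (an addition, not part of the original module): pull a
# small axis-aligned 2D slice out of a 3D volume; with unit vectors along the
# first two axes the result should simply reproduce vol[:3, :3, 0].
def _affineSlice_demo():
    """Return a 3x3 slice of a 4x5x6 test volume taken in the x-y plane."""
    vol = np.arange(4 * 5 * 6, dtype=float).reshape(4, 5, 6)
    return affineSlice(vol, shape=(3, 3), origin=(0, 0, 0),
                       vectors=((1, 0, 0), (0, 1, 0)), axes=(0, 1, 2))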
def interpolateArray(data, x, default=0.0):
"""
    N-dimensional interpolation similar to scipy.ndimage.map_coordinates.
This function returns linearly-interpolated values sampled from a regular
grid of data.
*data* is an array of any shape containing the values to be interpolated.
*x* is an array with (shape[-1] <= data.ndim) containing the locations
within *data* to interpolate.
Returns array of shape (x.shape[:-1] + data.shape)
For example, assume we have the following 2D image data::
>>> data = np.array([[1, 2, 4 ],
[10, 20, 40 ],
[100, 200, 400]])
To compute a single interpolated point from this data::
>>> x = np.array([(0.5, 0.5)])
>>> interpolateArray(data, x)
array([ 8.25])
To compute a 1D list of interpolated locations::
>>> x = np.array([(0.5, 0.5),
(1.0, 1.0),
(1.0, 2.0),
(1.5, 0.0)])
>>> interpolateArray(data, x)
array([ 8.25, 20. , 40. , 55. ])
To compute a 2D array of interpolated locations::
>>> x = np.array([[(0.5, 0.5), (1.0, 2.0)],
[(1.0, 1.0), (1.5, 0.0)]])
>>> interpolateArray(data, x)
array([[ 8.25, 40. ],
[ 20. , 55. ]])
..and so on. The *x* argument may have any shape as long as
```x.shape[-1] <= data.ndim```. In the case that
```x.shape[-1] < data.ndim```, then the remaining axes are simply
broadcasted as usual. For example, we can interpolate one location
from an entire row of the data::
>>> x = np.array([[0.5]])
>>> interpolateArray(data, x)
array([[ 5.5, 11. , 22. ]])
This is useful for interpolating from arrays of colors, vertexes, etc.
"""
prof = debug.Profiler()
nd = data.ndim
md = x.shape[-1]
# First we generate arrays of indexes that are needed to
# extract the data surrounding each point
fields = np.mgrid[(slice(0,2),) * md]
xmin = np.floor(x).astype(int)
xmax = xmin + 1
indexes = np.concatenate([xmin[np.newaxis, ...], xmax[np.newaxis, ...]])
fieldInds = []
totalMask = np.ones(x.shape[:-1], dtype=bool) # keep track of out-of-bound indexes
for ax in range(md):
mask = (xmin[...,ax] >= 0) & (x[...,ax] <= data.shape[ax]-1)
# keep track of points that need to be set to default
totalMask &= mask
# ..and keep track of indexes that are out of bounds
# (note that when x[...,ax] == data.shape[ax], then xmax[...,ax] will be out
# of bounds, but the interpolation will work anyway)
mask &= (xmax[...,ax] < data.shape[ax])
axisIndex = indexes[...,ax][fields[ax]]
#axisMask = mask.astype(np.ubyte).reshape((1,)*(fields.ndim-1) + mask.shape)
axisIndex[axisIndex < 0] = 0
axisIndex[axisIndex >= data.shape[ax]] = 0
fieldInds.append(axisIndex)
prof()
# Get data values surrounding each requested point
# fieldData[..., i] contains all 2**nd values needed to interpolate x[i]
fieldData = data[tuple(fieldInds)]
prof()
## Interpolate
s = np.empty((md,) + fieldData.shape, dtype=float)
dx = x - xmin
# reshape fields for arithmetic against dx
for ax in range(md):
f1 = fields[ax].reshape(fields[ax].shape + (1,)*(dx.ndim-1))
sax = f1 * dx[...,ax] + (1-f1) * (1-dx[...,ax])
sax = sax.reshape(sax.shape + (1,) * (s.ndim-1-sax.ndim))
s[ax] = sax
    s = np.prod(s, axis=0)
result = fieldData * s
for i in range(md):
result = result.sum(axis=0)
prof()
totalMask.shape = totalMask.shape + (1,) * (nd - md)
result[~totalMask] = default
prof()
return result
def subArray(data, offset, shape, stride):
"""
Unpack a sub-array from *data* using the specified offset, shape, and stride.
Note that *stride* is specified in array elements, not bytes.
For example, we have a 2x3 array packed in a 1D array as follows::
data = [_, _, 00, 01, 02, _, 10, 11, 12, _]
Then we can unpack the sub-array with this call::
subArray(data, offset=2, shape=(2, 3), stride=(4, 1))
..which returns::
[[00, 01, 02],
[10, 11, 12]]
This function operates only on the first axis of *data*. So changing
the input in the example above to have shape (10, 7) would cause the
output to have shape (2, 3, 7).
"""
#data = data.flatten()
data = data[offset:]
shape = tuple(shape)
stride = tuple(stride)
extraShape = data.shape[1:]
#print data.shape, offset, shape, stride
for i in range(len(shape)):
mask = (slice(None),) * i + (slice(None, shape[i] * stride[i]),)
newShape = shape[:i+1]
if i < len(shape)-1:
newShape += (stride[i],)
newShape += extraShape
#print i, mask, newShape
#print "start:\n", data.shape, data
data = data[mask]
#print "mask:\n", data.shape, data
data = data.reshape(newShape)
#print "reshape:\n", data.shape, data
return data
def transformToArray(tr):
"""
Given a QTransform, return a 3x3 numpy array.
Given a QMatrix4x4, return a 4x4 numpy array.
Example: map an array of x,y coordinates through a transform::
## coordinates to map are (1,5), (2,6), (3,7), and (4,8)
coords = np.array([[1,2,3,4], [5,6,7,8], [1,1,1,1]]) # the extra '1' coordinate is needed for translation to work
## Make an example transform
tr = QtGui.QTransform()
tr.translate(3,4)
tr.scale(2, 0.1)
## convert to array
        m = pg.transformToArray(tr)[:2]  # ignore the perspective portion of the transformation
## map coordinates through transform
mapped = np.dot(m, coords)
"""
#return np.array([[tr.m11(), tr.m12(), tr.m13()],[tr.m21(), tr.m22(), tr.m23()],[tr.m31(), tr.m32(), tr.m33()]])
## The order of elements given by the method names m11..m33 is misleading--
## It is most common for x,y translation to occupy the positions 1,3 and 2,3 in
## a transformation matrix. However, with QTransform these values appear at m31 and m32.
## So the correct interpretation is transposed:
if isinstance(tr, QtGui.QTransform):
return np.array([[tr.m11(), tr.m21(), tr.m31()], [tr.m12(), tr.m22(), tr.m32()], [tr.m13(), tr.m23(), tr.m33()]])
elif isinstance(tr, QtGui.QMatrix4x4):
return np.array(tr.copyDataTo()).reshape(4,4)
else:
raise Exception("Transform argument must be either QTransform or QMatrix4x4.")
def transformCoordinates(tr, coords, transpose=False):
"""
Map a set of 2D or 3D coordinates through a QTransform or QMatrix4x4.
The shape of coords must be (2,...) or (3,...)
The mapping will _ignore_ any perspective transformations.
For coordinate arrays with ndim=2, this is basically equivalent to matrix multiplication.
Most arrays, however, prefer to put the coordinate axis at the end (eg. shape=(...,3)). To
allow this, use transpose=True.
"""
if transpose:
## move last axis to beginning. This transposition will be reversed before returning the mapped coordinates.
coords = coords.transpose((coords.ndim-1,) + tuple(range(0,coords.ndim-1)))
nd = coords.shape[0]
if isinstance(tr, np.ndarray):
m = tr
else:
m = transformToArray(tr)
m = m[:m.shape[0]-1] # remove perspective
## If coords are 3D and tr is 2D, assume no change for Z axis
if m.shape == (2,3) and nd == 3:
m2 = np.zeros((3,4))
m2[:2, :2] = m[:2,:2]
m2[:2, 3] = m[:2,2]
m2[2,2] = 1
m = m2
## if coords are 2D and tr is 3D, ignore Z axis
if m.shape == (3,4) and nd == 2:
m2 = np.empty((2,3))
m2[:,:2] = m[:2,:2]
m2[:,2] = m[:2,3]
m = m2
## reshape tr and coords to prepare for multiplication
m = m.reshape(m.shape + (1,)*(coords.ndim-1))
coords = coords[np.newaxis, ...]
# separate scale/rotate and translation
translate = m[:,-1]
m = m[:, :-1]
## map coordinates and return
mapped = (m*coords).sum(axis=1) ## apply scale/rotate
mapped += translate
if transpose:
## move first axis to end.
mapped = mapped.transpose(tuple(range(1,mapped.ndim)) + (0,))
return mapped
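## Illustrative usage sketch (not part of this module; transform values are
## hypothetical): map an (N, 2) array of points through a scale+translate
## transform, keeping the coordinate axis last via transpose=True.
##     tr = QtGui.QTransform()
##     tr.translate(10, 0)
##     tr.scale(2, 2)
##     pts = np.array([[0., 0.], [1., 1.], [2., 3.]])
##     mapped = transformCoordinates(tr, pts, transpose=True)  ## (0,0) maps to (10,0)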
def solve3DTransform(points1, points2):
"""
Find a 3D transformation matrix that maps points1 onto points2.
Points must be specified as either lists of 4 Vectors or
(4, 3) arrays.
"""
import numpy.linalg
pts = []
for inp in (points1, points2):
if isinstance(inp, np.ndarray):
A = np.empty((4,4), dtype=float)
A[:,:3] = inp[:,:3]
A[:,3] = 1.0
else:
A = np.array([[inp[i].x(), inp[i].y(), inp[i].z(), 1] for i in range(4)])
pts.append(A)
## solve 3 sets of linear equations to determine transformation matrix elements
matrix = np.zeros((4,4))
for i in range(3):
## solve Ax = B; x is one row of the desired transformation matrix
matrix[i] = numpy.linalg.solve(pts[0], pts[1][:,i])
return matrix
def solveBilinearTransform(points1, points2):
"""
Find a bilinear transformation matrix (2x4) that maps points1 onto points2.
Points must be specified as a list of 4 Vector, Point, QPointF, etc.
To use this matrix to map a point [x,y]::
mapped = np.dot(matrix, [x*y, x, y, 1])
"""
import numpy.linalg
## A is 4 rows (points) x 4 columns (xy, x, y, 1)
## B is 4 rows (points) x 2 columns (x, y)
A = np.array([[points1[i].x()*points1[i].y(), points1[i].x(), points1[i].y(), 1] for i in range(4)])
B = np.array([[points2[i].x(), points2[i].y()] for i in range(4)])
## solve 2 sets of linear equations to determine transformation matrix elements
matrix = np.zeros((2,4))
for i in range(2):
matrix[i] = numpy.linalg.solve(A, B[:,i]) ## solve Ax = B; x is one row of the desired transformation matrix
return matrix
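## Illustrative sketch (hypothetical points; assumes the Qt binding in use
## provides QPointF): fit a bilinear map that doubles coordinates, then map
## a point with the returned 2x4 matrix.
##     from PyQt4.QtCore import QPointF
##     p1 = [QPointF(0,0), QPointF(1,0), QPointF(0,1), QPointF(1,1)]
##     p2 = [QPointF(0,0), QPointF(2,0), QPointF(0,2), QPointF(2,2)]
##     matrix = solveBilinearTransform(p1, p2)
##     np.dot(matrix, [0.5*0.5, 0.5, 0.5, 1])   ## -> approximately [1., 1.]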
def rescaleData(data, scale, offset, dtype=None):
"""Return data rescaled and optionally cast to a new dtype::
data => (data-offset) * scale
Uses scipy.weave (if available) to improve performance.
"""
if dtype is None:
dtype = data.dtype
else:
dtype = np.dtype(dtype)
try:
if not getConfigOption('useWeave'):
raise Exception('Weave is disabled; falling back to slower version.')
try:
import scipy.weave
except ImportError:
raise Exception('scipy.weave is not importable; falling back to slower version.')
## require native dtype when using weave
if not data.dtype.isnative:
data = data.astype(data.dtype.newbyteorder('='))
if not dtype.isnative:
weaveDtype = dtype.newbyteorder('=')
else:
weaveDtype = dtype
newData = np.empty((data.size,), dtype=weaveDtype)
flat = np.ascontiguousarray(data).reshape(data.size)
size = data.size
code = """
double sc = (double)scale;
double off = (double)offset;
for( int i=0; i<size; i++ ) {
newData[i] = ((double)flat[i] - off) * sc;
}
"""
scipy.weave.inline(code, ['flat', 'newData', 'size', 'offset', 'scale'], compiler='gcc')
if dtype != weaveDtype:
newData = newData.astype(dtype)
data = newData.reshape(data.shape)
except:
if getConfigOption('useWeave'):
if getConfigOption('weaveDebug'):
debug.printExc("Error; disabling weave.")
setConfigOptions(useWeave=False)
#p = np.poly1d([scale, -offset*scale])
#data = p(data).astype(dtype)
d2 = data-offset
d2 *= scale
data = d2.astype(dtype)
return data
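## Illustrative sketch (hypothetical 12-bit camera data): map 0..4095 into
## the ubyte range.
##     raw = np.array([0, 1024, 4095], dtype=np.uint16)
##     rescaleData(raw, scale=255./4095., offset=0, dtype=np.ubyte)
##     ## -> array([  0,  63, 255], dtype=uint8)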
def applyLookupTable(data, lut):
"""
Uses values in *data* as indexes to select values from *lut*.
The returned data has shape data.shape + lut.shape[1:]
Note: color gradient lookup tables can be generated using GradientWidget.
"""
if data.dtype.kind not in ('i', 'u'):
data = data.astype(int)
return np.take(lut, data, axis=0, mode='clip')
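## Illustrative sketch (hypothetical values): apply a grayscale ramp LUT to a
## small ubyte image.
##     lut = np.empty((256, 3), dtype=np.ubyte)
##     lut[:] = np.arange(256)[:, np.newaxis]       ## gray ramp
##     img = np.array([[0, 128], [200, 255]], dtype=np.ubyte)
##     applyLookupTable(img, lut).shape             ## -> (2, 2, 3)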
def makeRGBA(*args, **kwds):
"""Equivalent to makeARGB(..., useRGBA=True)"""
kwds['useRGBA'] = True
return makeARGB(*args, **kwds)
def makeARGB(data, lut=None, levels=None, scale=None, useRGBA=False):
"""
Convert an array of values into an ARGB array suitable for building QImages, OpenGL textures, etc.
Returns the ARGB array (values 0-255) and a boolean indicating whether there is alpha channel data.
This is a two stage process:
1) Rescale the data based on the values in the *levels* argument (min, max).
2) Determine the final output by passing the rescaled values through a lookup table.
Both stages are optional.
============== ==================================================================================
**Arguments:**
    data           numpy array of int/float types.
levels List [min, max]; optionally rescale data before converting through the
lookup table. The data is rescaled such that min->0 and max->*scale*::
rescaled = (clip(data, min, max) - min) * (*scale* / (max - min))
It is also possible to use a 2D (N,2) array of values for levels. In this case,
it is assumed that each pair of min,max values in the levels array should be
applied to a different subset of the input data (for example, the input data may
already have RGB values and the levels are used to independently scale each
channel). The use of this feature requires that levels.shape[0] == data.shape[-1].
scale The maximum value to which data will be rescaled before being passed through the
lookup table (or returned if there is no lookup table). By default this will
                   be set to the length of the lookup table, or 255 if no lookup table is provided.
                   For OpenGL color specifications (as in glColor4f) use scale=1.0
lut Optional lookup table (array with dtype=ubyte).
Values in data will be converted to color by indexing directly from lut.
The output data shape will be input.shape + lut.shape[1:].
Note: the output of makeARGB will have the same dtype as the lookup table, so
for conversion to QImage, the dtype must be ubyte.
Lookup tables can be built using GradientWidget.
useRGBA If True, the data is returned in RGBA order (useful for building OpenGL textures).
The default is False, which returns in ARGB order for use with QImage
(Note that 'ARGB' is a term used by the Qt documentation; the _actual_ order
is BGRA).
============== ==================================================================================
"""
profile = debug.Profiler()
if lut is not None and not isinstance(lut, np.ndarray):
lut = np.array(lut)
if levels is not None and not isinstance(levels, np.ndarray):
levels = np.array(levels)
if levels is not None:
if levels.ndim == 1:
if len(levels) != 2:
raise Exception('levels argument must have length 2')
elif levels.ndim == 2:
if lut is not None and lut.ndim > 1:
                raise Exception('Cannot make ARGB data when both levels and lut have ndim > 1')
if levels.shape != (data.shape[-1], 2):
raise Exception('levels must have shape (data.shape[-1], 2)')
else:
print(levels)
raise Exception("levels argument must be 1D or 2D.")
profile()
if scale is None:
if lut is not None:
scale = lut.shape[0]
else:
scale = 255.
## Apply levels if given
if levels is not None:
if isinstance(levels, np.ndarray) and levels.ndim == 2:
## we are going to rescale each channel independently
if levels.shape[0] != data.shape[-1]:
raise Exception("When rescaling multi-channel data, there must be the same number of levels as channels (data.shape[-1] == levels.shape[0])")
newData = np.empty(data.shape, dtype=int)
for i in range(data.shape[-1]):
minVal, maxVal = levels[i]
if minVal == maxVal:
maxVal += 1e-16
newData[...,i] = rescaleData(data[...,i], scale/(maxVal-minVal), minVal, dtype=int)
data = newData
else:
minVal, maxVal = levels
if minVal == maxVal:
maxVal += 1e-16
            if maxVal == minVal:  ## the 1e-16 shift can be lost to float precision; avoid zero division
data = rescaleData(data, 1, minVal, dtype=int)
else:
data = rescaleData(data, scale/(maxVal-minVal), minVal, dtype=int)
profile()
## apply LUT if given
if lut is not None:
data = applyLookupTable(data, lut)
else:
        if data.dtype != np.ubyte:
data = np.clip(data, 0, 255).astype(np.ubyte)
profile()
## copy data into ARGB ordered array
imgData = np.empty(data.shape[:2]+(4,), dtype=np.ubyte)
profile()
if useRGBA:
order = [0,1,2,3] ## array comes out RGBA
else:
order = [2,1,0,3] ## for some reason, the colors line up as BGR in the final image.
if data.ndim == 2:
# This is tempting:
# imgData[..., :3] = data[..., np.newaxis]
# ..but it turns out this is faster:
for i in range(3):
imgData[..., i] = data
elif data.shape[2] == 1:
for i in range(3):
imgData[..., i] = data[..., 0]
else:
for i in range(0, data.shape[2]):
imgData[..., i] = data[..., order[i]]
profile()
if data.ndim == 2 or data.shape[2] == 3:
alpha = False
imgData[..., 3] = 255
else:
alpha = True
profile()
return imgData, alpha
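## Illustrative usage sketch (hypothetical image data): rescale a float image
## and produce a ubyte array ready for makeQImage().
##     img = np.random.normal(loc=100., scale=10., size=(64, 48))
##     argb, hasAlpha = makeARGB(img, levels=[img.min(), img.max()])
##     ## argb.shape == (64, 48, 4); hasAlpha is False (alpha filled with 255)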
def makeQImage(imgData, alpha=None, copy=True, transpose=True):
"""
Turn an ARGB array into QImage.
By default, the data is copied; changes to the array will not
be reflected in the image. The image will be given a 'data' attribute
    pointing to the array that shares its data, to prevent Python from
    freeing that memory while the image is in use.
============== ===================================================================
**Arguments:**
imgData Array of data to convert. Must have shape (width, height, 3 or 4)
and dtype=ubyte. The order of values in the 3rd axis must be
(b, g, r, a).
alpha If True, the QImage returned will have format ARGB32. If False,
the format will be RGB32. By default, _alpha_ is True if
array.shape[2] == 4.
copy If True, the data is copied before converting to QImage.
If False, the new QImage points directly to the data in the array.
Note that the array must be contiguous for this to work
(see numpy.ascontiguousarray).
transpose If True (the default), the array x/y axes are transposed before
creating the image. Note that Qt expects the axes to be in
(height, width) order whereas pyqtgraph usually prefers the
opposite.
============== ===================================================================
"""
## create QImage from buffer
profile = debug.Profiler()
## If we didn't explicitly specify alpha, check the array shape.
if alpha is None:
alpha = (imgData.shape[2] == 4)
copied = False
if imgData.shape[2] == 3: ## need to make alpha channel (even if alpha==False; QImage requires 32 bpp)
if copy is True:
d2 = np.empty(imgData.shape[:2] + (4,), dtype=imgData.dtype)
d2[:,:,:3] = imgData
d2[:,:,3] = 255
imgData = d2
copied = True
else:
raise Exception('Array has only 3 channels; cannot make QImage without copying.')
if alpha:
imgFormat = QtGui.QImage.Format_ARGB32
else:
imgFormat = QtGui.QImage.Format_RGB32
if transpose:
imgData = imgData.transpose((1, 0, 2)) ## QImage expects the row/column order to be opposite
profile()
if not imgData.flags['C_CONTIGUOUS']:
if copy is False:
extra = ' (try setting transpose=False)' if transpose else ''
raise Exception('Array is not contiguous; cannot make QImage without copying.'+extra)
imgData = np.ascontiguousarray(imgData)
copied = True
if copy is True and copied is False:
imgData = imgData.copy()
if USE_PYSIDE:
ch = ctypes.c_char.from_buffer(imgData, 0)
img = QtGui.QImage(ch, imgData.shape[1], imgData.shape[0], imgFormat)
else:
#addr = ctypes.addressof(ctypes.c_char.from_buffer(imgData, 0))
## PyQt API for QImage changed between 4.9.3 and 4.9.6 (I don't know exactly which version it was)
## So we first attempt the 4.9.6 API, then fall back to 4.9.3
#addr = ctypes.c_char.from_buffer(imgData, 0)
#try:
#img = QtGui.QImage(addr, imgData.shape[1], imgData.shape[0], imgFormat)
#except TypeError:
#addr = ctypes.addressof(addr)
#img = QtGui.QImage(addr, imgData.shape[1], imgData.shape[0], imgFormat)
try:
img = QtGui.QImage(imgData.ctypes.data, imgData.shape[1], imgData.shape[0], imgFormat)
except:
if copy:
# does not leak memory, is not mutable
img = QtGui.QImage(buffer(imgData), imgData.shape[1], imgData.shape[0], imgFormat)
else:
# mutable, but leaks memory
img = QtGui.QImage(memoryview(imgData), imgData.shape[1], imgData.shape[0], imgFormat)
img.data = imgData
return img
#try:
#buf = imgData.data
#except AttributeError: ## happens when image data is non-contiguous
#buf = imgData.data
#profiler()
#qimage = QtGui.QImage(buf, imgData.shape[1], imgData.shape[0], imgFormat)
#profiler()
#qimage.data = imgData
#return qimage
def imageToArray(img, copy=False, transpose=True):
"""
    Convert a QImage into a numpy array. The image must have format RGB32, ARGB32, or ARGB32_Premultiplied.
By default, the image is not copied; changes made to the array will appear in the QImage as well (beware: if
the QImage is collected before the array, there may be trouble).
The array will have shape (width, height, (b,g,r,a)).
"""
fmt = img.format()
ptr = img.bits()
if USE_PYSIDE:
arr = np.frombuffer(ptr, dtype=np.ubyte)
else:
ptr.setsize(img.byteCount())
arr = np.asarray(ptr)
if img.byteCount() != arr.size * arr.itemsize:
# Required for Python 2.6, PyQt 4.10
# If this works on all platforms, then there is no need to use np.asarray..
arr = np.frombuffer(ptr, np.ubyte, img.byteCount())
    if fmt == img.Format_RGB32:
        arr = arr.reshape(img.height(), img.width(), 4)  ## RGB32 still stores 4 bytes per pixel (0xffRRGGBB)
elif fmt == img.Format_ARGB32 or fmt == img.Format_ARGB32_Premultiplied:
arr = arr.reshape(img.height(), img.width(), 4)
if copy:
arr = arr.copy()
if transpose:
return arr.transpose((1,0,2))
else:
return arr
def colorToAlpha(data, color):
"""
Given an RGBA image in *data*, convert *color* to be transparent.
*data* must be an array (w, h, 3 or 4) of ubyte values and *color* must be
an array (3) of ubyte values.
This is particularly useful for use with images that have a black or white background.
Algorithm is taken from Gimp's color-to-alpha function in plug-ins/common/colortoalpha.c
Credit:
/*
* Color To Alpha plug-in v1.0 by Seth Burgess, sjburges@gimp.org 1999/05/14
* with algorithm by clahey
*/
"""
data = data.astype(float)
if data.shape[-1] == 3: ## add alpha channel if needed
d2 = np.empty(data.shape[:2]+(4,), dtype=data.dtype)
d2[...,:3] = data
d2[...,3] = 255
data = d2
color = color.astype(float)
alpha = np.zeros(data.shape[:2]+(3,), dtype=float)
output = data.copy()
for i in [0,1,2]:
d = data[...,i]
c = color[i]
mask = d > c
alpha[...,i][mask] = (d[mask] - c) / (255. - c)
imask = d < c
alpha[...,i][imask] = (c - d[imask]) / c
output[...,3] = alpha.max(axis=2) * 255.
mask = output[...,3] >= 1.0 ## avoid zero division while processing alpha channel
correction = 255. / output[...,3][mask] ## increase value to compensate for decreased alpha
for i in [0,1,2]:
output[...,i][mask] = ((output[...,i][mask]-color[i]) * correction) + color[i]
output[...,3][mask] *= data[...,3][mask] / 255. ## combine computed and previous alpha values
#raise Exception()
return np.clip(output, 0, 255).astype(np.ubyte)
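## Illustrative sketch (hypothetical image): make a white background transparent.
##     img = np.full((8, 8, 4), 255, dtype=np.ubyte)        ## solid white, opaque
##     white = np.array([255, 255, 255], dtype=np.ubyte)
##     colorToAlpha(img, white)[..., 3]                     ## alpha drops to 0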
def gaussianFilter(data, sigma):
"""
Drop-in replacement for scipy.ndimage.gaussian_filter.
(note: results are only approximately equal to the output of
gaussian_filter)
"""
if np.isscalar(sigma):
sigma = (sigma,) * data.ndim
baseline = data.mean()
filtered = data - baseline
for ax in range(data.ndim):
s = sigma[ax]
if s == 0:
continue
# generate 1D gaussian kernel
ksize = int(s * 6)
x = np.arange(-ksize, ksize)
kernel = np.exp(-x**2 / (2*s**2))
kshape = [1,] * data.ndim
kshape[ax] = len(kernel)
kernel = kernel.reshape(kshape)
# convolve as product of FFTs
shape = data.shape[ax] + ksize
scale = 1.0 / (abs(s) * (2*np.pi)**0.5)
filtered = scale * np.fft.irfft(np.fft.rfft(filtered, shape, axis=ax) *
np.fft.rfft(kernel, shape, axis=ax),
axis=ax)
# clip off extra data
sl = [slice(None)] * data.ndim
sl[ax] = slice(filtered.shape[ax]-data.shape[ax],None,None)
        filtered = filtered[tuple(sl)]  ## index with a tuple; indexing with a list is deprecated in numpy
return filtered + baseline
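## Illustrative sketch (hypothetical signal): smooth a noisy 1D trace.
##     sig = np.random.normal(size=500)
##     smooth = gaussianFilter(sig, 4.0)     ## approximate gaussian blur, sigma=4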
def downsample(data, n, axis=0, xvals='subsample'):
"""Downsample by averaging points together across axis.
If multiple axes are specified, runs once per axis.
If a metaArray is given, then the axis values can be either subsampled
or downsampled to match.
"""
ma = None
if (hasattr(data, 'implements') and data.implements('MetaArray')):
ma = data
data = data.view(np.ndarray)
if hasattr(axis, '__len__'):
if not hasattr(n, '__len__'):
n = [n]*len(axis)
for i in range(len(axis)):
data = downsample(data, n[i], axis[i])
return data
if n <= 1:
return data
nPts = int(data.shape[axis] / n)
s = list(data.shape)
s[axis] = nPts
s.insert(axis+1, n)
sl = [slice(None)] * data.ndim
sl[axis] = slice(0, nPts*n)
d1 = data[tuple(sl)]
#print d1.shape, s
d1.shape = tuple(s)
d2 = d1.mean(axis+1)
if ma is None:
return d2
else:
info = ma.infoCopy()
if 'values' in info[axis]:
if xvals == 'subsample':
info[axis]['values'] = info[axis]['values'][::n][:nPts]
elif xvals == 'downsample':
info[axis]['values'] = downsample(info[axis]['values'], n)
return MetaArray(d2, info=info)
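## Illustrative sketch: average consecutive pairs of samples along axis 0.
##     a = np.array([1., 3., 2., 6., 10., 30.])
##     downsample(a, 2)                      ## -> array([ 2.,  4., 20.])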
def arrayToQPath(x, y, connect='all'):
"""Convert an array of x,y coordinats to QPainterPath as efficiently as possible.
The *connect* argument may be 'all', indicating that each point should be
connected to the next; 'pairs', indicating that each pair of points
should be connected, or an array of int32 values (0 or 1) indicating
connections.
"""
## Create all vertices in path. The method used below creates a binary format so that all
## vertices can be read in at once. This binary format may change in future versions of Qt,
## so the original (slower) method is left here for emergencies:
#path.moveTo(x[0], y[0])
#if connect == 'all':
#for i in range(1, y.shape[0]):
#path.lineTo(x[i], y[i])
#elif connect == 'pairs':
#for i in range(1, y.shape[0]):
#if i%2 == 0:
#path.lineTo(x[i], y[i])
#else:
#path.moveTo(x[i], y[i])
#elif isinstance(connect, np.ndarray):
#for i in range(1, y.shape[0]):
#if connect[i] == 1:
#path.lineTo(x[i], y[i])
#else:
#path.moveTo(x[i], y[i])
#else:
#raise Exception('connect argument must be "all", "pairs", or array')
## Speed this up using >> operator
## Format is:
## numVerts(i4) 0(i4)
## x(f8) y(f8) 0(i4) <-- 0 means this vertex does not connect
## x(f8) y(f8) 1(i4) <-- 1 means this vertex connects to the previous vertex
## ...
## 0(i4)
##
## All values are big endian--pack using struct.pack('>d') or struct.pack('>i')
path = QtGui.QPainterPath()
#profiler = debug.Profiler()
n = x.shape[0]
# create empty array, pad with extra space on either end
arr = np.empty(n+2, dtype=[('x', '>f8'), ('y', '>f8'), ('c', '>i4')])
# write first two integers
#profiler('allocate empty')
byteview = arr.view(dtype=np.ubyte)
byteview[:12] = 0
byteview.data[12:20] = struct.pack('>ii', n, 0)
#profiler('pack header')
# Fill array with vertex values
arr[1:-1]['x'] = x
arr[1:-1]['y'] = y
# decide which points are connected by lines
    if isinstance(connect, str):
        if connect == 'all':
            arr[1:-1]['c'] = 1
        elif connect == 'pairs':
            pairs = np.empty((n//2, 2), dtype=np.int32)  ## integer division; n must be even
            if pairs.size != n:
                raise Exception("x,y array lengths must be multiple of 2 to use connect='pairs'")
            pairs[:,0] = 1
            pairs[:,1] = 0
            arr[1:-1]['c'] = pairs.flatten()
        elif connect == 'finite':
            arr[1:-1]['c'] = np.isfinite(x) & np.isfinite(y)
        else:
            raise Exception('connect argument must be "all", "pairs", "finite", or array')
    elif isinstance(connect, np.ndarray):
        arr[1:-1]['c'] = connect
    else:
        raise Exception('connect argument must be "all", "pairs", "finite", or array')
#profiler('fill array')
# write last 0
lastInd = 20*(n+1)
byteview.data[lastInd:lastInd+4] = struct.pack('>i', 0)
#profiler('footer')
# create datastream object and stream into path
## Avoiding this method because QByteArray(str) leaks memory in PySide
#buf = QtCore.QByteArray(arr.data[12:lastInd+4]) # I think one unnecessary copy happens here
path.strn = byteview.data[12:lastInd+4] # make sure data doesn't run away
try:
buf = QtCore.QByteArray.fromRawData(path.strn)
except TypeError:
buf = QtCore.QByteArray(bytes(path.strn))
#profiler('create buffer')
ds = QtCore.QDataStream(buf)
ds >> path
#profiler('load')
return path
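## Illustrative usage sketch (Qt must be importable; values are hypothetical):
##     x = np.linspace(0., 1., 100)
##     y = np.sin(10. * x)
##     path = arrayToQPath(x, y, connect='all')   ## QPainterPath tracing the curve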
#def isosurface(data, level):
#"""
#Generate isosurface from volumetric data using marching tetrahedra algorithm.
#See Paul Bourke, "Polygonising a Scalar Field Using Tetrahedrons" (http://local.wasp.uwa.edu.au/~pbourke/geometry/polygonise/)
#*data* 3D numpy array of scalar values
#*level* The level at which to generate an isosurface
#"""
#facets = []
### mark everything below the isosurface level
#mask = data < level
#### make eight sub-fields
#fields = np.empty((2,2,2), dtype=object)
#slices = [slice(0,-1), slice(1,None)]
#for i in [0,1]:
#for j in [0,1]:
#for k in [0,1]:
#fields[i,j,k] = mask[slices[i], slices[j], slices[k]]
### split each cell into 6 tetrahedra
    ### these all have the same 'orientation'; points 1,2,3 circle
### clockwise around point 0
#tetrahedra = [
#[(0,1,0), (1,1,1), (0,1,1), (1,0,1)],
#[(0,1,0), (0,1,1), (0,0,1), (1,0,1)],
#[(0,1,0), (0,0,1), (0,0,0), (1,0,1)],
#[(0,1,0), (0,0,0), (1,0,0), (1,0,1)],
#[(0,1,0), (1,0,0), (1,1,0), (1,0,1)],
#[(0,1,0), (1,1,0), (1,1,1), (1,0,1)]
#]
### each tetrahedron will be assigned an index
### which determines how to generate its facets.
### this structure is:
### facets[index][facet1, facet2, ...]
### where each facet is triangular and its points are each
### interpolated between two points on the tetrahedron
### facet = [(p1a, p1b), (p2a, p2b), (p3a, p3b)]
### facet points always circle clockwise if you are looking
### at them from below the isosurface.
#indexFacets = [
#[], ## all above
#[[(0,1), (0,2), (0,3)]], # 0 below
#[[(1,0), (1,3), (1,2)]], # 1 below
#[[(0,2), (1,3), (1,2)], [(0,2), (0,3), (1,3)]], # 0,1 below
#[[(2,0), (2,1), (2,3)]], # 2 below
#[[(0,3), (1,2), (2,3)], [(0,3), (0,1), (1,2)]], # 0,2 below
#[[(1,0), (2,3), (2,0)], [(1,0), (1,3), (2,3)]], # 1,2 below
#[[(3,0), (3,1), (3,2)]], # 3 above
#[[(3,0), (3,2), (3,1)]], # 3 below
#[[(1,0), (2,0), (2,3)], [(1,0), (2,3), (1,3)]], # 0,3 below
#[[(0,3), (2,3), (1,2)], [(0,3), (1,2), (0,1)]], # 1,3 below
#[[(2,0), (2,3), (2,1)]], # 0,1,3 below
#[[(0,2), (1,2), (1,3)], [(0,2), (1,3), (0,3)]], # 2,3 below
#[[(1,0), (1,2), (1,3)]], # 0,2,3 below
#[[(0,1), (0,3), (0,2)]], # 1,2,3 below
#[] ## all below
#]
#for tet in tetrahedra:
### get the 4 fields for this tetrahedron
#tetFields = [fields[c] for c in tet]
### generate an index for each grid cell
#index = tetFields[0] + tetFields[1]*2 + tetFields[2]*4 + tetFields[3]*8
### add facets
#for i in xrange(index.shape[0]): # data x-axis
#for j in xrange(index.shape[1]): # data y-axis
#for k in xrange(index.shape[2]): # data z-axis
#for f in indexFacets[index[i,j,k]]: # faces to generate for this tet
#pts = []
#for l in [0,1,2]: # points in this face
#p1 = tet[f[l][0]] # tet corner 1
#p2 = tet[f[l][1]] # tet corner 2
#pts.append([(p1[x]+p2[x])*0.5+[i,j,k][x]+0.5 for x in [0,1,2]]) ## interpolate between tet corners
#facets.append(pts)
#return facets
def isocurve(data, level, connected=False, extendToEdge=False, path=False):
"""
Generate isocurve from 2D data using marching squares algorithm.
============== =========================================================
**Arguments:**
data 2D numpy array of scalar values
    level          The level at which to generate an isocurve
connected If False, return a single long list of point pairs
If True, return multiple long lists of connected point
locations. (This is slower but better for drawing
continuous lines)
extendToEdge If True, extend the curves to reach the exact edges of
the data.
path if True, return a QPainterPath rather than a list of
vertex coordinates. This forces connected=True.
============== =========================================================
This function is SLOW; plenty of room for optimization here.
"""
if path is True:
connected = True
if extendToEdge:
d2 = np.empty((data.shape[0]+2, data.shape[1]+2), dtype=data.dtype)
d2[1:-1, 1:-1] = data
d2[0, 1:-1] = data[0]
d2[-1, 1:-1] = data[-1]
d2[1:-1, 0] = data[:, 0]
d2[1:-1, -1] = data[:, -1]
d2[0,0] = d2[0,1]
d2[0,-1] = d2[1,-1]
d2[-1,0] = d2[-1,1]
d2[-1,-1] = d2[-1,-2]
data = d2
sideTable = [
[],
[0,1],
[1,2],
[0,2],
[0,3],
[1,3],
[0,1,2,3],
[2,3],
[2,3],
[0,1,2,3],
[1,3],
[0,3],
[0,2],
[1,2],
[0,1],
[]
]
edgeKey=[
[(0,1), (0,0)],
[(0,0), (1,0)],
[(1,0), (1,1)],
[(1,1), (0,1)]
]
lines = []
## mark everything below the isosurface level
mask = data < level
### make four sub-fields and compute indexes for grid cells
index = np.zeros([x-1 for x in data.shape], dtype=np.ubyte)
fields = np.empty((2,2), dtype=object)
slices = [slice(0,-1), slice(1,None)]
for i in [0,1]:
for j in [0,1]:
fields[i,j] = mask[slices[i], slices[j]]
            #vertIndex = i - 2*j*i + 3*j + 4*k ## this is just to match Bourke's vertex numbering scheme
vertIndex = i+2*j
#print i,j,k," : ", fields[i,j,k], 2**vertIndex
index += fields[i,j] * 2**vertIndex
#print index
#print index
## add lines
for i in range(index.shape[0]): # data x-axis
for j in range(index.shape[1]): # data y-axis
sides = sideTable[index[i,j]]
for l in range(0, len(sides), 2): ## faces for this grid cell
edges = sides[l:l+2]
pts = []
for m in [0,1]: # points in this face
p1 = edgeKey[edges[m]][0] # p1, p2 are points at either side of an edge
p2 = edgeKey[edges[m]][1]
v1 = data[i+p1[0], j+p1[1]] # v1 and v2 are the values at p1 and p2
v2 = data[i+p2[0], j+p2[1]]
f = (level-v1) / (v2-v1)
fi = 1.0 - f
p = ( ## interpolate between corners
p1[0]*fi + p2[0]*f + i + 0.5,
p1[1]*fi + p2[1]*f + j + 0.5
)
if extendToEdge:
## check bounds
p = (
min(data.shape[0]-2, max(0, p[0]-1)),
min(data.shape[1]-2, max(0, p[1]-1)),
)
if connected:
gridKey = i + (1 if edges[m]==2 else 0), j + (1 if edges[m]==3 else 0), edges[m]%2
pts.append((p, gridKey)) ## give the actual position and a key identifying the grid location (for connecting segments)
else:
pts.append(p)
lines.append(pts)
if not connected:
return lines
## turn disjoint list of segments into continuous lines
#lines = [[2,5], [5,4], [3,4], [1,3], [6,7], [7,8], [8,6], [11,12], [12,15], [11,13], [13,14]]
#lines = [[(float(a), a), (float(b), b)] for a,b in lines]
points = {} ## maps each point to its connections
for a,b in lines:
if a[1] not in points:
points[a[1]] = []
points[a[1]].append([a,b])
if b[1] not in points:
points[b[1]] = []
points[b[1]].append([b,a])
## rearrange into chains
for k in list(points.keys()):
try:
chains = points[k]
except KeyError: ## already used this point elsewhere
continue
#print "===========", k
for chain in chains:
#print " chain:", chain
x = None
while True:
if x == chain[-1][1]:
break ## nothing left to do on this chain
x = chain[-1][1]
if x == k:
break ## chain has looped; we're done and can ignore the opposite chain
y = chain[-2][1]
connects = points[x]
for conn in connects[:]:
if conn[1][1] != y:
#print " ext:", conn
chain.extend(conn[1:])
#print " del:", x
del points[x]
if chain[0][1] == chain[-1][1]: # looped chain; no need to continue the other direction
chains.pop()
break
## extract point locations
lines = []
for chain in points.values():
if len(chain) == 2:
chain = chain[1][1:][::-1] + chain[0] # join together ends of chain
else:
chain = chain[0]
lines.append([p[0] for p in chain])
if not path:
return lines ## a list of pairs of points
path = QtGui.QPainterPath()
for line in lines:
path.moveTo(*line[0])
for p in line[1:]:
path.lineTo(*p)
return path
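## Illustrative sketch (hypothetical field): trace the 0.5 contour.
##     field = np.random.uniform(size=(30, 30))
##     segments = isocurve(field, 0.5)                 ## list of point pairs
##     curves = isocurve(field, 0.5, connected=True)   ## joined polylines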
def traceImage(image, values, smooth=0.5):
"""
Convert an image to a set of QPainterPath curves.
One curve will be generated for each item in *values*; each curve outlines the area
of the image that is closer to its value than to any others.
If image is RGB or RGBA, then the shape of values should be (nvals, 3/4)
The parameter *smooth* is expressed in pixels.
"""
try:
import scipy.ndimage as ndi
except ImportError:
raise Exception("traceImage() requires the package scipy.ndimage, but it is not importable.")
if values.ndim == 2:
values = values.T
values = values[np.newaxis, np.newaxis, ...].astype(float)
image = image[..., np.newaxis].astype(float)
diff = np.abs(image-values)
if values.ndim == 4:
diff = diff.sum(axis=2)
labels = np.argmin(diff, axis=2)
paths = []
for i in range(diff.shape[-1]):
d = (labels==i).astype(float)
d = gaussianFilter(d, (smooth, smooth))
lines = isocurve(d, 0.5, connected=True, extendToEdge=True)
path = QtGui.QPainterPath()
for line in lines:
path.moveTo(*line[0])
for p in line[1:]:
path.lineTo(*p)
paths.append(path)
return paths
IsosurfaceDataCache = None
def isosurface(data, level):
"""
Generate isosurface from volumetric data using marching cubes algorithm.
See Paul Bourke, "Polygonising a Scalar Field"
(http://paulbourke.net/geometry/polygonise/)
*data* 3D numpy array of scalar values
*level* The level at which to generate an isosurface
Returns an array of vertex coordinates (Nv, 3) and an array of
per-face vertex indexes (Nf, 3)
"""
## For improvement, see:
##
## Efficient implementation of Marching Cubes' cases with topological guarantees.
## Thomas Lewiner, Helio Lopes, Antonio Wilson Vieira and Geovan Tavares.
## Journal of Graphics Tools 8(2): pp. 1-15 (december 2003)
## Precompute lookup tables on the first run
global IsosurfaceDataCache
if IsosurfaceDataCache is None:
## map from grid cell index to edge index.
## grid cell index tells us which corners are below the isosurface,
## edge index tells us which edges are cut by the isosurface.
        ## (Data stolen from Bourke; see above.)
edgeTable = np.array([
0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0
], dtype=np.uint16)
## Table of triangles to use for filling each grid cell.
## Each set of three integers tells us which three edges to
## draw a triangle between.
        ## (Data stolen from Bourke; see above.)
triTable = [
[],
[0, 8, 3],
[0, 1, 9],
[1, 8, 3, 9, 8, 1],
[1, 2, 10],
[0, 8, 3, 1, 2, 10],
[9, 2, 10, 0, 2, 9],
[2, 8, 3, 2, 10, 8, 10, 9, 8],
[3, 11, 2],
[0, 11, 2, 8, 11, 0],
[1, 9, 0, 2, 3, 11],
[1, 11, 2, 1, 9, 11, 9, 8, 11],
[3, 10, 1, 11, 10, 3],
[0, 10, 1, 0, 8, 10, 8, 11, 10],
[3, 9, 0, 3, 11, 9, 11, 10, 9],
[9, 8, 10, 10, 8, 11],
[4, 7, 8],
[4, 3, 0, 7, 3, 4],
[0, 1, 9, 8, 4, 7],
[4, 1, 9, 4, 7, 1, 7, 3, 1],
[1, 2, 10, 8, 4, 7],
[3, 4, 7, 3, 0, 4, 1, 2, 10],
[9, 2, 10, 9, 0, 2, 8, 4, 7],
[2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4],
[8, 4, 7, 3, 11, 2],
[11, 4, 7, 11, 2, 4, 2, 0, 4],
[9, 0, 1, 8, 4, 7, 2, 3, 11],
[4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1],
[3, 10, 1, 3, 11, 10, 7, 8, 4],
[1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4],
[4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3],
[4, 7, 11, 4, 11, 9, 9, 11, 10],
[9, 5, 4],
[9, 5, 4, 0, 8, 3],
[0, 5, 4, 1, 5, 0],
[8, 5, 4, 8, 3, 5, 3, 1, 5],
[1, 2, 10, 9, 5, 4],
[3, 0, 8, 1, 2, 10, 4, 9, 5],
[5, 2, 10, 5, 4, 2, 4, 0, 2],
[2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8],
[9, 5, 4, 2, 3, 11],
[0, 11, 2, 0, 8, 11, 4, 9, 5],
[0, 5, 4, 0, 1, 5, 2, 3, 11],
[2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5],
[10, 3, 11, 10, 1, 3, 9, 5, 4],
[4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10],
[5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3],
[5, 4, 8, 5, 8, 10, 10, 8, 11],
[9, 7, 8, 5, 7, 9],
[9, 3, 0, 9, 5, 3, 5, 7, 3],
[0, 7, 8, 0, 1, 7, 1, 5, 7],
[1, 5, 3, 3, 5, 7],
[9, 7, 8, 9, 5, 7, 10, 1, 2],
[10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3],
[8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2],
[2, 10, 5, 2, 5, 3, 3, 5, 7],
[7, 9, 5, 7, 8, 9, 3, 11, 2],
[9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11],
[2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7],
[11, 2, 1, 11, 1, 7, 7, 1, 5],
[9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11],
[5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0],
[11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0],
[11, 10, 5, 7, 11, 5],
[10, 6, 5],
[0, 8, 3, 5, 10, 6],
[9, 0, 1, 5, 10, 6],
[1, 8, 3, 1, 9, 8, 5, 10, 6],
[1, 6, 5, 2, 6, 1],
[1, 6, 5, 1, 2, 6, 3, 0, 8],
[9, 6, 5, 9, 0, 6, 0, 2, 6],
[5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8],
[2, 3, 11, 10, 6, 5],
[11, 0, 8, 11, 2, 0, 10, 6, 5],
[0, 1, 9, 2, 3, 11, 5, 10, 6],
[5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11],
[6, 3, 11, 6, 5, 3, 5, 1, 3],
[0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6],
[3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9],
[6, 5, 9, 6, 9, 11, 11, 9, 8],
[5, 10, 6, 4, 7, 8],
[4, 3, 0, 4, 7, 3, 6, 5, 10],
[1, 9, 0, 5, 10, 6, 8, 4, 7],
[10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4],
[6, 1, 2, 6, 5, 1, 4, 7, 8],
[1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7],
[8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6],
[7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9],
[3, 11, 2, 7, 8, 4, 10, 6, 5],
[5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11],
[0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6],
[9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6],
[8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6],
[5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11],
[0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7],
[6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9],
[10, 4, 9, 6, 4, 10],
[4, 10, 6, 4, 9, 10, 0, 8, 3],
[10, 0, 1, 10, 6, 0, 6, 4, 0],
[8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10],
[1, 4, 9, 1, 2, 4, 2, 6, 4],
[3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4],
[0, 2, 4, 4, 2, 6],
[8, 3, 2, 8, 2, 4, 4, 2, 6],
[10, 4, 9, 10, 6, 4, 11, 2, 3],
[0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6],
[3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10],
[6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1],
[9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3],
[8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1],
[3, 11, 6, 3, 6, 0, 0, 6, 4],
[6, 4, 8, 11, 6, 8],
[7, 10, 6, 7, 8, 10, 8, 9, 10],
[0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10],
[10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0],
[10, 6, 7, 10, 7, 1, 1, 7, 3],
[1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7],
[2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9],
[7, 8, 0, 7, 0, 6, 6, 0, 2],
[7, 3, 2, 6, 7, 2],
[2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7],
[2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7],
[1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11],
[11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1],
[8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6],
[0, 9, 1, 11, 6, 7],
[7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0],
[7, 11, 6],
[7, 6, 11],
[3, 0, 8, 11, 7, 6],
[0, 1, 9, 11, 7, 6],
[8, 1, 9, 8, 3, 1, 11, 7, 6],
[10, 1, 2, 6, 11, 7],
[1, 2, 10, 3, 0, 8, 6, 11, 7],
[2, 9, 0, 2, 10, 9, 6, 11, 7],
[6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8],
[7, 2, 3, 6, 2, 7],
[7, 0, 8, 7, 6, 0, 6, 2, 0],
[2, 7, 6, 2, 3, 7, 0, 1, 9],
[1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6],
[10, 7, 6, 10, 1, 7, 1, 3, 7],
[10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8],
[0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7],
[7, 6, 10, 7, 10, 8, 8, 10, 9],
[6, 8, 4, 11, 8, 6],
[3, 6, 11, 3, 0, 6, 0, 4, 6],
[8, 6, 11, 8, 4, 6, 9, 0, 1],
[9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6],
[6, 8, 4, 6, 11, 8, 2, 10, 1],
[1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6],
[4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9],
[10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3],
[8, 2, 3, 8, 4, 2, 4, 6, 2],
[0, 4, 2, 4, 6, 2],
[1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8],
[1, 9, 4, 1, 4, 2, 2, 4, 6],
[8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1],
[10, 1, 0, 10, 0, 6, 6, 0, 4],
[4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3],
[10, 9, 4, 6, 10, 4],
[4, 9, 5, 7, 6, 11],
[0, 8, 3, 4, 9, 5, 11, 7, 6],
[5, 0, 1, 5, 4, 0, 7, 6, 11],
[11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5],
[9, 5, 4, 10, 1, 2, 7, 6, 11],
[6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5],
[7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2],
[3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6],
[7, 2, 3, 7, 6, 2, 5, 4, 9],
[9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7],
[3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0],
[6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8],
[9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7],
[1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4],
[4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10],
[7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10],
[6, 9, 5, 6, 11, 9, 11, 8, 9],
[3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5],
[0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11],
[6, 11, 3, 6, 3, 5, 5, 3, 1],
[1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6],
[0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10],
[11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5],
[6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3],
[5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2],
[9, 5, 6, 9, 6, 0, 0, 6, 2],
[1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8],
[1, 5, 6, 2, 1, 6],
[1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6],
[10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0],
[0, 3, 8, 5, 6, 10],
[10, 5, 6],
[11, 5, 10, 7, 5, 11],
[11, 5, 10, 11, 7, 5, 8, 3, 0],
[5, 11, 7, 5, 10, 11, 1, 9, 0],
[10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1],
[11, 1, 2, 11, 7, 1, 7, 5, 1],
[0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11],
[9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7],
[7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2],
[2, 5, 10, 2, 3, 5, 3, 7, 5],
[8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5],
[9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2],
[9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2],
[1, 3, 5, 3, 7, 5],
[0, 8, 7, 0, 7, 1, 1, 7, 5],
[9, 0, 3, 9, 3, 5, 5, 3, 7],
[9, 8, 7, 5, 9, 7],
[5, 8, 4, 5, 10, 8, 10, 11, 8],
[5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0],
[0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5],
[10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4],
[2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8],
[0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11],
[0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5],
[9, 4, 5, 2, 11, 3],
[2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4],
[5, 10, 2, 5, 2, 4, 4, 2, 0],
[3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9],
[5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2],
[8, 4, 5, 8, 5, 3, 3, 5, 1],
[0, 4, 5, 1, 0, 5],
[8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5],
[9, 4, 5],
[4, 11, 7, 4, 9, 11, 9, 10, 11],
[0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11],
[1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11],
[3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4],
[4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2],
[9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3],
[11, 7, 4, 11, 4, 2, 2, 4, 0],
[11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4],
[2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9],
[9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7],
[3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10],
[1, 10, 2, 8, 7, 4],
[4, 9, 1, 4, 1, 7, 7, 1, 3],
[4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1],
[4, 0, 3, 7, 4, 3],
[4, 8, 7],
[9, 10, 8, 10, 11, 8],
[3, 0, 9, 3, 9, 11, 11, 9, 10],
[0, 1, 10, 0, 10, 8, 8, 10, 11],
[3, 1, 10, 11, 3, 10],
[1, 2, 11, 1, 11, 9, 9, 11, 8],
[3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9],
[0, 2, 11, 8, 0, 11],
[3, 2, 11],
[2, 3, 8, 2, 8, 10, 10, 8, 9],
[9, 10, 2, 0, 9, 2],
[2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8],
[1, 10, 2],
[1, 3, 8, 9, 1, 8],
[0, 9, 1],
[0, 3, 8],
[]
]
edgeShifts = np.array([ ## maps edge ID (0-11) to (x,y,z) cell offset and edge ID (0-2)
[0, 0, 0, 0],
[1, 0, 0, 1],
[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[1, 0, 1, 1],
[0, 1, 1, 0],
[0, 0, 1, 1],
[0, 0, 0, 2],
[1, 0, 0, 2],
[1, 1, 0, 2],
[0, 1, 0, 2],
#[9, 9, 9, 9] ## fake
], dtype=np.uint16) # don't use ubyte here! This value gets added to cell index later; will need the extra precision.
        nTableFaces = np.array([len(f)//3 for f in triTable], dtype=np.ubyte)
faceShiftTables = [None]
for i in range(1,6):
## compute lookup table of index: vertexes mapping
faceTableI = np.zeros((len(triTable), i*3), dtype=np.ubyte)
faceTableInds = np.argwhere(nTableFaces == i)
            faceTableI[faceTableInds[:,0]] = np.array([triTable[j[0]] for j in faceTableInds])  ## index with a scalar; size-1 array indexing of lists is deprecated
faceTableI = faceTableI.reshape((len(triTable), i, 3))
faceShiftTables.append(edgeShifts[faceTableI])
## Let's try something different:
#faceTable = np.empty((256, 5, 3, 4), dtype=np.ubyte) # (grid cell index, faces, vertexes, edge lookup)
#for i,f in enumerate(triTable):
#f = np.array(f + [12] * (15-len(f))).reshape(5,3)
#faceTable[i] = edgeShifts[f]
IsosurfaceDataCache = (faceShiftTables, edgeShifts, edgeTable, nTableFaces)
else:
faceShiftTables, edgeShifts, edgeTable, nTableFaces = IsosurfaceDataCache
## mark everything below the isosurface level
mask = data < level
### make eight sub-fields and compute indexes for grid cells
index = np.zeros([x-1 for x in data.shape], dtype=np.ubyte)
fields = np.empty((2,2,2), dtype=object)
slices = [slice(0,-1), slice(1,None)]
for i in [0,1]:
for j in [0,1]:
for k in [0,1]:
fields[i,j,k] = mask[slices[i], slices[j], slices[k]]
                vertIndex = i - 2*j*i + 3*j + 4*k ## this is just to match Bourke's vertex numbering scheme
index += fields[i,j,k] * 2**vertIndex
### Generate table of edges that have been cut
cutEdges = np.zeros([x+1 for x in index.shape]+[3], dtype=np.uint32)
edges = edgeTable[index]
for i, shift in enumerate(edgeShifts[:12]):
slices = [slice(shift[j],cutEdges.shape[j]+(shift[j]-1)) for j in range(3)]
cutEdges[slices[0], slices[1], slices[2], shift[3]] += edges & 2**i
## for each cut edge, interpolate to see where exactly the edge is cut and generate vertex positions
m = cutEdges > 0
vertexInds = np.argwhere(m) ## argwhere is slow!
vertexes = vertexInds[:,:3].astype(np.float32)
dataFlat = data.reshape(data.shape[0]*data.shape[1]*data.shape[2])
## re-use the cutEdges array as a lookup table for vertex IDs
cutEdges[vertexInds[:,0], vertexInds[:,1], vertexInds[:,2], vertexInds[:,3]] = np.arange(vertexInds.shape[0])
for i in [0,1,2]:
vim = vertexInds[:,3] == i
vi = vertexInds[vim, :3]
viFlat = (vi * (np.array(data.strides[:3]) // data.itemsize)[np.newaxis,:]).sum(axis=1)
v1 = dataFlat[viFlat]
v2 = dataFlat[viFlat + data.strides[i]//data.itemsize]
vertexes[vim,i] += (level-v1) / (v2-v1)
### compute the set of vertex indexes for each face.
## This works, but runs a bit slower.
#cells = np.argwhere((index != 0) & (index != 255)) ## all cells with at least one face
#cellInds = index[cells[:,0], cells[:,1], cells[:,2]]
#verts = faceTable[cellInds]
#mask = verts[...,0,0] != 9
#verts[...,:3] += cells[:,np.newaxis,np.newaxis,:] ## we now have indexes into cutEdges
#verts = verts[mask]
#faces = cutEdges[verts[...,0], verts[...,1], verts[...,2], verts[...,3]] ## and these are the vertex indexes we want.
## To allow this to be vectorized efficiently, we count the number of faces in each
## grid cell and handle each group of cells with the same number together.
## determine how many faces to assign to each grid cell
nFaces = nTableFaces[index]
totFaces = nFaces.sum()
faces = np.empty((totFaces, 3), dtype=np.uint32)
ptr = 0
#import debug
#p = debug.Profiler()
## this helps speed up an indexing operation later on
cs = np.array(cutEdges.strides)//cutEdges.itemsize
cutEdges = cutEdges.flatten()
## this, strangely, does not seem to help.
#ins = np.array(index.strides)/index.itemsize
#index = index.flatten()
for i in range(1,6):
### expensive:
#profiler()
cells = np.argwhere(nFaces == i) ## all cells which require i faces (argwhere is expensive)
#profiler()
if cells.shape[0] == 0:
continue
cellInds = index[cells[:,0], cells[:,1], cells[:,2]] ## index values of cells to process for this round
#profiler()
### expensive:
verts = faceShiftTables[i][cellInds]
#profiler()
verts[...,:3] += cells[:,np.newaxis,np.newaxis,:] ## we now have indexes into cutEdges
verts = verts.reshape((verts.shape[0]*i,)+verts.shape[2:])
#profiler()
### expensive:
verts = (verts * cs[np.newaxis, np.newaxis, :]).sum(axis=2)
vertInds = cutEdges[verts]
#profiler()
nv = vertInds.shape[0]
#profiler()
faces[ptr:ptr+nv] = vertInds #.reshape((nv, 3))
#profiler()
ptr += nv
return vertexes, faces
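## Illustrative sketch (hypothetical data): mesh the 0.5 level set of a radial
## field sampled on a 40**3 grid (vertices are returned in grid-index coords).
##     ax = np.linspace(-1., 1., 40)
##     X, Y, Z = np.meshgrid(ax, ax, ax, indexing='ij')
##     verts, faces = isosurface(X**2 + Y**2 + Z**2, 0.5)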
def invertQTransform(tr):
"""Return a QTransform that is the inverse of *tr*.
    Raises an exception if tr is not invertible.
Note that this function is preferred over QTransform.inverted() due to
bugs in that method. (specifically, Qt has floating-point precision issues
when determining whether a matrix is invertible)
"""
try:
import numpy.linalg
arr = np.array([[tr.m11(), tr.m12(), tr.m13()], [tr.m21(), tr.m22(), tr.m23()], [tr.m31(), tr.m32(), tr.m33()]])
inv = numpy.linalg.inv(arr)
        return QtGui.QTransform(inv[0,0], inv[0,1], inv[0,2], inv[1,0], inv[1,1], inv[1,2], inv[2,0], inv[2,1], inv[2,2])
except ImportError:
inv = tr.inverted()
if inv[1] is False:
raise Exception("Transform is not invertible.")
return inv[0]
def pseudoScatter(data, spacing=None, shuffle=True, bidir=False):
"""
Used for examining the distribution of values in a set. Produces scattering as in beeswarm or column scatter plots.
Given a list of x-values, construct a set of y-values such that an x,y scatter-plot
will not have overlapping points (it will look similar to a histogram).
"""
inds = np.arange(len(data))
if shuffle:
np.random.shuffle(inds)
data = data[inds]
if spacing is None:
spacing = 2.*np.std(data)/len(data)**0.5
s2 = spacing**2
yvals = np.empty(len(data))
if len(data) == 0:
return yvals
yvals[0] = 0
for i in range(1,len(data)):
x = data[i] # current x value to be placed
x0 = data[:i] # all x values already placed
y0 = yvals[:i] # all y values already placed
y = 0
dx = (x0-x)**2 # x-distance to each previous point
xmask = dx < s2 # exclude anything too far away
if xmask.sum() > 0:
if bidir:
dirs = [-1, 1]
else:
dirs = [1]
yopts = []
for direction in dirs:
y = 0
dx2 = dx[xmask]
dy = (s2 - dx2)**0.5
limits = np.empty((2,len(dy))) # ranges of y-values to exclude
limits[0] = y0[xmask] - dy
limits[1] = y0[xmask] + dy
while True:
# ignore anything below this y-value
if direction > 0:
mask = limits[1] >= y
else:
mask = limits[0] <= y
limits2 = limits[:,mask]
# are we inside an excluded region?
mask = (limits2[0] < y) & (limits2[1] > y)
if mask.sum() == 0:
break
if direction > 0:
y = limits2[:,mask].max()
else:
y = limits2[:,mask].min()
yopts.append(y)
if bidir:
y = yopts[0] if -yopts[0] < yopts[1] else yopts[1]
else:
y = yopts[0]
yvals[i] = y
return yvals[np.argsort(inds)] ## un-shuffle values before returning
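## Illustrative sketch (hypothetical sample): beeswarm-style y-offsets.
##     vals = np.random.normal(size=200)
##     y = pseudoScatter(vals)
##     ## plotting (vals, y) as points spreads overlapping values apart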
def toposort(deps, nodes=None, seen=None, stack=None, depth=0):
"""Topological sort. Arguments are:
deps dictionary describing dependencies where a:[b,c] means "a depends on b and c"
nodes optional, specifies list of starting nodes (these should be the nodes
which are not depended on by any other nodes). Other candidate starting
nodes will be ignored.
Example::
# Sort the following graph:
#
# B ──┬─────> C <── D
# │ │
# E <─┴─> A <─┘
#
deps = {'a': ['b', 'c'], 'c': ['b', 'd'], 'e': ['b']}
toposort(deps)
=> ['b', 'd', 'c', 'a', 'e']
"""
# fill in empty dep lists
deps = deps.copy()
    for k,v in list(deps.items()):
        for dep in v:
            if dep not in deps:
                deps[dep] = []
if nodes is None:
## run through deps to find nodes that are not depended upon
rem = set()
for dep in deps.values():
rem |= set(dep)
nodes = set(deps.keys()) - rem
if seen is None:
seen = set()
stack = []
sorted = []
for n in nodes:
if n in stack:
raise Exception("Cyclic dependency detected", stack + [n])
if n in seen:
continue
seen.add(n)
sorted.extend( toposort(deps, deps[n], seen, stack+[n], depth=depth+1))
sorted.append(n)
return sorted
|
jensengrouppsu/rapid
|
rapid/pyqtgraph/functions.py
|
Python
|
mit
| 86,211
|
[
"Gaussian"
] |
ced8f5604059816d07a9b775e528d6deeaab1df5a35095a6e4ab9a3e47dfb586
|
import vtk
def main():
colors = vtk.vtkNamedColors()
# create a rendering window and renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
# create a renderwindowinteractor
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
style = vtk.vtkInteractorStyleTrackballCamera()
iren.SetInteractorStyle(style)
# create source
src = vtk.vtkPointSource()
src.SetCenter(0, 0, 0)
src.SetNumberOfPoints(50)
src.SetRadius(5)
# mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(src.GetOutputPort())
# actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(colors.GetColor3d('Yellow'))
actor.GetProperty().SetPointSize(5)
# assign actor to the renderer
ren.AddActor(actor)
    ren.SetBackground(colors.GetColor3d('RoyalBlue'))
# enable user interface interactor
iren.Initialize()
renWin.Render()
iren.Start()
if __name__ == '__main__':
main()
|
lorensen/VTKExamples
|
src/Python/Visualization/InteractorStyleTrackballCamera.py
|
Python
|
apache-2.0
| 1,054
|
[
"VTK"
] |
76b9c9efa9ae73aefae9a46e35d1fa2fd573dca640de1b78b933e777e55a7172
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for gaussian dropout layer."""
import keras
from keras.testing_infra import test_combinations
from keras.testing_infra import test_utils
import numpy as np
import tensorflow.compat.v2 as tf
@test_combinations.run_all_keras_modes
class NoiseLayersTest(test_combinations.TestCase):
def test_GaussianDropout(self):
test_utils.layer_test(
keras.layers.GaussianDropout,
kwargs={'rate': 0.5},
input_shape=(3, 2, 3))
def _make_model(self, dtype):
assert dtype in (tf.float32, tf.float64)
model = keras.Sequential()
model.add(keras.layers.Dense(8, input_shape=(32,), dtype=dtype))
layer = keras.layers.GaussianDropout(0.1, dtype=dtype)
model.add(layer)
return model
def _train_model(self, dtype):
model = self._make_model(dtype)
model.compile(
optimizer='sgd',
loss='mse',
run_eagerly=test_utils.should_run_eagerly())
model.train_on_batch(np.zeros((8, 32)), np.zeros((8, 8)))
def test_gaussian_dropout_float32(self):
self._train_model(tf.float32)
def test_gaussian_dropout_float64(self):
self._train_model(tf.float64)
if __name__ == '__main__':
tf.test.main()
|
keras-team/keras
|
keras/layers/regularization/gaussian_dropout_test.py
|
Python
|
apache-2.0
| 1,867
|
[
"Gaussian"
] |
8c72ed020f48859b09b3fffb4da6dcccfe985763e77a39595d27c0570eb3eb39
|
# sql/compiler.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base SQL and DDL compiler implementations.
Classes provided include:
:class:`.compiler.SQLCompiler` - renders SQL
strings
:class:`.compiler.DDLCompiler` - renders DDL
(data definition language) strings
:class:`.compiler.GenericTypeCompiler` - renders
type specification strings.
To generate user-defined SQL strings, see
:doc:`/ext/compiler`.
"""
import contextlib
import re
from . import schema, sqltypes, operators, functions, visitors, \
elements, selectable, crud
from .. import util, exc
import itertools
RESERVED_WORDS = set([
'all', 'analyse', 'analyze', 'and', 'any', 'array',
'as', 'asc', 'asymmetric', 'authorization', 'between',
'binary', 'both', 'case', 'cast', 'check', 'collate',
'column', 'constraint', 'create', 'cross', 'current_date',
'current_role', 'current_time', 'current_timestamp',
'current_user', 'default', 'deferrable', 'desc',
'distinct', 'do', 'else', 'end', 'except', 'false',
'for', 'foreign', 'freeze', 'from', 'full', 'grant',
'group', 'having', 'ilike', 'in', 'initially', 'inner',
'intersect', 'into', 'is', 'isnull', 'join', 'leading',
'left', 'like', 'limit', 'localtime', 'localtimestamp',
'natural', 'new', 'not', 'notnull', 'null', 'off', 'offset',
'old', 'on', 'only', 'or', 'order', 'outer', 'overlaps',
'placing', 'primary', 'references', 'right', 'select',
'session_user', 'set', 'similar', 'some', 'symmetric', 'table',
'then', 'to', 'trailing', 'true', 'union', 'unique', 'user',
'using', 'verbose', 'when', 'where'])
LEGAL_CHARACTERS = re.compile(r'^[A-Z0-9_$]+$', re.I)
ILLEGAL_INITIAL_CHARACTERS = set([str(x) for x in range(0, 10)]).union(['$'])
BIND_PARAMS = re.compile(r'(?<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])', re.UNICODE)
BIND_PARAMS_ESC = re.compile(r'\x5c(:[\w\$]+)(?![:\w\$])', re.UNICODE)
BIND_TEMPLATES = {
'pyformat': "%%(%(name)s)s",
'qmark': "?",
'format': "%%s",
'numeric': ":[_POSITION]",
'named': ":%(name)s"
}
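def _bind_template_examples():
    """Illustrative sketch (not used by the compiler): render each
    paramstyle template for a parameter named 'user_id'.

    Produces 'pyformat' -> '%(user_id)s', 'qmark' -> '?', 'format' -> '%s',
    'named' -> ':user_id', and 'numeric' -> ':[_POSITION]' (the latter is
    rewritten to :1, :2, ... by SQLCompiler._apply_numbered_params).
    """
    return dict(
        (style, template % {'name': 'user_id'})
        for style, template in BIND_TEMPLATES.items()
    )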
OPERATORS = {
# binary
operators.and_: ' AND ',
operators.or_: ' OR ',
operators.add: ' + ',
operators.mul: ' * ',
operators.sub: ' - ',
operators.div: ' / ',
operators.mod: ' % ',
operators.truediv: ' / ',
operators.neg: '-',
operators.lt: ' < ',
operators.le: ' <= ',
operators.ne: ' != ',
operators.gt: ' > ',
operators.ge: ' >= ',
operators.eq: ' = ',
operators.concat_op: ' || ',
operators.match_op: ' MATCH ',
operators.notmatch_op: ' NOT MATCH ',
operators.in_op: ' IN ',
operators.notin_op: ' NOT IN ',
operators.comma_op: ', ',
operators.from_: ' FROM ',
operators.as_: ' AS ',
operators.is_: ' IS ',
operators.isnot: ' IS NOT ',
operators.collate: ' COLLATE ',
# unary
operators.exists: 'EXISTS ',
operators.distinct_op: 'DISTINCT ',
operators.inv: 'NOT ',
# modifiers
operators.desc_op: ' DESC',
operators.asc_op: ' ASC',
operators.nullsfirst_op: ' NULLS FIRST',
operators.nullslast_op: ' NULLS LAST',
}
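# Sketch: visit_binary() looks up its SQL fragment here, so column('a') == 5
# renders as "a = :a_1" and column('a').in_([1, 2]) as "a IN (:a_1, :a_2)".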
FUNCTIONS = {
functions.coalesce: 'coalesce%(expr)s',
functions.current_date: 'CURRENT_DATE',
functions.current_time: 'CURRENT_TIME',
functions.current_timestamp: 'CURRENT_TIMESTAMP',
functions.current_user: 'CURRENT_USER',
functions.localtime: 'LOCALTIME',
functions.localtimestamp: 'LOCALTIMESTAMP',
functions.random: 'random%(expr)s',
functions.sysdate: 'sysdate',
functions.session_user: 'SESSION_USER',
functions.user: 'USER'
}
EXTRACT_MAP = {
'month': 'month',
'day': 'day',
'year': 'year',
'second': 'second',
'hour': 'hour',
'doy': 'doy',
'minute': 'minute',
'quarter': 'quarter',
'dow': 'dow',
'week': 'week',
'epoch': 'epoch',
'milliseconds': 'milliseconds',
'microseconds': 'microseconds',
'timezone_hour': 'timezone_hour',
'timezone_minute': 'timezone_minute'
}
COMPOUND_KEYWORDS = {
selectable.CompoundSelect.UNION: 'UNION',
selectable.CompoundSelect.UNION_ALL: 'UNION ALL',
selectable.CompoundSelect.EXCEPT: 'EXCEPT',
selectable.CompoundSelect.EXCEPT_ALL: 'EXCEPT ALL',
selectable.CompoundSelect.INTERSECT: 'INTERSECT',
selectable.CompoundSelect.INTERSECT_ALL: 'INTERSECT ALL'
}
class Compiled(object):
"""Represent a compiled SQL or DDL expression.
The ``__str__`` method of the ``Compiled`` object should produce
the actual text of the statement. ``Compiled`` objects are
specific to their underlying database dialect, and also may
or may not be specific to the columns referenced within a
particular set of bind parameters. In no case should the
``Compiled`` object be dependent on the actual values of those
bind parameters, even though it may reference those values as
defaults.
"""
_cached_metadata = None
def __init__(self, dialect, statement, bind=None,
compile_kwargs=util.immutabledict()):
"""Construct a new ``Compiled`` object.
:param dialect: ``Dialect`` to compile against.
:param statement: ``ClauseElement`` to be compiled.
:param bind: Optional Engine or Connection to compile this
statement against.
:param compile_kwargs: additional kwargs that will be
passed to the initial call to :meth:`.Compiled.process`.
.. versionadded:: 0.8
"""
self.dialect = dialect
self.bind = bind
if statement is not None:
self.statement = statement
self.can_execute = statement.supports_execution
self.string = self.process(self.statement, **compile_kwargs)
@util.deprecated("0.7", ":class:`.Compiled` objects now compile "
"within the constructor.")
def compile(self):
"""Produce the internal string representation of this element.
"""
pass
def _execute_on_connection(self, connection, multiparams, params):
return connection._execute_compiled(self, multiparams, params)
@property
def sql_compiler(self):
"""Return a Compiled that is capable of processing SQL expressions.
If this compiler is one, it would likely just return 'self'.
"""
raise NotImplementedError()
def process(self, obj, **kwargs):
return obj._compiler_dispatch(self, **kwargs)
def __str__(self):
"""Return the string text of the generated SQL or DDL."""
return self.string or ''
def construct_params(self, params=None):
"""Return the bind params for this compiled object.
:param params: a dict of string/object pairs whose values will
override bind values compiled in to the
statement.
"""
raise NotImplementedError()
@property
def params(self):
"""Return the bind params for this compiled object."""
return self.construct_params()
def execute(self, *multiparams, **params):
"""Execute this compiled object."""
e = self.bind
if e is None:
raise exc.UnboundExecutionError(
"This Compiled object is not bound to any Engine "
"or Connection.")
return e._execute_compiled(self, multiparams, params)
def scalar(self, *multiparams, **params):
"""Execute this compiled object and return the result's
scalar value."""
return self.execute(*multiparams, **params).scalar()
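# Minimal usage sketch (illustrative; it goes through the public package
# rather than this module directly, to avoid a circular import):
#
#     from sqlalchemy.sql import column, select
#     stmt = select([column('x')]).where(column('x') == 5)
#     compiled = stmt.compile()   # a SQLCompiler, i.e. a Compiled subclass
#     str(compiled)               # "SELECT x WHERE x = :x_1"
#     compiled.params             # {'x_1': 5}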
class TypeCompiler(util.with_metaclass(util.EnsureKWArgType, object)):
"""Produces DDL specification for TypeEngine objects."""
    ensure_kwarg = r'visit_\w+'
def __init__(self, dialect):
self.dialect = dialect
def process(self, type_, **kw):
return type_._compiler_dispatch(self, **kw)
class _CompileLabel(visitors.Visitable):
"""lightweight label object which acts as an expression.Label."""
__visit_name__ = 'label'
__slots__ = 'element', 'name'
def __init__(self, col, name, alt_names=()):
self.element = col
self.name = name
self._alt_names = (col,) + alt_names
@property
def proxy_set(self):
return self.element.proxy_set
@property
def type(self):
return self.element.type
class SQLCompiler(Compiled):
"""Default implementation of Compiled.
Compiles ClauseElements into SQL strings. Uses a similar visit
paradigm as visitors.ClauseVisitor but implements its own traversal.
"""
extract_map = EXTRACT_MAP
compound_keywords = COMPOUND_KEYWORDS
isdelete = isinsert = isupdate = False
"""class-level defaults which can be set at the instance
level to define if this Compiled instance represents
INSERT/UPDATE/DELETE
"""
returning = None
"""holds the "returning" collection of columns if
the statement is CRUD and defines returning columns
either implicitly or explicitly
"""
returning_precedes_values = False
"""set to True classwide to generate RETURNING
clauses before the VALUES or WHERE clause (i.e. MSSQL)
"""
render_table_with_column_in_update_from = False
"""set to True classwide to indicate the SET clause
in a multi-table UPDATE statement should qualify
columns with the table name (i.e. MySQL only)
"""
ansi_bind_rules = False
"""SQL 92 doesn't allow bind parameters to be used
in the columns clause of a SELECT, nor does it allow
ambiguous expressions like "? = ?". A compiler
    subclass can set this flag to True if the target
    driver/DB enforces these rules.
"""
def __init__(self, dialect, statement, column_keys=None,
inline=False, **kwargs):
"""Construct a new ``DefaultCompiler`` object.
dialect
Dialect to be used
statement
ClauseElement to be compiled
column_keys
a list of column names to be compiled into an INSERT or UPDATE
statement.
"""
self.column_keys = column_keys
# compile INSERT/UPDATE defaults/sequences inlined (no pre-
# execute)
self.inline = inline or getattr(statement, 'inline', False)
# a dictionary of bind parameter keys to BindParameter
# instances.
self.binds = {}
# a dictionary of BindParameter instances to "compiled" names
# that are actually present in the generated SQL
self.bind_names = util.column_dict()
# stack which keeps track of nested SELECT statements
self.stack = []
# relates label names in the final SQL to a tuple of local
# column/label name, ColumnElement object (if any) and
# TypeEngine. ResultProxy uses this for type processing and
# column targeting
self._result_columns = []
# if False, means we can't be sure the list of entries
# in _result_columns is actually the rendered order. This
# gets flipped when we use TextAsFrom, for example.
self._ordered_columns = True
# true if the paramstyle is positional
self.positional = dialect.positional
if self.positional:
self.positiontup = []
self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle]
self.ctes = None
# an IdentifierPreparer that formats the quoting of identifiers
self.preparer = dialect.identifier_preparer
self.label_length = dialect.label_length \
or dialect.max_identifier_length
# a map which tracks "anonymous" identifiers that are created on
# the fly here
self.anon_map = util.PopulateDict(self._process_anon)
# a map which tracks "truncated" names based on
# dialect.label_length or dialect.max_identifier_length
self.truncated_names = {}
Compiled.__init__(self, dialect, statement, **kwargs)
if self.positional and dialect.paramstyle == 'numeric':
self._apply_numbered_params()
@util.memoized_instancemethod
def _init_cte_state(self):
"""Initialize collections related to CTEs only if
a CTE is located, to save on the overhead of
these collections otherwise.
"""
# collect CTEs to tack on top of a SELECT
self.ctes = util.OrderedDict()
self.ctes_by_name = {}
self.ctes_recursive = False
if self.positional:
self.cte_positional = {}
@contextlib.contextmanager
def _nested_result(self):
"""special API to support the use case of 'nested result sets'"""
result_columns, ordered_columns = (
self._result_columns, self._ordered_columns)
self._result_columns, self._ordered_columns = [], False
try:
if self.stack:
entry = self.stack[-1]
entry['need_result_map_for_nested'] = True
else:
entry = None
yield self._result_columns, self._ordered_columns
finally:
if entry:
entry.pop('need_result_map_for_nested')
self._result_columns, self._ordered_columns = (
result_columns, ordered_columns)
def _apply_numbered_params(self):
poscount = itertools.count(1)
self.string = re.sub(
r'\[_POSITION\]',
lambda m: str(util.next(poscount)),
self.string)
@util.memoized_property
def _bind_processors(self):
return dict(
(key, value) for key, value in
((self.bind_names[bindparam],
bindparam.type._cached_bind_processor(self.dialect))
for bindparam in self.bind_names)
if value is not None
)
def is_subquery(self):
return len(self.stack) > 1
@property
def sql_compiler(self):
return self
def construct_params(self, params=None, _group_number=None, _check=True):
"""return a dictionary of bind parameter keys and values"""
if params:
pd = {}
for bindparam in self.bind_names:
name = self.bind_names[bindparam]
if bindparam.key in params:
pd[name] = params[bindparam.key]
elif name in params:
pd[name] = params[name]
elif _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d" %
(bindparam.key, _group_number))
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key)
elif bindparam.callable:
pd[name] = bindparam.effective_value
else:
pd[name] = bindparam.value
return pd
else:
pd = {}
for bindparam in self.bind_names:
if _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d" %
(bindparam.key, _group_number))
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key)
if bindparam.callable:
pd[self.bind_names[bindparam]] = bindparam.effective_value
else:
pd[self.bind_names[bindparam]] = bindparam.value
return pd
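    # Sketch: for "SELECT x WHERE x = :x_1" compiled from column('x') == 5,
    # construct_params() returns {'x_1': 5}; construct_params({'x_1': 9})
    # overrides the compiled-in value.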
@property
def params(self):
"""Return the bind param dictionary embedded into this
compiled object, for those values that are present."""
return self.construct_params(_check=False)
@util.dependencies("sqlalchemy.engine.result")
def _create_result_map(self, result):
"""utility method used for unit tests only."""
return result.ResultMetaData._create_result_map(self._result_columns)
def default_from(self):
"""Called when a SELECT statement has no froms, and no FROM clause is
to be appended.
Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output.
"""
return ""
def visit_grouping(self, grouping, asfrom=False, **kwargs):
return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")"
def visit_label_reference(
self, element, within_columns_clause=False, **kwargs):
if self.stack and self.dialect.supports_simple_order_by_label:
selectable = self.stack[-1]['selectable']
with_cols, only_froms = selectable._label_resolve_dict
if within_columns_clause:
resolve_dict = only_froms
else:
resolve_dict = with_cols
# this can be None in the case that a _label_reference()
# were subject to a replacement operation, in which case
# the replacement of the Label element may have changed
# to something else like a ColumnClause expression.
order_by_elem = element.element._order_by_label_element
if order_by_elem is not None and order_by_elem.name in \
resolve_dict:
kwargs['render_label_as_label'] = \
element.element._order_by_label_element
return self.process(
element.element, within_columns_clause=within_columns_clause,
**kwargs)
def visit_textual_label_reference(
self, element, within_columns_clause=False, **kwargs):
if not self.stack:
# compiling the element outside of the context of a SELECT
return self.process(
element._text_clause
)
selectable = self.stack[-1]['selectable']
with_cols, only_froms = selectable._label_resolve_dict
try:
if within_columns_clause:
col = only_froms[element.element]
else:
col = with_cols[element.element]
except KeyError:
# treat it like text()
util.warn_limited(
"Can't resolve label reference %r; converting to text()",
util.ellipses_string(element.element))
return self.process(
element._text_clause
)
else:
kwargs['render_label_as_label'] = col
return self.process(
col, within_columns_clause=within_columns_clause, **kwargs)
def visit_label(self, label,
add_to_result_map=None,
within_label_clause=False,
within_columns_clause=False,
render_label_as_label=None,
**kw):
# only render labels within the columns clause
# or ORDER BY clause of a select. dialect-specific compilers
# can modify this behavior.
render_label_with_as = (within_columns_clause and not
within_label_clause)
render_label_only = render_label_as_label is label
if render_label_only or render_label_with_as:
if isinstance(label.name, elements._truncated_label):
labelname = self._truncated_identifier("colident", label.name)
else:
labelname = label.name
if render_label_with_as:
if add_to_result_map is not None:
add_to_result_map(
labelname,
label.name,
(label, labelname, ) + label._alt_names,
label.type
)
return label.element._compiler_dispatch(
self, within_columns_clause=True,
within_label_clause=True, **kw) + \
OPERATORS[operators.as_] + \
self.preparer.format_label(label, labelname)
elif render_label_only:
return self.preparer.format_label(label, labelname)
else:
return label.element._compiler_dispatch(
self, within_columns_clause=False, **kw)
def visit_column(self, column, add_to_result_map=None,
include_table=True, **kwargs):
name = orig_name = column.name
if name is None:
raise exc.CompileError("Cannot compile Column object until "
"its 'name' is assigned.")
is_literal = column.is_literal
if not is_literal and isinstance(name, elements._truncated_label):
name = self._truncated_identifier("colident", name)
if add_to_result_map is not None:
add_to_result_map(
name,
orig_name,
(column, name, column.key),
column.type
)
if is_literal:
name = self.escape_literal_column(name)
else:
name = self.preparer.quote(name)
table = column.table
if table is None or not include_table or not table.named_with_column:
return name
else:
if table.schema:
schema_prefix = self.preparer.quote_schema(table.schema) + '.'
else:
schema_prefix = ''
tablename = table.name
if isinstance(tablename, elements._truncated_label):
tablename = self._truncated_identifier("alias", tablename)
return schema_prefix + \
self.preparer.quote(tablename) + \
"." + name
def escape_literal_column(self, text):
"""provide escaping for the literal_column() construct."""
# TODO: some dialects might need different behavior here
return text.replace('%', '%%')
def visit_fromclause(self, fromclause, **kwargs):
return fromclause.name
def visit_index(self, index, **kwargs):
return index.name
def visit_typeclause(self, typeclause, **kw):
kw['type_expression'] = typeclause
return self.dialect.type_compiler.process(typeclause.type, **kw)
def post_process_text(self, text):
return text
def visit_textclause(self, textclause, **kw):
def do_bindparam(m):
name = m.group(1)
if name in textclause._bindparams:
return self.process(textclause._bindparams[name], **kw)
else:
return self.bindparam_string(name, **kw)
# un-escape any \:params
return BIND_PARAMS_ESC.sub(
lambda m: m.group(1),
BIND_PARAMS.sub(
do_bindparam,
self.post_process_text(textclause.text))
)
def visit_text_as_from(self, taf,
compound_index=None,
asfrom=False,
parens=True, **kw):
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = toplevel or \
(
compound_index == 0 and entry.get(
'need_result_map_for_compound', False)
) or entry.get('need_result_map_for_nested', False)
if populate_result_map:
self._ordered_columns = False
for c in taf.column_args:
self.process(c, within_columns_clause=True,
add_to_result_map=self._add_to_result_map)
text = self.process(taf.element, **kw)
if asfrom and parens:
text = "(%s)" % text
return text
def visit_null(self, expr, **kw):
return 'NULL'
def visit_true(self, expr, **kw):
if self.dialect.supports_native_boolean:
return 'true'
else:
return "1"
def visit_false(self, expr, **kw):
if self.dialect.supports_native_boolean:
return 'false'
else:
return "0"
def visit_clauselist(self, clauselist, **kw):
sep = clauselist.operator
if sep is None:
sep = " "
else:
sep = OPERATORS[clauselist.operator]
return sep.join(
s for s in
(
c._compiler_dispatch(self, **kw)
for c in clauselist.clauses)
if s)
def visit_case(self, clause, **kwargs):
x = "CASE "
if clause.value is not None:
x += clause.value._compiler_dispatch(self, **kwargs) + " "
for cond, result in clause.whens:
x += "WHEN " + cond._compiler_dispatch(
self, **kwargs
) + " THEN " + result._compiler_dispatch(
self, **kwargs) + " "
if clause.else_ is not None:
x += "ELSE " + clause.else_._compiler_dispatch(
self, **kwargs
) + " "
x += "END"
return x
def visit_cast(self, cast, **kwargs):
return "CAST(%s AS %s)" % \
(cast.clause._compiler_dispatch(self, **kwargs),
cast.typeclause._compiler_dispatch(self, **kwargs))
def visit_over(self, over, **kwargs):
return "%s OVER (%s)" % (
over.func._compiler_dispatch(self, **kwargs),
' '.join(
'%s BY %s' % (word, clause._compiler_dispatch(self, **kwargs))
for word, clause in (
('PARTITION', over.partition_by),
('ORDER', over.order_by)
)
if clause is not None and len(clause)
)
)
def visit_funcfilter(self, funcfilter, **kwargs):
return "%s FILTER (WHERE %s)" % (
funcfilter.func._compiler_dispatch(self, **kwargs),
funcfilter.criterion._compiler_dispatch(self, **kwargs)
)
def visit_extract(self, extract, **kwargs):
field = self.extract_map.get(extract.field, extract.field)
return "EXTRACT(%s FROM %s)" % (
field, extract.expr._compiler_dispatch(self, **kwargs))
def visit_function(self, func, add_to_result_map=None, **kwargs):
if add_to_result_map is not None:
add_to_result_map(
func.name, func.name, (), func.type
)
disp = getattr(self, "visit_%s_func" % func.name.lower(), None)
if disp:
return disp(func, **kwargs)
else:
name = FUNCTIONS.get(func.__class__, func.name + "%(expr)s")
return ".".join(list(func.packagenames) + [name]) % \
{'expr': self.function_argspec(func, **kwargs)}
def visit_next_value_func(self, next_value, **kw):
return self.visit_sequence(next_value.sequence)
def visit_sequence(self, sequence):
raise NotImplementedError(
"Dialect '%s' does not support sequence increments." %
self.dialect.name
)
def function_argspec(self, func, **kwargs):
return func.clause_expr._compiler_dispatch(self, **kwargs)
def visit_compound_select(self, cs, asfrom=False,
parens=True, compound_index=0, **kwargs):
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
need_result_map = toplevel or \
(compound_index == 0
and entry.get('need_result_map_for_compound', False))
self.stack.append(
{
'correlate_froms': entry['correlate_froms'],
'asfrom_froms': entry['asfrom_froms'],
'selectable': cs,
'need_result_map_for_compound': need_result_map
})
keyword = self.compound_keywords.get(cs.keyword)
text = (" " + keyword + " ").join(
(c._compiler_dispatch(self,
asfrom=asfrom, parens=False,
compound_index=i, **kwargs)
for i, c in enumerate(cs.selects))
)
group_by = cs._group_by_clause._compiler_dispatch(
self, asfrom=asfrom, **kwargs)
if group_by:
text += " GROUP BY " + group_by
text += self.order_by_clause(cs, **kwargs)
text += (cs._limit_clause is not None
or cs._offset_clause is not None) and \
self.limit_clause(cs, **kwargs) or ""
if self.ctes and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom and parens:
return "(" + text + ")"
else:
return text
def visit_unary(self, unary, **kw):
if unary.operator:
if unary.modifier:
raise exc.CompileError(
"Unary expression does not support operator "
"and modifier simultaneously")
disp = getattr(self, "visit_%s_unary_operator" %
unary.operator.__name__, None)
if disp:
return disp(unary, unary.operator, **kw)
else:
return self._generate_generic_unary_operator(
unary, OPERATORS[unary.operator], **kw)
elif unary.modifier:
disp = getattr(self, "visit_%s_unary_modifier" %
unary.modifier.__name__, None)
if disp:
return disp(unary, unary.modifier, **kw)
else:
return self._generate_generic_unary_modifier(
unary, OPERATORS[unary.modifier], **kw)
else:
raise exc.CompileError(
"Unary expression has no operator or modifier")
def visit_istrue_unary_operator(self, element, operator, **kw):
if self.dialect.supports_native_boolean:
return self.process(element.element, **kw)
else:
return "%s = 1" % self.process(element.element, **kw)
def visit_isfalse_unary_operator(self, element, operator, **kw):
if self.dialect.supports_native_boolean:
return "NOT %s" % self.process(element.element, **kw)
else:
return "%s = 0" % self.process(element.element, **kw)
def visit_notmatch_op_binary(self, binary, operator, **kw):
return "NOT %s" % self.visit_binary(
binary, override_operator=operators.match_op)
def visit_binary(self, binary, override_operator=None, **kw):
# don't allow "? = ?" to render
if self.ansi_bind_rules and \
isinstance(binary.left, elements.BindParameter) and \
isinstance(binary.right, elements.BindParameter):
kw['literal_binds'] = True
operator_ = override_operator or binary.operator
disp = getattr(self, "visit_%s_binary" % operator_.__name__, None)
if disp:
return disp(binary, operator_, **kw)
else:
try:
opstring = OPERATORS[operator_]
except KeyError:
raise exc.UnsupportedCompilationError(self, operator_)
else:
return self._generate_generic_binary(binary, opstring, **kw)
def visit_custom_op_binary(self, element, operator, **kw):
return self._generate_generic_binary(
element, " " + operator.opstring + " ", **kw)
def visit_custom_op_unary_operator(self, element, operator, **kw):
return self._generate_generic_unary_operator(
element, operator.opstring + " ", **kw)
def visit_custom_op_unary_modifier(self, element, operator, **kw):
return self._generate_generic_unary_modifier(
element, " " + operator.opstring, **kw)
def _generate_generic_binary(self, binary, opstring, **kw):
return binary.left._compiler_dispatch(self, **kw) + \
opstring + \
binary.right._compiler_dispatch(self, **kw)
def _generate_generic_unary_operator(self, unary, opstring, **kw):
return opstring + unary.element._compiler_dispatch(self, **kw)
def _generate_generic_unary_modifier(self, unary, opstring, **kw):
return unary.element._compiler_dispatch(self, **kw) + opstring
@util.memoized_property
def _like_percent_literal(self):
return elements.literal_column("'%'", type_=sqltypes.STRINGTYPE)
def visit_contains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notcontains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_startswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(
binary.right
)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notstartswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(
binary.right
)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_endswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notendswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_notlike_op_binary(binary, operator, **kw)
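    # Sketch of the rewrites above: col.startswith('ab') compiles to
    # "col LIKE :col_1 || '%'", col.endswith('ab') to
    # "col LIKE '%' || :col_1", and col.contains('ab') to
    # "col LIKE '%' || :col_1 || '%'" (the concatenation operator may be
    # overridden per dialect).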
def visit_like_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
# TODO: use ternary here, not "and"/ "or"
return '%s LIKE %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_notlike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return '%s NOT LIKE %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_ilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return 'lower(%s) LIKE lower(%s)' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_notilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return 'lower(%s) NOT LIKE lower(%s)' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_between_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary, " BETWEEN SYMMETRIC "
if symmetric else " BETWEEN ", **kw)
def visit_notbetween_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary, " NOT BETWEEN SYMMETRIC "
if symmetric else " NOT BETWEEN ", **kw)
def visit_bindparam(self, bindparam, within_columns_clause=False,
literal_binds=False,
skip_bind_expression=False,
**kwargs):
if not skip_bind_expression and bindparam.type._has_bind_expression:
bind_expression = bindparam.type.bind_expression(bindparam)
return self.process(bind_expression,
skip_bind_expression=True)
if literal_binds or \
(within_columns_clause and
self.ansi_bind_rules):
if bindparam.value is None and bindparam.callable is None:
raise exc.CompileError("Bind parameter '%s' without a "
"renderable value not allowed here."
% bindparam.key)
return self.render_literal_bindparam(
bindparam, within_columns_clause=True, **kwargs)
name = self._truncate_bindparam(bindparam)
if name in self.binds:
existing = self.binds[name]
if existing is not bindparam:
if (existing.unique or bindparam.unique) and \
not existing.proxy_set.intersection(
bindparam.proxy_set):
raise exc.CompileError(
"Bind parameter '%s' conflicts with "
"unique bind parameter of the same name" %
bindparam.key
)
elif existing._is_crud or bindparam._is_crud:
raise exc.CompileError(
"bindparam() name '%s' is reserved "
"for automatic usage in the VALUES or SET "
"clause of this "
"insert/update statement. Please use a "
"name other than column name when using bindparam() "
"with insert() or update() (for example, 'b_%s')." %
(bindparam.key, bindparam.key)
)
self.binds[bindparam.key] = self.binds[name] = bindparam
return self.bindparam_string(name, **kwargs)
def render_literal_bindparam(self, bindparam, **kw):
value = bindparam.effective_value
return self.render_literal_value(value, bindparam.type)
def render_literal_value(self, value, type_):
"""Render the value of a bind parameter as a quoted literal.
This is used for statement sections that do not accept bind parameters
on the target driver/database.
This should be implemented by subclasses using the quoting services
of the DBAPI.
"""
processor = type_._cached_literal_processor(self.dialect)
if processor:
return processor(value)
else:
raise NotImplementedError(
"Don't know how to literal-quote value %r" % value)
def _truncate_bindparam(self, bindparam):
if bindparam in self.bind_names:
return self.bind_names[bindparam]
bind_name = bindparam.key
if isinstance(bind_name, elements._truncated_label):
bind_name = self._truncated_identifier("bindparam", bind_name)
# add to bind_names for translation
self.bind_names[bindparam] = bind_name
return bind_name
def _truncated_identifier(self, ident_class, name):
if (ident_class, name) in self.truncated_names:
return self.truncated_names[(ident_class, name)]
anonname = name.apply_map(self.anon_map)
if len(anonname) > self.label_length - 6:
counter = self.truncated_names.get(ident_class, 1)
truncname = anonname[0:max(self.label_length - 6, 0)] + \
"_" + hex(counter)[2:]
self.truncated_names[ident_class] = counter + 1
else:
truncname = anonname
self.truncated_names[(ident_class, name)] = truncname
return truncname
def _anonymize(self, name):
return name % self.anon_map
def _process_anon(self, key):
(ident, derived) = key.split(' ', 1)
anonymous_counter = self.anon_map.get(derived, 1)
self.anon_map[derived] = anonymous_counter + 1
return derived + "_" + str(anonymous_counter)
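    # Sketch: anon_map keys look like "140632 anon"; the first lookup for a
    # given derived name yields "anon_1", the next distinct one "anon_2", etc.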
def bindparam_string(self, name, positional_names=None, **kw):
if self.positional:
if positional_names is not None:
positional_names.append(name)
else:
self.positiontup.append(name)
return self.bindtemplate % {'name': name}
def visit_cte(self, cte, asfrom=False, ashint=False,
fromhints=None,
**kwargs):
self._init_cte_state()
if isinstance(cte.name, elements._truncated_label):
cte_name = self._truncated_identifier("alias", cte.name)
else:
cte_name = cte.name
if cte_name in self.ctes_by_name:
existing_cte = self.ctes_by_name[cte_name]
# we've generated a same-named CTE that we are enclosed in,
# or this is the same CTE. just return the name.
if cte in existing_cte._restates or cte is existing_cte:
return self.preparer.format_alias(cte, cte_name)
elif existing_cte in cte._restates:
# we've generated a same-named CTE that is
# enclosed in us - we take precedence, so
# discard the text for the "inner".
del self.ctes[existing_cte]
else:
raise exc.CompileError(
"Multiple, unrelated CTEs found with "
"the same name: %r" %
cte_name)
self.ctes_by_name[cte_name] = cte
if cte._cte_alias is not None:
orig_cte = cte._cte_alias
if orig_cte not in self.ctes:
self.visit_cte(orig_cte, **kwargs)
cte_alias_name = cte._cte_alias.name
if isinstance(cte_alias_name, elements._truncated_label):
cte_alias_name = self._truncated_identifier(
"alias", cte_alias_name)
else:
orig_cte = cte
cte_alias_name = None
if not cte_alias_name and cte not in self.ctes:
if cte.recursive:
self.ctes_recursive = True
text = self.preparer.format_alias(cte, cte_name)
if cte.recursive:
if isinstance(cte.original, selectable.Select):
col_source = cte.original
elif isinstance(cte.original, selectable.CompoundSelect):
col_source = cte.original.selects[0]
else:
assert False
recur_cols = [c for c in
util.unique_list(col_source.inner_columns)
if c is not None]
text += "(%s)" % (", ".join(
self.preparer.format_column(ident)
for ident in recur_cols))
if self.positional:
kwargs['positional_names'] = self.cte_positional[cte] = []
text += " AS \n" + \
cte.original._compiler_dispatch(
self, asfrom=True, **kwargs
)
if cte._suffixes:
text += " " + self._generate_prefixes(
cte, cte._suffixes, **kwargs)
self.ctes[cte] = text
if asfrom:
if cte_alias_name:
text = self.preparer.format_alias(cte, cte_alias_name)
text += self.get_render_as_alias_suffix(cte_name)
else:
return self.preparer.format_alias(cte, cte_name)
return text
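    # Sketch: for cte = some_select.cte('regional'), a SELECT against it
    # renders as "WITH regional AS (SELECT ...) SELECT ... FROM regional";
    # recursive CTEs get "WITH RECURSIVE" plus an explicit column list.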
def visit_alias(self, alias, asfrom=False, ashint=False,
iscrud=False,
fromhints=None, **kwargs):
if asfrom or ashint:
if isinstance(alias.name, elements._truncated_label):
alias_name = self._truncated_identifier("alias", alias.name)
else:
alias_name = alias.name
if ashint:
return self.preparer.format_alias(alias, alias_name)
elif asfrom:
ret = alias.original._compiler_dispatch(self,
asfrom=True, **kwargs) + \
self.get_render_as_alias_suffix(
self.preparer.format_alias(alias, alias_name))
if fromhints and alias in fromhints:
ret = self.format_from_hint_text(ret, alias,
fromhints[alias], iscrud)
return ret
else:
return alias.original._compiler_dispatch(self, **kwargs)
def get_render_as_alias_suffix(self, alias_name_text):
return " AS " + alias_name_text
def _add_to_result_map(self, keyname, name, objects, type_):
self._result_columns.append((keyname, name, objects, type_))
def _label_select_column(self, select, column,
populate_result_map,
asfrom, column_clause_args,
name=None,
within_columns_clause=True):
"""produce labeled columns present in a select()."""
if column.type._has_column_expression and \
populate_result_map:
col_expr = column.type.column_expression(column)
add_to_result_map = lambda keyname, name, objects, type_: \
self._add_to_result_map(
keyname, name,
objects + (column,), type_)
else:
col_expr = column
if populate_result_map:
add_to_result_map = self._add_to_result_map
else:
add_to_result_map = None
if not within_columns_clause:
result_expr = col_expr
elif isinstance(column, elements.Label):
if col_expr is not column:
result_expr = _CompileLabel(
col_expr,
column.name,
alt_names=(column.element,)
)
else:
result_expr = col_expr
elif select is not None and name:
result_expr = _CompileLabel(
col_expr,
name,
alt_names=(column._key_label,)
)
elif \
asfrom and \
isinstance(column, elements.ColumnClause) and \
not column.is_literal and \
column.table is not None and \
not isinstance(column.table, selectable.Select):
result_expr = _CompileLabel(col_expr,
elements._as_truncated(column.name),
alt_names=(column.key,))
elif (
not isinstance(column, elements.TextClause) and
(
not isinstance(column, elements.UnaryExpression) or
column.wraps_column_expression
) and
(
not hasattr(column, 'name') or
isinstance(column, functions.Function)
)
):
result_expr = _CompileLabel(col_expr, column.anon_label)
elif col_expr is not column:
# TODO: are we sure "column" has a .name and .key here ?
# assert isinstance(column, elements.ColumnClause)
result_expr = _CompileLabel(col_expr,
elements._as_truncated(column.name),
alt_names=(column.key,))
else:
result_expr = col_expr
column_clause_args.update(
within_columns_clause=within_columns_clause,
add_to_result_map=add_to_result_map
)
return result_expr._compiler_dispatch(
self,
**column_clause_args
)
def format_from_hint_text(self, sqltext, table, hint, iscrud):
hinttext = self.get_from_hint_text(table, hint)
if hinttext:
sqltext += " " + hinttext
return sqltext
def get_select_hint_text(self, byfroms):
return None
def get_from_hint_text(self, table, text):
return None
def get_crud_hint_text(self, table, text):
return None
def get_statement_hint_text(self, hint_texts):
return " ".join(hint_texts)
def _transform_select_for_nested_joins(self, select):
"""Rewrite any "a JOIN (b JOIN c)" expression as
"a JOIN (select * from b JOIN c) AS anon", to support
databases that can't parse a parenthesized join correctly
        (SQLite being the main one).
"""
cloned = {}
column_translate = [{}]
def visit(element, **kw):
if element in column_translate[-1]:
return column_translate[-1][element]
elif element in cloned:
return cloned[element]
newelem = cloned[element] = element._clone()
if newelem.is_selectable and newelem._is_join and \
isinstance(newelem.right, selectable.FromGrouping):
newelem._reset_exported()
newelem.left = visit(newelem.left, **kw)
right = visit(newelem.right, **kw)
selectable_ = selectable.Select(
[right.element],
use_labels=True).alias()
for c in selectable_.c:
c._key_label = c.key
c._label = c.name
translate_dict = dict(
zip(newelem.right.element.c, selectable_.c)
)
# translating from both the old and the new
# because different select() structures will lead us
# to traverse differently
translate_dict[right.element.left] = selectable_
translate_dict[right.element.right] = selectable_
translate_dict[newelem.right.element.left] = selectable_
translate_dict[newelem.right.element.right] = selectable_
# propagate translations that we've gained
# from nested visit(newelem.right) outwards
# to the enclosing select here. this happens
# only when we have more than one level of right
# join nesting, i.e. "a JOIN (b JOIN (c JOIN d))"
for k, v in list(column_translate[-1].items()):
if v in translate_dict:
# remarkably, no current ORM tests (May 2013)
# hit this condition, only test_join_rewriting
# does.
column_translate[-1][k] = translate_dict[v]
column_translate[-1].update(translate_dict)
newelem.right = selectable_
newelem.onclause = visit(newelem.onclause, **kw)
elif newelem._is_from_container:
# if we hit an Alias, CompoundSelect or ScalarSelect, put a
# marker in the stack.
kw['transform_clue'] = 'select_container'
newelem._copy_internals(clone=visit, **kw)
elif newelem.is_selectable and newelem._is_select:
barrier_select = kw.get('transform_clue', None) == \
'select_container'
# if we're still descended from an
# Alias/CompoundSelect/ScalarSelect, we're
# in a FROM clause, so start with a new translate collection
if barrier_select:
column_translate.append({})
kw['transform_clue'] = 'inside_select'
newelem._copy_internals(clone=visit, **kw)
if barrier_select:
del column_translate[-1]
else:
newelem._copy_internals(clone=visit, **kw)
return newelem
return visit(select)
def _transform_result_map_for_nested_joins(
self, select, transformed_select):
inner_col = dict((c._key_label, c) for
c in transformed_select.inner_columns)
d = dict(
(inner_col[c._key_label], c)
for c in select.inner_columns
)
self._result_columns = [
(key, name, tuple([d.get(col, col) for col in objs]), typ)
for key, name, objs, typ in self._result_columns
]
_default_stack_entry = util.immutabledict([
('correlate_froms', frozenset()),
('asfrom_froms', frozenset())
])
def _display_froms_for_select(self, select, asfrom):
# utility method to help external dialects
# get the correct from list for a select.
# specifically the oracle dialect needs this feature
# right now.
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
correlate_froms = entry['correlate_froms']
asfrom_froms = entry['asfrom_froms']
if asfrom:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms.difference(
asfrom_froms),
implicit_correlate_froms=())
else:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms)
return froms
def visit_select(self, select, asfrom=False, parens=True,
fromhints=None,
compound_index=0,
nested_join_translation=False,
select_wraps_for=None,
**kwargs):
needs_nested_translation = \
select.use_labels and \
not nested_join_translation and \
not self.stack and \
not self.dialect.supports_right_nested_joins
if needs_nested_translation:
transformed_select = self._transform_select_for_nested_joins(
select)
text = self.visit_select(
transformed_select, asfrom=asfrom, parens=parens,
fromhints=fromhints,
compound_index=compound_index,
nested_join_translation=True, **kwargs
)
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = toplevel or \
(
compound_index == 0 and entry.get(
'need_result_map_for_compound', False)
) or entry.get('need_result_map_for_nested', False)
# this was first proposed as part of #3372; however, it is not
# reached in current tests and could possibly be an assertion
# instead.
if not populate_result_map and 'add_to_result_map' in kwargs:
del kwargs['add_to_result_map']
if needs_nested_translation:
if populate_result_map:
self._transform_result_map_for_nested_joins(
select, transformed_select)
return text
froms = self._setup_select_stack(select, entry, asfrom)
column_clause_args = kwargs.copy()
column_clause_args.update({
'within_label_clause': False,
'within_columns_clause': False
})
text = "SELECT " # we're off to a good start !
if select._hints:
hint_text, byfrom = self._setup_select_hints(select)
if hint_text:
text += hint_text + " "
else:
byfrom = None
if select._prefixes:
text += self._generate_prefixes(
select, select._prefixes, **kwargs)
text += self.get_select_precolumns(select, **kwargs)
# the actual list of columns to print in the SELECT column list.
inner_columns = [
c for c in [
self._label_select_column(
select,
column,
populate_result_map, asfrom,
column_clause_args,
name=name)
for name, column in select._columns_plus_names
]
if c is not None
]
if populate_result_map and select_wraps_for is not None:
# if this select is a compiler-generated wrapper,
# rewrite the targeted columns in the result map
wrapped_inner_columns = set(select_wraps_for.inner_columns)
translate = dict(
(outer, inner.pop()) for outer, inner in [
(
outer,
outer.proxy_set.intersection(wrapped_inner_columns))
for outer in select.inner_columns
] if inner
)
self._result_columns = [
(key, name, tuple(translate.get(o, o) for o in obj), type_)
for key, name, obj, type_ in self._result_columns
]
text = self._compose_select_body(
text, select, inner_columns, froms, byfrom, kwargs)
if select._statement_hints:
per_dialect = [
ht for (dialect_name, ht)
in select._statement_hints
if dialect_name in ('*', self.dialect.name)
]
if per_dialect:
text += " " + self.get_statement_hint_text(per_dialect)
if self.ctes and self._is_toplevel_select(select):
text = self._render_cte_clause() + text
if select._suffixes:
text += " " + self._generate_prefixes(
select, select._suffixes, **kwargs)
self.stack.pop(-1)
if asfrom and parens:
return "(" + text + ")"
else:
return text
def _is_toplevel_select(self, select):
"""Return True if the stack is placed at the given select, and
is also the outermost SELECT, meaning there is either no stack
before this one, or the enclosing stack is a topmost INSERT.
"""
return (
self.stack[-1]['selectable'] is select and
(
len(self.stack) == 1 or self.isinsert and len(self.stack) == 2
and self.statement is self.stack[0]['selectable']
)
)
def _setup_select_hints(self, select):
byfrom = dict([
(from_, hinttext % {
'name': from_._compiler_dispatch(
self, ashint=True)
})
for (from_, dialect), hinttext in
select._hints.items()
if dialect in ('*', self.dialect.name)
])
hint_text = self.get_select_hint_text(byfrom)
return hint_text, byfrom
def _setup_select_stack(self, select, entry, asfrom):
correlate_froms = entry['correlate_froms']
asfrom_froms = entry['asfrom_froms']
if asfrom:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms.difference(
asfrom_froms),
implicit_correlate_froms=())
else:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms)
new_correlate_froms = set(selectable._from_objects(*froms))
all_correlate_froms = new_correlate_froms.union(correlate_froms)
new_entry = {
'asfrom_froms': new_correlate_froms,
'correlate_froms': all_correlate_froms,
'selectable': select,
}
self.stack.append(new_entry)
return froms
def _compose_select_body(
self, text, select, inner_columns, froms, byfrom, kwargs):
text += ', '.join(inner_columns)
if froms:
text += " \nFROM "
if select._hints:
text += ', '.join(
[f._compiler_dispatch(self, asfrom=True,
fromhints=byfrom, **kwargs)
for f in froms])
else:
text += ', '.join(
[f._compiler_dispatch(self, asfrom=True, **kwargs)
for f in froms])
else:
text += self.default_from()
if select._whereclause is not None:
t = select._whereclause._compiler_dispatch(self, **kwargs)
if t:
text += " \nWHERE " + t
if select._group_by_clause.clauses:
group_by = select._group_by_clause._compiler_dispatch(
self, **kwargs)
if group_by:
text += " GROUP BY " + group_by
if select._having is not None:
t = select._having._compiler_dispatch(self, **kwargs)
if t:
text += " \nHAVING " + t
if select._order_by_clause.clauses:
text += self.order_by_clause(select, **kwargs)
if (select._limit_clause is not None or
select._offset_clause is not None):
text += self.limit_clause(select, **kwargs)
if select._for_update_arg is not None:
text += self.for_update_clause(select, **kwargs)
return text
def _generate_prefixes(self, stmt, prefixes, **kw):
clause = " ".join(
prefix._compiler_dispatch(self, **kw)
for prefix, dialect_name in prefixes
if dialect_name is None or
dialect_name == self.dialect.name
)
if clause:
clause += " "
return clause
def _render_cte_clause(self):
if self.positional:
self.positiontup = sum([
self.cte_positional[cte]
for cte in self.ctes], []) + \
self.positiontup
cte_text = self.get_cte_preamble(self.ctes_recursive) + " "
cte_text += ", \n".join(
[txt for txt in self.ctes.values()]
)
cte_text += "\n "
return cte_text
def get_cte_preamble(self, recursive):
if recursive:
return "WITH RECURSIVE"
else:
return "WITH"
def get_select_precolumns(self, select, **kw):
"""Called when building a ``SELECT`` statement, position is just
before column list.
"""
return select._distinct and "DISTINCT " or ""
def order_by_clause(self, select, **kw):
order_by = select._order_by_clause._compiler_dispatch(self, **kw)
if order_by:
return " ORDER BY " + order_by
else:
return ""
def for_update_clause(self, select, **kw):
return " FOR UPDATE"
def returning_clause(self, stmt, returning_cols):
raise exc.CompileError(
"RETURNING is not supported by this "
"dialect's statement compiler.")
def limit_clause(self, select, **kw):
text = ""
if select._limit_clause is not None:
text += "\n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
text += "\n LIMIT -1"
text += " OFFSET " + self.process(select._offset_clause, **kw)
return text
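    # Sketch: limit/offset render as "LIMIT :param_1 OFFSET :param_2"; when
    # only an offset is present, "LIMIT -1" is emitted first because some
    # databases (SQLite, for example) reject OFFSET without a LIMIT.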
def visit_table(self, table, asfrom=False, iscrud=False, ashint=False,
fromhints=None, **kwargs):
if asfrom or ashint:
if getattr(table, "schema", None):
ret = self.preparer.quote_schema(table.schema) + \
"." + self.preparer.quote(table.name)
else:
ret = self.preparer.quote(table.name)
if fromhints and table in fromhints:
ret = self.format_from_hint_text(ret, table,
fromhints[table], iscrud)
return ret
else:
return ""
def visit_join(self, join, asfrom=False, **kwargs):
return (
join.left._compiler_dispatch(self, asfrom=True, **kwargs) +
(join.isouter and " LEFT OUTER JOIN " or " JOIN ") +
join.right._compiler_dispatch(self, asfrom=True, **kwargs) +
" ON " +
join.onclause._compiler_dispatch(self, **kwargs)
)
def visit_insert(self, insert_stmt, **kw):
self.stack.append(
{'correlate_froms': set(),
"asfrom_froms": set(),
"selectable": insert_stmt})
self.isinsert = True
crud_params = crud._get_crud_params(self, insert_stmt, **kw)
if not crud_params and \
not self.dialect.supports_default_values and \
not self.dialect.supports_empty_insert:
raise exc.CompileError("The '%s' dialect with current database "
"version settings does not support empty "
"inserts." %
self.dialect.name)
if insert_stmt._has_multi_parameters:
if not self.dialect.supports_multivalues_insert:
raise exc.CompileError(
"The '%s' dialect with current database "
"version settings does not support "
"in-place multirow inserts." %
self.dialect.name)
crud_params_single = crud_params[0]
else:
crud_params_single = crud_params
preparer = self.preparer
supports_default_values = self.dialect.supports_default_values
text = "INSERT "
if insert_stmt._prefixes:
text += self._generate_prefixes(insert_stmt,
insert_stmt._prefixes, **kw)
text += "INTO "
table_text = preparer.format_table(insert_stmt.table)
if insert_stmt._hints:
dialect_hints = dict([
(table, hint_text)
for (table, dialect), hint_text in
insert_stmt._hints.items()
if dialect in ('*', self.dialect.name)
])
if insert_stmt.table in dialect_hints:
table_text = self.format_from_hint_text(
table_text,
insert_stmt.table,
dialect_hints[insert_stmt.table],
True
)
text += table_text
if crud_params_single or not supports_default_values:
text += " (%s)" % ', '.join([preparer.format_column(c[0])
for c in crud_params_single])
if self.returning or insert_stmt._returning:
self.returning = self.returning or insert_stmt._returning
returning_clause = self.returning_clause(
insert_stmt, self.returning)
if self.returning_precedes_values:
text += " " + returning_clause
if insert_stmt.select is not None:
text += " %s" % self.process(self._insert_from_select, **kw)
elif not crud_params and supports_default_values:
text += " DEFAULT VALUES"
elif insert_stmt._has_multi_parameters:
text += " VALUES %s" % (
", ".join(
"(%s)" % (
', '.join(c[1] for c in crud_param_set)
)
for crud_param_set in crud_params
)
)
else:
text += " VALUES (%s)" % \
', '.join([c[1] for c in crud_params])
if self.returning and not self.returning_precedes_values:
text += " " + returning_clause
self.stack.pop(-1)
return text
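    # Sketch of typical output: an insert() against a two-column table
    # renders as "INSERT INTO t (a, b) VALUES (:a, :b)"; with
    # _has_multi_parameters, one "(...)" group is emitted per parameter set.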
def update_limit_clause(self, update_stmt):
"""Provide a hook for MySQL to add LIMIT to the UPDATE"""
return None
def update_tables_clause(self, update_stmt, from_table,
extra_froms, **kw):
"""Provide a hook to override the initial table clause
in an UPDATE statement.
MySQL overrides this.
"""
return from_table._compiler_dispatch(self, asfrom=True,
iscrud=True, **kw)
def update_from_clause(self, update_stmt,
from_table, extra_froms,
from_hints,
**kw):
"""Provide a hook to override the generation of an
UPDATE..FROM clause.
MySQL and MSSQL override this.
"""
return "FROM " + ', '.join(
t._compiler_dispatch(self, asfrom=True,
fromhints=from_hints, **kw)
for t in extra_froms)
def visit_update(self, update_stmt, **kw):
self.stack.append(
{'correlate_froms': set([update_stmt.table]),
"asfrom_froms": set([update_stmt.table]),
"selectable": update_stmt})
self.isupdate = True
extra_froms = update_stmt._extra_froms
text = "UPDATE "
if update_stmt._prefixes:
text += self._generate_prefixes(update_stmt,
update_stmt._prefixes, **kw)
table_text = self.update_tables_clause(update_stmt, update_stmt.table,
extra_froms, **kw)
crud_params = crud._get_crud_params(self, update_stmt, **kw)
if update_stmt._hints:
dialect_hints = dict([
(table, hint_text)
for (table, dialect), hint_text in
update_stmt._hints.items()
if dialect in ('*', self.dialect.name)
])
if update_stmt.table in dialect_hints:
table_text = self.format_from_hint_text(
table_text,
update_stmt.table,
dialect_hints[update_stmt.table],
True
)
else:
dialect_hints = None
text += table_text
text += ' SET '
include_table = extra_froms and \
self.render_table_with_column_in_update_from
text += ', '.join(
c[0]._compiler_dispatch(self,
include_table=include_table) +
'=' + c[1] for c in crud_params
)
if self.returning or update_stmt._returning:
if not self.returning:
self.returning = update_stmt._returning
if self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning)
if extra_froms:
extra_from_text = self.update_from_clause(
update_stmt,
update_stmt.table,
extra_froms,
dialect_hints, **kw)
if extra_from_text:
text += " " + extra_from_text
if update_stmt._whereclause is not None:
t = self.process(update_stmt._whereclause)
if t:
text += " WHERE " + t
limit_clause = self.update_limit_clause(update_stmt)
if limit_clause:
text += " " + limit_clause
if self.returning and not self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning)
self.stack.pop(-1)
return text
@util.memoized_property
def _key_getters_for_crud_column(self):
return crud._key_getters_for_crud_column(self)
def visit_delete(self, delete_stmt, **kw):
self.stack.append({'correlate_froms': set([delete_stmt.table]),
"asfrom_froms": set([delete_stmt.table]),
"selectable": delete_stmt})
self.isdelete = True
text = "DELETE "
if delete_stmt._prefixes:
text += self._generate_prefixes(delete_stmt,
delete_stmt._prefixes, **kw)
text += "FROM "
table_text = delete_stmt.table._compiler_dispatch(
self, asfrom=True, iscrud=True)
if delete_stmt._hints:
dialect_hints = dict([
(table, hint_text)
for (table, dialect), hint_text in
delete_stmt._hints.items()
if dialect in ('*', self.dialect.name)
])
if delete_stmt.table in dialect_hints:
table_text = self.format_from_hint_text(
table_text,
delete_stmt.table,
dialect_hints[delete_stmt.table],
True
)
else:
dialect_hints = None
text += table_text
if delete_stmt._returning:
self.returning = delete_stmt._returning
if self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt, delete_stmt._returning)
if delete_stmt._whereclause is not None:
t = delete_stmt._whereclause._compiler_dispatch(self)
if t:
text += " WHERE " + t
if self.returning and not self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt, delete_stmt._returning)
self.stack.pop(-1)
return text
def visit_savepoint(self, savepoint_stmt):
return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return "ROLLBACK TO SAVEPOINT %s" % \
self.preparer.format_savepoint(savepoint_stmt)
def visit_release_savepoint(self, savepoint_stmt):
return "RELEASE SAVEPOINT %s" % \
self.preparer.format_savepoint(savepoint_stmt)
class DDLCompiler(Compiled):
@util.memoized_property
def sql_compiler(self):
return self.dialect.statement_compiler(self.dialect, None)
@util.memoized_property
def type_compiler(self):
return self.dialect.type_compiler
@property
def preparer(self):
return self.dialect.identifier_preparer
def construct_params(self, params=None):
return None
def visit_ddl(self, ddl, **kwargs):
# table events can substitute table and schema name
context = ddl.context
if isinstance(ddl.target, schema.Table):
context = context.copy()
preparer = self.dialect.identifier_preparer
path = preparer.format_table_seq(ddl.target)
if len(path) == 1:
table, sch = path[0], ''
else:
table, sch = path[-1], path[0]
context.setdefault('table', table)
context.setdefault('schema', sch)
context.setdefault('fullname', preparer.format_table(ddl.target))
return self.sql_compiler.post_process_text(ddl.statement % context)
def visit_create_schema(self, create):
schema = self.preparer.format_schema(create.element)
return "CREATE SCHEMA " + schema
def visit_drop_schema(self, drop):
schema = self.preparer.format_schema(drop.element)
text = "DROP SCHEMA " + schema
if drop.cascade:
text += " CASCADE"
return text
def visit_create_table(self, create):
table = create.element
preparer = self.dialect.identifier_preparer
text = "\n" + " ".join(['CREATE'] +
table._prefixes +
['TABLE',
preparer.format_table(table),
"("])
separator = "\n"
# if only one primary key, specify it along with the column
first_pk = False
for create_column in create.columns:
column = create_column.element
try:
processed = self.process(create_column,
first_pk=column.primary_key
and not first_pk)
if processed is not None:
text += separator
separator = ", \n"
text += "\t" + processed
if column.primary_key:
first_pk = True
except exc.CompileError as ce:
util.raise_from_cause(
exc.CompileError(
util.u("(in table '%s', column '%s'): %s") %
(table.description, column.name, ce.args[0])
))
const = self.create_table_constraints(
table, _include_foreign_key_constraints=
create.include_foreign_key_constraints)
if const:
text += ", \n\t" + const
text += "\n)%s\n\n" % self.post_create_table(table)
return text
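    # Sketch of the shape produced:
    #
    #   CREATE TABLE t (
    #       id INTEGER NOT NULL,
    #       name VARCHAR(50),
    #       PRIMARY KEY (id)
    #   )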
def visit_create_column(self, create, first_pk=False):
column = create.element
if column.system:
return None
text = self.get_column_specification(
column,
first_pk=first_pk
)
const = " ".join(self.process(constraint)
for constraint in column.constraints)
if const:
text += " " + const
return text
def create_table_constraints(
self, table,
_include_foreign_key_constraints=None):
# On some DB order is significant: visit PK first, then the
# other constraints (engine.ReflectionTest.testbasic failed on FB2)
constraints = []
if table.primary_key:
constraints.append(table.primary_key)
all_fkcs = table.foreign_key_constraints
if _include_foreign_key_constraints is not None:
omit_fkcs = all_fkcs.difference(_include_foreign_key_constraints)
else:
omit_fkcs = set()
constraints.extend([c for c in table._sorted_constraints
if c is not table.primary_key and
c not in omit_fkcs])
return ", \n\t".join(
p for p in
(self.process(constraint)
for constraint in constraints
if (
constraint._create_rule is None or
constraint._create_rule(self))
and (
not self.dialect.supports_alter or
not getattr(constraint, 'use_alter', False)
)) if p is not None
)
def visit_drop_table(self, drop):
return "\nDROP TABLE " + self.preparer.format_table(drop.element)
def visit_drop_view(self, drop):
return "\nDROP VIEW " + self.preparer.format_table(drop.element)
def _verify_index_table(self, index):
if index.table is None:
raise exc.CompileError("Index '%s' is not associated "
"with any table." % index.name)
def visit_create_index(self, create, include_schema=False,
include_table_schema=True):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX %s ON %s (%s)" \
% (
self._prepared_index_name(index,
include_schema=include_schema),
preparer.format_table(index.table,
use_schema=include_table_schema),
', '.join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True) for
expr in index.expressions)
)
return text
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX " + self._prepared_index_name(
index, include_schema=True)
def _prepared_index_name(self, index, include_schema=False):
if include_schema and index.table is not None and index.table.schema:
schema = index.table.schema
schema_name = self.preparer.quote_schema(schema)
else:
schema_name = None
ident = index.name
if isinstance(ident, elements._truncated_label):
max_ = self.dialect.max_index_name_length or \
self.dialect.max_identifier_length
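            # truncate over-long auto-generated names to the dialect limit and
            # append a short md5 suffix so distinct names stay distinct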
if len(ident) > max_:
ident = ident[0:max_ - 8] + \
"_" + util.md5_hex(ident)[-4:]
else:
self.dialect.validate_identifier(ident)
index_name = self.preparer.quote(ident)
if schema_name:
index_name = schema_name + "." + index_name
return index_name
def visit_add_constraint(self, create):
return "ALTER TABLE %s ADD %s" % (
self.preparer.format_table(create.element.table),
self.process(create.element)
)
def visit_create_sequence(self, create):
text = "CREATE SEQUENCE %s" % \
self.preparer.format_sequence(create.element)
if create.element.increment is not None:
text += " INCREMENT BY %d" % create.element.increment
if create.element.start is not None:
text += " START WITH %d" % create.element.start
if create.element.minvalue is not None:
text += " MINVALUE %d" % create.element.minvalue
if create.element.maxvalue is not None:
text += " MAXVALUE %d" % create.element.maxvalue
if create.element.nominvalue is not None:
text += " NO MINVALUE"
if create.element.nomaxvalue is not None:
text += " NO MAXVALUE"
if create.element.cycle is not None:
text += " CYCLE"
return text
def visit_drop_sequence(self, drop):
return "DROP SEQUENCE %s" % \
self.preparer.format_sequence(drop.element)
def visit_drop_constraint(self, drop):
constraint = drop.element
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
else:
formatted_name = None
if formatted_name is None:
raise exc.CompileError(
"Can't emit DROP CONSTRAINT for constraint %r; "
"it has no name" % drop.element)
return "ALTER TABLE %s DROP CONSTRAINT %s%s" % (
self.preparer.format_table(drop.element.table),
formatted_name,
drop.cascade and " CASCADE" or ""
)
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column) + " " + \
self.dialect.type_compiler.process(
column.type, type_expression=column)
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if not column.nullable:
colspec += " NOT NULL"
return colspec
def post_create_table(self, table):
return ''
def get_column_default_string(self, column):
if isinstance(column.server_default, schema.DefaultClause):
if isinstance(column.server_default.arg, util.string_types):
return "'%s'" % column.server_default.arg
else:
return self.sql_compiler.process(
column.server_default.arg, literal_binds=True)
else:
return None
def visit_check_constraint(self, constraint):
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "CHECK (%s)" % self.sql_compiler.process(constraint.sqltext,
include_table=False,
literal_binds=True)
text += self.define_constraint_deferrability(constraint)
return text
def visit_column_check_constraint(self, constraint):
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "CHECK (%s)" % constraint.sqltext
text += self.define_constraint_deferrability(constraint)
return text
def visit_primary_key_constraint(self, constraint):
if len(constraint) == 0:
return ''
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "PRIMARY KEY "
text += "(%s)" % ', '.join(self.preparer.quote(c.name)
for c in constraint)
text += self.define_constraint_deferrability(constraint)
return text
def visit_foreign_key_constraint(self, constraint):
preparer = self.dialect.identifier_preparer
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
remote_table = list(constraint.elements)[0].column.table
text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % (
', '.join(preparer.quote(f.parent.name)
for f in constraint.elements),
self.define_constraint_remote_table(
constraint, remote_table, preparer),
', '.join(preparer.quote(f.column.name)
for f in constraint.elements)
)
text += self.define_constraint_match(constraint)
text += self.define_constraint_cascades(constraint)
text += self.define_constraint_deferrability(constraint)
return text
def define_constraint_remote_table(self, constraint, table, preparer):
"""Format the remote table clause of a CREATE CONSTRAINT clause."""
return preparer.format_table(table)
def visit_unique_constraint(self, constraint):
if len(constraint) == 0:
return ''
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
text += "CONSTRAINT %s " % formatted_name
text += "UNIQUE (%s)" % (
', '.join(self.preparer.quote(c.name)
for c in constraint))
text += self.define_constraint_deferrability(constraint)
return text
def define_constraint_cascades(self, constraint):
text = ""
if constraint.ondelete is not None:
text += " ON DELETE %s" % constraint.ondelete
if constraint.onupdate is not None:
text += " ON UPDATE %s" % constraint.onupdate
return text
def define_constraint_deferrability(self, constraint):
text = ""
if constraint.deferrable is not None:
if constraint.deferrable:
text += " DEFERRABLE"
else:
text += " NOT DEFERRABLE"
if constraint.initially is not None:
text += " INITIALLY %s" % constraint.initially
return text
def define_constraint_match(self, constraint):
text = ""
if constraint.match is not None:
text += " MATCH %s" % constraint.match
return text
class GenericTypeCompiler(TypeCompiler):
def visit_FLOAT(self, type_, **kw):
return "FLOAT"
def visit_REAL(self, type_, **kw):
return "REAL"
def visit_NUMERIC(self, type_, **kw):
if type_.precision is None:
return "NUMERIC"
elif type_.scale is None:
return "NUMERIC(%(precision)s)" % \
{'precision': type_.precision}
else:
return "NUMERIC(%(precision)s, %(scale)s)" % \
{'precision': type_.precision,
'scale': type_.scale}
def visit_DECIMAL(self, type_, **kw):
if type_.precision is None:
return "DECIMAL"
elif type_.scale is None:
return "DECIMAL(%(precision)s)" % \
{'precision': type_.precision}
else:
return "DECIMAL(%(precision)s, %(scale)s)" % \
{'precision': type_.precision,
'scale': type_.scale}
def visit_INTEGER(self, type_, **kw):
return "INTEGER"
def visit_SMALLINT(self, type_, **kw):
return "SMALLINT"
def visit_BIGINT(self, type_, **kw):
return "BIGINT"
def visit_TIMESTAMP(self, type_, **kw):
return 'TIMESTAMP'
def visit_DATETIME(self, type_, **kw):
return "DATETIME"
def visit_DATE(self, type_, **kw):
return "DATE"
def visit_TIME(self, type_, **kw):
return "TIME"
def visit_CLOB(self, type_, **kw):
return "CLOB"
def visit_NCLOB(self, type_, **kw):
return "NCLOB"
def _render_string_type(self, type_, name):
text = name
if type_.length:
text += "(%d)" % type_.length
if type_.collation:
text += ' COLLATE "%s"' % type_.collation
return text
def visit_CHAR(self, type_, **kw):
return self._render_string_type(type_, "CHAR")
def visit_NCHAR(self, type_, **kw):
return self._render_string_type(type_, "NCHAR")
def visit_VARCHAR(self, type_, **kw):
return self._render_string_type(type_, "VARCHAR")
def visit_NVARCHAR(self, type_, **kw):
return self._render_string_type(type_, "NVARCHAR")
def visit_TEXT(self, type_, **kw):
return self._render_string_type(type_, "TEXT")
def visit_BLOB(self, type_, **kw):
return "BLOB"
def visit_BINARY(self, type_, **kw):
return "BINARY" + (type_.length and "(%d)" % type_.length or "")
def visit_VARBINARY(self, type_, **kw):
return "VARBINARY" + (type_.length and "(%d)" % type_.length or "")
def visit_BOOLEAN(self, type_, **kw):
return "BOOLEAN"
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_, **kw)
def visit_boolean(self, type_, **kw):
return self.visit_BOOLEAN(type_, **kw)
def visit_time(self, type_, **kw):
return self.visit_TIME(type_, **kw)
def visit_datetime(self, type_, **kw):
return self.visit_DATETIME(type_, **kw)
def visit_date(self, type_, **kw):
return self.visit_DATE(type_, **kw)
def visit_big_integer(self, type_, **kw):
return self.visit_BIGINT(type_, **kw)
def visit_small_integer(self, type_, **kw):
return self.visit_SMALLINT(type_, **kw)
def visit_integer(self, type_, **kw):
return self.visit_INTEGER(type_, **kw)
def visit_real(self, type_, **kw):
return self.visit_REAL(type_, **kw)
def visit_float(self, type_, **kw):
return self.visit_FLOAT(type_, **kw)
def visit_numeric(self, type_, **kw):
return self.visit_NUMERIC(type_, **kw)
def visit_string(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_unicode(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_text(self, type_, **kw):
return self.visit_TEXT(type_, **kw)
def visit_unicode_text(self, type_, **kw):
return self.visit_TEXT(type_, **kw)
def visit_enum(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_null(self, type_, **kw):
raise exc.CompileError("Can't generate DDL for %r; "
"did you forget to specify a "
"type on this Column?" % type_)
def visit_type_decorator(self, type_, **kw):
return self.process(type_.type_engine(self.dialect), **kw)
def visit_user_defined(self, type_, **kw):
return type_.get_col_spec(**kw)
class IdentifierPreparer(object):
"""Handle quoting and case-folding of identifiers based on options."""
reserved_words = RESERVED_WORDS
legal_characters = LEGAL_CHARACTERS
illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS
def __init__(self, dialect, initial_quote='"',
final_quote=None, escape_quote='"', omit_schema=False):
"""Construct a new ``IdentifierPreparer`` object.
initial_quote
Character that begins a delimited identifier.
final_quote
Character that ends a delimited identifier. Defaults to
`initial_quote`.
omit_schema
Prevent prepending schema name. Useful for databases that do
        not support schemas.
"""
self.dialect = dialect
self.initial_quote = initial_quote
self.final_quote = final_quote or self.initial_quote
self.escape_quote = escape_quote
self.escape_to_quote = self.escape_quote * 2
self.omit_schema = omit_schema
self._strings = {}
def _escape_identifier(self, value):
"""Escape an identifier.
Subclasses should override this to provide database-dependent
escaping behavior.
"""
return value.replace(self.escape_quote, self.escape_to_quote)
def _unescape_identifier(self, value):
"""Canonicalize an escaped identifier.
Subclasses should override this to provide database-dependent
unescaping behavior that reverses _escape_identifier.
"""
return value.replace(self.escape_to_quote, self.escape_quote)
def quote_identifier(self, value):
"""Quote an identifier.
Subclasses should override this to provide database-dependent
quoting behavior.
"""
return self.initial_quote + \
self._escape_identifier(value) + \
self.final_quote
def _requires_quotes(self, value):
"""Return True if the given identifier requires quoting."""
lc_value = value.lower()
return (lc_value in self.reserved_words
or value[0] in self.illegal_initial_characters
or not self.legal_characters.match(util.text_type(value))
or (lc_value != value))
def quote_schema(self, schema, force=None):
"""Conditionally quote a schema.
Subclasses can override this to provide database-dependent
quoting behavior for schema names.
        The 'force' flag should be considered deprecated.
"""
return self.quote(schema, force)
def quote(self, ident, force=None):
"""Conditionally quote an identifier.
        The 'force' flag should be considered deprecated.
"""
force = getattr(ident, "quote", None)
if force is None:
if ident in self._strings:
return self._strings[ident]
else:
if self._requires_quotes(ident):
self._strings[ident] = self.quote_identifier(ident)
else:
self._strings[ident] = ident
return self._strings[ident]
elif force:
return self.quote_identifier(ident)
else:
return ident
def format_sequence(self, sequence, use_schema=True):
name = self.quote(sequence.name)
if (not self.omit_schema and use_schema and
sequence.schema is not None):
name = self.quote_schema(sequence.schema) + "." + name
return name
def format_label(self, label, name=None):
return self.quote(name or label.name)
def format_alias(self, alias, name=None):
return self.quote(name or alias.name)
def format_savepoint(self, savepoint, name=None):
return self.quote(name or savepoint.ident)
@util.dependencies("sqlalchemy.sql.naming")
def format_constraint(self, naming, constraint):
if isinstance(constraint.name, elements._defer_name):
name = naming._constraint_name_for_table(
constraint, constraint.table)
if name:
return self.quote(name)
elif isinstance(constraint.name, elements._defer_none_name):
return None
return self.quote(constraint.name)
def format_table(self, table, use_schema=True, name=None):
"""Prepare a quoted table and schema name."""
if name is None:
name = table.name
result = self.quote(name)
if not self.omit_schema and use_schema \
and getattr(table, "schema", None):
result = self.quote_schema(table.schema) + "." + result
return result
def format_schema(self, name, quote=None):
"""Prepare a quoted schema name."""
return self.quote(name, quote)
def format_column(self, column, use_table=False,
name=None, table_name=None):
"""Prepare a quoted column name."""
if name is None:
name = column.name
if not getattr(column, 'is_literal', False):
if use_table:
return self.format_table(
column.table, use_schema=False,
name=table_name) + "." + self.quote(name)
else:
return self.quote(name)
else:
# literal textual elements get stuck into ColumnClause a lot,
# which shouldn't get quoted
if use_table:
return self.format_table(
column.table, use_schema=False,
name=table_name) + '.' + name
else:
return name
def format_table_seq(self, table, use_schema=True):
"""Format table name and schema as a tuple."""
# Dialects with more levels in their fully qualified references
# ('database', 'owner', etc.) could override this and return
# a longer sequence.
if not self.omit_schema and use_schema and \
getattr(table, 'schema', None):
return (self.quote_schema(table.schema),
self.format_table(table, use_schema=False))
else:
return (self.format_table(table, use_schema=False), )
@util.memoized_property
def _r_identifiers(self):
initial, final, escaped_final = \
[re.escape(s) for s in
(self.initial_quote, self.final_quote,
self._escape_identifier(self.final_quote))]
r = re.compile(
r'(?:'
r'(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s'
r'|([^\.]+))(?=\.|$))+' %
{'initial': initial,
'final': final,
'escaped': escaped_final})
return r
def unformat_identifiers(self, identifiers):
"""Unpack 'schema.table.column'-like strings into components."""
r = self._r_identifiers
return [self._unescape_identifier(i)
for i in [a or b for a, b in r.findall(identifiers)]]
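# --- Hedged usage sketch (illustrative addition, not part of compiler.py) ---
# Exercises the IdentifierPreparer quoting logic above via SQLAlchemy's
# default dialect; assumes a stock SQLAlchemy installation is importable.
if __name__ == "__main__":
    from sqlalchemy.engine import default
    prep = default.DefaultDialect().identifier_preparer
    print(prep.quote("select"))      # reserved word -> '"select"'
    print(prep.quote("my_table"))    # legal plain name passes through unquoted
    print(prep.unformat_identifiers('"my schema".tbl'))  # -> ['my schema', 'tbl']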
| gdimitris/FleetManagerBackend | virtual_env/lib/python2.7/site-packages/sqlalchemy/sql/compiler.py | Python | mit | 100,568 | ["VisIt"] | 06cdbf808b92dd5f0795f45bc7f1e5862f2b13e344dad808c2ed669dc07cfdd6 |
import matplotlib.pyplot as plt
import matplotlib
import pickle
import math
import numpy as np
import os
from astropy.io import fits
from astropy.table import Table
import AnniesLasso_2 as tc
import sincinterpol
def log(x):
if x>0:
return math.log10(x)
else:
return -np.inf
def sinc_interp(x, s, u):
    # x is the raw flux.
    # s is the wavelength grid of the raw flux; s should be equally spaced.
    # u is the log-wavelength grid, which is equally spaced between neighbors.
    # The length of u is 8575 and you can use log wl.
    if len(x) != len(s):
        print("len(x) should be equal to len(s)")
    # Find the period
    # T_r = s[1] - s[0]
    # I don't think these two methods make a big difference.
    # Can we use this?
    # parameter a:
    a = 1
N = len(s)
T = a*(s[N-1]-s[0])/N
sincM = np.tile(u, (len(s), 1)) - np.tile(s[:, np.newaxis], (1, len(u)))
y = np.dot(x, np.sinc(sincM / T))
return y
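# Hedged sanity check (illustrative addition, not in the original script): on
# a uniform grid, sinc interpolation of a band-limited toy signal should
# roughly reproduce it away from the boundaries. All names below are demo-only.
def _sinc_interp_demo():
    s_demo = np.linspace(0.0, 1.0, 50)           # equally spaced samples
    x_demo = np.sin(2 * np.pi * 3 * s_demo)      # 3-cycle tone, well below Nyquist
    u_demo = np.linspace(0.0, 1.0, 200)          # denser target grid
    y_demo = sinc_interp(x_demo, s_demo, u_demo)
    resid = y_demo[50:150] - np.sin(2 * np.pi * 3 * u_demo[50:150])
    print("max mid-range residual:", np.max(np.abs(resid)))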
log = np.vectorize(log)
pkl_file = open('wl.pkl', 'rb')
wl = pickle.load(pkl_file)
pkl_file.close()
data_path = "/Users/caojunzhi/Desktop/Data_example/"
# s = wl
apstar = fits.open(data_path+"apStar-r6-2M00005143+5615568.fits")
apstar_table = Table.read(data_path+"apStar-r6-2M00005143+5615568.fits")
jd_array = np.array(apstar_table[0]["JD"])
print(jd_array)
## Let's save everything into a fits file.
# import data
image_path = np.array(["apVisit-r6-5094-55874-088.fits","apVisit-r6-5094-56643-088.fits","apVisit-r6-5094-56651-082.fits"])
# index!! For which visit
index=0
image = fits.open(data_path+image_path[index],ignore_missing_end=True)
dat = Table.read(data_path+image_path[index])
print(dat[0]["JD"])
# flux at 1 and wl at 4
flux_raw =image[1].data.ravel()[::-1]
# Three chips
XSHIFT = image[0].header["XSHIFT"]
# Dither
# red green blue 5, 4.25 and 3.5
beta_red = (XSHIFT+5)*4.144/(3*10**5)
beta_green = (XSHIFT+4.25)*4.144/(3*10**5)
beta_blue = (XSHIFT+3.5)*4.144/(3*10**5)
red = image[4].data[0,:]
green = image[4].data[1,:]
blue = image[4].data[2,:]
# Dither:
red = (-beta_red+1)*red
green = (-beta_green+1)*green
blue = (-beta_blue+1)*blue
wl_raw =np.append(red,[green,blue])
wl_raw = wl_raw[::-1]
## Do interpolation:
wl_log = log(wl)
wl_raw_log = log(wl_raw)
print("doing interpolation")
y_inter = sinc_interp(x=flux_raw,s=wl_raw_log,u=wl_log)
# Add velocity
# labels
array = np.array([[ 4.80458566e+03 , 2.57179854e+00 , -2.01372206e-01],
[ 4.79263171e+03 , 2.64864274e+00, -1.82310447e-01],
[ 4.78393682e+03 , 2.65073084e+00 , -1.81901661e-01],
[ 4.80550440e+03 , 2.57941485e+00 , -2.04721940e-01],
[ 4.79156434e+03 , 2.63379621e+00 , -1.83733041e-01],
[ 4.77938538e+03 , 2.65496682e+00 , -1.83967319e-01]])
### Normalize y spectra:
# y_inter = y_inter*(np.nansum(apstar[1].data[2+index,:]))/np.nansum(y_inter)
############ plots
font = {'family': 'normal',
'weight': 'bold',
'size': 15}
matplotlib.rc('font', **font)
plt.subplot(2,1,1)
plt.step(wl,apstar[1].data[2+index,:],"k", label = "$APOGEE\quad team\quad spectra$",linewidth=0.7, alpha=1)
plt.plot(wl,20*apstar[2].data[2+index,:],"r", label = "$20\quad times\quad spectra\quad error$",linewidth=0.7, alpha=0.5)
plt.plot(wl,20*(apstar[1].data[2+index,:]-y_inter),"g",label = "$20\quad times\quad Residual\quad$", linewidth=0.7, alpha=0.5)
plt.xlabel("$Wave\quad length\quad \AA$", fontsize=20)
plt.ylabel("$Flux$", fontsize=20)
plt.suptitle("$Comparison\quad of\quad the\quad spectra\quad for\quad one\quad epoch\quad %s$"%(str(index+1)), fontsize=30)
plt.legend()
axes = plt.gca()
axes.set_xlim([15660, 15780])
axes.set_ylim([-500,1000])
plt.subplot(2,1,2)
plt.step(wl,apstar[1].data[2+index,:],"k", label = "$APOGEE\quad team\quad spectra$",linewidth=0.7, alpha=1)
plt.plot(wl,20*apstar[2].data[2+index,:],"r", label = "$20\quad times\quad spectra\quad error$",linewidth=0.7, alpha=0.5)
plt.plot(wl,20*(apstar[1].data[2+index,:]-y_inter),"g",label = "$20\quad times\quad Residual\quad$", linewidth=0.7, alpha=0.5)
plt.ylabel("$Flux$", fontsize=20)
plt.suptitle("$Comparison\quad of\quad the\quad spectra\quad for\quad one\quad epoch\quad %s$"%(str(index+1)), fontsize=30)
plt.legend()
axes = plt.gca()
axes.set_xlim([16160, 16280])
axes.set_ylim([-500,1000])
# save it:
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(14.5, 11.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170621_David/" + "New_flux_residual_"+ str(index)+ ".png"
fig.savefig(save_path, dpi=500)
plt.close()
#### Compare spectra:
font = {'family': 'normal',
'weight': 'bold',
'size': 15}
matplotlib.rc('font', **font)
plt.subplot(2,1,1)
plt.step(wl,apstar[1].data[2+index,:],"k", label = "$APOGEE\quad team\quad Teff=%.2fK \quad Logg=%.2f dex \quad FeH=%.2f dex$"%(array[index,0],array[index,1],array[index,2]),linewidth=0.7, alpha=1)
plt.plot(wl,y_inter,"g",label = "$My\quad code\quad Teff=%.2f K \quad Logg=%.2f dex\quad FeH=%.2f dex$"%(array[index+3,0],array[index+3,1],array[index+3,2]), linewidth=0.7, alpha=0.5)
plt.xlabel("$Wave\quad length\quad \AA$", fontsize=20)
plt.ylabel("$Flux$", fontsize=20)
plt.suptitle("$Comparison\quad of\quad the\quad spectra\quad for\quad one\quad epoch\quad %s$"%(str(index+1)), fontsize=30)
plt.legend()
axes = plt.gca()
axes.set_xlim([15660, 15780])
plt.subplot(2,1,2)
plt.step(wl,apstar[1].data[2+index,:],"k", label = "$APOGEE\quad team\quad Teff=%.2fK \quad Logg=%.2f dex \quad FeH=%.2f dex$"%(array[index,0],array[index,1],array[index,2]),linewidth=0.7, alpha=1)
plt.plot(wl,y_inter,"g",label = "$My\quad code\quad Teff=%.2f K \quad Logg=%.2f dex\quad FeH=%.2f dex$"%(array[index+3,0],array[index+3,1],array[index+3,2]), linewidth=0.7, alpha=0.5)
plt.ylabel("$Flux$", fontsize=20)
plt.suptitle("$Comparison\quad of\quad the\quad spectra\quad for\quad one\quad epoch\quad %s$"%(str(index+1)), fontsize=30)
plt.legend()
axes = plt.gca()
axes.set_xlim([16160, 16280])
# save it:
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(14.5, 11.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170621_David/" + "New_flux_"+ str(index)+ ".png"
fig.savefig(save_path, dpi=500)
plt.close()
| peraktong/Cannon-Experiment | Sinc_interpolation/0629_interpolation_residual.py | Python | mit | 6,215 | ["VisIt"] | 937c7a46dc0255b7ce0fdc0268f3cb4a3744d9b3c87c3f9ca62f47c00f5bc9c1 |
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-admin-accounting-cli
# Author : Adria Casajus
########################################################################
"""
Command line administrative interface to DIRAC Accounting DataStore Service
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
Script.localCfg.addDefaultEntry( "LogLevel", "info" )
Script.setUsageMessage('\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ...' % Script.scriptName, ] ) )
Script.parseCommandLine()
from DIRAC.AccountingSystem.Client.AccountingCLI import AccountingCLI
if __name__=="__main__":
acli = AccountingCLI()
acli.start()
| andresailer/DIRAC | AccountingSystem/scripts/dirac-admin-accounting-cli.py | Python | gpl-3.0 | 806 | ["DIRAC"] | 721f05d5b885c1032a6aec6fb92ad412a12f71dbc6704ac4b968995d877f5bea |
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import config
from install_package import InstallPackage
import os
import shutil
import utils
BASENAME = "gdcm"
GIT_REPO = "git://git.code.sf.net/p/gdcm/gdcm"
GIT_TAG = "v2.0.17"
dependencies = ['SWIG', 'VTK']
class GDCM(InstallPackage):
def __init__(self):
self.source_dir = os.path.join(config.archive_dir, BASENAME)
self.build_dir = os.path.join(config.build_dir, '%s-build' %
(BASENAME,))
self.inst_dir = os.path.join(config.inst_dir, BASENAME)
def get(self):
if os.path.exists(self.source_dir):
utils.output("gdcm already checked out, skipping step.")
else:
os.chdir(config.archive_dir)
ret = os.system("git clone %s" % (GIT_REPO,))
if ret != 0:
utils.error("Could not clone GDCM repo. Fix and try again.")
os.chdir(self.source_dir)
ret = os.system("git checkout %s" % (GIT_TAG,))
if ret != 0:
utils.error("Could not checkout GDCM %s. Fix and try again." % (GIT_TAG,))
def unpack(self):
# no unpack step
pass
def configure(self):
if os.path.exists(
os.path.join(self.build_dir, 'CMakeFiles/cmake.check_cache')):
utils.output("gdcm build already configured.")
return
if not os.path.exists(self.build_dir):
os.mkdir(self.build_dir)
cmake_params = \
"-DGDCM_BUILD_APPLICATIONS=OFF " \
"-DGDCM_BUILD_EXAMPLES=OFF " \
"-DGDCM_BUILD_SHARED_LIBS=ON " \
"-DGDCM_BUILD_TESTING=OFF " \
"-DGDCM_USE_ITK=OFF " \
"-DGDCM_USE_VTK=ON " \
"-DGDCM_USE_WXWIDGETS=OFF " \
"-DGDCM_WRAP_JAVA=OFF " \
"-DGDCM_WRAP_PHP=OFF " \
"-DGDCM_WRAP_PYTHON=ON " \
"-DCMAKE_BUILD_TYPE=RelWithDebInfo " \
"-DCMAKE_INSTALL_PREFIX=%s " \
"-DSWIG_DIR=%s " \
"-DSWIG_EXECUTABLE=%s " \
"-DVTK_DIR=%s " \
"-DPYTHON_EXECUTABLE=%s " \
"-DPYTHON_LIBRARY=%s " \
"-DPYTHON_INCLUDE_PATH=%s " % \
(self.inst_dir, config.SWIG_DIR,
config.SWIG_EXECUTABLE, config.VTK_DIR,
config.PYTHON_EXECUTABLE,
config.PYTHON_LIBRARY,
config.PYTHON_INCLUDE_PATH)
ret = utils.cmake_command(self.build_dir, self.source_dir,
cmake_params)
if ret != 0:
utils.error("Could not configure GDCM. Fix and try again.")
def build(self):
posix_file = os.path.join(self.build_dir,
'bin/libvtkgdcmPython.so')
nt_file = os.path.join(self.build_dir, 'bin',
config.BUILD_TARGET, 'vtkgdcmPythonD.dll')
if utils.file_exists(posix_file, nt_file):
utils.output("GDCM already built. Skipping build step.")
else:
os.chdir(self.build_dir)
ret = utils.make_command('GDCM.sln')
if ret != 0:
utils.error("Could not build GDCM. Fix and try again.")
def install(self):
if os.name == 'nt':
config.GDCM_LIB = os.path.join(
self.inst_dir, 'bin')
else:
config.GDCM_LIB = os.path.join(self.inst_dir, 'lib')
config.GDCM_PYTHON = os.path.join(self.inst_dir, 'lib')
test_file = os.path.join(config.GDCM_PYTHON, 'gdcm.py')
if os.path.exists(test_file):
utils.output("gdcm already installed, skipping step.")
else:
os.chdir(self.build_dir)
ret = utils.make_command('GDCM.sln', install=True)
if ret != 0:
utils.error(
"Could not install gdcm. Fix and try again.")
def clean_build(self):
utils.output("Removing build and installation directories.")
if os.path.exists(self.inst_dir):
shutil.rmtree(self.inst_dir)
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
def clean_install(self):
utils.output("Removing installation directory.")
if os.path.exists(self.inst_dir):
shutil.rmtree(self.inst_dir)
def get_installed_version(self):
import gdcm
return gdcm.Version.GetVersion()
| nagyistoce/devide.johannes | install_packages/ip_gdcm.py | Python | bsd-3-clause | 4,758 | ["VTK"] | 276864cb0b5690fc82aac6547d0c24871b8d9a40a620ee915726d8e1e06ca00e |
"""
Modified version of RMSD distance metric that does the following:
1) Returns the RMSD, rotation matrices, and aligned conformations.
2) Permutes selected atomic indices.
3) Performs the alignment again.
"""
from msmbuilder.metrics import AbstractDistanceMetric
from msmbuilder.metrics import RMSD
import _lprmsd
import mdtraj as md
import numpy as np
import itertools
from scipy import optimize
import copy
import logging
logger = logging.getLogger('LPRMSD')
PT = {'H' : 1.0079, 'He' : 4.0026,
'Li' : 6.941, 'Be' : 9.0122, 'B' : 10.811, 'C' : 12.0107, 'N' : 14.0067, 'O' : 15.9994, 'F' : 18.9984, 'Ne' : 20.1797,
'Na' : 22.9897, 'Mg' : 24.305, 'Al' : 26.9815, 'Si' : 28.0855, 'P' : 30.9738, 'S' : 32.065, 'Cl' : 35.453, 'Ar' : 39.948,
'K' : 39.0983, 'Ca' : 40.078, 'Sc' : 44.9559, 'Ti' : 47.867, 'V' : 50.9415, 'Cr' : 51.9961, 'Mn' : 54.938, 'Fe' : 55.845,
'Co' : 58.9332, 'Ni' : 58.6934, 'Cu' : 63.546, 'Zn' : 65.39, 'Ga' : 69.723, 'Ge' : 72.64, 'As' : 74.9216, 'Se' : 78.96,
'Br' : 79.904, 'Kr' : 83.8, 'Rb' : 85.4678, 'Sr' : 87.62, 'Y' : 88.9059, 'Zr' : 91.224, 'Nb' : 92.9064, 'Mo' : 95.94,
'Tc' : 98, 'Ru' : 101.07, 'Rh' : 102.9055, 'Pd' : 106.42, 'Ag' : 107.8682, 'Cd' : 112.411, 'In' : 114.818, 'Sn' : 118.71,
'Sb' : 121.76, 'Te' : 127.6, 'I' : 126.9045, 'Xe' : 131.293, 'Cs' : 132.9055, 'Ba' : 137.327, 'La' : 138.9055,
'Ce' : 140.116, 'Pr' : 140.9077, 'Nd' : 144.24, 'Pm' : 145, 'Sm' : 150.36, 'Eu' : 151.964, 'Gd' : 157.25, 'Tb' : 158.9253,
'Dy' : 162.5, 'Ho' : 164.9303, 'Er' : 167.259, 'Tm' : 168.9342, 'Yb' : 173.04, 'Lu' : 174.967, 'Hf' : 178.49,
'Ta' : 180.9479, 'W' : 183.84, 'Re' : 186.207, 'Os' : 190.23, 'Ir' : 192.217, 'Pt' : 195.078, 'Au' : 196.9665,
'Hg' : 200.59, 'Tl' : 204.3833, 'Pb' : 207.2, 'Bi' : 208.9804, 'Po' : 209, 'At' : 210, 'Rn' : 222, 'Fr' : 223,
'Ra' : 226, 'Ac' : 227, 'Th' : 232.0381, 'Pa' : 231.0359, 'U' : 238.0289, 'Np' : 237, 'Pu' : 244, 'Am' : 243,
'Cm' : 247, 'Bk' : 247, 'Cf' : 251, 'Es' : 252, 'Fm' : 257, 'Md' : 258, 'No' : 259, 'Lr' : 262, 'Rf' : 261,
'Db' : 262, 'Sg' : 266, 'Bh' : 264, 'Hs' : 277, 'Mt' : 268
}
def ReadPermFile(fnm):
LL = []
L = []
K = []
fopen = open(fnm).readlines()
for ln, line in enumerate(fopen):
s = line.strip()
if '--' not in s:
L.append(int(s))
if (ln != 0 and '--' in s) or (ln == len(fopen) - 1):
LL.append(np.array(L))
if len(s.split()) > 1:
try:
K.append(int(s.split()[1]))
except:
logger.error("The syntax of this line is incorrect: %s", line)
else:
K.append(len(L))
L = []
else:
continue
return (LL, K)
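# Hedged usage sketch (illustrative, not in the original file): "--" lines
# split the sets of permutable indices, and "-- <k>" keeps only k atoms of
# the preceding set in the RMSD. The helper below is demo-only.
def _read_perm_file_demo():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.perm', delete=False) as fh:
        fh.write("0\n1\n2\n-- 2\n5\n6\n")
        path = fh.name
    LL, K = ReadPermFile(path)
    # -> LL == [array([0, 1, 2]), array([5, 6])] and K == [2, 2]
    return LL, K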
class LPTraj(dict):
def __init__(self, S, atomindices=None, permuteindices=None):
super(LPTraj, self).__init__()
self['XYZList'] = S.xyz
aidx = list(atomindices) if atomindices != None else []
pidx = list(itertools.chain(*permuteindices)) if permuteindices != None else []
if atomindices == None:
self.TD = RMSD.TheoData(S.xyz)
else:
self.TD = RMSD.TheoData(S.xyz[:, np.array(aidx)])
def __getitem__(self, key):
if isinstance(key, int) or isinstance(key, slice) or isinstance(key, np.ndarray):
if isinstance(key, int):
key = [key]
newtraj = copy.copy(self)
newtraj['XYZList'] = self['XYZList'][key]
newtraj.TD = self.TD[key]
return newtraj
return super(LPTraj, self).__getitem__(key)
def EulerMatrix(T1, T2, T3):
DMat = np.mat(np.zeros((3, 3), dtype=float))
DMat[0, 0] = np.cos(T1)
DMat[0, 1] = np.sin(T1)
DMat[1, 0] = -np.sin(T1)
DMat[1, 1] = np.cos(T1)
DMat[2, 2] = 1
CMat = np.mat(np.zeros((3, 3), dtype=float))
CMat[0, 0] = 1
CMat[1, 1] = np.cos(T2)
CMat[1, 2] = np.sin(T2)
CMat[2, 1] = -np.sin(T2)
CMat[2, 2] = np.cos(T2)
BMat = np.mat(np.zeros((3, 3), dtype=float))
BMat[0, 0] = np.cos(T3)
BMat[0, 1] = np.sin(T3)
BMat[1, 0] = -np.sin(T3)
BMat[1, 1] = np.cos(T3)
BMat[2, 2] = 1
EMat = BMat * CMat * DMat
return np.mat(EMat)
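# Hedged sanity check (illustrative addition): the composed Euler rotation is
# orthogonal with unit determinant, so R * R.T should be the identity.
def _euler_matrix_demo():
    R = EulerMatrix(0.3, 1.1, -0.7)
    assert np.allclose(R * R.T, np.eye(3))
    assert np.isclose(np.linalg.det(R), 1.0)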
def AlignToMoments(elem, xyz1, xyz2=None):
"""Pre-aligns molecules to 'moment of inertia'.
If xyz2 is passed in, it will assume that xyz1 is already
aligned to the moment of inertia, and it simply does 180-degree
rotations to make sure nothing is inverted."""
xyz = xyz1 if xyz2 == None else xyz2
I = np.zeros((3, 3))
for ei, xi in zip(elem, xyz):
I += PT[ei] * (np.dot(xi, xi) * np.eye(3) - np.outer(xi, xi))
A, B = np.linalg.eig(I)
# Sort eigenvectors by eigenvalue
BB = B[:, np.argsort(A)]
determ = np.linalg.det(BB)
Thresh = 1e-3
if np.abs(determ - 1.0) > Thresh:
if np.abs(determ + 1.0) > Thresh:
logger.error("AHOOGA, determinant is % .3f", determ)
BB[:, 2] *= -1
xyzr = np.array(np.mat(BB).T * np.mat(xyz).T).T.copy()
if xyz2 != None:
xyzrr = AlignToDensity(elem, xyz1, xyzr, binary=True)
return xyzrr
else:
return xyzr
def ComputeOverlap(theta, elem, xyz1, xyz2):
"""
Computes an 'overlap' between two molecules based on some
fictitious density. Good for fine-tuning alignment but gets stuck
in local minima.
"""
xyz2R = np.array(EulerMatrix(theta[0], theta[1], theta[2]) * np.mat(xyz2.T)).T
Obj = 0.0
for i in set(elem):
for j in np.where(elem == i)[0]:
for k in np.where(elem == i)[0]:
dx = xyz1[j] - xyz2R[k]
dx2 = np.dot(dx, dx)
Obj -= np.exp(-0.5 * dx2)
return Obj
def AlignToDensity(elem, xyz1, xyz2, binary=False):
"""
Pre-aligns molecules to some density.
I don't really like this, but I have to start with some alignment
and a grid scan just plain sucks.
This function can be called by AlignToMoments to get rid of inversion problems
"""
t0 = np.array([0, 0, 0])
if binary:
t1 = optimize.brute(ComputeOverlap, ((0, np.pi), (0, np.pi), (0, np.pi)), args=(elem, xyz1, xyz2), Ns=2, finish=optimize.fmin_bfgs)
else:
t1 = optimize.brute(ComputeOverlap, ((0, 2 * np.pi), (0, 2 * np.pi), (0, 2 * np.pi)), args=(elem, xyz1, xyz2), Ns=6, finish=optimize.fmin_bfgs)
xyz2R = (np.array(EulerMatrix(t1[0], t1[1], t1[2]) * np.mat(xyz2.T)).T).copy()
return xyz2R
class LPRMSD(AbstractDistanceMetric):
def __init__(self, atomindices=None, permuteindices=None, altindices=None, moments=False, gridmesh=0, debug=False):
self.atomindices = atomindices
self.altindices = altindices
if permuteindices != None:
self.permuteindices = permuteindices[0]
self.permutekeep = permuteindices[1]
else:
self.permuteindices = None
self.permutekeep = None
self.grid = None
self.moments = moments
self.debug = debug
if gridmesh > 0:
# Generate a list of Euler angles
self.grid = list(itertools.product(*[list(np.arange(0, 2 * np.pi, 2 * np.pi / gridmesh)) for i in range(gridmesh)]))
def _compute_one_to_all(self, pt1, pt2, index1, b_xyzout=False):
#=========================================#
# Required information #
#=========================================#
# Two prepared trajectories
# A list of lists of permutable indices
# A list of nonpermutable indices (aka the AtomIndices)
# Boolean of whether to do the grid scan
if self.atomindices == None and self.permuteindices == None:
self.atomindices = np.arange(pt2['XYZList'].shape[1])
Usage = 0
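        # Usage packs the active options into decimal digits for the C
        # extension: +1000 atom indices, +100 permutable sets, +10 alternate
        # indices, +1 return aligned coordinates (b_xyzout)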
pi_flat = np.array([])
pi_lens = np.array([])
pi_keep = np.array([])
alt_idx = np.array([])
id_idx = np.array([])
if self.atomindices != None:
Usage += 1000
id_idx = np.array(self.atomindices)
if self.permuteindices != None:
Usage += 100
pi_flat = np.array(list(itertools.chain(*self.permuteindices)))
pi_lens = np.array([len(i) for i in self.permuteindices])
pi_keep = np.array(self.permutekeep)
if self.altindices != None:
Usage += 10
alt_idx = np.array(self.altindices)
if b_xyzout :
Usage += 1
XYZOut = pt2['XYZList'].transpose(0, 2, 1).copy().astype('float32')
XYZRef = pt1['XYZList'].transpose(0, 2, 1)[index1].copy().astype('float32')
RotOut = np.zeros(len(pt2) * 9, dtype='float32')
RMSDOut = _lprmsd.LPRMSD_Multipurpose(Usage, self.debug,
pt1.TD.NumAtoms, pt1.TD.NumAtomsWithPadding, pt1.TD.NumAtomsWithPadding,
pt2.TD.XYZData, pt1.TD.XYZData[index1], pt2.TD.G, pt1.TD.G[index1],
id_idx, pi_flat, pi_lens, pi_keep, alt_idx, RotOut, XYZOut, XYZRef)
if b_xyzout:
return RMSDOut, XYZOut.transpose(0, 2, 1)
else:
return RMSDOut
def one_to_all_aligned(self, prepared_traj1, prepared_traj2, index1):
"""
Inputs: Two trajectories (Unlike RMSD, this takes in raw trajectory files)
Calculate a vector of distances from the index1th frame of prepared_traj1
to all the frames in prepared_traj2. This always uses OMP parallelization.
        If you really don't want OMP parallelization (why not?), then you can modify
the C code yourself.
Returns: a vector of distances of length len(indices2)"""
return self._compute_one_to_all(prepared_traj1, prepared_traj2, index1, b_xyzout=True)
def prepare_trajectory(self, trajectory):
""" Copies the trajectory and optionally performs pre-alignment using moments of inertia. """
T1 = LPTraj(trajectory, self.atomindices, self.permuteindices)
if self.moments:
xyz1 = trajectory.xyz[0]
xyz1 -= xyz1.mean(0)
# TODO: Change this to mdtraj.
# Should I construct a list of atom names or try to be fancy
# and use a pandas dataframe
xyz1 = AlignToMoments(trajectory['AtomNames'], xyz1)
for index2, xyz2 in enumerate(trajectory['XYZList']):
xyz2 -= xyz2.mean(0)
xyz2 = AlignToMoments(trajectory['AtomNames'], xyz1, xyz2)
T1['XYZList'][index2] = xyz2
else:
for index, xyz in enumerate(trajectory.xyz):
if not self.atomindices is None:
xsel = xyz[np.array(self.atomindices), :]
else:
xsel = xyz
xyz -= xsel.mean(0)
T1['XYZList'][index] = xyz.copy()
return T1
def one_to_all(self, prepared_traj1, prepared_traj2, index1):
"""
Inputs: Two trajectories (Unlike RMSD, this takes in raw trajectory files)
Calculate a vector of distances from the index1th frame of prepared_traj1
to all the frames in prepared_traj2. This always uses OMP parallelization.
        If you really don't want OMP parallelization (why not?), then you can modify
the C code yourself.
Returns: a vector of distances of length len(indices2)"""
return self._compute_one_to_all(prepared_traj1, prepared_traj2, index1, b_xyzout=False)
def add_metric_parser(parsergroup, add_argument):
lprmsd = parsergroup.add_parser('lprmsd',
        description='''LPRMSD: RMSD with the ability to handle permutation-invariant atoms.
Solves the assignment problem using a linear programming solution (LP). Can handle aligning
    on some atoms and computing the RMSD on other atoms.''')
add_argument(lprmsd, '-a', dest='lprmsd_atom_indices', help='Regular atom indices. Pass "all" to use all atoms.', default='AtomIndices.dat')
add_argument(lprmsd, '-l', dest='lprmsd_alt_indices', default=None,
help='''Optional alternate atom indices for RMSD. If you want to align the trajectories
using one set of atom indices but then compute the distance using a different
set of indices, use this option. If supplied, the regular atom_indices will
be used for the alignment and these indices for the distance calculation''')
add_argument(lprmsd, '-P', dest='lprmsd_permute_atoms', default=None, help='''Atom labels to be permuted.
Sets of indistinguishable atoms that can be permuted to minimize the RMSD. On disk this should be stored as
a list of newline separated indices with a "--" separating the sets of indices if there are
    more than one set of indistinguishable atoms. Use "-- (integer)" to include a subset in the RMSD (to avoid undesirable boundary effects).''')
return lprmsd
def construct_metric(args):
if args.metric != 'lprmsd':
return None
if args.lprmsd_atom_indices != 'all':
atom_inds = np.loadtxt(args.lprmsd_atom_indices, dtype=np.int)
else:
atom_inds = None
if args.lprmsd_permute_atoms is not None:
permute_inds = ReadPermFile(args.lprmsd_permute_atoms)
else:
permute_inds = None
if args.lprmsd_alt_indices is not None:
alt_inds = np.loadtxt(args.lprmsd_alt_indices, np.int)
else:
alt_inds = None
return LPRMSD(atom_inds, permute_inds, alt_inds)
| mpharrigan/msmbuilder | Extras/LPRMSD/lprmsd.py | Python | gpl-2.0 | 13,612 | ["MDTraj"] | 98f451a979c1936d4ca4af9fb6c3f32fc0b5743a120378429c771da28b901bfc |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Convert a FAME input file to a MEASURE input file.
"""
import argparse
import numpy
import os.path
from chempy.molecule import Molecule
from chempy.species import Species, TransitionState
from chempy.reaction import Reaction
from chempy.species import LennardJones
from chempy.states import *
from chempy.kinetics import ArrheniusModel
from measure.network import Network
from measure.collision import SingleExponentialDownModel
################################################################################
def parseCommandLineArguments():
"""
Parse the command-line arguments being passed to MEASURE. These are
described in the module docstring.
"""
parser = argparse.ArgumentParser()
parser.add_argument('file', metavar='FILE', type=str, nargs='+',
help='a file to convert')
parser.add_argument('-d', '--dictionary', metavar='DICTFILE', type=str, nargs=1,
help='the RMG dictionary corresponding to these files')
return parser.parse_args()
################################################################################
def readMeaningfulLine(f):
line = f.readline()
while line != '':
line = line.strip()
if len(line) > 0 and line[0] != '#':
return line
else:
line = f.readline()
return ''
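# Hedged behavior sketch (illustrative, Python 2 like the rest of the script):
# readMeaningfulLine skips blank lines and '#' comments, returning the next
# stripped line of real content.
def _read_meaningful_line_demo():
    from StringIO import StringIO
    buf = StringIO("# header comment\n\nmodifiedStrongCollision\n")
    assert readMeaningfulLine(buf) == 'modifiedStrongCollision'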
if __name__ == '__main__':
# Parse the command-line arguments
args = parseCommandLineArguments()
# Load RMG dictionary if specified
moleculeDict = {}
if args.dictionary is not None:
f = open(args.dictionary[0])
adjlist = ''; label = ''
for line in f:
if len(line.strip()) == 0:
if len(adjlist.strip()) > 0:
molecule = Molecule()
molecule.fromAdjacencyList(adjlist)
moleculeDict[label] = molecule
adjlist = ''; label = ''
else:
if len(adjlist.strip()) == 0:
label = line.strip()
adjlist += line
f.close()
method = None
for fstr in args.file:
print 'Loading file "%s"...' % fstr
f = open(fstr)
# Read method
method = readMeaningfulLine(f).lower()
if method == 'modifiedstrongcollision': method = 'modified strong collision'
elif method == 'reservoirstate': method = 'reservoir state'
# Read temperatures
data = readMeaningfulLine(f).split()
assert data[1] == 'K'
Tmin = float(data[2]); Tmax = float(data[3])
Tlist = numpy.zeros(int(data[0]), numpy.float64)
for i in range(int(data[0])):
Tlist[i] = float(readMeaningfulLine(f))
# Read pressures
data = readMeaningfulLine(f).split()
assert data[1] == 'Pa'
Pmin = float(data[2]); Pmax = float(data[3])
Plist = numpy.zeros(int(data[0]), numpy.float64)
for i in range(int(data[0])):
Plist[i] = float(readMeaningfulLine(f))
# Read interpolation model
model = readMeaningfulLine(f).split()
# Read grain size or number of grains
data = readMeaningfulLine(f).split()
if data[0].lower() == 'numgrains':
Ngrains = int(data[1])
grainSize = 0.0
elif data[0].lower() == 'grainsize':
assert data[2] == 'J/mol'
Ngrains = 0
            grainSize = float(data[1])  # the value precedes the 'J/mol' unit token
network = Network()
# Read collision model
data = readMeaningfulLine(f).split()
assert data[0].lower() == 'singleexpdown'
assert data[1] == 'J/mol'
network.collisionModel = SingleExponentialDownModel(alpha0=float(data[2]), T0=298, n=0.0)
# Read bath gas parameters
bathGas = Species()
bathGas.molecularWeight = float(readMeaningfulLine(f).split()[1]) / 1000.0
bathGas.lennardJones = LennardJones(sigma=float(readMeaningfulLine(f).split()[1]), epsilon=float(readMeaningfulLine(f).split()[1]))
# Read species data
Nspec = int(readMeaningfulLine(f))
speciesDict = {}
for i in range(Nspec):
spec = Species()
# Read species label
spec.label = readMeaningfulLine(f)
speciesDict[spec.label] = spec
if spec.label in moleculeDict:
spec.molecule = [moleculeDict[spec.label]]
# Read species E0
data = readMeaningfulLine(f).split()
assert data[0] == 'J/mol'
spec.E0 = float(data[1])
# Read and ignore species thermo data
for j in range(10):
data = readMeaningfulLine(f)
# Read species collision parameters
spec.molecularWeight = float(readMeaningfulLine(f).split()[1]) / 1000.0
spec.lennardJones = LennardJones(sigma=float(readMeaningfulLine(f).split()[1]), epsilon=float(readMeaningfulLine(f).split()[1]))
# Read species frequencies
spec.states = StatesModel()
data = readMeaningfulLine(f).split()
assert data[1] == 'cm^-1'
frequencies = []
for j in range(int(data[0])):
frequencies.append(float(readMeaningfulLine(f)))
spec.states.modes.append(HarmonicOscillator(frequencies))
# Read species external rotors
data = readMeaningfulLine(f).split()
assert data[0] == '0'
assert data[1] == 'cm^-1'
# Read species internal rotors
data = readMeaningfulLine(f).split()
assert data[1] == 'cm^-1'
frequencies = []
for j in range(int(data[0])):
frequencies.append(float(readMeaningfulLine(f)) * 2.9979e10)
data = readMeaningfulLine(f).split()
assert data[1] == 'cm^-1'
barriers = []
for j in range(int(data[0])):
barriers.append(float(readMeaningfulLine(f)) * 11.96)
inertia = [V0 / 2.0 / nu**2 / 6.022e23 for nu, V0 in zip(frequencies, barriers)]
for I, V0 in zip(inertia, barriers):
spec.states.modes.append(HinderedRotor(inertia=I, barrier=V0, symmetry=1))
# Read overall symmetry number
symm = int(readMeaningfulLine(f))
# Read isomer, reactant channel, and product channel data
Nisom = int(readMeaningfulLine(f))
Nreac = int(readMeaningfulLine(f))
Nprod = int(readMeaningfulLine(f))
for i in range(Nisom):
data = readMeaningfulLine(f).split()
assert data[0] == '1'
network.isomers.append(speciesDict[data[1]])
for i in range(Nreac):
data = readMeaningfulLine(f).split()
assert data[0] == '2'
network.reactants.append([speciesDict[data[1]], speciesDict[data[2]]])
for i in range(Nprod):
data = readMeaningfulLine(f).split()
if data[0] == '1':
network.products.append([speciesDict[data[1]]])
elif data[0] == '2':
network.products.append([speciesDict[data[1]], speciesDict[data[2]]])
# Read path reactions
Nrxn = int(readMeaningfulLine(f))
for i in range(Nrxn):
# Read and ignore reaction equation
equation = readMeaningfulLine(f)
rxn = Reaction(transitionState=TransitionState(), reversible=True)
network.pathReactions.append(rxn)
# Read reactant and product indices
data = readMeaningfulLine(f).split()
reac = int(data[0]) - 1
prod = int(data[1]) - 1
if reac < Nisom:
rxn.reactants = [network.isomers[reac]]
elif reac < Nisom+Nreac:
rxn.reactants = network.reactants[reac-Nisom]
else:
rxn.reactants = network.products[reac-Nisom-Nreac]
if prod < Nisom:
rxn.products = [network.isomers[prod]]
elif prod < Nisom+Nreac:
rxn.products = network.reactants[prod-Nisom]
else:
rxn.products = network.products[prod-Nisom-Nreac]
# Read reaction E0
data = readMeaningfulLine(f).split()
assert data[0] == 'J/mol'
rxn.transitionState.E0 = float(data[1])
# Read high-pressure limit kinetics
data = readMeaningfulLine(f)
assert data.lower() == 'arrhenius'
rxn.kinetics = ArrheniusModel(
A=float(readMeaningfulLine(f).split()[1]),
Ea=float(readMeaningfulLine(f).split()[1]),
n=float(readMeaningfulLine(f).split()[0]),
)
# Close file
f.close()
dirname, basename = os.path.split(os.path.abspath(fstr))
basename, ext = os.path.splitext(basename)
output = os.path.join(dirname, basename + '.svg')
network.drawPotentialEnergySurface(output, Eunits='kcal/mol')
| jwallen/MEASURE | convertFAME.py | Python | mit | 9,153 | ["ChemPy"] | 04111e318c53e1bb930434cb2c5fadecb01927124fdf36bee671c55e81765b21 |
from __future__ import division
import time
import numpy as np
import itertools
import scipy.stats as spstats
from sklearn.base import BaseEstimator
LOSS = {"hinge": 1, "l1": 2, "l2": 3, "logit": 4, "eps_intensive": 5}
TASK = {"classification": 1, "regression": 2}
KERNEL = {"gaussian": 1}
class GOGP_SI:
def __init__(self, theta=0.9, gamma=1e-5, lbd=0.5,
percent_batch=0.1, epoch=1.0, core_limit=-1,
verbose=0):
self.X = None
self.gamma = gamma
self.lbd = lbd
self.theta = theta
self.epoch = epoch
self.percent_batch = percent_batch
self.core_limit = core_limit
self.w = None
self.w_index = None
self.w_l = 0
self.wnorm2 = None
self.train_time = 0
self.batch_time = 0
self.online_time = 0
self.test_time = 0
self.rmse_lst = None
self.final_rmse = 0
self.verbose = verbose
self.task = TASK["regression"]
# for testing
def get_kernel(self, x, y, gamma):
xy = x - y
return np.exp(-gamma * np.sum(xy*xy))
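    # RBF (Gaussian) kernel: k(x, y) = exp(-gamma * ||x - y||^2). get_kx and
    # get_kk below evaluate the same kernel against the stored core set,
    # vectorised via the expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2 * x.y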
def get_wx(self, x):
e = self.get_kx(x)
return np.dot(self.w, e), e
def get_kk(self):
n_temp = self.X[self.w_index].shape[0]
norm = np.sum(self.X[self.w_index]**2,axis=1,keepdims=True) # (t,1)
t1 = np.tile(norm,(1,n_temp))
t2 = np.tile(norm.T,(n_temp,1))
t3 = -2*np.dot(self.X[self.w_index],self.X[self.w_index].T)
tmp = t1 + t2 + t3 # (t,t)
return np.exp(-self.gamma*tmp) # (t,t)
def get_kx(self, x):
if self.w_index.shape[0] == 0:
print 'Error w_index'
d = self.X[self.w_index]-x
d2 = np.sum(d*d, axis=1) # (t,)
e = np.exp(-self.gamma*d2) # (t,)
return e
def get_wnorm(self, w, x, n, gamma):
c = np.zeros((n,n))
for i in xrange(n):
for j in xrange(n):
k = self.get_kernel(x[i], x[j], gamma)
c[i,j] = w[i] * w[j] * k
return np.sum(c)
def fit_online_delay(self, X, y):
self.X = X
N = X.shape[0] # number of training set
D = X.shape[1]
sigma2 = N * self.lbd / 2.0
print 'N:', N, 'D:', D
print 'gamma:', self.gamma, 'theta:', self.theta, 'sigma2:', sigma2
start_time = time.time()
        scale_ball = np.max(y) / np.sqrt(self.lbd)
if self.core_limit < 0:
self.core_limit = N
self.w = np.array([-2.0 * (0.0 - y[0])/self.lbd])
self.w_index = np.array([0])
self.w_l = 1
self.wnorm2 = self.w[0]**2
K_sigma2 = np.array([[1.0 + sigma2]])
KInv_sigma2 = np.array([[1.0 / K_sigma2[0,0]]])
N_batch = int(N * self.percent_batch)
T = int(self.epoch * N_batch)
pos_core = np.full(N_batch, -1, dtype=int)
pos_core[0] = 0
for t in xrange(1, N_batch+1): # from 1 -> =NBatch
nt = np.random.randint(0,N_batch)
eta = 1.0 / (t * self.lbd)
# predict y
wx, kx = self.get_wx(self.X[nt])
alpha = wx - y[nt]
# project
d_project = np.dot(KInv_sigma2, kx)
# print 'd_project', d_project.shape
dist2 = 1.0 - np.dot(d_project, kx)
# scale
scale = (t - 1.0) / t
self.w *= scale
self.wnorm2 *= scale * scale
if dist2 > self.theta:
# add
modifier = -2 * eta * alpha
if pos_core[nt] >= 0:
i_w_new = pos_core[nt]
self.w[i_w_new] += modifier
self.wnorm2 += 2 * modifier * wx * scale + (modifier * modifier)
else:
pos_core[nt] = self.w_l # please check whether it is correct
self.w_l += 1
self.w.resize(self.w_l)
self.w[self.w_l - 1] += modifier
self.w_index.resize(self.w_l)
self.w_index[self.w_l - 1] = nt
self.wnorm2 += 2 * modifier * wx * scale + (modifier * modifier)
K_sigma2 = np.pad(K_sigma2, ((0, 1), (0, 1)), 'constant', constant_values=(0, 0))
K_sigma2[self.w_l-1,:-1] = kx
K_sigma2[:-1, self.w_l-1] = kx
K_sigma2[self.w_l-1, self.w_l-1] = 1.0 + sigma2
KInv_sigma2 = np.linalg.inv(K_sigma2)
else:
# project
self.w -= 2 * eta * alpha * d_project # (t,)
ww = np.kron(self.w, self.w).reshape((self.w_l, self.w_l))
self.wnorm2 = np.sum(ww * (K_sigma2 - np.diag(np.full(self.w_l,sigma2)) ) )
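                # ||w||_H^2 = sum_ij w_i w_j k(x_i, x_j); K_sigma2 carries an
                # extra sigma2 ridge on its diagonal, subtracted back out here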
# update w
wnorm = np.sqrt(self.wnorm2)
if (self.lbd < 2) and (wnorm > scale_ball):
print 'scale ball'
scale_project = scale_ball / wnorm
self.w *= scale_project
self.wnorm2 *= scale_project * scale_project
self.batch_time = time.time() - start_time
start_time = time.time()
ww_K_sigma2 = np.kron(self.w, self.w).reshape((self.w_l, self.w_l))
ww_K_sigma2 = ww_K_sigma2 * (K_sigma2 - np.diag(np.full(self.w_l, sigma2)))
self.wnorm2 = np.sum(ww_K_sigma2)
sum_mse = 0
self.rmse_lst = np.zeros(N)
self.core_size_lst = np.zeros(N)
c_rmse_lst = 0
for n in xrange(N_batch,N):
t = n + 1
nt = n
eta = 1.0 / (t * self.lbd)
# predict y
wx, kx = self.get_wx(self.X[nt])
alpha = wx - y[nt]
sum_mse += alpha**2
print round(t * 1.0 / N * 100,2), '%', np.sqrt(sum_mse / (t-N_batch)), self.w_l, '/', n
self.rmse_lst[c_rmse_lst] = np.sqrt(sum_mse / (t-N_batch))
self.core_size_lst[c_rmse_lst] = self.w_l
c_rmse_lst += 1
# project
print 'd_project', KInv_sigma2.shape, kx.shape
d_project = np.dot(KInv_sigma2, kx)
print 'd_project', d_project.shape
dist2 = 1.0 - np.dot(d_project, kx)
# scale
scale = (t - 1.0) / t
self.w *= scale
self.wnorm2 *= scale * scale
ww_K_sigma2 *= scale * scale
if dist2 > self.theta:
# add
self.w_l += 1
print 'dist2:', dist2, 'add:', self.w_l
modifier = -2 * eta * alpha
self.w.resize(self.w_l)
self.w[self.w_l - 1] = modifier
self.w_index.resize(self.w_l)
self.w_index[self.w_l - 1] = nt
self.wnorm2 += 2 * modifier * wx * scale + (modifier * modifier)
K_sigma2 = np.pad(K_sigma2, ((0, 1), (0, 1)), 'constant', constant_values=(0, 0))
K_sigma2[self.w_l-1,:-1] = kx
K_sigma2[:-1, self.w_l-1] = kx
K_sigma2[self.w_l-1, self.w_l-1] = 1.0 + sigma2
KInv_sigma2 = np.linalg.inv(K_sigma2)
ww_K_sigma2 = np.kron(self.w, self.w).reshape((self.w_l, self.w_l))
ww_K_sigma2 = ww_K_sigma2 * (K_sigma2 - np.diag(np.full(self.w_l, sigma2)))
else:
# project
project_mod = 2 * eta * alpha * d_project
kron_project_mod = np.kron(project_mod, project_mod).reshape((self.w_l, self.w_l))
kron_project_c = np.kron(project_mod, self.w).reshape((self.w_l, self.w_l))
kron_project_r = np.kron(self.w, project_mod).reshape((self.w_l, self.w_l))
ww_K_sigma2 += (- kron_project_c - kron_project_r + kron_project_mod) \
* (K_sigma2 - np.diag(np.full(self.w_l,sigma2)) )
self.w -= project_mod # (t,)
self.wnorm2 = np.sum(ww_K_sigma2)
# ww = np.kron(self.w, self.w).reshape((self.w_l, self.w_l))
# self.wnorm2 = np.sum(ww * (K_sigma2 - np.diag(np.full(self.w_l,sigma2)) ) )
# if np.sum(self.wnorm2 - np.sum(ww_K_sigma2)) > 1e-3:
# print 'Error:', self.wnorm2 , np.sum(ww_K_sigma2)
if self.w_l > self.core_limit:
# find the one to be remove
idx_remove = np.argmin(np.abs(self.w))
wx_remove, kx_remove = self.get_wx(self.X[idx_remove])
# remove
self.wnorm2 -= 2 * self.w[idx_remove] * wx_remove + (self.w[idx_remove] * self.w[idx_remove])
self.w = np.delete(self.w, idx_remove)
self.w_index = np.delete(self.w_index, idx_remove)
self.w_l -= 1
K_sigma2 = np.delete(K_sigma2, idx_remove, axis=0)
K_sigma2 = np.delete(K_sigma2, idx_remove, axis=1)
KInv_sigma2 = np.linalg.inv(K_sigma2)
ww_K_sigma2 = np.delete(ww_K_sigma2, idx_remove, axis=0)
ww_K_sigma2 = np.delete(ww_K_sigma2, idx_remove, axis=1)
# project
wx_remove, kx_remove = self.get_wx(self.X[idx_remove])
alpha_remove = wx_remove - y[idx_remove]
d_project_remove = np.dot(KInv_sigma2, kx_remove)
project_mod = 2 * eta * alpha_remove * d_project_remove
kron_project_mod = np.kron(project_mod, project_mod).reshape((self.w_l, self.w_l))
kron_project_c = np.kron(project_mod, self.w).reshape((self.w_l, self.w_l))
kron_project_r = np.kron(self.w, project_mod).reshape((self.w_l, self.w_l))
ww_K_sigma2 += (- kron_project_c - kron_project_r + kron_project_mod) \
* (K_sigma2 - np.diag(np.full(self.w_l, sigma2)))
self.w -= project_mod # (t,)
self.wnorm2 = np.sum(ww_K_sigma2)
# ww = np.kron(self.w, self.w).reshape((self.w_l, self.w_l))
# true_wnorm2 = np.sum(ww * (K_sigma2 - np.diag(np.full(self.w_l, sigma2)) ) )
# if np.abs(np.sum(self.wnorm2 - true_wnorm2)) > 1e-3:
# print 'Error:', self.wnorm2, true_wnorm2
# raise Exception
# update w
wnorm = np.sqrt(self.wnorm2)
if (self.lbd < 2) and (wnorm > scale_ball):
print 'scale ball'
scale_project = scale_ball / wnorm
self.w *= scale_project
self.wnorm2 *= scale_project * scale_project
self.online_time = time.time() - start_time
self.final_rmse = self.rmse_lst[c_rmse_lst-1]
def fit(self, X, y):
self.X = X
N = X.shape[0] # number of training set
D = X.shape[1]
sigma2 = N * self.lbd / 2.0
print 'N:', N, 'D:', D
print 'gamma:', self.gamma, 'theta:', self.theta, 'sigma2:', sigma2
start_time = time.time()
        scale_ball = np.max(y) / np.sqrt(self.lbd)
self.w = np.array([-2.0 * (0.0 - y[0])/self.lbd])
self.w_index = np.array([0])
self.w_l = 1
self.wnorm2 = self.w[0]**2
K_sigma2 = np.array([[1.0 + sigma2]])
KInv_sigma2 = np.array([[1.0 / K_sigma2[0,0]]])
sum_mse = 0
self.rmse_lst = np.zeros(N)
c_rmse_lst = 0
for n in xrange(1,N):
t = n + 1
nt = n
eta = 1.0 / (t * self.lbd)
# predict y
wx, kx = self.get_wx(self.X[nt])
alpha = wx - y[nt]
sum_mse += alpha**2
print round(t * 1.0 / N * 100,2), '%', np.sqrt(sum_mse / t), self.w_l, '/', n
self.rmse_lst[c_rmse_lst] = np.sqrt(sum_mse / t)
c_rmse_lst += 1
# project
d_project = np.dot(KInv_sigma2, kx)
# print 'd_project', d_project.shape
dist2 = 1.0 - np.dot(d_project, kx)
# scale
scale = (t - 1.0) / t
self.w *= scale
self.wnorm2 *= scale * scale
if dist2 > self.theta:
# add
self.w_l += 1
print 'dist2:', dist2, 'add:', self.w_l
modifier = -2 * eta * alpha
self.w.resize(self.w_l)
self.w[self.w_l - 1] = modifier
self.w_index.resize(self.w_l)
self.w_index[self.w_l - 1] = nt
self.wnorm2 += 2 * modifier * wx * scale + (modifier * modifier)
K_sigma2 = np.pad(K_sigma2, ((0, 1), (0, 1)), 'constant', constant_values=(0, 0))
K_sigma2[self.w_l-1,:-1] = kx
K_sigma2[:-1, self.w_l-1] = kx
K_sigma2[self.w_l-1, self.w_l-1] = 1.0 + sigma2
KInv_sigma2 = np.linalg.inv(K_sigma2)
else:
# project
self.w -= 2 * eta * alpha * d_project # (t,)
ww = np.kron(self.w, self.w).reshape((self.w_l, self.w_l))
self.wnorm2 = np.sum(ww * (K_sigma2 - np.diag(np.full(self.w_l,sigma2)) ) )
# update w
wnorm = np.sqrt(self.wnorm2)
if (self.lbd < 2) and (wnorm > scale_ball):
print 'scale ball'
scale_project = scale_ball / wnorm
self.w *= scale_project
self.wnorm2 *= scale_project * scale_project
self.train_time = time.time() - start_time
self.final_rmse = self.rmse_lst[c_rmse_lst-1]
def fit_batch(self, X, y):
self.X = X
N = X.shape[0] # number of training set
D = X.shape[1]
sigma2 = N * self.lbd / 2.0
print 'N:', N, 'D:', D
print 'gamma:', self.gamma, 'theta:', self.theta, 'sigma2:', sigma2
start_time = time.time()
        scale_ball = np.max(y) / np.sqrt(self.lbd)
self.w = np.array([-2.0 * (0.0 - y[0])/self.lbd])
self.w_index = np.array([0])
self.w_l = 1
self.wnorm2 = self.w[0]**2
K_sigma2 = np.array([[1.0 + sigma2]])
KInv_sigma2 = np.array([[1.0 / K_sigma2[0,0]]])
T = int(self.epoch * N)
pos_core = np.full(N, -1, dtype=int)
pos_core[0] = 0
for t in xrange(1, T):
nt = np.random.randint(0,N)
eta = 1.0 / (t * self.lbd)
# predict y
wx, kx = self.get_wx(self.X[nt])
alpha = wx - y[nt]
print alpha**2
# project
d_project = np.dot(KInv_sigma2, kx)
# print 'd_project', d_project.shape
dist2 = 1.0 - np.dot(d_project, kx)
if dist2 < 0:
print 'error dist'
# scale
scale = (t - 1.0) / t
self.w *= scale
self.wnorm2 *= scale * scale
if dist2 > self.theta:
# add
modifier = -2 * eta * alpha
if pos_core[nt] >= 0:
i_w_new = pos_core[nt]
self.w[i_w_new] += modifier
self.wnorm2 += 2 * modifier * wx * scale + (modifier * modifier)
else:
pos_core[nt] = self.w_l
self.w_l += 1
self.w.resize(self.w_l)
self.w[self.w_l - 1] = modifier # not +=
self.w_index.resize(self.w_l)
self.w_index[self.w_l - 1] = nt
self.wnorm2 += 2 * modifier * wx * scale + (modifier * modifier)
# wnorm2 = self.get_wnorm(self.w, X, self.w_l, self.gamma)
# if np.sum(self.wnorm2-wnorm2) > 1e-3:
# print 'error wnorm2'
K_sigma2 = np.pad(K_sigma2, ((0, 1), (0, 1)), 'constant', constant_values=(0, 0))
K_sigma2[self.w_l-1,:-1] = kx
K_sigma2[:-1, self.w_l-1] = kx
K_sigma2[self.w_l-1, self.w_l-1] = 1.0 + sigma2
KInv_sigma2 = np.linalg.inv(K_sigma2)
else:
# project
self.w -= 2 * eta * alpha * d_project # (t,)
ww = np.kron(self.w, self.w).reshape((self.w_l, self.w_l))
self.wnorm2 = np.sum(ww * (K_sigma2 - np.diag(np.full(self.w_l,sigma2)) ) )
# update w
wnorm = np.sqrt(self.wnorm2)
if (self.lbd < 2) and (wnorm > scale_ball):
# print 'scale ball'
scale_project = scale_ball / wnorm
self.w *= scale_project
self.wnorm2 *= scale_project * scale_project
self.batch_time = time.time() - start_time
def predict(self, XTest):
NTest = XTest.shape[0]
ypred = np.zeros(NTest)
for n in xrange(NTest):
ypred[n], kx = self.get_wx(XTest[n])
return ypred
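if __name__ == '__main__':
    # Editor's illustrative sketch (not part of the original file): the
    # admission test used in fit()/fit_batch() above adds a point to the core
    # set only when its squared projection distance onto the span of the
    # current core set, dist2 = 1 - kx^T (K + sigma2*I)^{-1} kx, exceeds the
    # threshold theta. The kernel, data and threshold below are made-up demo
    # values.
    import numpy as np

    def rbf(a, b, gamma=1.0):
        # isotropic RBF kernel, so k(x, x) = 1
        return np.exp(-gamma * np.sum((a - b) ** 2))

    core = [np.array([0.0, 0.0])]                     # current core (basis) set
    sigma2 = 0.5                                      # plays the role of the ridge in K_sigma2
    K = np.array([[rbf(core[0], core[0]) + sigma2]])  # regularized kernel matrix
    K_inv = np.linalg.inv(K)

    x_new = np.array([3.0, 3.0])                      # candidate point
    kx = np.array([rbf(c, x_new) for c in core])
    dist2 = 1.0 - kx.dot(K_inv).dot(kx)               # same quantity as in fit()
    theta = 1e-2
    if dist2 > theta:                                 # far from the span -> admit it
        core.append(x_new)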
|
khanhndk/GoGP
|
gogp-py/gogp_si.py
|
Python
|
gpl-3.0
| 16,932
|
[
"Gaussian"
] |
a42d39a22252ff5510c969e89e2224ebd82ad9a1cd56929aba250e5d3509193d
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
"""
The :mod:`sklearn.gaussian_process` module implements scalar Gaussian Process
based predictions.
"""
from .gaussian_process import GaussianProcess
from . import correlation_models
from . import regression_models
__all__ = ['GaussianProcess', 'correlation_models', 'regression_models']
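# Editor's illustrative sketch (kept as comments so the package __init__ stays
# unchanged): a minimal use of the estimator re-exported above, with made-up
# toy data and default hyper-parameters.
#
#   from sklearn.gaussian_process import GaussianProcess
#   import numpy as np
#   X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
#   y = (X * np.sin(X)).ravel()
#   gp = GaussianProcess().fit(X, y)
#   y_pred, mse = gp.predict(np.atleast_2d([2., 4.]).T, eval_MSE=True)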
|
depet/scikit-learn
|
sklearn/gaussian_process/__init__.py
|
Python
|
bsd-3-clause
| 472
|
[
"Gaussian"
] |
9b829e5f170519ca7255e47ad8a5a825a51b7364b5ca5447d0c6ffbfb873d6b5
|
import numpy as np
import histomicstk.utils as htk_utils
def vesselness(im_input, sigma):
"""
Calculates vesselness measure for grayscale image `im_input` at scale `sigma`.
Also returns eigenvalues and vectors used for vessel salience filters.
Parameters
----------
im_input : array_like
M x N grayscale image.
sigma : double
standard deviation of gaussian kernel.
Returns
-------
Deviation : array_like
M x N image of deviation from blob
Frobenius : array_like
M x N image of frobenius norm of Hessian - measures presence of
structure.
E : array_like
M x N x 2 eigenvalue image - see eigen.py.
Theta : array_like
M x N eigenvector angle image for E(:,:,0) in radians
see eigen.py. Oriented parallel to vessel structures.
References
----------
.. [#] Frangi, Alejandro F., et al. "Multiscale vessel enhancement
filtering." Medical Image Computing and Computer-Assisted
Interventation. MICCAI98. Springer Berlin Heidelberg,1998. 130-137.
"""
# calculate hessian matrix
H = sigma ** 2 * htk_utils.hessian(im_input, sigma)
# calculate eigenvalue image
E, V1, V2 = htk_utils.eigen(H)
# compute blobness measures
Deviation = E[:, :, 0]/(E[:, :, 1] + np.spacing(1))
Frobenius = np.sqrt(E[:, :, 0]**2 + E[:, :, 1]**2)
# calculate angles for 'Theta'
Theta = np.arctan2(V1[:, :, 1], V1[:, :, 0])
return Deviation, Frobenius, E, Theta
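if __name__ == '__main__':
    # Editor's illustrative sketch: a multiscale (Frangi-style) use of the
    # vesselness() filter defined above -- evaluate a few scales and keep the
    # per-pixel maximum Frobenius response. The random test image and the
    # scale list are arbitrary demo values; histomicstk must be importable.
    im = np.random.rand(64, 64)
    best_frobenius = np.zeros_like(im)
    for s in (1.0, 2.0, 4.0):
        _, frob, _, _ = vesselness(im, s)
        best_frobenius = np.maximum(best_frobenius, frob)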
|
DigitalSlideArchive/HistomicsTK
|
histomicstk/filters/shape/vesselness.py
|
Python
|
apache-2.0
| 1,523
|
[
"Gaussian"
] |
817879a4d41b345ddd28812faf0512ed6e3b057f918710ba776c8248a0a63503
|
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
A collection of routines which create standard Cubes for test purposes.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import zip
import os.path
import numpy as np
import numpy.ma as ma
from iris.cube import Cube
import iris.aux_factory
import iris.coords
import iris.coords as icoords
import iris.tests as tests
from iris.coord_systems import GeogCS, RotatedGeogCS
def lat_lon_cube():
"""
Returns a cube with a latitude and longitude suitable for testing
saving to PP/NetCDF etc.
"""
cube = Cube(np.arange(12, dtype=np.int32).reshape((3, 4)))
cs = GeogCS(6371229)
coord = iris.coords.DimCoord(points=np.array([-1, 0, 1], dtype=np.int32),
standard_name='latitude',
units='degrees',
coord_system=cs)
cube.add_dim_coord(coord, 0)
coord = iris.coords.DimCoord(points=np.array([-1, 0, 1, 2], dtype=np.int32),
standard_name='longitude',
units='degrees',
coord_system=cs)
cube.add_dim_coord(coord, 1)
return cube
def global_pp():
"""
Returns a two-dimensional cube derived from PP/aPPglob1/global.pp.
The standard_name and unit attributes are added to compensate for the
broken STASH encoding in that file.
"""
def callback_global_pp(cube, field, filename):
cube.standard_name = 'air_temperature'
cube.units = 'K'
path = tests.get_data_path(('PP', 'aPPglob1', 'global.pp'))
cube = iris.load_cube(path, callback=callback_global_pp)
return cube
def simple_pp():
filename = tests.get_data_path(['PP', 'simple_pp', 'global.pp']) # Differs from global_pp()
cube = iris.load_cube(filename)
return cube
def simple_1d(with_bounds=True):
"""
Returns an abstract, one-dimensional cube.
>>> print(simple_1d())
thingness (foo: 11)
Dimension coordinates:
foo x
>>> print(repr(simple_1d().data))
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
"""
cube = Cube(np.arange(11, dtype=np.int32))
cube.long_name = 'thingness'
cube.units = '1'
points = np.arange(11, dtype=np.int32) + 1
bounds = np.column_stack([np.arange(11, dtype=np.int32), np.arange(11, dtype=np.int32) + 1])
coord = iris.coords.DimCoord(points, long_name='foo', units='1', bounds=bounds)
cube.add_dim_coord(coord, 0)
return cube
def simple_2d(with_bounds=True):
"""
Returns an abstract, two-dimensional, optionally bounded, cube.
>>> print(simple_2d())
thingness (bar: 3; foo: 4)
Dimension coordinates:
bar x -
foo - x
>>> print(repr(simple_2d().data))
[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
"""
cube = Cube(np.arange(12, dtype=np.int32).reshape((3, 4)))
cube.long_name = 'thingness'
cube.units = '1'
y_points = np.array([ 2.5, 7.5, 12.5])
y_bounds = np.array([[0, 5], [5, 10], [10, 15]], dtype=np.int32)
y_coord = iris.coords.DimCoord(y_points, long_name='bar', units='1', bounds=y_bounds if with_bounds else None)
x_points = np.array([ -7.5, 7.5, 22.5, 37.5])
x_bounds = np.array([[-15, 0], [0, 15], [15, 30], [30, 45]], dtype=np.int32)
x_coord = iris.coords.DimCoord(x_points, long_name='foo', units='1', bounds=x_bounds if with_bounds else None)
cube.add_dim_coord(y_coord, 0)
cube.add_dim_coord(x_coord, 1)
return cube
def simple_2d_w_multidim_coords(with_bounds=True):
"""
Returns an abstract, two-dimensional, optionally bounded, cube.
>>> print(simple_2d_w_multidim_coords())
thingness (*ANONYMOUS*: 3; *ANONYMOUS*: 4)
Auxiliary coordinates:
bar x x
foo x x
>>> print(repr(simple_2d().data))
[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]]
"""
cube = simple_3d_w_multidim_coords(with_bounds)[0, :, :]
cube.remove_coord('wibble')
cube.data = np.arange(12, dtype=np.int32).reshape((3, 4))
return cube
def simple_3d_w_multidim_coords(with_bounds=True):
"""
Returns an abstract, three-dimensional, optionally bounded, cube.
>>> print(simple_3d_w_multidim_coords())
thingness (wibble: 2; *ANONYMOUS*: 3; *ANONYMOUS*: 4)
Dimension coordinates:
wibble x - -
Auxiliary coordinates:
bar - x x
foo - x x
>>> print(simple_3d_w_multidim_coords().data)
[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]
"""
cube = Cube(np.arange(24, dtype=np.int32).reshape((2, 3, 4)))
cube.long_name = 'thingness'
cube.units = '1'
y_points = np.array([[2.5, 7.5, 12.5, 17.5],
[10., 17.5, 27.5, 42.5],
[15., 22.5, 32.5, 50.]])
y_bounds = np.array([[[0, 5], [5, 10], [10, 15], [15, 20]],
[[5, 15], [15, 20], [20, 35], [35, 50]],
[[10, 20], [20, 25], [25, 40], [40, 60]]],
dtype=np.int32)
y_coord = iris.coords.AuxCoord(points=y_points, long_name='bar',
units='1',
bounds=y_bounds if with_bounds else None)
x_points = np.array([[-7.5, 7.5, 22.5, 37.5],
[-12.5, 4., 26.5, 47.5],
[2.5, 14., 36.5, 44.]])
x_bounds = np.array([[[-15, 0], [0, 15], [15, 30], [30, 45]],
[[-25, 0], [0, 8], [8, 45], [45, 50]],
[[-5, 10], [10, 18], [18, 55], [18, 70]]],
dtype=np.int32)
x_coord = iris.coords.AuxCoord(points=x_points, long_name='foo',
units='1',
bounds=x_bounds if with_bounds else None)
wibble_coord = iris.coords.DimCoord(np.array([10., 30.],
dtype=np.float32),
long_name='wibble', units='1')
cube.add_dim_coord(wibble_coord, [0])
cube.add_aux_coord(y_coord, [1, 2])
cube.add_aux_coord(x_coord, [1, 2])
return cube
def simple_3d():
"""
Returns an abstract three dimensional cube.
>>> print(simple_3d())
thingness / (1) (wibble: 2; latitude: 3; longitude: 4)
Dimension coordinates:
wibble x - -
latitude - x -
longitude - - x
>>> print(simple_3d().data)
[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]
"""
cube = Cube(np.arange(24, dtype=np.int32).reshape((2, 3, 4)))
cube.long_name = 'thingness'
cube.units = '1'
wibble_coord = iris.coords.DimCoord(np.array([10., 30.],
dtype=np.float32),
long_name='wibble', units='1')
lon = iris.coords.DimCoord([-180, -90, 0, 90],
standard_name='longitude',
units='degrees', circular=True)
lat = iris.coords.DimCoord([90, 0, -90],
standard_name='latitude', units='degrees')
cube.add_dim_coord(wibble_coord, [0])
cube.add_dim_coord(lat, [1])
cube.add_dim_coord(lon, [2])
return cube
def simple_3d_mask():
"""
Returns an abstract three dimensional cube that has data masked.
>>> print(simple_3d_mask())
thingness / (1) (wibble: 2; latitude: 3; longitude: 4)
Dimension coordinates:
wibble x - -
latitude - x -
longitude - - x
>>> print(simple_3d_mask().data)
[[[-- -- -- --]
[-- -- -- --]
[-- 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]
"""
cube = simple_3d()
cube.data = ma.asanyarray(cube.data)
cube.data = ma.masked_less_equal(cube.data, 8.)
return cube
def track_1d(duplicate_x=False):
"""
Returns a one-dimensional track through two-dimensional space.
>>> print(track_1d())
air_temperature (y, x: 11)
Dimensioned coords:
x -> x
y -> y
Single valued coords:
>>> print(repr(track_1d().data))
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
"""
cube = Cube(np.arange(11, dtype=np.int32), standard_name='air_temperature', units='K')
bounds = np.column_stack([np.arange(11, dtype=np.int32), np.arange(11, dtype=np.int32) + 1])
pts = bounds[:, 1]
coord = iris.coords.AuxCoord(pts, 'projection_x_coordinate', units='1', bounds=bounds)
cube.add_aux_coord(coord, [0])
if duplicate_x:
coord = iris.coords.AuxCoord(pts, 'projection_x_coordinate', units='1', bounds=bounds)
cube.add_aux_coord(coord, [0])
coord = iris.coords.AuxCoord(pts * 2, 'projection_y_coordinate', units='1', bounds=bounds * 2)
cube.add_aux_coord(coord, 0)
return cube
def simple_2d_w_multidim_and_scalars():
data = np.arange(50, dtype=np.int32).reshape((5, 10))
cube = iris.cube.Cube(data, long_name='test 2d dimensional cube', units='meters')
# DimCoords
dim1 = iris.coords.DimCoord(np.arange(5, dtype=np.float32) * 5.1 + 3.0, long_name='dim1', units='meters')
dim2 = iris.coords.DimCoord(np.arange(10, dtype=np.int32), long_name='dim2', units='meters',
bounds=np.arange(20, dtype=np.int32).reshape(10, 2))
# Scalars
an_other = iris.coords.AuxCoord(3.0, long_name='an_other', units='meters')
yet_an_other = iris.coords.DimCoord(23.3, standard_name='air_temperature',
long_name='custom long name',
var_name='custom_var_name',
units='K')
# Multidim
my_multi_dim_coord = iris.coords.AuxCoord(np.arange(50, dtype=np.int32).reshape(5, 10),
long_name='my_multi_dim_coord', units='1',
bounds=np.arange(200, dtype=np.int32).reshape(5, 10, 4))
cube.add_dim_coord(dim1, 0)
cube.add_dim_coord(dim2, 1)
cube.add_aux_coord(an_other)
cube.add_aux_coord(yet_an_other)
cube.add_aux_coord(my_multi_dim_coord, [0, 1])
return cube
def hybrid_height():
"""
Returns a two-dimensional (Z, X), hybrid-height cube.
>>> print(hybrid_height())
TODO: Update!
air_temperature (level_height: 3; *ANONYMOUS*: 4)
Dimension coordinates:
level_height x -
Auxiliary coordinates:
model_level_number x -
sigma x -
surface_altitude - x
Derived coordinates:
altitude x x
>>> print(hybrid_height().data)
[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
"""
data = np.arange(12, dtype='i8').reshape((3, 4))
orography = icoords.AuxCoord([10, 25, 50, 5], standard_name='surface_altitude', units='m')
model_level = icoords.AuxCoord([2, 1, 0], standard_name='model_level_number')
level_height = icoords.DimCoord([100, 50, 10], long_name='level_height',
units='m', attributes={'positive': 'up'},
bounds=[[150, 75], [75, 20], [20, 0]])
sigma = icoords.AuxCoord([0.8, 0.9, 0.95], long_name='sigma',
bounds=[[0.7, 0.85], [0.85, 0.97], [0.97, 1.0]])
hybrid_height = iris.aux_factory.HybridHeightFactory(level_height, sigma, orography)
cube = iris.cube.Cube(data, standard_name='air_temperature', units='K',
dim_coords_and_dims=[(level_height, 0)],
aux_coords_and_dims=[(orography, 1), (model_level, 0), (sigma, 0)],
aux_factories=[hybrid_height])
return cube
def simple_4d_with_hybrid_height():
cube = iris.cube.Cube(np.arange(3*4*5*6, dtype='i8').reshape(3, 4, 5, 6),
"air_temperature", units="K")
cube.add_dim_coord(iris.coords.DimCoord(np.arange(3, dtype='i8'), "time",
units="hours since epoch"), 0)
cube.add_dim_coord(iris.coords.DimCoord(np.arange(4, dtype='i8')+10,
"model_level_number", units="1"), 1)
cube.add_dim_coord(iris.coords.DimCoord(np.arange(5, dtype='i8')+20,
"grid_latitude",
units="degrees"), 2)
cube.add_dim_coord(iris.coords.DimCoord(np.arange(6, dtype='i8')+30,
"grid_longitude",
units="degrees"), 3)
cube.add_aux_coord(iris.coords.AuxCoord(np.arange(4, dtype='i8')+40,
long_name="level_height",
units="m"), 1)
cube.add_aux_coord(iris.coords.AuxCoord(np.arange(4, dtype='i8')+50,
long_name="sigma", units="1"), 1)
cube.add_aux_coord(iris.coords.AuxCoord(np.arange(5*6, dtype='i8').reshape(5, 6)+100,
long_name="surface_altitude",
units="m"), [2, 3])
cube.add_aux_factory(iris.aux_factory.HybridHeightFactory(
delta=cube.coord("level_height"),
sigma=cube.coord("sigma"),
orography=cube.coord("surface_altitude")))
return cube
def realistic_3d():
"""
Returns a realistic 3d cube.
>>> print(repr(realistic_3d()))
<iris 'Cube' of air_potential_temperature (time: 7; grid_latitude: 9;
grid_longitude: 11)>
"""
data = np.arange(7*9*11).reshape((7,9,11))
lat_pts = np.linspace(-4, 4, 9)
lon_pts = np.linspace(-5, 5, 11)
time_pts = np.linspace(394200, 394236, 7)
forecast_period_pts = np.linspace(0, 36, 7)
ll_cs = RotatedGeogCS(37.5, 177.5, ellipsoid=GeogCS(6371229.0))
lat = icoords.DimCoord(lat_pts, standard_name='grid_latitude',
units='degrees', coord_system=ll_cs)
lon = icoords.DimCoord(lon_pts, standard_name='grid_longitude',
units='degrees', coord_system=ll_cs)
time = icoords.DimCoord(time_pts, standard_name='time',
units='hours since 1970-01-01 00:00:00')
forecast_period = icoords.DimCoord(forecast_period_pts,
standard_name='forecast_period',
units='hours')
height = icoords.DimCoord(1000.0, standard_name='air_pressure',
units='Pa')
cube = iris.cube.Cube(data, standard_name='air_potential_temperature',
units='K',
dim_coords_and_dims=[(time, 0), (lat, 1), (lon, 2)],
aux_coords_and_dims=[(forecast_period, 0),
(height, None)],
attributes={'source': 'Iris test case'})
return cube
def realistic_4d():
"""
Returns a realistic 4d cube.
>>> print(repr(realistic_4d()))
<iris 'Cube' of air_potential_temperature (time: 6; model_level_number: 70; grid_latitude: 100; grid_longitude: 100)>
"""
# the stock arrays were created in Iris 0.8 with:
# >>> fname = iris.sample_data_path('PP', 'COLPEX', 'theta_and_orog_subset.pp')
# >>> theta = iris.load_cube(fname, 'air_potential_temperature')
# >>> for coord in theta.coords():
# ... print(coord.name, coord.has_points(), coord.has_bounds(), coord.units)
# ...
# grid_latitude True True degrees
# grid_longitude True True degrees
# level_height True True m
# model_level True False 1
# sigma True True 1
# time True False hours since 1970-01-01 00:00:00
# source True False no_unit
# forecast_period True False hours
# >>> arrays = []
# >>> for coord in theta.coords():
# ... if coord.has_points(): arrays.append(coord.points)
# ... if coord.has_bounds(): arrays.append(coord.bounds)
# >>> arrays.append(theta.data)
# >>> arrays.append(theta.coord('sigma').coord_system.orography.data)
# >>> np.savez('stock_arrays.npz', *arrays)
data_path = os.path.join(os.path.dirname(__file__), 'stock_arrays.npz')
r = np.load(data_path)
# sort the arrays based on the order they were originally given. The names given are of the form 'arr_1' or 'arr_10'
_, arrays = zip(*sorted(r.iteritems(), key=lambda item: int(item[0][4:])))
lat_pts, lat_bnds, lon_pts, lon_bnds, level_height_pts, \
level_height_bnds, model_level_pts, sigma_pts, sigma_bnds, time_pts, \
_source_pts, forecast_period_pts, data, orography = arrays
ll_cs = RotatedGeogCS(37.5, 177.5, ellipsoid=GeogCS(6371229.0))
lat = icoords.DimCoord(lat_pts, standard_name='grid_latitude', units='degrees',
bounds=lat_bnds, coord_system=ll_cs)
lon = icoords.DimCoord(lon_pts, standard_name='grid_longitude', units='degrees',
bounds=lon_bnds, coord_system=ll_cs)
level_height = icoords.DimCoord(level_height_pts, long_name='level_height',
units='m', bounds=level_height_bnds,
attributes={'positive': 'up'})
model_level = icoords.DimCoord(model_level_pts, standard_name='model_level_number',
units='1', attributes={'positive': 'up'})
sigma = icoords.AuxCoord(sigma_pts, long_name='sigma', units='1', bounds=sigma_bnds)
orography = icoords.AuxCoord(orography, standard_name='surface_altitude', units='m')
time = icoords.DimCoord(time_pts, standard_name='time', units='hours since 1970-01-01 00:00:00')
forecast_period = icoords.DimCoord(forecast_period_pts, standard_name='forecast_period', units='hours')
hybrid_height = iris.aux_factory.HybridHeightFactory(level_height, sigma, orography)
cube = iris.cube.Cube(data, standard_name='air_potential_temperature', units='K',
dim_coords_and_dims=[(time, 0), (model_level, 1), (lat, 2), (lon, 3)],
aux_coords_and_dims=[(orography, (2, 3)), (level_height, 1), (sigma, 1),
(forecast_period, None)],
attributes={'source': 'Iris test case'},
aux_factories=[hybrid_height])
return cube
def realistic_4d_no_derived():
"""
Returns a realistic 4d cube without hybrid height
>>> print(repr(realistic_4d()))
<iris 'Cube' of air_potential_temperature (time: 6; model_level_number: 70; grid_latitude: 100; grid_longitude: 100)>
"""
cube = realistic_4d()
# TODO determine appropriate way to remove aux_factory from a cube
cube._aux_factories = []
return cube
def realistic_4d_w_missing_data():
data_path = os.path.join(os.path.dirname(__file__), 'stock_mdi_arrays.npz')
data_archive = np.load(data_path)
data = ma.masked_array(data_archive['arr_0'], mask=data_archive['arr_1'])
# sort the arrays based on the order they were originally given. The names given are of the form 'arr_1' or 'arr_10'
ll_cs = GeogCS(6371229)
lat = iris.coords.DimCoord(np.arange(20, dtype=np.float32), standard_name='grid_latitude',
units='degrees', coord_system=ll_cs)
lon = iris.coords.DimCoord(np.arange(20, dtype=np.float32), standard_name='grid_longitude',
units='degrees', coord_system=ll_cs)
time = iris.coords.DimCoord([1000., 1003., 1006.], standard_name='time',
units='hours since 1970-01-01 00:00:00')
forecast_period = iris.coords.DimCoord([0.0, 3.0, 6.0], standard_name='forecast_period', units='hours')
pressure = iris.coords.DimCoord(np.array([ 800., 900., 1000.], dtype=np.float32),
long_name='pressure', units='hPa')
cube = iris.cube.Cube(data, long_name='missing data test data', units='K',
dim_coords_and_dims=[(time, 0), (pressure, 1), (lat, 2), (lon, 3)],
aux_coords_and_dims=[(forecast_period, 0)],
attributes={'source':'Iris test case'})
return cube
def global_grib2():
path = tests.get_data_path(('GRIB', 'global_t', 'global.grib2'))
cube = iris.load_cube(path)
return cube
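if __name__ == '__main__':
    # Editor's illustrative sketch: the purely synthetic stock cubes above can
    # be built and inspected without any test data on disk (helpers such as
    # global_pp() and simple_pp() additionally need the Iris test data path).
    cube = simple_2d(with_bounds=True)
    print(cube)
    print(cube.coord('bar').bounds)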
|
Jozhogg/iris
|
lib/iris/tests/stock.py
|
Python
|
lgpl-3.0
| 22,502
|
[
"NetCDF"
] |
03172ad0aa8f928376decab69c48db609e9ca49c91b4e9b3958319c7401deaf0
|
#! test QC_JSON Schema with ghost atoms
import numpy as np
import psi4
import json
# Generate JSON data
json_data = {
"schema_name": "qc_schema_input",
"schema_version": 1,
"molecule": {
"geometry": [
0.0,
0.0,
-5.0,
0.0,
0.0,
5.0,
],
"symbols": ["He", "He"],
"real": [True, False]
},
"driver": "energy",
"model": {
"method": "SCF",
"basis": "cc-pVDZ"
},
"keywords": {
"scf_type": "df"
},
"memory": 1024 * 1024 * 1024,
"nthreads": 1,
}
# Write expected output
expected_return_result = -2.85518836280515
expected_properties = {
'calcinfo_nbasis': 10,
'calcinfo_nmo': 10,
'calcinfo_nalpha': 1,
'calcinfo_nbeta': 1,
'calcinfo_natom': 2,
'scf_one_electron_energy': -3.8820496359492576,
'scf_two_electron_energy': 1.0268612731441076,
'nuclear_repulsion_energy': 0.0,
'scf_total_energy': -2.85518836280515,
'return_energy': -2.85518836280515
}
json_ret = psi4.json_wrapper.run_json(json_data)
with open("output.json", "w") as ofile: #TEST
json.dump(json_ret, ofile, indent=2) #TEST
psi4.compare_integers(True, json_ret["success"], "JSON Success") #TEST
psi4.compare_values(expected_return_result, json_ret["return_result"], 5, "Return Value") #TEST
for k in expected_properties.keys(): #TEST
psi4.compare_values(expected_properties[k], json_ret["properties"][k], 5, k.upper()) #TEST
|
CDSherrill/psi4
|
tests/json/schema-1-ghost/input.py
|
Python
|
lgpl-3.0
| 1,563
|
[
"Psi4"
] |
3cae88b62aa032949b389730b702744e475b95fc3c51fddec0e87c2cf345f285
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure files have the right permissions.
Some developers have broken SCM configurations that flip the executable
permission on for no good reason. Unix developers who run ls --color will then
see .cc files in green and get confused.
- For file extensions that must be executable, add it to EXECUTABLE_EXTENSIONS.
- For file extensions that must not be executable, add it to
NOT_EXECUTABLE_EXTENSIONS.
- To ignore all the files inside a directory, add it to IGNORED_PATHS.
- For file base name with ambiguous state and that should not be checked for
shebang, add it to IGNORED_FILENAMES.
Any file not matching the above will be opened and looked if it has a shebang
or an ELF header. If this does not match the executable bit on the file, the
file will be flagged.
Note that all directory separators must be slashes (Unix-style) and not
backslashes. All directories should be relative to the source root and all
file paths should be only lowercase.
"""
from __future__ import print_function
import json
import logging
import optparse
import os
import stat
import string
import subprocess
import sys
#### USER EDITABLE SECTION STARTS HERE ####
# Files with these extensions must have executable bit set.
#
# Case-sensitive.
EXECUTABLE_EXTENSIONS = (
'bat',
'dll',
'exe',
)
# Files for which the executable bit may or may not be set.
IGNORED_EXTENSIONS = (
'dylib',
)
# These files must have executable bit set.
#
# Case-insensitive, lower-case only.
EXECUTABLE_PATHS = (
'chrome/test/data/app_shim/app_shim_32_bit.app/contents/'
'macos/app_mode_loader',
)
# These files must not have the executable bit set. This is mainly a performance
# optimization as these files are not checked for shebang. The list was
# partially generated from:
# git ls-files | grep "\\." | sed 's/.*\.//' | sort | uniq -c | sort -b -g
#
# Case-sensitive.
NON_EXECUTABLE_EXTENSIONS = (
'1',
'3ds',
'S',
'am',
'applescript',
'asm',
'c',
'cc',
'cfg',
'chromium',
'cpp',
'crx',
'cs',
'css',
'cur',
'def',
'der',
'expected',
'gif',
'grd',
'gyp',
'gypi',
'h',
'hh',
'htm',
'html',
'hyph',
'ico',
'idl',
'java',
'jpg',
'js',
'json',
'm',
'm4',
'mm',
'mms',
'mock-http-headers',
'nexe',
'nmf',
'onc',
'pat',
'patch',
'pdf',
'pem',
'plist',
'png',
'proto',
'rc',
'rfx',
'rgs',
'rules',
'spec',
'sql',
'srpc',
'svg',
'tcl',
'test',
'tga',
'txt',
'vcproj',
'vsprops',
'webm',
'word',
'xib',
'xml',
'xtb',
'zip',
)
# These files must not have executable bit set.
#
# Case-insensitive, lower-case only.
NON_EXECUTABLE_PATHS = (
'build/android/tests/symbolize/liba.so',
'build/android/tests/symbolize/libb.so',
'chrome/installer/mac/sign_app.sh.in',
'chrome/installer/mac/sign_versioned_dir.sh.in',
'courgette/testdata/elf-32-1',
'courgette/testdata/elf-32-2',
'courgette/testdata/elf-64',
)
# File names that are always whitelisted. (These are mostly autoconf spew.)
#
# Case-sensitive.
IGNORED_FILENAMES = (
'config.guess',
'config.sub',
'configure',
'depcomp',
'install-sh',
'missing',
'mkinstalldirs',
'naclsdk',
'scons',
)
# File paths starting with one of these will be ignored as well.
# Please consider fixing your file permissions, rather than adding to this list.
#
# Case-insensitive, lower-case only.
IGNORED_PATHS = (
'base/third_party/libevent/autogen.sh',
'base/third_party/libevent/test/test.sh',
'native_client_sdk/src/build_tools/sdk_tools/third_party/fancy_urllib/'
'__init__.py',
'out/',
'third_party/blink/web_tests/external/wpt/tools/third_party/',
# TODO(maruel): Fix these.
'third_party/devscripts/licensecheck.pl.vanilla',
'third_party/libxml/linux/xml2-config',
'third_party/protobuf/',
'third_party/sqlite/',
'third_party/tcmalloc/',
'third_party/tlslite/setup.py',
)
#### USER EDITABLE SECTION ENDS HERE ####
assert (set(EXECUTABLE_EXTENSIONS) & set(IGNORED_EXTENSIONS) &
set(NON_EXECUTABLE_EXTENSIONS) == set())
assert set(EXECUTABLE_PATHS) & set(NON_EXECUTABLE_PATHS) == set()
VALID_CHARS = set(string.ascii_lowercase + string.digits + '/-_.')
for paths in (EXECUTABLE_PATHS, NON_EXECUTABLE_PATHS, IGNORED_PATHS):
assert all([set(path).issubset(VALID_CHARS) for path in paths])
def capture(cmd, cwd):
"""Returns the output of a command.
Ignores the error code or stderr.
"""
logging.debug('%s; cwd=%s' % (' '.join(cmd), cwd))
env = os.environ.copy()
env['LANGUAGE'] = 'en_US.UTF-8'
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, env=env)
return p.communicate()[0]
def get_git_root(dir_path):
"""Returns the git checkout root or None."""
root = capture(['git', 'rev-parse', '--show-toplevel'], dir_path).strip()
if root:
return root
def is_ignored(rel_path):
"""Returns True if rel_path is in our whitelist of files to ignore."""
rel_path = rel_path.lower()
return (
os.path.basename(rel_path) in IGNORED_FILENAMES or
rel_path.lower().startswith(IGNORED_PATHS))
def must_be_executable(rel_path):
"""The file name represents a file type that must have the executable bit
set.
"""
return (os.path.splitext(rel_path)[1][1:] in EXECUTABLE_EXTENSIONS or
rel_path.lower() in EXECUTABLE_PATHS)
def ignored_extension(rel_path):
"""The file name represents a file type that may or may not have the
executable set.
"""
return os.path.splitext(rel_path)[1][1:] in IGNORED_EXTENSIONS
def must_not_be_executable(rel_path):
"""The file name represents a file type that must not have the executable
bit set.
"""
return (os.path.splitext(rel_path)[1][1:] in NON_EXECUTABLE_EXTENSIONS or
rel_path.lower() in NON_EXECUTABLE_PATHS)
def has_executable_bit(full_path):
"""Returns if any executable bit is set."""
permission = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
return bool(permission & os.stat(full_path).st_mode)
def has_shebang_or_is_elf(full_path):
"""Returns if the file starts with #!/ or is an ELF binary.
full_path is the absolute path to the file.
"""
with open(full_path, 'rb') as f:
data = f.read(4)
return (data[:3] == '#!/' or data == '#! /', data == '\x7fELF')
def check_file(root_path, rel_path):
"""Checks the permissions of the file whose path is root_path + rel_path and
returns an error if it is inconsistent. Returns None on success.
It is assumed that the file is not ignored by is_ignored().
If the file name is matched with must_be_executable() or
must_not_be_executable(), only its executable bit is checked.
Otherwise, the first few bytes of the file are read to verify if it has a
shebang or ELF header and compares this with the executable bit on the file.
"""
full_path = os.path.join(root_path, rel_path)
def result_dict(error):
return {
'error': error,
'full_path': full_path,
'rel_path': rel_path,
}
try:
bit = has_executable_bit(full_path)
except OSError:
# It's faster to catch exception than call os.path.islink(). The Chromium
# tree may have invalid symlinks.
return None
if must_be_executable(rel_path):
if not bit:
return result_dict('Must have executable bit set')
return
if must_not_be_executable(rel_path):
if bit:
return result_dict('Must not have executable bit set')
return
if ignored_extension(rel_path):
return
# For the others, it depends on the file header.
(shebang, elf) = has_shebang_or_is_elf(full_path)
if bit != (shebang or elf):
if bit:
return result_dict('Has executable bit but not shebang or ELF header')
if shebang:
return result_dict('Has shebang but not executable bit')
return result_dict('Has ELF header but not executable bit')
def check_files(root, files):
gen = (check_file(root, f) for f in files
if not is_ignored(f) and not os.path.isdir(f))
return filter(None, gen)
class ApiBase(object):
def __init__(self, root_dir, bare_output):
self.root_dir = root_dir
self.bare_output = bare_output
self.count = 0
self.count_read_header = 0
def check_file(self, rel_path):
logging.debug('check_file(%s)' % rel_path)
self.count += 1
if (not must_be_executable(rel_path) and
not must_not_be_executable(rel_path)):
self.count_read_header += 1
return check_file(self.root_dir, rel_path)
def check_dir(self, rel_path):
return self.check(rel_path)
def check(self, start_dir):
"""Check the files in start_dir, recursively check its subdirectories."""
errors = []
items = self.list_dir(start_dir)
logging.info('check(%s) -> %d' % (start_dir, len(items)))
for item in items:
full_path = os.path.join(self.root_dir, start_dir, item)
rel_path = full_path[len(self.root_dir) + 1:]
if is_ignored(rel_path):
continue
if os.path.isdir(full_path):
# Depth first.
errors.extend(self.check_dir(rel_path))
else:
error = self.check_file(rel_path)
if error:
errors.append(error)
return errors
def list_dir(self, start_dir):
"""Lists all the files and directory inside start_dir."""
return sorted(
x for x in os.listdir(os.path.join(self.root_dir, start_dir))
if not x.startswith('.')
)
class ApiAllFilesAtOnceBase(ApiBase):
_files = None
def list_dir(self, start_dir):
"""Lists all the files and directory inside start_dir."""
if self._files is None:
self._files = sorted(self._get_all_files())
if not self.bare_output:
print('Found %s files' % len(self._files))
start_dir = start_dir[len(self.root_dir) + 1:]
return [
x[len(start_dir):] for x in self._files if x.startswith(start_dir)
]
def _get_all_files(self):
"""Lists all the files and directory inside self._root_dir."""
raise NotImplementedError()
class ApiGit(ApiAllFilesAtOnceBase):
def _get_all_files(self):
return capture(['git', 'ls-files'], cwd=self.root_dir).splitlines()
def get_scm(dir_path, bare):
"""Returns a properly configured ApiBase instance."""
cwd = os.getcwd()
root = get_git_root(dir_path or cwd)
if root:
if not bare:
print('Found git repository at %s' % root)
return ApiGit(dir_path or root, bare)
# Returns a non-scm aware checker.
if not bare:
print('Failed to determine the SCM for %s' % dir_path)
return ApiBase(dir_path or cwd, bare)
def main():
usage = """Usage: python %prog [--root <root>] [tocheck]
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python %prog
python %prog --root /path/to/source chrome"""
parser = optparse.OptionParser(usage=usage)
parser.add_option(
'--root',
help='Specifies the repository root. This defaults '
'to the checkout repository root')
parser.add_option(
'-v', '--verbose', action='count', default=0, help='Print debug logging')
parser.add_option(
'--bare',
action='store_true',
default=False,
help='Prints the bare filename triggering the checks')
parser.add_option(
'--file', action='append', dest='files',
help='Specifics a list of files to check the permissions of. Only these '
'files will be checked')
parser.add_option(
'--file-list',
help='Specifies a file with a list of files (one per line) to check the '
'permissions of. Only these files will be checked')
parser.add_option('--json', help='Path to JSON output file')
options, args = parser.parse_args()
levels = [logging.ERROR, logging.INFO, logging.DEBUG]
logging.basicConfig(level=levels[min(len(levels) - 1, options.verbose)])
if len(args) > 1:
parser.error('Too many arguments used')
if options.files and options.file_list:
parser.error('--file and --file-list are mutually exclusive options')
if options.root:
options.root = os.path.abspath(options.root)
if options.files:
errors = check_files(options.root, options.files)
elif options.file_list:
with open(options.file_list) as file_list:
files = file_list.read().splitlines()
errors = check_files(options.root, files)
else:
api = get_scm(options.root, options.bare)
start_dir = args[0] if args else api.root_dir
errors = api.check(start_dir)
if not options.bare:
print('Processed %s files, %d files were tested for shebang/ELF '
'header' % (api.count, api.count_read_header))
if options.json:
with open(options.json, 'w') as f:
json.dump(errors, f)
if errors:
if options.bare:
print('\n'.join(e['full_path'] for e in errors))
else:
print('\nFAILED\n')
print('\n'.join('%s: %s' % (e['full_path'], e['error']) for e in errors))
return 1
if not options.bare:
print('\nSUCCESS\n')
return 0
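def _example_classification():
  """Editor's illustrative sketch (not called anywhere in this script): shows
  how a couple of hypothetical relative paths are classified by the
  extension-based helpers defined above."""
  for rel_path in ('tools/run_tests.bat', 'chrome/browser/ui/tab.cc'):
    print('%s: must_be_executable=%s, must_not_be_executable=%s' % (
        rel_path, must_be_executable(rel_path), must_not_be_executable(rel_path)))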
if '__main__' == __name__:
sys.exit(main())
|
endlessm/chromium-browser
|
tools/checkperms/checkperms.py
|
Python
|
bsd-3-clause
| 13,204
|
[
"xTB"
] |
f9d6d3d7ee6ca4103e6bd8a0cea55e6fb95f75af611ad1d8aab7b748de90dea0
|
"""
################################################################################
#
# SOAPpy - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
# Gregory Warnes (Gregory.R.Warnes@Pfizer.com)
# Christopher Blunck (blunck@gst.com)
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: Client.py 1496 2010-03-04 23:46:17Z pooryorick $'
from .version import __version__
#import xml.sax
import urllib.request, urllib.parse, urllib.error
from SOAPpy.Types import *
import re
import base64
import socket, http.client
from http.client import HTTPConnection
import http.cookies
# SOAPpy-py3 modules
from .Errors import *
from .Config import Config
from .Parser import parseSOAPRPC
from .SOAPBuilder import buildSOAP
from .Utilities import *
from SOAPpy.Types import faultType, simplify
import collections
################################################################################
# Client
################################################################################
def SOAPUserAgent():
return "SOAPpy-py3 " + __version__ + " (pywebsvcs.sf.net)"
class HTTP:
"Compatibility class with httplib.py from 1.5."
_http_vsn = 10
_http_vsn_str = 'HTTP/1.0'
debuglevel = 0
_connection_class = HTTPConnection
def __init__(self, host='', port=None, strict=None):
"Provide a default host, since the superclass requires one."
# some joker passed 0 explicitly, meaning default port
if port == 0:
port = None
# Note that we may pass an empty string as the host; this will raise
# an error when we attempt to connect. Presumably, the client code
# will call connect before then, with a proper host.
self._setup(self._connection_class(host, port, strict))
def _setup(self, conn):
self._conn = conn
# set up delegation to flesh out interface
self.send = conn.send
self.putrequest = conn.putrequest
self.putheader = conn.putheader
self.endheaders = conn.endheaders
self.set_debuglevel = conn.set_debuglevel
conn._http_vsn = self._http_vsn
conn._http_vsn_str = self._http_vsn_str
self.file = None
def connect(self, host=None, port=None):
"Accept arguments to set the host/port, since the superclass doesn't."
if host is not None:
(self._conn.host, self._conn.port) = self._conn._get_hostport(host, port)
self._conn.connect()
def getfile(self):
"Provide a getfile, since the superclass' does not use this concept."
return self.file
def getreply(self, buffering=False):
"""Compat definition since superclass does not define it.
Returns a tuple consisting of:
- server status code (e.g. '200' if all goes well)
- server "reason" corresponding to status code
- any RFC822 headers in the response from the server
"""
try:
if not buffering:
response = self._conn.getresponse()
else:
#only add this keyword if non-default for compatibility
#with other connection classes
response = self._conn.getresponse(buffering)
except http.client.BadStatusLine as e:
### hmm. if getresponse() ever closes the socket on a bad request,
### then we are going to have problems with self.sock
### should we keep this behavior? do people use it?
# keep the socket open (as a file), and return it
self.file = self._conn.sock.makefile('rb', 0)
# close our socket -- we want to restart after any protocol error
self.close()
self.headers = None
return -1, e.line, None
self.headers = response.msg
self.file = response.fp
return response.status, response.reason, response.msg
def close(self):
self._conn.close()
# note that self.file == response.fp, which gets closed by the
# superclass. just clear the object ref here.
### hmm. messy. if status==-1, then self.file is owned by us.
### well... we aren't explicitly closing, but losing this ref will
### do it
self.file = None
class SOAPAddress:
def __init__(self, url, config = Config):
proto, uri = urllib.parse.splittype(url)
# apply some defaults
if uri[0:2] != '//':
if proto != None:
uri = proto + ':' + uri
uri = '//' + uri
proto = 'http'
host, path = urllib.parse.splithost(uri)
try:
int(host)
host = 'localhost:' + host
except:
pass
if not path:
path = '/'
if proto not in ('http', 'https', 'httpg'):
raise IOError("unsupported SOAP protocol")
if proto == 'httpg' and not config.GSIclient:
raise AttributeError("GSI client not supported by this Python installation")
if proto == 'https' and not config.SSLclient:
raise AttributeError("SSL client not supported by this Python installation")
self.user,host = urllib.parse.splituser(host)
self.proto = proto
self.host = host
self.path = path
def __str__(self):
return "%(proto)s://%(host)s%(path)s" % self.__dict__
__repr__ = __str__
class SOAPTimeoutError(socket.timeout):
'''This exception is raised when a timeout occurs in SOAP operations'''
pass
class HTTPConnectionWithTimeout(HTTPConnection):
'''Extend HTTPConnection for timeout support'''
def __init__(self, host, port=None, strict=None, timeout=None):
HTTPConnection.__init__(self, host, port, strict)
self._timeout = timeout
def connect(self):
HTTPConnection.connect(self)
if self.sock and self._timeout:
self.sock.settimeout(self._timeout)
class HTTPWithTimeout(HTTP):
_connection_class = HTTPConnectionWithTimeout
def __init__(self, host='', port=None, strict=None, timeout=None):
"""Slight modification of superclass (httplib.HTTP) constructor.
The only change is that arg ``timeout`` is also passed in the
initialization of :attr:`_connection_class`.
:param timeout: for the socket connection (seconds); None to disable
:type timeout: float or None
"""
if port == 0:
port = None
self._setup(self._connection_class(host, port, strict, timeout))
class HTTPTransport:
def __init__(self):
self.cookies = http.cookies.SimpleCookie();
def getNS(self, original_namespace, data):
"""Extract the (possibly extended) namespace from the returned
SOAP message."""
if type(original_namespace) == StringType:
pattern="xmlns:\w+=['\"](" + original_namespace + "[^'\"]*)['\"]"
match = re.search(pattern, data)
if match:
return match.group(1)
else:
return original_namespace
else:
return original_namespace
def __addcookies(self, r):
'''Add cookies from self.cookies to request r
'''
for cname, morsel in list(self.cookies.items()):
attrs = []
value = morsel.get('version', '')
if value != '' and value != '0':
attrs.append('$Version=%s' % value)
attrs.append('%s=%s' % (cname, morsel.coded_value))
value = morsel.get('path')
if value:
attrs.append('$Path=%s' % value)
value = morsel.get('domain')
if value:
attrs.append('$Domain=%s' % value)
r.putheader('Cookie', "; ".join(attrs))
def call(self, addr, data, namespace, soapaction = None, encoding = None,
http_proxy = None, config = Config, timeout=None):
if not isinstance(addr, SOAPAddress):
addr = SOAPAddress(addr, config)
# Build a request
if http_proxy:
real_addr = http_proxy
real_path = addr.proto + "://" + addr.host + addr.path
else:
real_addr = addr.host
real_path = addr.path
if addr.proto == 'httpg':
from pyGlobus.io import GSIHTTP
r = GSIHTTP(real_addr, tcpAttr = config.tcpAttr)
elif addr.proto == 'https':
r = http.client.HTTPS(real_addr, key_file=config.SSL.key_file, cert_file=config.SSL.cert_file)
else:
r = HTTPWithTimeout(real_addr, timeout=timeout)
r.putrequest("POST", real_path)
r.putheader("Host", addr.host)
r.putheader("User-agent", SOAPUserAgent())
t = 'text/xml';
if encoding != None:
t += '; charset=%s' % encoding
r.putheader("Content-type", t)
r.putheader("Content-length", str(len(data)))
self.__addcookies(r);
# if user is not a user:passwd format
# we'll receive a failure from the server. . .I guess (??)
if addr.user != None:
val = base64.encodestring(urllib.parse.unquote_plus(addr.user))
r.putheader('Authorization','Basic ' + val.replace('\012',''))
# This fixes sending either "" or "None"
if soapaction == None or len(soapaction) == 0:
r.putheader("SOAPAction", "")
else:
r.putheader("SOAPAction", '"%s"' % soapaction)
if config.dumpHeadersOut:
s = 'Outgoing HTTP headers'
debugHeader(s)
print("POST %s %s" % (real_path, r._http_vsn_str))
print("Host:", addr.host)
print("User-agent: SOAPpy-py3 " + __version__ + " (http://pywebsvcs.sf.net)")
print("Content-type:", t)
print("Content-length:", len(data))
print('SOAPAction: "%s"' % soapaction)
debugFooter(s)
r.endheaders()
if config.dumpSOAPOut:
s = 'Outgoing SOAP'
debugHeader(s)
print(data, end=' ')
if data[-1] != '\n':
print()
debugFooter(s)
# send the payload
r.send(data)
# read response line
code, msg, headers = r.getreply()
self.cookies = http.cookies.SimpleCookie();
if headers:
content_type = headers.get("content-type","text/xml")
content_length = headers.get("Content-length")
for cookie in headers.getallmatchingheaders("Set-Cookie"):
self.cookies.load(cookie);
else:
content_type=None
content_length=None
# work around OC4J bug which does '<len>, <len>' for some reason
if content_length:
comma=content_length.find(',')
if comma>0:
content_length = content_length[:comma]
# attempt to extract integer message size
try:
message_len = int(content_length)
except:
message_len = -1
f = r.getfile()
if f is None:
raise HTTPError(code, "Empty response from server\nCode: %s\nHeaders: %s" % (msg, headers))
if message_len < 0:
# Content-Length missing or invalid; just read the whole socket
# This won't work with HTTP/1.1 chunked encoding
data = f.read()
message_len = len(data)
else:
data = f.read(message_len)
if(config.debug):
print("code=",code)
print("msg=", msg)
print("headers=", headers)
print("content-type=", content_type)
print("data=", data)
if config.dumpHeadersIn:
s = 'Incoming HTTP headers'
debugHeader(s)
if headers.headers:
print("HTTP/1.? %d %s" % (code, msg))
print("\n".join([x.strip() for x in headers.headers]))
else:
print("HTTP/0.9 %d %s" % (code, msg))
debugFooter(s)
def startswith(string, val):
return string[0:len(val)] == val
if code == 500 and not \
( startswith(content_type, "text/xml") and message_len > 0 ):
raise HTTPError(code, msg)
if config.dumpSOAPIn:
s = 'Incoming SOAP'
debugHeader(s)
print(data, end=' ')
if (len(data)>0) and (data[-1] != '\n'):
print()
debugFooter(s)
if code not in (200, 500):
raise HTTPError(code, msg)
# get the new namespace
if namespace is None:
new_ns = None
else:
new_ns = self.getNS(namespace, data)
# return response payload
return data, new_ns
################################################################################
# SOAP Proxy
################################################################################
class SOAPProxy:
def __init__(self, proxy, namespace = None, soapaction = None,
header = None, methodattrs = None, transport = HTTPTransport,
encoding = 'UTF-8', throw_faults = 1, unwrap_results = None,
http_proxy=None, config = Config, noroot = 0,
simplify_objects=None, timeout=None):
# Test the encoding, raising an exception if it's not known
if encoding != None:
''.encode(encoding)
# get default values for unwrap_results and simplify_objects
# from config
if unwrap_results is None:
self.unwrap_results=config.unwrap_results
else:
self.unwrap_results=unwrap_results
if simplify_objects is None:
self.simplify_objects=config.simplify_objects
else:
self.simplify_objects=simplify_objects
self.proxy = SOAPAddress(proxy, config)
self.namespace = namespace
self.soapaction = soapaction
self.header = header
self.methodattrs = methodattrs
self.transport = transport()
self.encoding = encoding
self.throw_faults = throw_faults
self.http_proxy = http_proxy
self.config = config
self.noroot = noroot
self.timeout = timeout
# GSI Additions
if hasattr(config, "channel_mode") and \
hasattr(config, "delegation_mode"):
self.channel_mode = config.channel_mode
self.delegation_mode = config.delegation_mode
#end GSI Additions
def invoke(self, method, args):
return self.__call(method, args, {})
def __call(self, name, args, kw, ns = None, sa = None, hd = None,
ma = None):
ns = ns or self.namespace
ma = ma or self.methodattrs
if sa: # Get soapaction
if type(sa) == TupleType:
sa = sa[0]
else:
if self.soapaction:
sa = self.soapaction
else:
sa = name
if hd: # Get header
if type(hd) == TupleType:
hd = hd[0]
else:
hd = self.header
hd = hd or self.header
if ma: # Get methodattrs
if type(ma) == TupleType: ma = ma[0]
else:
ma = self.methodattrs
ma = ma or self.methodattrs
m = buildSOAP(args = args, kw = kw, method = name, namespace = ns,
header = hd, methodattrs = ma, encoding = self.encoding,
config = self.config, noroot = self.noroot)
call_retry = 0
try:
r, self.namespace = self.transport.call(self.proxy, m, ns, sa,
encoding = self.encoding,
http_proxy = self.http_proxy,
config = self.config,
timeout = self.timeout)
except socket.timeout:
raise SOAPTimeoutError
except Exception as ex:
#
# Call failed.
#
# See if we have a fault handling vector installed in our
# config. If we do, invoke it. If it returns a true value,
# retry the call.
#
# In any circumstance other than the fault handler returning
# true, reraise the exception. This keeps the semantics of this
# code the same as without the faultHandler code.
#
if hasattr(self.config, "faultHandler"):
if isinstance(self.config.faultHandler, collections.Callable):
call_retry = self.config.faultHandler(self.proxy, ex)
if not call_retry:
raise
else:
raise
else:
raise
if call_retry:
try:
r, self.namespace = self.transport.call(self.proxy, m, ns, sa,
encoding = self.encoding,
http_proxy = self.http_proxy,
config = self.config,
timeout = self.timeout)
except socket.timeout:
raise SOAPTimeoutError
p, attrs = parseSOAPRPC(r, attrs = 1)
try:
throw_struct = self.throw_faults and \
isinstance (p, faultType)
except:
throw_struct = 0
if throw_struct:
if self.config.debug:
print(p)
raise p
# If unwrap_results=1 and there is only element in the struct,
# SOAPProxy will assume that this element is the result
# and return it rather than the struct containing it.
# Otherwise SOAPproxy will return the struct with all the
# elements as attributes.
if self.unwrap_results:
try:
count = 0
for i in list(p.__dict__.keys()):
if i[0] != "_": # don't count the private stuff
count += 1
t = getattr(p, i)
if count == 1: # Only one piece of data, bubble it up
p = t
except:
pass
# Automatically simplify SOAP complex types into the
# corresponding python types. (structType --> dict,
# arrayType --> array, etc.)
if self.simplify_objects:
p = simplify(p)
if self.config.returnAllAttrs:
return p, attrs
return p
def _callWithBody(self, body):
return self.__call(None, body, {})
def __getattr__(self, name): # hook to catch method calls
if name in ( '__del__', '__getinitargs__', '__getnewargs__',
'__getstate__', '__setstate__', '__reduce__', '__reduce_ex__'):
raise AttributeError(name)
return self.__Method(self.__call, name, config = self.config)
# To handle attribute weirdness
class __Method:
# Some magic to bind a SOAP method to an RPC server.
# Supports "nested" methods (e.g. examples.getStateName) -- concept
# borrowed from xmlrpc/soaplib -- www.pythonware.com
# Altered (improved?) to let you inline namespaces on a per call
# basis ala SOAP::LITE -- www.soaplite.com
def __init__(self, call, name, ns = None, sa = None, hd = None,
ma = None, config = Config):
self.__call = call
self.__name = name
self.__ns = ns
self.__sa = sa
self.__hd = hd
self.__ma = ma
self.__config = config
return
def __call__(self, *args, **kw):
if self.__name[0] == "_":
if self.__name in ["__repr__","__str__"]:
return self.__repr__()
else:
return self.__f_call(*args, **kw)
else:
return self.__r_call(*args, **kw)
def __getattr__(self, name):
if name == '__del__':
raise AttributeError(name)
if self.__name[0] == "_":
# Don't nest method if it is a directive
return self.__class__(self.__call, name, self.__ns,
self.__sa, self.__hd, self.__ma)
return self.__class__(self.__call, "%s.%s" % (self.__name, name),
self.__ns, self.__sa, self.__hd, self.__ma)
def __f_call(self, *args, **kw):
if self.__name == "_ns": self.__ns = args
elif self.__name == "_sa": self.__sa = args
elif self.__name == "_hd": self.__hd = args
elif self.__name == "_ma": self.__ma = args
return self
def __r_call(self, *args, **kw):
return self.__call(self.__name, args, kw, self.__ns, self.__sa,
self.__hd, self.__ma)
def __repr__(self):
return "<%s at %d>" % (self.__class__, id(self))
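# Editor's illustrative sketch (kept as comments because this module uses
# relative imports and is not meant to be run directly): the dynamic dispatch
# implemented by SOAPProxy/__Method above lets a remote method be called as a
# plain attribute. The endpoint URL and the method name are hypothetical.
#
#   from SOAPpy import SOAPProxy
#   server = SOAPProxy("http://localhost:8080/soap", namespace="urn:example")
#   result = server.echo("hello")   # attribute access -> __Method -> __call('echo', ...)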
|
cmsdaq/hltd
|
lib/SOAPpy-py3-0.52.24/src/SOAPpy/Client.py
|
Python
|
lgpl-3.0
| 23,216
|
[
"Brian"
] |
8fbf0cfea68d0ced4761ea7da7d36086d0d5465f27aa4d14aec7f40b50b21f93
|
# -*- coding: utf-8 -*-
import numpy as np
from ..constants import periodic_table, scattering_lengths
class Atom(object):
r"""Class for adding atoms to the Material class.
Parameters
----------
ion : string
The name of the Atom, or ion if necessary
pos : list(3)
The position of the Atom in the chosen geometry
dpos : list(3), optional
Deviations from the position pos
occupancy: float, optional
Occupancy of the _Atom (*e.g.* if there is partial occupancy from
doping)
Mcell : float, optional
The mass of the unit cell. If assigned, normalize scattering lengths to
the square-root of the mass of the atom
Returns
-------
output : object
Atom object defining an individual atom in a unit cell of a single
crystal
"""
def __init__(self, ion, pos, occupancy=1., Mcell=None, massNorm=False, Uiso=0, Uaniso=np.zeros((3, 3))):
self.ion = ion
self.pos = np.array(pos)
self.occupancy = occupancy
self.Mcell = Mcell
self.Uiso = Uiso
self.Uaniso = np.matrix(Uaniso)
if isinstance(scattering_lengths()[ion]['Coh b'], list):
b = complex(*scattering_lengths()[ion]['Coh b'])
else:
b = scattering_lengths()[ion]['Coh b']
if massNorm is True:
self.mass = periodic_table()[ion]['mass']
self.b = (b * self.occupancy * self.Mcell / np.sqrt(self.mass))
else:
self.b = b / 10.
self.coh_xs = scattering_lengths()[ion]['Coh xs']
self.inc_xs = scattering_lengths()[ion]['Inc xs']
self.abs_xs = scattering_lengths()[ion]['Abs xs']
def __repr__(self):
return "Atom('{0}')".format(self.ion)
class MagneticAtom(object):
r"""Class for adding magnetic atoms to the Material class.
Parameters
----------
ion : str
The name of the ion
pos : list(3)
The position of the atom in r.l.u.
Return
------
output : object
MagneticAtom object defining an individual magnetic ion in a unit cell
"""
def __init__(self, ion, pos, moment, occupancy):
self.ion = ion
self.pos = np.array(pos)
self.moment = moment
self.occupancy = occupancy
def __repr__(self):
return "MagneticAtom('{0}')".format(self.ion, self.pos, self.moment, self.occupancy)
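# Editor's illustrative sketch (kept as comments because this module is part
# of a package and uses relative imports): constructing the Atom defined
# above. The ion, position and occupancy are arbitrary demo values, and the
# scattering-length lookup relies on neutronpy's bundled constants tables.
#
#   from neutronpy.crystal.atom import Atom
#   fe = Atom('Fe', [0., 0., 0.], occupancy=1.0)
#   print(fe, fe.b, fe.coh_xs)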
|
granrothge/neutronpy
|
neutronpy/crystal/atom.py
|
Python
|
mit
| 2,433
|
[
"CRYSTAL",
"MCell"
] |
7803882183e9593714f2a9473a7ebde9cec1008735b0fc1039dc7bb303883aa9
|
#ImportModules
import ShareYourSystem as SYS
import operator
#Definition of a Brian structure
MyPopulater=SYS.PopulaterClass().populate(
**{
'PopulatingUnitsInt':500
}
)
#Definition of the AttestedStr
SYS._attest(
[
'MyPopulater is '+SYS._str(
MyPopulater,
**{
'RepresentingBaseKeyStrsListBool':False,
'RepresentingAlineaIsBool':False
}
),
]
)
#Print
|
Ledoux/ShareYourSystem
|
Pythonlogy/draft/Simulaters/Populater/01_ExampleCell.py
|
Python
|
mit
| 393
|
[
"Brian"
] |
55795209f8dc0e6ba0cd8897328f07a0ff8ee95bff71abe3eafbfb678d8bd73c
|
"""
This is a random gaussian noise generator module type 3.
This module generates the signal with oversampling=1, then oversamples the signal
to the wanted representation sampling frequency, and finally upconverts the signal. |br|
The generator is able to generate N random signals with a random frequency position within
a given min and max boundaries. |br|
*Examples*:
Please go to the *examples/signals* directory for examples on how to use
the generator. |br|
*Settings*:
Parameters of the generator described below.
Take a look on '__parametersDefine' function for more info on the
parameters.
Parameters of the generator are attributes of the class which must/can
be set before the generator run.
Required parameters:
- a. **tS** (*float*): time of a signals
- b. **fR** (*float*): signals' representation sampling frequency
- c. ****
Optional parameters:
- c. **fMin** (*float*): minimum frequency component in the signal
[default = not regulated]
- d. **fMax** (*float*): maximum frequency component in the signal
[default = not regulated]
- e. **iP** (*float*): signals' power [default = 1W]
- f. **nSigs** (*int*): the number of signals to be generated
[default = 1]
- k. **bMute** (*int*): mute the console output from the generator
[default = 0]
*Output*:
Description of the generator output is below.
This is the list of attributes of the generator class which are available
after calling the 'run' method:
- a. **mSig** (*Numpy array 2D*): Matrix with output signals,
one signal p. row
- b. **nSmp** (*int*): The number of samples in the signals
- c. **vP** (*Numpy array 1D*): Vector with the power of signals
*Author*:
Jacek Pierzchlewski, Aalborg University, Denmark. <jap@es.aau.dk>
*Version*:
0.01 | 15-MAR-2016 : * Version 1.0 released. |br|
*License*:
BSD 2-Clause
"""
from __future__ import division
import numpy as np
import rxcs
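# Editor's illustrative sketch (kept as comments; see examples/signals in the
# rxcs repository for real examples): a minimal use of this generator with the
# parameters documented in the module docstring above. The numeric values are
# arbitrary demo settings.
#
#   gen = gaussNoise3()
#   gen.fR = 2e6        # representation sampling frequency [Hz]
#   gen.tS = 1e-3       # signal time [s]
#   gen.fMin = 100e3    # lowest allowed frequency component [Hz]
#   gen.fMax = 900e3    # highest allowed frequency component [Hz]
#   gen.fWidth = 100e3  # frequency width of a signal [Hz]
#   gen.fGrad = 10e3    # frequency gradation of the allowed spectrum [Hz]
#   gen.run()           # gen.mSig then holds the generated signals, one per row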
class gaussNoise3(rxcs._RxCSobject):
def __init__(self, *args):
rxcs._RxCSobject.__init__(self) # Make it a RxCS object
self.strRxCSgroup = 'Signal generator' # Name of group of RxCS modules
self.strModuleName = 'Random gaussian noise (type 3)' # Module name
self.__parametersDefine() # Define the parameters
# Import tools
self.gaussNoise = rxcs.sig.gaussNoise() # Import basic gaussian noise generator
self.oversampler = rxcs.sig.oversampler() # Import oversampler
self.upconvert = rxcs.sig.radio.upconvert() # Upconversion
self.powerRegulator = rxcs.sig.powerRegulator() # Power regulator
def __parametersDefine(self):
"""
Internal method which defines the parameters
"""
# Representation sampling frequency
self.paramAddMan('fR', 'Representation sampling frequency', unit='Hz')
self.paramType('fR', (int, float))
self.paramH('fR', 0) # Rep. samp. freq. must be higher than zero
self.paramL('fR', np.inf) # ...and lower than infinity
# Time of signal
self.paramAddMan('tS', 'Signal time', unit='s')
self.paramType('tS', (float, int))
self.paramH('tS', 0) # Time must be higher than zero
self.paramL('tS', np.inf) # ...and lower than infinity
# The lowest possible frequency component of the signal
self.paramAddMan('fMin', 'The lowest possible frequency component of the signal', unit='Hz')
self.paramType('fMin', (float, int))
self.paramHE('fMin', 0)
self.paramL('fMin', 'fMax')
# The highest possible frequency component of the signal
self.paramAddMan('fMax', 'The highest possible frequency component of the signal', unit='Hz')
self.paramType('fMax', (float, int))
self.paramH('fMax', 0)
self.paramLE('fMax', 'fR', mul=0.5)
# The frequency width of the signal
self.paramAddMan('fWidth', 'Frequency width of the signal', unit='Hz')
self.paramType('fWidth', (float, int))
self.paramH('fWidth', 0)
# The frequency gradation of the allowed spectrum
self.paramAddMan('fGrad', 'The frequency gradation of the allowed spectrum', unit='Hz')
self.paramType('fGrad', (float, int))
self.paramH('fGrad', 0)
# Power of a signal
self.paramAddOpt('iP', 'Signal power', unit='W', default=1)
self.paramType('iP',(float, int))
self.paramH('iP', 0) # Power of the signal must be higher than zero
self.paramL('iP', np.inf) # ...and lower than infinity
# The number of signals
self.paramAddOpt('nSigs', 'The number of signals', unit='', default=1)
self.paramType('nSigs',(int))
self.paramH('nSigs', 0) # The number of signals must be higher than zero
self.paramL('nSigs', np.inf) # ...and lower than infinity
# --------------------------------------------------------------------
# Mute the output flag
self.paramAddOpt('bMute', 'Mute the output', noprint=1, default=0)
self.paramType('bMute', int) # Must be of int type
self.paramAllowed('bMute',[0, 1]) # It can be either 1 or 0
def run(self):
"""
Run method, which starts the generator
"""
self.parametersCheck() # Check if all the needed parameters are in place and are correct
self.parametersPrint() # Print the values of parameters
self.engineStartsInfo() # Info that the engine starts
self.__engine() # Run the engine
self.engineStopsInfo() # Info that the engine ends
return self.__dict__ # Return dictionary with the parameters
def __engine(self):
"""
Engine of the function
"""
# Check if there is enough space between the maximum and minimum frequency
if ((self.fMax - self.fMin) < self.fWidth):
strError = 'There is not enough space between the maximum and minimum frequency!'
raise ValueError(strError)
# Compute the number of possible positions of signals
iNPos = int(np.floor((self.fMax - self.fMin - self.fWidth)/self.fGrad)) + 1
# Allocate matrix for signals
self.mSig_ = np.nan*np.ones((self.nSigs, int(np.round(self.fR*self.tS))))
# Generate the basic signals
self.gaussNoise.fR = 2*self.fWidth
self.gaussNoise.tS = self.tS
self.gaussNoise.iP = 1
self.gaussNoise.nSigs = self.nSigs
self.gaussNoise.bMute = 1
self.gaussNoise.fMax = self.fWidth/2
self.gaussNoise.strFilt = 'butter'
self.gaussNoise.nFiltOrd = 30
self.gaussNoise.iRs = 100
self.gaussNoise.run()
# Oversample the basic signals
self.oversampler.mSig = self.gaussNoise.mSig
self.oversampler.iFLow = 2*self.fWidth
self.oversampler.iFHigh = self.fR
self.oversampler.bMute = 1
self.oversampler.run()
# Upconvert
self.upconvert.fR = self.fR
self.upconvert.tS = self.tS
self.upconvert.bMute = 1
vPos = np.random.randint(0, iNPos, self.nSigs) # Draw positions of the signals
for iInxSig in range(self.nSigs):
fC = self.fMin + self.fWidth/2 + vPos[iInxSig] * self.fGrad
self.upconvert.fC = fC
self.upconvert.mSig = np.atleast_2d(self.oversampler.mSigOversamp[iInxSig, :])
self.upconvert.run()
self.mSig_[iInxSig, :] = self.upconvert.mSig[0, :]
# Regulate the power of the signal and assign the signal with the regulated power
# as the output signal
self.powerRegulator.mSig = self.mSig_
self.powerRegulator.iP = self.iP
self.powerRegulator.bMute = 1
self.powerRegulator.run()
self.mSig = self.powerRegulator.mSigOut
self.vP = self.powerRegulator.vP
# Compute the number of samples in the output signal
self.nSmp = int(np.round(self.fR * self.tS))
return
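# Illustrative usage sketch (not part of the original module; all parameter
# values below are hypothetical and assume the rxcs package is importable):
# gen = gaussNoise3()
# gen.tS = 1e-3                       # 1 ms signal
# gen.fR = 100e3                      # representation sampling frequency [Hz]
# gen.fMin = 10e3; gen.fMax = 40e3    # boundaries of the allowed spectrum [Hz]
# gen.fWidth = 5e3                    # frequency width of each signal [Hz]
# gen.fGrad = 1e3                     # gradation of the allowed spectrum [Hz]
# gen.nSigs = 4                       # generate four signals
# dOut = gen.run()                    # dOut['mSig']: one signal per row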
|
JacekPierzchlewski/RxCS
|
rxcs/sig/gaussNoise3.py
|
Python
|
bsd-2-clause
| 8,416
|
[
"Gaussian"
] |
1db682299f0a0c1db986b3aea9f28dce802aa2849a1645fddafd679a120634e3
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements a FloatWithUnit, which is a subclass of float. It
also defines supported units for some commonly used quantities such as energy,
length, temperature, time and charge. FloatWithUnit also supports conversion
from one unit to another, and additions and subtractions perform automatic
conversion if units are detected. An ArrayWithUnit is also implemented, which
is a subclass of numpy's ndarray with similar unit features.
"""
import collections
import numbers
from functools import partial
import numpy as np
import scipy.constants as const
__author__ = "Shyue Ping Ong, Matteo Giantomassi"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong, Matteo Giantomassi"
__status__ = "Production"
__date__ = "Aug 30, 2013"
"""
Some conversion factors
"""
Ha_to_eV = 1 / const.physical_constants["electron volt-hartree relationship"][0]
eV_to_Ha = 1 / Ha_to_eV
Ry_to_eV = Ha_to_eV / 2
amu_to_kg = const.physical_constants["atomic mass unit-kilogram relationship"][0]
mile_to_meters = const.mile
bohr_to_angstrom = const.physical_constants["Bohr radius"][0] * 1e10
bohr_to_ang = bohr_to_angstrom
ang_to_bohr = 1 / bohr_to_ang
kCal_to_kJ = const.calorie
kb = const.physical_constants["Boltzmann constant in eV/K"][0]
"""
Definitions of supported units. Values below are essentially scaling and
conversion factors. What matters is the relative values, not the absolute.
The SI units must have factor 1.
"""
BASE_UNITS = {
"length": {
"m": 1,
"km": 1000,
"mile": mile_to_meters,
"ang": 1e-10,
"cm": 1e-2,
"pm": 1e-12,
"bohr": bohr_to_angstrom * 1e-10,
},
"mass": {
"kg": 1,
"g": 1e-3,
"amu": amu_to_kg,
},
"time": {
"s": 1,
"min": 60,
"h": 3600,
"d": 3600 * 24,
},
"current": {"A": 1},
"temperature": {
"K": 1,
},
"amount": {"mol": 1, "atom": 1 / const.N_A},
"intensity": {"cd": 1},
"memory": {
"byte": 1,
"Kb": 1024,
"Mb": 1024**2,
"Gb": 1024**3,
"Tb": 1024**4,
},
}
# Accept kb, mb, gb ... as well.
BASE_UNITS["memory"].update({k.lower(): v for k, v in BASE_UNITS["memory"].items()})
# This is the current list of supported derived units, defined in terms of
# powers of SI base units and constants.
DERIVED_UNITS = {
"energy": {
"eV": {"kg": 1, "m": 2, "s": -2, const.e: 1},
"meV": {"kg": 1, "m": 2, "s": -2, const.e * 1e-3: 1},
"Ha": {"kg": 1, "m": 2, "s": -2, const.e * Ha_to_eV: 1},
"Ry": {"kg": 1, "m": 2, "s": -2, const.e * Ry_to_eV: 1},
"J": {"kg": 1, "m": 2, "s": -2},
"kJ": {"kg": 1, "m": 2, "s": -2, 1000: 1},
"kCal": {"kg": 1, "m": 2, "s": -2, 1000: 1, kCal_to_kJ: 1},
},
"charge": {
"C": {"A": 1, "s": 1},
"e": {"A": 1, "s": 1, const.e: 1},
},
"force": {
"N": {"kg": 1, "m": 1, "s": -2},
"KN": {"kg": 1, "m": 1, "s": -2, 1000: 1},
"MN": {"kg": 1, "m": 1, "s": -2, 1e6: 1},
"GN": {"kg": 1, "m": 1, "s": -2, 1e9: 1},
},
"frequency": {
"Hz": {"s": -1},
"KHz": {"s": -1, 1000: 1},
"MHz": {"s": -1, 1e6: 1},
"GHz": {"s": -1, 1e9: 1},
"THz": {"s": -1, 1e12: 1},
},
"pressure": {
"Pa": {"kg": 1, "m": -1, "s": -2},
"KPa": {"kg": 1, "m": -1, "s": -2, 1000: 1},
"MPa": {"kg": 1, "m": -1, "s": -2, 1e6: 1},
"GPa": {"kg": 1, "m": -1, "s": -2, 1e9: 1},
},
"power": {
"W": {"m": 2, "kg": 1, "s": -3},
"KW": {"m": 2, "kg": 1, "s": -3, 1000: 1},
"MW": {"m": 2, "kg": 1, "s": -3, 1e6: 1},
"GW": {"m": 2, "kg": 1, "s": -3, 1e9: 1},
},
"emf": {"V": {"m": 2, "kg": 1, "s": -3, "A": -1}},
"capacitance": {"F": {"m": -2, "kg": -1, "s": 4, "A": 2}},
"resistance": {"ohm": {"m": 2, "kg": 1, "s": -3, "A": -2}},
"conductance": {"S": {"m": -2, "kg": -1, "s": 3, "A": 2}},
"magnetic_flux": {"Wb": {"m": 2, "kg": 1, "s": -2, "A": -1}},
"cross_section": {"barn": {"m": 2, 1e-28: 1}, "mbarn": {"m": 2, 1e-31: 1}},
}
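# Reading the encoding above (illustrative note, not in the original source):
# string keys are SI base units mapped to their powers, while numeric keys are
# constants raised to the given power. E.g. "eV": {"kg": 1, "m": 2, "s": -2,
# const.e: 1} encodes 1 eV = const.e J, i.e. roughly 1.602e-19 kg m^2 s^-2.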
ALL_UNITS = dict(list(BASE_UNITS.items()) + list(DERIVED_UNITS.items())) # type: ignore
SUPPORTED_UNIT_NAMES = tuple(i for d in ALL_UNITS.values() for i in d.keys())
# Mapping unit name --> unit type (unit names must be unique).
_UNAME2UTYPE = {} # type: ignore
for utype, d in ALL_UNITS.items():
assert not set(d.keys()).intersection(_UNAME2UTYPE.keys())
_UNAME2UTYPE.update({uname: utype for uname in d})
del utype, d
def _get_si_unit(unit):
unit_type = _UNAME2UTYPE[unit]
si_unit = filter(lambda k: BASE_UNITS[unit_type][k] == 1, BASE_UNITS[unit_type].keys())
return list(si_unit)[0], BASE_UNITS[unit_type][unit]
class UnitError(BaseException):
"""
Exception class for unit errors.
"""
def _check_mappings(u):
for v in DERIVED_UNITS.values():
for k2, v2 in v.items():
if all(v2.get(ku, 0) == vu for ku, vu in u.items()) and all(
u.get(kv2, 0) == vv2 for kv2, vv2 in v2.items()
):
return {k2: 1}
return u
class Unit(collections.abc.Mapping):
"""
Represents a unit, e.g., "m" for meters, etc. Supports compound units.
Only integer powers are supported for units.
"""
Error = UnitError
def __init__(self, unit_def):
"""
Constructs a unit.
Args:
unit_def: A definition for the unit. Either a mapping of unit to
powers, e.g., {"m": 2, "s": -1} represents "m^2 s^-1",
or simply as a string "kg m^2 s^-1". Note that the supported
format uses "^" as the power operator and all units must be
space-separated.
"""
if isinstance(unit_def, str):
unit = collections.defaultdict(int)
import re
for m in re.finditer(r"([A-Za-z]+)\s*\^*\s*([\-0-9]*)", unit_def):
p = m.group(2)
p = 1 if not p else int(p)
k = m.group(1)
unit[k] += p
else:
unit = {k: v for k, v in dict(unit_def).items() if v != 0}
self._unit = _check_mappings(unit)
def __mul__(self, other):
new_units = collections.defaultdict(int)
for k, v in self.items():
new_units[k] += v
for k, v in other.items():
new_units[k] += v
return Unit(new_units)
def __rmul__(self, other):
return self.__mul__(other)
def __div__(self, other):
new_units = collections.defaultdict(int)
for k, v in self.items():
new_units[k] += v
for k, v in other.items():
new_units[k] -= v
return Unit(new_units)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, i):
return Unit({k: v * i for k, v in self.items()})
def __iter__(self):
return self._unit.__iter__()
def __getitem__(self, i):
return self._unit[i]
def __len__(self):
return len(self._unit)
def __repr__(self):
sorted_keys = sorted(self._unit.keys(), key=lambda k: (-self._unit[k], k))
return " ".join(
[f"{k}^{self._unit[k]}" if self._unit[k] != 1 else k for k in sorted_keys if self._unit[k] != 0]
)
def __str__(self):
return self.__repr__()
@property
def as_base_units(self):
"""
Converts all units to base SI units, including derived units.
Returns:
(base_units_dict, scaling factor). base_units_dict will not
contain any constants, which are gathered in the scaling factor.
"""
b = collections.defaultdict(int)
factor = 1
for k, v in self.items():
derived = False
for d in DERIVED_UNITS.values():
if k in d:
for k2, v2 in d[k].items():
if isinstance(k2, numbers.Number):
factor *= k2 ** (v2 * v)
else:
b[k2] += v2 * v
derived = True
break
if not derived:
si, f = _get_si_unit(k)
b[si] += v
factor *= f**v
return {k: v for k, v in b.items() if v != 0}, factor
def get_conversion_factor(self, new_unit):
"""
Returns a conversion factor between this unit and a new unit.
Compound units are supported, but must have the same powers in each
unit type.
Args:
new_unit: The new unit.
"""
uo_base, ofactor = self.as_base_units
un_base, nfactor = Unit(new_unit).as_base_units
units_new = sorted(un_base.items(), key=lambda d: _UNAME2UTYPE[d[0]])
units_old = sorted(uo_base.items(), key=lambda d: _UNAME2UTYPE[d[0]])
factor = ofactor / nfactor
for uo, un in zip(units_old, units_new):
if uo[1] != un[1]:
raise UnitError(f"Units {uo} and {un} are not compatible!")
c = ALL_UNITS[_UNAME2UTYPE[uo[0]]]
factor *= (c[uo[0]] / c[un[0]]) ** uo[1]
return factor
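# Illustrative sketch (not in the original source): compound units compose
# with * and /, and conversion factors follow from the base-unit expansion.
# Unit("m") / Unit("s")                           # -> Unit("m s^-1")
# Unit("km").get_conversion_factor("mile")        # -> ~0.6214
# Unit("kg m^2 s^-2").get_conversion_factor("J")  # -> 1.0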
class FloatWithUnit(float):
"""
Subclasses float to attach a unit type. Typically, you should use the
pre-defined unit type subclasses such as Energy, Length, etc. instead of
using FloatWithUnit directly.
Supports conversion, addition and subtraction of the same unit type. E.g.,
1 m + 20 cm will be automatically converted to 1.2 m (units follow the
leftmost quantity). Note that FloatWithUnit does not override the eq
method for float, i.e., units are not checked when testing for equality.
The reason is to allow this class to be used transparently wherever floats
are expected.
>>> e = Energy(1.1, "Ha")
>>> a = Energy(1.1, "Ha")
>>> b = Energy(3, "eV")
>>> c = a + b
>>> print(c)
1.2102479761938871 Ha
>>> c.to("eV")
32.932522246000005 eV
"""
Error = UnitError
@classmethod
def from_string(cls, s):
"""
Initialize a FloatWithUnit from a string. Example: Memory.from_string("1. Mb")
"""
# Extract num and unit string.
s = s.strip()
for i, char in enumerate(s):
if char.isalpha() or char.isspace():
break
else:
raise Exception(f"Unit is missing in string {s}")
num, unit = float(s[:i]), s[i:]
# Find unit type (set it to None if it cannot be detected)
for unit_type, d in BASE_UNITS.items():
if unit in d:
break
else:
unit_type = None
return cls(num, unit, unit_type=unit_type)
def __new__(cls, val, unit, unit_type=None):
"""Overrides __new__ since we are subclassing a Python primitive/"""
new = float.__new__(cls, val)
new._unit = Unit(unit)
new._unit_type = unit_type
return new
def __init__(self, val, unit, unit_type=None):
"""
Initializes a float with unit.
Args:
val (float): Value
unit (Unit): A unit. E.g., "C".
unit_type (str): A type of unit. E.g., "charge"
"""
if unit_type is not None and str(unit) not in ALL_UNITS[unit_type]:
raise UnitError(f"{unit} is not a supported unit for {unit_type}")
self._unit = Unit(unit)
self._unit_type = unit_type
def __repr__(self):
return super().__repr__()
def __str__(self):
s = super().__str__()
return f"{s} {self._unit}"
def __add__(self, other):
if not hasattr(other, "unit_type"):
return super().__add__(other)
if other.unit_type != self._unit_type:
raise UnitError("Adding different types of units is not allowed")
val = other
if other.unit != self._unit:
val = other.to(self._unit)
return FloatWithUnit(float(self) + val, unit_type=self._unit_type, unit=self._unit)
def __sub__(self, other):
if not hasattr(other, "unit_type"):
return super().__sub__(other)
if other.unit_type != self._unit_type:
raise UnitError("Subtracting different units is not allowed")
val = other
if other.unit != self._unit:
val = other.to(self._unit)
return FloatWithUnit(float(self) - val, unit_type=self._unit_type, unit=self._unit)
def __mul__(self, other):
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(float(self) * other, unit_type=self._unit_type, unit=self._unit)
return FloatWithUnit(float(self) * other, unit_type=None, unit=self._unit * other._unit)
def __rmul__(self, other):
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(float(self) * other, unit_type=self._unit_type, unit=self._unit)
return FloatWithUnit(float(self) * other, unit_type=None, unit=self._unit * other._unit)
def __pow__(self, i):
return FloatWithUnit(float(self) ** i, unit_type=None, unit=self._unit**i)
def __truediv__(self, other):
val = super().__truediv__(other)
if not isinstance(other, FloatWithUnit):
return FloatWithUnit(val, unit_type=self._unit_type, unit=self._unit)
return FloatWithUnit(val, unit_type=None, unit=self._unit / other._unit)
def __neg__(self):
return FloatWithUnit(super().__neg__(), unit_type=self._unit_type, unit=self._unit)
def __getnewargs__(self):
"""Function used by pickle to recreate object."""
# print(self.__dict__)
# FIXME
# There's a problem with _unit_type if we try to unpickle objects from file,
# since self._unit_type might not be defined. I think this is due to
# the use of decorators (property and unitized). In particular I have problems with "amu"
# likely due to weight in core.composition
if hasattr(self, "_unit_type"):
args = float(self), self._unit, self._unit_type
else:
args = float(self), self._unit, None
return args
def __getstate__(self):
state = self.__dict__.copy()
state["val"] = float(self)
# print("in getstate %s" % state)
return state
def __setstate__(self, state):
# print("in setstate %s" % state)
self._unit = state["_unit"]
@property
def unit_type(self) -> str:
"""
:return: The type of unit. Energy, Charge, etc.
"""
return self._unit_type
@property
def unit(self) -> str:
"""
:return: The unit, e.g., "eV".
"""
return self._unit
def to(self, new_unit):
"""
Conversion to a new_unit. Right now, only supports 1 to 1 mapping of
units of each type.
Args:
new_unit: New unit type.
Returns:
A FloatWithUnit object in the new units.
Example usage:
>>> e = Energy(1.1, "eV")
>>> e = Energy(1.1, "Ha")
>>> e.to("eV")
29.932522246 eV
"""
return FloatWithUnit(
self * self.unit.get_conversion_factor(new_unit),
unit_type=self._unit_type,
unit=new_unit,
)
@property
def as_base_units(self):
"""
Returns this FloatWithUnit in base SI units, including derived units.
Returns:
A FloatWithUnit object in base SI units
"""
return self.to(self.unit.as_base_units[0])
@property
def supported_units(self):
"""
Supported units for specific unit type.
"""
return tuple(ALL_UNITS[self._unit_type].keys())
class ArrayWithUnit(np.ndarray):
"""
Subclasses `numpy.ndarray` to attach a unit type. Typically, you should
use the pre-defined unit type subclasses such as EnergyArray,
LengthArray, etc. instead of using ArrayWithUnit directly.
Supports conversion, addition and subtraction of the same unit type. E.g.,
1 m + 20 cm will be automatically converted to 1.2 m (units follow the
leftmost quantity).
>>> a = EnergyArray([1, 2], "Ha")
>>> b = EnergyArray([1, 2], "eV")
>>> c = a + b
>>> print(c)
[ 1.03674933 2.07349865] Ha
>>> c.to("eV")
array([ 28.21138386, 56.42276772]) eV
"""
Error = UnitError
def __new__(cls, input_array, unit, unit_type=None):
"""
Override __new__.
"""
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = np.asarray(input_array).view(cls)
# add the new attributes to the created instance
obj._unit = Unit(unit)
obj._unit_type = unit_type
return obj
def __array_finalize__(self, obj):
"""
See http://docs.scipy.org/doc/numpy/user/basics.subclassing.html for
comments.
"""
if obj is None:
return
self._unit = getattr(obj, "_unit", None)
self._unit_type = getattr(obj, "_unit_type", None)
@property
def unit_type(self) -> str:
"""
:return: The type of unit. Energy, Charge, etc.
"""
return self._unit_type
@property
def unit(self) -> str:
"""
:return: The unit, e.g., "eV".
"""
return self._unit
def __reduce__(self):
# print("in reduce")
reduce = list(super().__reduce__())
# print("unit",self._unit)
# print(reduce[2])
reduce[2] = {"np_state": reduce[2], "_unit": self._unit}
return tuple(reduce)
def __setstate__(self, state):
# pylint: disable=E1101
super().__setstate__(state["np_state"])
self._unit = state["_unit"]
def __repr__(self):
return f"{np.array(self).__repr__()} {self.unit}"
def __str__(self):
return f"{np.array(self).__str__()} {self.unit}"
def __add__(self, other):
if hasattr(other, "unit_type"):
if other.unit_type != self.unit_type:
raise UnitError("Adding different types of units is not allowed")
if other.unit != self.unit:
other = other.to(self.unit)
return self.__class__(np.array(self) + np.array(other), unit_type=self.unit_type, unit=self.unit)
def __sub__(self, other):
if hasattr(other, "unit_type"):
if other.unit_type != self.unit_type:
raise UnitError("Subtracting different units is not allowed")
if other.unit != self.unit:
other = other.to(self.unit)
return self.__class__(np.array(self) - np.array(other), unit_type=self.unit_type, unit=self.unit)
def __mul__(self, other):
# FIXME
# Here we have the most important difference between FloatWithUnit and
# ArrayWithUnit:
# If other does not have units, I return an object with the same units
# as self.
# if other *has* units, I return an object *without* units since
# taking into account all the possible derived quantities would be
# too difficult.
# Moreover Energy(1.0) * Time(1.0, "s") returns 1.0 Ha, which is a
# bit misleading.
# Same protocol for __div__
if not hasattr(other, "unit_type"):
return self.__class__(
np.array(self).__mul__(np.array(other)),
unit_type=self._unit_type,
unit=self._unit,
)
# Cannot use super since it returns an instance of self.__class__
# while here we want a bare numpy array.
return self.__class__(np.array(self).__mul__(np.array(other)), unit=self.unit * other.unit)
def __rmul__(self, other):
# pylint: disable=E1101
if not hasattr(other, "unit_type"):
return self.__class__(
np.array(self).__rmul__(np.array(other)),
unit_type=self._unit_type,
unit=self._unit,
)
return self.__class__(np.array(self).__rmul__(np.array(other)), unit=self.unit * other.unit)
def __div__(self, other):
# pylint: disable=E1101
if not hasattr(other, "unit_type"):
return self.__class__(
np.array(self).__div__(np.array(other)),
unit_type=self._unit_type,
unit=self._unit,
)
return self.__class__(np.array(self).__div__(np.array(other)), unit=self.unit / other.unit)
def __truediv__(self, other):
# pylint: disable=E1101
if not hasattr(other, "unit_type"):
return self.__class__(
np.array(self).__truediv__(np.array(other)),
unit_type=self._unit_type,
unit=self._unit,
)
return self.__class__(np.array(self).__truediv__(np.array(other)), unit=self.unit / other.unit)
def __neg__(self):
return self.__class__(np.array(self).__neg__(), unit_type=self.unit_type, unit=self.unit)
def to(self, new_unit):
"""
Conversion to a new_unit.
Args:
new_unit:
New unit type.
Returns:
An ArrayWithUnit object in the new units.
Example usage:
>>> e = EnergyArray([1, 1.1], "Ha")
>>> e.to("eV")
array([ 27.21138386, 29.93252225]) eV
"""
return self.__class__(
np.array(self) * self.unit.get_conversion_factor(new_unit),
unit_type=self.unit_type,
unit=new_unit,
)
@property
def as_base_units(self):
"""
Returns this ArrayWithUnit in base SI units, including derived units.
Returns:
An ArrayWithUnit object in base SI units
"""
return self.to(self.unit.as_base_units[0])
# TODO abstract base class property?
@property
def supported_units(self):
"""
Supported units for specific unit type.
"""
return ALL_UNITS[self.unit_type]
# TODO abstract base class method?
def conversions(self):
"""
Returns a string showing the available conversions.
Useful tool in interactive mode.
"""
return "\n".join(str(self.to(unit)) for unit in self.supported_units)
def _my_partial(func, *args, **kwargs):
"""
Partial returns a partial object and therefore we cannot inherit class
methods defined in FloatWithUnit. This function calls partial and patches
the new class before returning.
"""
newobj = partial(func, *args, **kwargs)
# monkey patch
newobj.from_string = FloatWithUnit.from_string
return newobj
Energy = partial(FloatWithUnit, unit_type="energy")
"""
A float with an energy unit.
Args:
val (float): Value
unit (Unit): E.g., eV, kJ, etc. Must be valid unit or UnitError is raised.
"""
EnergyArray = partial(ArrayWithUnit, unit_type="energy")
Length = partial(FloatWithUnit, unit_type="length")
"""
A float with a length unit.
Args:
val (float): Value
unit (Unit): E.g., m, ang, bohr, etc. Must be valid unit or UnitError is
raised.
"""
LengthArray = partial(ArrayWithUnit, unit_type="length")
Mass = partial(FloatWithUnit, unit_type="mass")
"""
A float with a mass unit.
Args:
val (float): Value
unit (Unit): E.g., amu, kg, etc. Must be valid unit or UnitError is
raised.
"""
MassArray = partial(ArrayWithUnit, unit_type="mass")
Temp = partial(FloatWithUnit, unit_type="temperature")
"""
A float with a temperature unit.
Args:
val (float): Value
unit (Unit): E.g., K. Only K (kelvin) is supported.
"""
TempArray = partial(ArrayWithUnit, unit_type="temperature")
Time = partial(FloatWithUnit, unit_type="time")
"""
A float with a time unit.
Args:
val (float): Value
unit (Unit): E.g., s, min, h. Must be valid unit or UnitError is
raised.
"""
TimeArray = partial(ArrayWithUnit, unit_type="time")
Charge = partial(FloatWithUnit, unit_type="charge")
"""
A float with a charge unit.
Args:
val (float): Value
unit (Unit): E.g., C, e (electron charge). Must be valid unit or UnitError
is raised.
"""
ChargeArray = partial(ArrayWithUnit, unit_type="charge")
Memory = _my_partial(FloatWithUnit, unit_type="memory")
"""
A float with a memory unit.
Args:
val (float): Value
unit (Unit): E.g., Kb, Mb, Gb, Tb. Must be valid unit or UnitError
is raised.
"""
def obj_with_unit(obj, unit):
"""
Returns a `FloatWithUnit` instance if obj is scalar, a dictionary of
objects with units if obj is a dict, else an instance of
`ArrayWithUnit`.
Args:
unit: Specific units (eV, Ha, m, ang, etc.).
"""
unit_type = _UNAME2UTYPE[unit]
if isinstance(obj, numbers.Number):
return FloatWithUnit(obj, unit=unit, unit_type=unit_type)
if isinstance(obj, collections.abc.Mapping):
return {k: obj_with_unit(v, unit) for k, v in obj.items()}
return ArrayWithUnit(obj, unit=unit, unit_type=unit_type)
def unitized(unit):
"""
Useful decorator to assign units to the output of a function. You can also
use it to standardize the output units of a function that already returns
a FloatWithUnit or ArrayWithUnit. For sequences, all values in the sequences
are assigned the same unit. It works with Python sequences only. The creation
of numpy arrays loses all unit information. For mapping types, the values
are assigned units.
Args:
unit: Specific unit (eV, Ha, m, ang, etc.).
Example usage::
@unitized(unit="kg")
def get_mass():
return 123.45
"""
def wrap(f):
def wrapped_f(*args, **kwargs):
val = f(*args, **kwargs)
unit_type = _UNAME2UTYPE[unit]
if isinstance(val, (FloatWithUnit, ArrayWithUnit)):
return val.to(unit)
if isinstance(val, collections.abc.Sequence):
# TODO: why don't we return an ArrayWithUnit?
# This complicated way is to ensure the sequence type is
# preserved (list or tuple).
return val.__class__([FloatWithUnit(i, unit_type=unit_type, unit=unit) for i in val])
if isinstance(val, collections.abc.Mapping):
for k, v in val.items():
val[k] = FloatWithUnit(v, unit_type=unit_type, unit=unit)
elif isinstance(val, numbers.Number):
return FloatWithUnit(val, unit_type=unit_type, unit=unit)
elif val is None:
pass
else:
raise TypeError(f"Don't know how to assign units to {str(val)}")
return val
return wrapped_f
return wrap
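# Illustrative sketch (not in the original source; band_gap is a hypothetical
# function): the decorator converts whatever the function returns to the
# requested unit.
# @unitized("eV")
# def band_gap():
#     return Energy(0.1, "Ha")
# band_gap()  # -> FloatWithUnit of ~2.7211 eV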
if __name__ == "__main__":
import doctest
doctest.testmod()
|
materialsproject/pymatgen
|
pymatgen/core/units.py
|
Python
|
mit
| 27,123
|
[
"pymatgen"
] |
ca6cc31a2d2b9a5bc8c44c17e2250efbfcde153212e19c77ea727a942bb4b4f8
|
from math import floor
from world import World
import Queue
import SocketServer
import datetime
import random
import re
import requests
import sqlite3
import sys
import threading
import time
import traceback
DEFAULT_HOST = '0.0.0.0'
DEFAULT_PORT = 4080
DB_PATH = 'craft.db'
LOG_PATH = 'log.txt'
CHUNK_SIZE = 32
BUFFER_SIZE = 4096
COMMIT_INTERVAL = 5
AUTH_REQUIRED = True
AUTH_URL = 'https://craft.michaelfogleman.com/api/1/access'
DAY_LENGTH = 600
SPAWN_POINT = (0, 0, 0, 0, 0)
RATE_LIMIT = False
RECORD_HISTORY = False
INDESTRUCTIBLE_ITEMS = set([16])
ALLOWED_ITEMS = set([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
17, 18, 19, 20, 21, 22, 23,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63])
AUTHENTICATE = 'A'
BLOCK = 'B'
CHUNK = 'C'
DISCONNECT = 'D'
KEY = 'K'
LIGHT = 'L'
NICK = 'N'
POSITION = 'P'
REDRAW = 'R'
SIGN = 'S'
TALK = 'T'
TIME = 'E'
VERSION = 'V'
YOU = 'U'
try:
from config import *
except ImportError:
pass
def log(*args):
now = datetime.datetime.utcnow()
line = ' '.join(map(str, (now,) + args))
print line
with open(LOG_PATH, 'a') as fp:
fp.write('%s\n' % line)
def chunked(x):
return int(floor(round(x) / CHUNK_SIZE))
def packet(*args):
return '%s\n' % ','.join(map(str, args))
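# Illustrative note (not in the original source): chunked() maps a world
# coordinate to its chunk index (CHUNK_SIZE blocks per chunk), and packet()
# builds one wire-protocol line, e.g. packet(BLOCK, 0, 0, 1, 2, 3, 4)
# -> 'B,0,0,1,2,3,4\n'.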
class RateLimiter(object):
def __init__(self, rate, per):
self.rate = float(rate)
self.per = float(per)
self.allowance = self.rate
self.last_check = time.time()
def tick(self):
if not RATE_LIMIT:
return False
now = time.time()
elapsed = now - self.last_check
self.last_check = now
self.allowance += elapsed * (self.rate / self.per)
if self.allowance > self.rate:
self.allowance = self.rate
if self.allowance < 1:
return True # too fast
else:
self.allowance -= 1
return False # okay
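# Token-bucket sketch (descriptive note, not in the original source):
# RateLimiter(100, 5) allows bursts of up to 100 ticks and refills at
# 100/5 = 20 ticks per second; tick() returns True when the caller should be
# throttled (enforced only when RATE_LIMIT is set).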
class Server(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
allow_reuse_address = True
daemon_threads = True
class Handler(SocketServer.BaseRequestHandler):
def setup(self):
self.position_limiter = RateLimiter(100, 5)
self.limiter = RateLimiter(1000, 10)
self.version = None
self.client_id = None
self.user_id = None
self.nick = None
self.queue = Queue.Queue()
self.running = True
self.start()
def handle(self):
model = self.server.model
model.enqueue(model.on_connect, self)
try:
buf = []
while True:
data = self.request.recv(BUFFER_SIZE)
if not data:
break
buf.extend(data.replace('\r\n', '\n'))
while '\n' in buf:
index = buf.index('\n')
line = ''.join(buf[:index])
buf = buf[index + 1:]
if not line:
continue
if line[0] == POSITION:
if self.position_limiter.tick():
log('RATE', self.client_id)
self.stop()
return
else:
if self.limiter.tick():
log('RATE', self.client_id)
self.stop()
return
model.enqueue(model.on_data, self, line)
finally:
model.enqueue(model.on_disconnect, self)
def finish(self):
self.running = False
def stop(self):
self.request.close()
def start(self):
thread = threading.Thread(target=self.run)
thread.setDaemon(True)
thread.start()
def run(self):
while self.running:
try:
buf = []
try:
buf.append(self.queue.get(timeout=5))
try:
while True:
buf.append(self.queue.get(False))
except Queue.Empty:
pass
except Queue.Empty:
continue
data = ''.join(buf)
self.request.sendall(data)
except Exception:
self.request.close()
raise
def send_raw(self, data):
if data:
self.queue.put(data)
def send(self, *args):
self.send_raw(packet(*args))
class Model(object):
def __init__(self, seed):
self.world = World(seed)
self.clients = []
self.queue = Queue.Queue()
self.commands = {
AUTHENTICATE: self.on_authenticate,
CHUNK: self.on_chunk,
BLOCK: self.on_block,
LIGHT: self.on_light,
POSITION: self.on_position,
TALK: self.on_talk,
SIGN: self.on_sign,
VERSION: self.on_version,
}
self.patterns = [
(re.compile(r'^/nick(?:\s+([^,\s]+))?$'), self.on_nick),
(re.compile(r'^/spawn$'), self.on_spawn),
(re.compile(r'^/goto(?:\s+(\S+))?$'), self.on_goto),
(re.compile(r'^/pq\s+(-?[0-9]+)\s*,?\s*(-?[0-9]+)$'), self.on_pq),
(re.compile(r'^/help(?:\s+(\S+))?$'), self.on_help),
(re.compile(r'^/list$'), self.on_list),
]
def start(self):
thread = threading.Thread(target=self.run)
thread.setDaemon(True)
thread.start()
def run(self):
self.connection = sqlite3.connect(DB_PATH)
self.create_tables()
self.commit()
while True:
try:
if time.time() - self.last_commit > COMMIT_INTERVAL:
self.commit()
self.dequeue()
except Exception:
traceback.print_exc()
def enqueue(self, func, *args, **kwargs):
self.queue.put((func, args, kwargs))
def dequeue(self):
try:
func, args, kwargs = self.queue.get(timeout=5)
func(*args, **kwargs)
except Queue.Empty:
pass
def execute(self, *args, **kwargs):
return self.connection.execute(*args, **kwargs)
def commit(self):
self.last_commit = time.time()
self.connection.commit()
def create_tables(self):
queries = [
'create table if not exists block ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
'create unique index if not exists block_pqxyz_idx on '
' block (p, q, x, y, z);',
'create table if not exists light ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
'create unique index if not exists light_pqxyz_idx on '
' light (p, q, x, y, z);',
'create table if not exists sign ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' face int not null,'
' text text not null'
');',
'create index if not exists sign_pq_idx on sign (p, q);',
'create unique index if not exists sign_xyzface_idx on '
' sign (x, y, z, face);',
'create table if not exists block_history ('
' timestamp real not null,'
' user_id int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
]
for query in queries:
self.execute(query)
def get_default_block(self, x, y, z):
p, q = chunked(x), chunked(z)
chunk = self.world.get_chunk(p, q)
return chunk.get((x, y, z), 0)
def get_block(self, x, y, z):
query = (
'select w from block where '
'p = :p and q = :q and x = :x and y = :y and z = :z;'
)
p, q = chunked(x), chunked(z)
rows = list(self.execute(query, dict(p=p, q=q, x=x, y=y, z=z)))
if rows:
return rows[0][0]
return self.get_default_block(x, y, z)
def next_client_id(self):
result = 1
client_ids = set(x.client_id for x in self.clients)
while result in client_ids:
result += 1
return result
def on_connect(self, client):
client.client_id = self.next_client_id()
client.nick = 'guest%d' % client.client_id
log('CONN', client.client_id, *client.client_address)
client.position = SPAWN_POINT
self.clients.append(client)
client.send(YOU, client.client_id, *client.position)
client.send(TIME, time.time(), DAY_LENGTH)
client.send(TALK, 'Welcome to Craft!')
client.send(TALK, 'Type "/help" for a list of commands.')
self.send_position(client)
self.send_positions(client)
self.send_nick(client)
self.send_nicks(client)
def on_data(self, client, data):
#log('RECV', client.client_id, data)
args = data.split(',')
command, args = args[0], args[1:]
if command in self.commands:
func = self.commands[command]
func(client, *args)
def on_disconnect(self, client):
log('DISC', client.client_id, *client.client_address)
self.clients.remove(client)
self.send_disconnect(client)
self.send_talk('%s has disconnected from the server.' % client.nick)
def on_version(self, client, version):
if client.version is not None:
return
version = int(version)
if version != 1:
client.stop()
return
client.version = version
# TODO: client.start() here
def on_authenticate(self, client, username, access_token):
user_id = None
if username and access_token:
payload = {
'username': username,
'access_token': access_token,
}
response = requests.post(AUTH_URL, data=payload)
if response.status_code == 200 and response.text.isdigit():
user_id = int(response.text)
client.user_id = user_id
if user_id is None:
client.nick = 'guest%d' % client.client_id
client.send(TALK, 'Visit craft.michaelfogleman.com to register!')
else:
client.nick = username
self.send_nick(client)
# TODO: send a 'has left' message if the user was already authenticated
self.send_talk('%s has joined the game.' % client.nick)
def on_chunk(self, client, p, q, key=0):
packets = []
p, q, key = map(int, (p, q, key))
query = (
'select rowid, x, y, z, w from block where '
'p = :p and q = :q and rowid > :key;'
)
rows = self.execute(query, dict(p=p, q=q, key=key))
max_rowid = 0
blocks = 0
for rowid, x, y, z, w in rows:
blocks += 1
packets.append(packet(BLOCK, p, q, x, y, z, w))
max_rowid = max(max_rowid, rowid)
query = (
'select x, y, z, w from light where '
'p = :p and q = :q;'
)
rows = self.execute(query, dict(p=p, q=q))
lights = 0
for x, y, z, w in rows:
lights += 1
packets.append(packet(LIGHT, p, q, x, y, z, w))
query = (
'select x, y, z, face, text from sign where '
'p = :p and q = :q;'
)
rows = self.execute(query, dict(p=p, q=q))
signs = 0
for x, y, z, face, text in rows:
signs += 1
packets.append(packet(SIGN, p, q, x, y, z, face, text))
if blocks:
packets.append(packet(KEY, p, q, max_rowid))
if blocks or lights or signs:
packets.append(packet(REDRAW, p, q))
packets.append(packet(CHUNK, p, q))
client.send_raw(''.join(packets))
def on_block(self, client, x, y, z, w):
x, y, z, w = map(int, (x, y, z, w))
p, q = chunked(x), chunked(z)
previous = self.get_block(x, y, z)
message = None
if AUTH_REQUIRED and client.user_id is None:
message = 'Only logged in users are allowed to build.'
elif y <= 0 or y > 255:
message = 'Invalid block coordinates.'
elif w not in ALLOWED_ITEMS:
message = 'That item is not allowed.'
elif w and previous:
message = 'Cannot create blocks in a non-empty space.'
elif not w and not previous:
message = 'That space is already empty.'
elif previous in INDESTRUCTIBLE_ITEMS:
message = 'Cannot destroy that type of block.'
if message is not None:
client.send(BLOCK, p, q, x, y, z, previous)
client.send(REDRAW, p, q)
client.send(TALK, message)
return
query = (
'insert into block_history (timestamp, user_id, x, y, z, w) '
'values (:timestamp, :user_id, :x, :y, :z, :w);'
)
if RECORD_HISTORY:
self.execute(query, dict(timestamp=time.time(),
user_id=client.user_id, x=x, y=y, z=z, w=w))
query = (
'insert or replace into block (p, q, x, y, z, w) '
'values (:p, :q, :x, :y, :z, :w);'
)
self.execute(query, dict(p=p, q=q, x=x, y=y, z=z, w=w))
self.send_block(client, p, q, x, y, z, w)
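# Descriptive note (an interpretation of the loop below, not in the original
# source): blocks on a chunk border are mirrored into the bordering chunk
# records with a negated w as a sentinel, so clients holding the neighboring
# chunk redraw the shared edge.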
for dx in range(-1, 2):
for dz in range(-1, 2):
if dx == 0 and dz == 0:
continue
if dx and chunked(x + dx) == p:
continue
if dz and chunked(z + dz) == q:
continue
np, nq = p + dx, q + dz
self.execute(query, dict(p=np, q=nq, x=x, y=y, z=z, w=-w))
self.send_block(client, np, nq, x, y, z, -w)
if w == 0:
query = (
'delete from sign where '
'x = :x and y = :y and z = :z;'
)
self.execute(query, dict(x=x, y=y, z=z))
query = (
'update light set w = 0 where '
'x = :x and y = :y and z = :z;'
)
self.execute(query, dict(x=x, y=y, z=z))
def on_light(self, client, x, y, z, w):
x, y, z, w = map(int, (x, y, z, w))
p, q = chunked(x), chunked(z)
block = self.get_block(x, y, z)
message = None
if AUTH_REQUIRED and client.user_id is None:
message = 'Only logged in users are allowed to build.'
elif block == 0:
message = 'Lights must be placed on a block.'
elif w < 0 or w > 15:
message = 'Invalid light value.'
if message is not None:
# TODO: client.send(LIGHT, p, q, x, y, z, previous)
client.send(REDRAW, p, q)
client.send(TALK, message)
return
query = (
'insert or replace into light (p, q, x, y, z, w) '
'values (:p, :q, :x, :y, :z, :w);'
)
self.execute(query, dict(p=p, q=q, x=x, y=y, z=z, w=w))
self.send_light(client, p, q, x, y, z, w)
def on_sign(self, client, x, y, z, face, *args):
if AUTH_REQUIRED and client.user_id is None:
client.send(TALK, 'Only logged in users are allowed to build.')
return
text = ','.join(args)
x, y, z, face = map(int, (x, y, z, face))
if y <= 0 or y > 255:
return
if face < 0 or face > 7:
return
if len(text) > 48:
return
p, q = chunked(x), chunked(z)
if text:
query = (
'insert or replace into sign (p, q, x, y, z, face, text) '
'values (:p, :q, :x, :y, :z, :face, :text);'
)
self.execute(query,
dict(p=p, q=q, x=x, y=y, z=z, face=face, text=text))
else:
query = (
'delete from sign where '
'x = :x and y = :y and z = :z and face = :face;'
)
self.execute(query, dict(x=x, y=y, z=z, face=face))
self.send_sign(client, p, q, x, y, z, face, text)
def on_position(self, client, x, y, z, rx, ry):
x, y, z, rx, ry = map(float, (x, y, z, rx, ry))
client.position = (x, y, z, rx, ry)
self.send_position(client)
def on_talk(self, client, *args):
text = ','.join(args)
if text.startswith('/'):
for pattern, func in self.patterns:
match = pattern.match(text)
if match:
func(client, *match.groups())
break
else:
client.send(TALK, 'Unrecognized command: "%s"' % text)
elif text.startswith('@'):
nick = text[1:].split(' ', 1)[0]
for other in self.clients:
if other.nick == nick:
client.send(TALK, '%s> %s' % (client.nick, text))
other.send(TALK, '%s> %s' % (client.nick, text))
break
else:
client.send(TALK, 'Unrecognized nick: "%s"' % nick)
else:
self.send_talk('%s> %s' % (client.nick, text))
def on_nick(self, client, nick=None):
if AUTH_REQUIRED:
client.send(TALK, 'You cannot change your nick on this server.')
return
if nick is None:
client.send(TALK, 'Your nickname is %s' % client.nick)
else:
self.send_talk('%s is now known as %s' % (client.nick, nick))
client.nick = nick
self.send_nick(client)
def on_spawn(self, client):
client.position = SPAWN_POINT
client.send(YOU, client.client_id, *client.position)
self.send_position(client)
def on_goto(self, client, nick=None):
if nick is None:
clients = [x for x in self.clients if x != client]
other = random.choice(clients) if clients else None
else:
nicks = dict((client.nick, client) for client in self.clients)
other = nicks.get(nick)
if other:
client.position = other.position
client.send(YOU, client.client_id, *client.position)
self.send_position(client)
def on_pq(self, client, p, q):
p, q = map(int, (p, q))
if abs(p) > 1000 or abs(q) > 1000:
return
client.position = (p * CHUNK_SIZE, 0, q * CHUNK_SIZE, 0, 0)
client.send(YOU, client.client_id, *client.position)
self.send_position(client)
def on_help(self, client, topic=None):
if topic is None:
client.send(TALK, 'Type "t" to chat. Type "/" to type commands:')
client.send(TALK, '/goto [NAME], /help [TOPIC], /list, /login NAME, /logout, /nick')
client.send(TALK, '/offline [FILE], /online HOST [PORT], /pq P Q, /spawn, /view N')
return
topic = topic.lower().strip()
if topic == 'goto':
client.send(TALK, 'Help: /goto [NAME]')
client.send(TALK, 'Teleport to another user.')
client.send(TALK, 'If NAME is unspecified, a random user is chosen.')
elif topic == 'list':
client.send(TALK, 'Help: /list')
client.send(TALK, 'Display a list of connected users.')
elif topic == 'login':
client.send(TALK, 'Help: /login NAME')
client.send(TALK, 'Switch to another registered username.')
client.send(TALK, 'The login server will be re-contacted. The username is case-sensitive.')
elif topic == 'logout':
client.send(TALK, 'Help: /logout')
client.send(TALK, 'Unauthenticate and become a guest user.')
client.send(TALK, 'Automatic logins will not occur again until the /login command is re-issued.')
elif topic == 'offline':
client.send(TALK, 'Help: /offline [FILE]')
client.send(TALK, 'Switch to offline mode.')
client.send(TALK, 'FILE specifies the save file to use and defaults to "craft".')
elif topic == 'online':
client.send(TALK, 'Help: /online HOST [PORT]')
client.send(TALK, 'Connect to the specified server.')
elif topic == 'nick':
client.send(TALK, 'Help: /nick [NICK]')
client.send(TALK, 'Get or set your nickname.')
elif topic == 'pq':
client.send(TALK, 'Help: /pq P Q')
client.send(TALK, 'Teleport to the specified chunk.')
elif topic == 'spawn':
client.send(TALK, 'Help: /spawn')
client.send(TALK, 'Teleport back to the spawn point.')
elif topic == 'view':
client.send(TALK, 'Help: /view N')
client.send(TALK, 'Set viewing distance, 1 - 24.')
def on_list(self, client):
client.send(TALK,
'Players: %s' % ', '.join(x.nick for x in self.clients))
def send_positions(self, client):
for other in self.clients:
if other == client:
continue
client.send(POSITION, other.client_id, *other.position)
def send_position(self, client):
for other in self.clients:
if other == client:
continue
other.send(POSITION, client.client_id, *client.position)
def send_nicks(self, client):
for other in self.clients:
if other == client:
continue
client.send(NICK, other.client_id, other.nick)
def send_nick(self, client):
for other in self.clients:
other.send(NICK, client.client_id, client.nick)
def send_disconnect(self, client):
for other in self.clients:
if other == client:
continue
other.send(DISCONNECT, client.client_id)
def send_block(self, client, p, q, x, y, z, w):
for other in self.clients:
if other == client:
continue
other.send(BLOCK, p, q, x, y, z, w)
other.send(REDRAW, p, q)
def send_light(self, client, p, q, x, y, z, w):
for other in self.clients:
if other == client:
continue
other.send(LIGHT, p, q, x, y, z, w)
other.send(REDRAW, p, q)
def send_sign(self, client, p, q, x, y, z, face, text):
for other in self.clients:
if other == client:
continue
other.send(SIGN, p, q, x, y, z, face, text)
def send_talk(self, text):
log(text)
for client in self.clients:
client.send(TALK, text)
def cleanup():
world = World(None)
conn = sqlite3.connect(DB_PATH)
query = 'select x, y, z from block order by rowid desc limit 1;'
last = list(conn.execute(query))[0]
query = 'select distinct p, q from block;'
chunks = list(conn.execute(query))
count = 0
total = 0
delete_query = 'delete from block where x = %d and y = %d and z = %d;'
print 'begin;'
for p, q in chunks:
chunk = world.create_chunk(p, q)
query = 'select x, y, z, w from block where p = :p and q = :q;'
rows = conn.execute(query, {'p': p, 'q': q})
for x, y, z, w in rows:
if chunked(x) != p or chunked(z) != q:
continue
total += 1
if (x, y, z) == last:
continue
original = chunk.get((x, y, z), 0)
if w == original or original in INDESTRUCTIBLE_ITEMS:
count += 1
print delete_query % (x, y, z)
conn.close()
print 'commit;'
print >> sys.stderr, '%d of %d blocks will be cleaned up' % (count, total)
def main():
if len(sys.argv) == 2 and sys.argv[1] == 'cleanup':
cleanup()
return
host, port = DEFAULT_HOST, DEFAULT_PORT
if len(sys.argv) > 1:
host = sys.argv[1]
if len(sys.argv) > 2:
port = int(sys.argv[2])
log('SERV', host, port)
model = Model(None)
model.start()
server = Server((host, port), Handler)
server.model = model
server.serve_forever()
if __name__ == '__main__':
main()
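# Usage sketch (grounded in main() above; assumes a Python 2 environment with
# the sibling world module and the requests package available):
#   python server.py                  # listen on DEFAULT_HOST:DEFAULT_PORT
#   python server.py HOST [PORT]      # bind a custom address
#   python server.py cleanup          # print SQL that prunes redundant blocks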
|
fogleman/Craft
|
server.py
|
Python
|
mit
| 24,727
|
[
"VisIt"
] |
ffb1928cfb81222ac346b4674353123c8bc402939078eab65f9d7ac2d7ec4e06
|
# Copyright 2015 - 2018 Altuğ Karakurt & Sertan Şentürk
#
# This file is part of tomato: https://github.com/sertansenturk/tomato/
#
# tomato is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License v3.0
# along with this program. If not, see http://www.gnu.org/licenses/
#
# If you are using this extractor please cite the following paper:
#
# Karakurt, A., Şentürk S., and Serra X. (2016). MORTY: A toolbox for mode
# recognition and tonic identification. In Proceedings of 3rd International
# Digital Libraries for Musicology Workshop (DLfM 2016). pages 9-16,
# New York, NY, USA
import copy
import json
import pickle
import numpy as np
from ...converter import Converter
from ..pitchdistribution import PitchDistribution
from .inputparser import InputParser
from .knn import KNN
class KNNClassifier(InputParser):
def __init__(self, step_size=7.5, kernel_width=15.0, feature_type='pcd',
model=None):
"""--------------------------------------------------------------------
These attributes are wrapped as an object since they are used in both the
training and estimation stages and must be consistent across both processes
-----------------------------------------------------------------------
step_size : Step size of the distribution bins
kernel_width : Standard deviation of the Gaussian kernel used to
smooth the distributions. For further details,
see generate_pd() of ModeFunctions.
feature_type : The feature type to be used in training and testing
("pd" for pitch distribution, "pcd" for pitch
class distribution)
model : Pre-trained model
--------------------------------------------------------------------"""
super(KNNClassifier, self).__init__(
step_size=step_size, kernel_width=kernel_width,
feature_type=feature_type, model=model)
def train(self, pitches, tonics, modes, sources=None, model_type='multi'):
if model_type == 'single':
return self._train_single_distrib_per_mode(
pitches, tonics, modes, sources=sources)
if model_type == 'multi':
return self._train_multi_distrib_per_mode(
pitches, tonics, modes, sources=sources)
raise ValueError("Unknown training model")
def _train_single_distrib_per_mode(self, pitches, tonics, modes,
sources=None):
"""--------------------------------------------------------------------
For the mode trainings, the requirements are a set of recordings with
annotated tonics for each mode under consideration. This function only
expects the recordings' pitch tracks and corresponding tonics as lists.
The two lists should be indexed in parallel, so the tonic of ith pitch
track in the pitch track list should be the ith element of tonic list.
Once training is completed, a single PitchDistribution object is generated
per mode and stored in self.model (it can later be saved, e.g. with
model_to_json). For loading these objects and other relevant information
about the data structure, see the PitchDistribution class.
-----------------------------------------------------------------------
pitches : List of pitch tracks or the list of files with
stored pitch tracks (i.e. single-column
lists/numpy arrays/files with frequencies)
tonics : List of annotated tonic frequencies of recordings
modes : Name of the modes of each training sample.
sources : Optional list of identifiers of the recordings.
--------------------------------------------------------------------"""
assert len(pitches) == len(modes) == len(tonics), \
'The inputs should have the same length!'
if sources is None:  # allow training without source identifiers
sources = [None] * len(pitches)
# get the pitch tracks for each mode and convert them to cent unit
tmp_model = {m: {'sources': [], 'cent_pitch': []} for m in set(modes)}
for p, t, m, s in zip(pitches, tonics, modes, sources):
# parse the pitch track from txt file, list or numpy array and
# normalize with respect to annotated tonic
pitch_cent = self._parse_pitch_input(p, t)
# convert to cent track and append to the mode data
tmp_model[m]['cent_pitch'].extend(pitch_cent)
tmp_model[m]['sources'].append(s)
# compute the feature for each model from the normalized pitch tracks
for data_point in tmp_model.values():
data_point['feature'] = PitchDistribution.from_cent_pitch(
data_point.pop('cent_pitch', None),
kernel_width=self.kernel_width, step_size=self.step_size)
# convert to pitch-class distribution if requested
if self.feature_type == 'pcd':
data_point['feature'].to_pcd()
# make the model a list of dictionaries by collapsing the mode keys
# inside the values
model = []
for mode_name, data_point in tmp_model.items():
data_point['mode'] = mode_name
model.append(data_point)
self.model = model
def _train_multi_distrib_per_mode(self, pitches, tonics, modes,
sources=None):
"""--------------------------------------------------------------------
For the mode trainings, the requirements are a set of recordings with
annotated tonics for each mode under consideration. This function only
expects the recordings' pitch tracks and corresponding tonics as lists.
The two lists should be indexed in parallel, so the tonic of ith pitch
track in the pitch track list should be the ith element of tonic list.
Each pitch track is normalized with respect to its annotated tonic and
converted to a pitch distribution. Each of these distributions is appended
to a list together with its source, tonic and mode, so each mode is
represented by as many sample points as there are recordings of it. The
result is a list of structured dictionaries, one per recording, and this
is what is stored in self.model.
-----------------------------------------------------------------------
pitches : List of pitch tracks or the list of files with
stored pitch tracks (i.e. single-column
lists/numpy arrays/files with frequencies)
tonics : List of annotated tonic frequencies of recordings
modes : Name of the modes of each training sample.
sources : Optional list of identifiers of the recordings.
--------------------------------------------------------------------"""
assert len(pitches) == len(modes) == len(tonics), \
'The inputs should have the same length!'
if sources is None:  # allow training without source identifiers
sources = [None] * len(pitches)
# get the pitch tracks for each mode and convert them to cent unit
model = []
for p, t, m, s in zip(pitches, tonics, modes, sources):
# parse the pitch track from txt file, list or numpy array and
# normalize with respect to annotated tonic
pitch_cent = self._parse_pitch_input(p, t)
feature = PitchDistribution.from_cent_pitch(
pitch_cent, kernel_width=self.kernel_width,
step_size=self.step_size)
# convert to pitch-class distribution if requested
if self.feature_type == 'pcd':
feature.to_pcd()
data_point = {'source': s, 'tonic': t, 'mode': m,
'feature': feature}
# convert to cent track and append to the mode data
model.append(data_point)
self.model = model
def identify_tonic(self, test_input, mode, min_peak_ratio=0.1,
distance_method='bhat', k_neighbor=15, rank=1):
"""--------------------------------------------------------------------
Tonic Identification: The mode of the recording is known and the
tonic is to be estimated.
:param test_input: - precomputed feature (PD or PCD in Hz)
- pitch track in Hz (list or numpy array)
:param mode: input mode label
:param min_peak_ratio: The minimum ratio between the max peak value and
the value of a detected peak
:param distance_method: distance used in KNN
:param k_neighbor: number of neighbors to select in KNN classification
:param rank: number of estimations to return
:return: ranked tonic estimations
--------------------------------------------------------------------"""
test_feature = self._parse_tonic_and_joint_estimate_input(test_input)
# Tonic Estimation
estimations = self._estimate(
test_feature, est_tonic=True, mode=mode,
min_peak_ratio=min_peak_ratio, distance_method=distance_method,
k_neighbor=k_neighbor, rank=rank)
# keep only the tonic estimations; the mode was already given
tonics_ranked = [(e[0][0], e[1]) for e in estimations]
return tonics_ranked
def estimate_tonic(self, test_input, mode, min_peak_ratio=0.1,
distance_method='bhat', k_neighbor=1, rank=1):
"""
Alias of "identify_tonic" method. See the documentation of
"identify_tonic" for more information.
"""
return self.identify_tonic(
test_input, mode, min_peak_ratio=min_peak_ratio,
distance_method=distance_method, k_neighbor=k_neighbor, rank=rank)
def recognize_mode(self, feature_in, tonic=None, distance_method='bhat',
k_neighbor=15, rank=1):
"""--------------------------------------------------------------------
Mode recognition: The tonic of the recording is known and the mode is
to be estimated.
:param feature_in: - precomputed feature (PitchDistribution object)
- pitch track (list or numpy array)
:param tonic: tonic frequency (float). It is needed if the feature_in
has not been normalized with respect to the tonic earlier
:param distance_method: distance used in KNN
:param k_neighbor: number of neighbors to select in KNN classification
:param rank: number of estimations to return
:return: ranked mode estimations
--------------------------------------------------------------------"""
test_feature = self._parse_mode_estimate_input(feature_in, tonic)
# Mode Estimation
estimations = self._estimate(
test_feature, est_tonic=False, mode=None,
distance_method=distance_method, k_neighbor=k_neighbor, rank=rank)
# remove the dummy tonic estimation
modes_ranked = [(e[0][1], e[1]) for e in estimations]
return modes_ranked
def estimate_mode(self, feature_in, tonic=None, distance_method='bhat',
k_neighbor=15, rank=1):
return self.recognize_mode(
feature_in, tonic=tonic, distance_method=distance_method,
k_neighbor=k_neighbor, rank=rank)
def estimate_joint(self, test_input, min_peak_ratio=0.1,
distance_method='bhat', k_neighbor=15, rank=1):
"""--------------------------------------------------------------------
Joint estimation: Estimate both the tonic and mode together
:param test_input: - precomputed feature (PD or PCD in Hz)
- pitch track in Hz (list or numpy array)
:param min_peak_ratio: The minimum ratio between the max peak value and
the value of a detected peak
:param distance_method: distance used in KNN
:param k_neighbor: number of neighbors to select in KNN classification
:param rank: number of estimations to return
:return: ranked mode and tonic estimations
--------------------------------------------------------------------"""
test_feature = self._parse_tonic_and_joint_estimate_input(test_input)
# Mode Estimation
joint_estimations = self._estimate(
test_feature, est_tonic=True, mode=None,
min_peak_ratio=min_peak_ratio, distance_method=distance_method,
k_neighbor=k_neighbor, rank=rank)
return joint_estimations
def _estimate(self, test_feature, mode=None, est_tonic=True,
min_peak_ratio=0.1, distance_method='bhat', k_neighbor=15,
rank=1):
assert est_tonic or mode is None, 'Nothing to estimate.'
if est_tonic is True:
# find the tonic candidates of the input feature
test_feature, tonic_cands, peak_idx = self._get_tonic_candidates(
test_feature, min_peak_ratio=min_peak_ratio)
else:
# dummy assign the first index
tonic_cands = np.array([test_feature.ref_freq])
peak_idx = np.array([0])
training_features, training_modes = self._get_training_model(mode)
dist_mat = KNN.generate_distance_matrix(
test_feature, peak_idx, training_features,
distance_method=distance_method)
# sort results
sorted_idx = np.argsort(dist_mat, axis=None)
sorted_dists = np.sort(dist_mat, axis=None)
sorted_tonic_cand_idx, sorted_mode_idx = np.unravel_index(
sorted_idx, dist_mat.shape)
# convert from sorted index to sorted tonic frequency and mode
sorted_tonics = tonic_cands[sorted_tonic_cand_idx]
sorted_modes = training_modes[sorted_mode_idx]
sorted_pairs = [((t, m), d) for t, m, d in
zip(sorted_tonics, sorted_modes, sorted_dists)]
        # there might not be enough options to get estimations up to the
        # requested rank. Max is the number of unique sorted pairs
        max_rank = len(set(sp[0] for sp in sorted_pairs))
# compute ranked estimations
ranked_pairs = []
for _ in range(min(rank, max_rank)):
cand_pairs = KNN.get_nearest_neighbors(sorted_pairs, k_neighbor)
estimation, sorted_pairs = KNN.classify(cand_pairs, sorted_pairs)
ranked_pairs.append(estimation)
return ranked_pairs
@staticmethod
def _get_tonic_candidates(test_feature, min_peak_ratio=0.1):
        # find the global minimum and shift the distribution there so
        # peak detection does not fail to locate a peak at the boundary
        # of octave-wrapped features. For features that are not
        # octave-wrapped this step is harmless.
shift_feature = copy.deepcopy(test_feature)
global_minima_idx = np.argmin(shift_feature.vals)
shift_feature.shift(global_minima_idx)
# get the peaks of the feature as the tonic candidate indices and
# compute the stable frequencies from the peak indices
peak_idx = shift_feature.detect_peaks(min_peak_ratio=min_peak_ratio)[0]
peaks_cent = shift_feature.bins[peak_idx]
freqs = Converter.cent_to_hz(peaks_cent, shift_feature.ref_freq)
# return the shifted feature, stable frequencies and their
# corresponding index in the shifted feature
return shift_feature, freqs, peak_idx
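    # Hedged note (added comment): Converter.cent_to_hz maps cent offsets
    # back to Hz relative to ref_freq; e.g. a peak 1200 cents above a
    # 220 Hz reference corresponds to 440 Hz.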
def _get_training_model(self, mode):
if mode is None:
training_features = [m['feature'] for m in self.model]
feature_modes = np.array([m['mode'] for m in self.model])
else:
training_features = [m['feature'] for m in self.model
if m['mode'] == mode]
# create dummy array with annotated mode
feature_modes = np.array(
[mode for _ in range(len(training_features))])
return training_features, feature_modes
def model_from_pickle(self, input_str):
try: # file given
self.model = pickle.load(open(input_str, 'rb'))
        except IOError:  # pickle string given
            self.model = pickle.loads(input_str)
@staticmethod
def model_to_pickle(model, file_name=None):
if file_name is None:
return pickle.dumps(model)
return pickle.dump(model, open(file_name, 'wb'))
def model_from_json(self, file_name):
"""--------------------------------------------------------------------
        Loads the training model from a JSON file.
-----------------------------------------------------------------------
file_name : The filename of the JSON file
--------------------------------------------------------------------
"""
try:
temp_model = json.load(open(file_name, 'r'))
except IOError: # json string
temp_model = json.loads(file_name)
for tm in temp_model:
tm['feature'] = tm['feature'] if isinstance(tm['feature'], dict) \
else tm['feature'][0]
tm['feature'] = PitchDistribution.from_dict(tm['feature'])
self.model = temp_model
@staticmethod
def model_to_json(model, file_name=None):
"""--------------------------------------------------------------------
Saves the training model to a JSON file.
-----------------------------------------------------------------------
model : Training model
file_name : The file path of the JSON file to be created. None to
return a json string
--------------------------------------------------------------------"""
temp_model = copy.deepcopy(model)
for tm in temp_model:
try:
tm['feature'] = tm['feature'].to_dict()
except AttributeError: # already a dict
assert isinstance(tm['feature'], dict), \
'The feature should have been a dict'
if file_name is None:
return json.dumps(temp_model, indent=4)
return json.dump(temp_model, open(file_name, 'w'), indent=4)
|
sertansenturk/tomato
|
src/tomato/audio/makamtonic/knnclassifier.py
|
Python
|
agpl-3.0
| 18,831
|
[
"Gaussian"
] |
4c2e0c4a84112f84a3046b2c3283047c6cfbe787fe8f452d3ee4d32f570b2471
|
import pint
__all__ = ['ureg', 'Quantity', 'Q_', 'AVOGADROS_NUMBER', 'N_A', 'hc']
ureg = pint.UnitRegistry()
Quantity = ureg.Quantity
Q_ = Quantity
# Avogadro constant
# AVOGADROS_NUMBER = 6.022140857e+23
AVOGADROS_NUMBER = (1.0 * ureg.avogadro_number).to('dimensionless').magnitude
N_A = AVOGADROS_NUMBER
# (Planck constant) * (speed of light) [joule meter]
# hc = 6.62607015e-34 * 299792458 = 1.9864458571489289e-25
hc = (1.0 * ureg.planck_constant * ureg.speed_of_light).to(ureg.joule * ureg.meter).magnitude
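# Hedged usage sketch (added comment, not in the original module): the
# photon energy at an assumed wavelength of 500 nm is E = hc / wavelength,
# i.e. hc / 500e-9 ~= 3.97e-19 J.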
|
ecell/bioimaging
|
scopyon/constants.py
|
Python
|
bsd-3-clause
| 514
|
[
"Avogadro"
] |
42a4ed490edce95e650b1d1ecb3532e56ab32c1c5d299a587f963588e22acf41
|
"""Bayesian Gaussian Mixture Models and
Dirichlet Process Gaussian Mixture Models"""
from __future__ import print_function
# Author: Alexandre Passos (alexandre.tp@gmail.com)
# Bertrand Thirion <bertrand.thirion@inria.fr>
#
# Based on mixture.py by:
# Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
import numpy as np
from scipy.special import digamma as _digamma, gammaln as _gammaln
from scipy import linalg
from scipy.spatial.distance import cdist
from ..externals.six.moves import xrange
from ..utils import check_random_state
from ..utils.extmath import logsumexp, pinvh, squared_norm
from ..utils.validation import check_is_fitted
from .. import cluster
from .gmm import GMM
def digamma(x):
return _digamma(x + np.finfo(np.float32).eps)
def gammaln(x):
return _gammaln(x + np.finfo(np.float32).eps)
def log_normalize(v, axis=0):
"""Normalized probabilities from unnormalized log-probabilites"""
v = np.rollaxis(v, axis)
v = v.copy()
v -= v.max(axis=0)
out = logsumexp(v)
v = np.exp(v - out)
v += np.finfo(np.float32).eps
v /= np.sum(v, axis=0)
return np.swapaxes(v, 0, axis)
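# Hedged example (added comment): log_normalize(np.log([[1., 3.]]), axis=-1)
# returns approximately [[0.25, 0.75]], i.e. the softmax of the inputs.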
def wishart_log_det(a, b, detB, n_features):
"""Expected value of the log of the determinant of a Wishart
The expected value of the logarithm of the determinant of a
wishart-distributed random variable with the specified parameters."""
l = np.sum(digamma(0.5 * (a - np.arange(-1, n_features - 1))))
l += n_features * np.log(2)
return l + detB
def wishart_logz(v, s, dets, n_features):
"The logarithm of the normalization constant for the wishart distribution"
z = 0.
z += 0.5 * v * n_features * np.log(2)
z += (0.25 * (n_features * (n_features - 1)) * np.log(np.pi))
z += 0.5 * v * np.log(dets)
z += np.sum(gammaln(0.5 * (v - np.arange(n_features) + 1)))
return z
def _bound_wishart(a, B, detB):
"""Returns a function of the dof, scale matrix and its determinant
used as an upper bound in variational approcimation of the evidence"""
n_features = B.shape[0]
logprior = wishart_logz(a, B, detB, n_features)
logprior -= wishart_logz(n_features,
np.identity(n_features),
1, n_features)
logprior += 0.5 * (a - 1) * wishart_log_det(a, B, detB, n_features)
logprior += 0.5 * a * np.trace(B)
return logprior
##############################################################################
# Variational bound on the log likelihood of each class
##############################################################################
def _sym_quad_form(x, mu, A):
"""helper function to calculate symmetric quadratic form x.T * A * x"""
q = (cdist(x, mu[np.newaxis], "mahalanobis", VI=A) ** 2).reshape(-1)
return q
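# Hedged note (added comment): with VI=A, cdist's 'mahalanobis' metric
# computes sqrt((x - mu)^T A (x - mu)) per row, so squaring recovers the
# quadratic form used in the Gaussian log-likelihood bound.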
def _bound_state_log_lik(X, initial_bound, precs, means, covariance_type):
"""Update the bound with likelihood terms, for standard covariance types"""
n_components, n_features = means.shape
n_samples = X.shape[0]
bound = np.empty((n_samples, n_components))
bound[:] = initial_bound
if covariance_type in ['diag', 'spherical']:
for k in range(n_components):
d = X - means[k]
bound[:, k] -= 0.5 * np.sum(d * d * precs[k], axis=1)
elif covariance_type == 'tied':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs)
elif covariance_type == 'full':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs[k])
return bound
class DPGMM(GMM):
"""Variational Inference for the Infinite Gaussian Mixture Model.
DPGMM stands for Dirichlet Process Gaussian Mixture Model, and it
is an infinite mixture model with the Dirichlet Process as a prior
distribution on the number of clusters. In practice the
approximate inference algorithm uses a truncated distribution with
a fixed maximum number of components, but almost always the number
of components actually used depends on the data.
Stick-breaking Representation of a Gaussian mixture model
probability distribution. This class allows for easy and efficient
inference of an approximate posterior distribution over the
parameters of a Gaussian mixture model with a variable number of
components (smaller than the truncation parameter n_components).
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Parameters
----------
n_components: int, optional
Number of mixture components. Defaults to 1.
covariance_type: string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
alpha: float, optional
Real number representing the concentration parameter of
the dirichlet process. Intuitively, the Dirichlet Process
is as likely to start a new cluster for a point as it is
to add that point to a cluster with alpha elements. A
higher alpha means more clusters, as the expected number
of clusters is ``alpha*log(N)``. Defaults to 1.
thresh : float, optional
Convergence threshold.
n_iter : int, optional
Maximum number of iterations to perform before convergence.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_components : int
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
            (`n_components`, `n_features`) if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
VBGMM : Finite Gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, thresh=1e-2, verbose=False,
min_covar=None, n_iter=10, params='wmc', init_params='wmc'):
self.alpha = alpha
self.verbose = verbose
super(DPGMM, self).__init__(n_components, covariance_type,
random_state=random_state,
thresh=thresh, min_covar=min_covar,
n_iter=n_iter, params=params,
init_params=init_params)
def _get_precisions(self):
"""Return precisions as a full matrix."""
if self.covariance_type == 'full':
return self.precs_
elif self.covariance_type in ['diag', 'spherical']:
return [np.diag(cov) for cov in self.precs_]
elif self.covariance_type == 'tied':
return [self.precs_] * self.n_components
def _get_covars(self):
return [pinvh(c) for c in self._get_precisions()]
def _set_covars(self, covars):
raise NotImplementedError("""The variational algorithm does
not support setting the covariance parameters.""")
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities: array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
z = np.zeros((X.shape[0], self.n_components))
sd = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dgamma1 = digamma(self.gamma_.T[1]) - sd
dgamma2 = np.zeros(self.n_components)
dgamma2[0] = digamma(self.gamma_[0, 2]) - digamma(self.gamma_[0, 1] +
self.gamma_[0, 2])
for j in range(1, self.n_components):
dgamma2[j] = dgamma2[j - 1] + digamma(self.gamma_[j - 1, 2])
dgamma2[j] -= sd[j - 1]
dgamma = dgamma1 + dgamma2
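        # dgamma now holds E[log pi_k] under the stick-breaking
        # construction: E[log V_k] + sum_{j<k} E[log(1 - V_j)].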
        # Free memory and developers' cognitive load:
del dgamma1, dgamma2, sd
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dgamma
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
"""Update the concentration parameters for each cluster"""
sz = np.sum(z, axis=0)
self.gamma_.T[1] = 1. + sz
self.gamma_.T[2].fill(0)
for i in range(self.n_components - 2, -1, -1):
self.gamma_[i, 2] = self.gamma_[i + 1, 2] + sz[i]
self.gamma_.T[2] += self.alpha
def _update_means(self, X, z):
"""Update the variational distributions for the means"""
n_features = X.shape[1]
for k in range(self.n_components):
if self.covariance_type in ['spherical', 'diag']:
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num *= self.precs_[k]
den = 1. + self.precs_[k] * np.sum(z.T[k])
self.means_[k] = num / den
elif self.covariance_type in ['tied', 'full']:
if self.covariance_type == 'tied':
cov = self.precs_
else:
cov = self.precs_[k]
den = np.identity(n_features) + cov * np.sum(z.T[k])
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num = np.dot(cov, num)
self.means_[k] = linalg.lstsq(den, num)[0]
def _update_precisions(self, X, z):
"""Update the variational distributions for the precisions"""
n_features = X.shape[1]
if self.covariance_type == 'spherical':
self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
for k in range(self.n_components):
# could be more memory efficient ?
sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
self.scale_[k] = 1.
self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
self.bound_prec_[k] = (
0.5 * n_features * (
digamma(self.dof_[k]) - np.log(self.scale_[k])))
self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T
elif self.covariance_type == 'diag':
for k in range(self.n_components):
self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
sq_diff = (X - self.means_[k]) ** 2 # see comment above
self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
z.T[k], (sq_diff + 1))
self.precs_[k] = self.dof_[k] / self.scale_[k]
self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
- np.log(self.scale_[k]))
self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])
elif self.covariance_type == 'tied':
self.dof_ = 2 + X.shape[0] + n_features
self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
for k in range(self.n_components):
diff = X - self.means_[k]
self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_ = pinvh(self.scale_)
self.precs_ = self.dof_ * self.scale_
self.det_scale_ = linalg.det(self.scale_)
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
sum_resp = np.sum(z.T[k])
self.dof_[k] = 2 + sum_resp + n_features
self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
diff = X - self.means_[k]
self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_[k] = pinvh(self.scale_[k])
self.precs_[k] = self.dof_[k] * self.scale_[k]
self.det_scale_[k] = linalg.det(self.scale_[k])
self.bound_prec_[k] = 0.5 * wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
self.scale_[k])
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_.T[1])
print("covariance_type:", self.covariance_type)
def _do_mstep(self, X, z, params):
"""Maximize the variational lower bound
Update each of the parameters to maximize the lower bound."""
self._monitor(X, z, "z")
self._update_concentration(z)
self._monitor(X, z, "gamma")
if 'm' in params:
self._update_means(X, z)
self._monitor(X, z, "mu")
if 'c' in params:
self._update_precisions(X, z)
self._monitor(X, z, "a and b", end=True)
def _initialize_gamma(self):
"Initializes the concentration parameters"
self.gamma_ = self.alpha * np.ones((self.n_components, 3))
def _bound_concentration(self):
"""The variational lower bound for the concentration parameter."""
logprior = gammaln(self.alpha) * self.n_components
logprior += np.sum((self.alpha - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior += np.sum(- gammaln(self.gamma_.T[1] + self.gamma_.T[2]))
logprior += np.sum(gammaln(self.gamma_.T[1]) +
gammaln(self.gamma_.T[2]))
logprior -= np.sum((self.gamma_.T[1] - 1) * (
digamma(self.gamma_.T[1]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior -= np.sum((self.gamma_.T[2] - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
return logprior
def _bound_means(self):
"The variational lower bound for the mean parameters"
logprior = 0.
logprior -= 0.5 * squared_norm(self.means_)
logprior -= 0.5 * self.means_.shape[1] * self.n_components
return logprior
def _bound_precisions(self):
"""Returns the bound term related to precisions"""
logprior = 0.
if self.covariance_type == 'spherical':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_
- self.precs_[:, 0])
elif self.covariance_type == 'diag':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_ - self.precs_)
elif self.covariance_type == 'tied':
logprior += _bound_wishart(self.dof_, self.scale_, self.det_scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
logprior += _bound_wishart(self.dof_[k],
self.scale_[k],
self.det_scale_[k])
return logprior
def _bound_proportions(self, z):
"""Returns the bound term related to proportions"""
dg12 = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dg1 = digamma(self.gamma_.T[1]) - dg12
dg2 = digamma(self.gamma_.T[2]) - dg12
cz = np.cumsum(z[:, ::-1], axis=-1)[:, -2::-1]
logprior = np.sum(cz * dg2[:-1]) + np.sum(z * dg1)
del cz # Save memory
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _logprior(self, z):
logprior = self._bound_concentration()
logprior += self._bound_means()
logprior += self._bound_precisions()
logprior += self._bound_proportions(z)
return logprior
def lower_bound(self, X, z):
"""returns a lower bound on model evidence based on X and membership"""
check_is_fitted(self, 'means_')
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
c = np.sum(z * _bound_state_log_lik(X, self._initial_bound +
self.bound_prec_, self.precs_,
self.means_, self.covariance_type))
return c + self._logprior(z)
def _set_weights(self):
for i in xrange(self.n_components):
self.weights_[i] = self.gamma_[i, 1] / (self.gamma_[i, 1]
+ self.gamma_[i, 2])
self.weights_ /= np.sum(self.weights_)
def fit(self, X):
"""Estimate model parameters with the variational
algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, set the keyword
        argument init_params to the empty string '' when creating
        the object. Likewise, if you would just like to do an
        initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
"""
self.random_state = check_random_state(self.random_state)
## initialization step
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
n_features = X.shape[1]
z = np.ones((X.shape[0], self.n_components))
z /= self.n_components
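        # z holds uniform responsibilities at this point: every sample
        # initially belongs equally to all components.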
self._initial_bound = - 0.5 * n_features * np.log(2 * np.pi)
self._initial_bound -= np.log(2 * np.pi * np.e)
if (self.init_params != '') or not hasattr(self, 'gamma_'):
self._initialize_gamma()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_[::-1]
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components, self.n_components)
if 'c' in self.init_params or not hasattr(self, 'precs_'):
if self.covariance_type == 'spherical':
self.dof_ = np.ones(self.n_components)
self.scale_ = np.ones(self.n_components)
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * n_features * (
digamma(self.dof_) - np.log(self.scale_))
elif self.covariance_type == 'diag':
self.dof_ = 1 + 0.5 * n_features
self.dof_ *= np.ones((self.n_components, n_features))
self.scale_ = np.ones((self.n_components, n_features))
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * (np.sum(digamma(self.dof_) -
np.log(self.scale_), 1))
self.bound_prec_ -= 0.5 * np.sum(self.precs_, 1)
elif self.covariance_type == 'tied':
self.dof_ = 1.
self.scale_ = np.identity(n_features)
self.precs_ = np.identity(n_features)
self.det_scale_ = 1.
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
self.dof_ = (1 + self.n_components + X.shape[0])
self.dof_ *= np.ones(self.n_components)
self.scale_ = [2 * np.identity(n_features)
for _ in range(self.n_components)]
self.precs_ = [np.identity(n_features)
for _ in range(self.n_components)]
self.det_scale_ = np.ones(self.n_components)
self.bound_prec_ = np.zeros(self.n_components)
for k in range(self.n_components):
self.bound_prec_[k] = wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= (self.dof_[k] *
np.trace(self.scale_[k]))
self.bound_prec_ *= 0.5
logprob = []
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
# Expectation step
curr_logprob, z = self.score_samples(X)
logprob.append(curr_logprob.sum() + self._logprior(z))
# Check for convergence.
if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, z, self.params)
self._set_weights()
return self
class VBGMM(DPGMM):
"""Variational Inference for the Gaussian Mixture Model
Variational inference for a Gaussian mixture model probability
distribution. This class allows for easy and efficient inference
of an approximate posterior distribution over the parameters of a
Gaussian mixture model with a fixed number of components.
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Parameters
----------
n_components: int, optional
Number of mixture components. Defaults to 1.
covariance_type: string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
alpha: float, optional
Real number representing the concentration parameter of
the dirichlet distribution. Intuitively, the higher the
value of alpha the more likely the variational mixture of
Gaussians model will use all components it can. Defaults
to 1.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussians.
n_components : int (read-only)
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
            (`n_components`, `n_features`) if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False
otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
    DPGMM : Infinite Gaussian mixture model, using the Dirichlet
        process, fit with a variational algorithm
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, thresh=1e-2, verbose=False,
min_covar=None, n_iter=10, params='wmc', init_params='wmc'):
super(VBGMM, self).__init__(
n_components, covariance_type, random_state=random_state,
thresh=thresh, verbose=verbose, min_covar=min_covar,
n_iter=n_iter, params=params, init_params=init_params)
self.alpha = float(alpha) / n_components
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities: array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
dg = digamma(self.gamma_) - digamma(np.sum(self.gamma_))
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dg
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
for i in range(self.n_components):
self.gamma_[i] = self.alpha + np.sum(z.T[i])
def _initialize_gamma(self):
self.gamma_ = self.alpha * np.ones(self.n_components)
def _bound_proportions(self, z):
logprior = 0.
dg = digamma(self.gamma_)
dg -= digamma(np.sum(self.gamma_))
logprior += np.sum(dg.reshape((-1, 1)) * z.T)
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _bound_concentration(self):
        logprior = gammaln(np.sum(self.gamma_)) - gammaln(self.n_components
                                                          * self.alpha)
logprior -= np.sum(gammaln(self.gamma_) - gammaln(self.alpha))
sg = digamma(np.sum(self.gamma_))
logprior += np.sum((self.gamma_ - self.alpha)
* (digamma(self.gamma_) - sg))
return logprior
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_)
print("covariance_type:", self.covariance_type)
def _set_weights(self):
self.weights_[:] = self.gamma_
self.weights_ /= np.sum(self.weights_)
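# Hedged usage sketch (added comment, not part of the original module);
# X stands for any (n_samples, n_features) array:
#
#     model = DPGMM(n_components=5, covariance_type='diag', n_iter=100)
#     model.fit(X)
#     logprob, responsibilities = model.score_samples(X)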
|
0asa/scikit-learn
|
sklearn/mixture/dpgmm.py
|
Python
|
bsd-3-clause
| 30,707
|
[
"Gaussian"
] |
de86f905bc170ba6ba0a71f5803b4e7c40026c2e84619c4371a5c30970b67bc9
|
# (C) British Crown Copyright 2015 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for `iris.aux_factory.AuxCoordFactory`.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
import iris
from iris._lazy_data import as_lazy_data, is_lazy_data
from iris.aux_factory import AuxCoordFactory
from iris.coords import AuxCoord
class Test__nd_points(tests.IrisTest):
def test_numpy_scalar_coord__zero_ndim(self):
points = np.array(1)
coord = AuxCoord(points)
result = AuxCoordFactory._nd_points(coord, (), 0)
expected = np.array([1])
self.assertArrayEqual(result, expected)
def test_numpy_scalar_coord(self):
value = 1
points = np.array(value)
coord = AuxCoord(points)
result = AuxCoordFactory._nd_points(coord, (), 2)
expected = np.array(value).reshape(1, 1)
self.assertArrayEqual(result, expected)
def test_numpy_simple(self):
points = np.arange(12).reshape(4, 3)
coord = AuxCoord(points)
result = AuxCoordFactory._nd_points(coord, (0, 1), 2)
expected = points
self.assertArrayEqual(result, expected)
def test_numpy_complex(self):
points = np.arange(12).reshape(4, 3)
coord = AuxCoord(points)
result = AuxCoordFactory._nd_points(coord, (3, 2), 5)
expected = points.T[np.newaxis, np.newaxis, ..., np.newaxis]
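        # Hedged note (added comment): dims=(3, 2) places the transposed
        # (3, 4) points at axes 2 and 3 of the 5-d result, so the
        # expected shape is (1, 1, 3, 4, 1).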
self.assertArrayEqual(result, expected)
def test_lazy_simple(self):
raw_points = np.arange(12).reshape(4, 3)
points = as_lazy_data(raw_points, raw_points.shape)
coord = AuxCoord(points)
self.assertTrue(is_lazy_data(coord.core_points()))
result = AuxCoordFactory._nd_points(coord, (0, 1), 2)
# Check we haven't triggered the loading of the coordinate values.
self.assertTrue(is_lazy_data(coord.core_points()))
self.assertTrue(is_lazy_data(result))
expected = raw_points
self.assertArrayEqual(result, expected)
def test_lazy_complex(self):
raw_points = np.arange(12).reshape(4, 3)
points = as_lazy_data(raw_points, raw_points.shape)
coord = AuxCoord(points)
self.assertTrue(is_lazy_data(coord.core_points()))
result = AuxCoordFactory._nd_points(coord, (3, 2), 5)
# Check we haven't triggered the loading of the coordinate values.
self.assertTrue(is_lazy_data(coord.core_points()))
self.assertTrue(is_lazy_data(result))
expected = raw_points.T[np.newaxis, np.newaxis, ..., np.newaxis]
self.assertArrayEqual(result, expected)
class Test__nd_bounds(tests.IrisTest):
def test_numpy_scalar_coord__zero_ndim(self):
points = np.array(0.5)
bounds = np.arange(2)
coord = AuxCoord(points, bounds=bounds)
result = AuxCoordFactory._nd_bounds(coord, (), 0)
expected = bounds
self.assertArrayEqual(result, expected)
def test_numpy_scalar_coord(self):
points = np.array(0.5)
bounds = np.arange(2).reshape(1, 2)
coord = AuxCoord(points, bounds=bounds)
result = AuxCoordFactory._nd_bounds(coord, (), 2)
expected = bounds[np.newaxis]
self.assertArrayEqual(result, expected)
def test_numpy_simple(self):
points = np.arange(12).reshape(4, 3)
bounds = np.arange(24).reshape(4, 3, 2)
coord = AuxCoord(points, bounds=bounds)
result = AuxCoordFactory._nd_bounds(coord, (0, 1), 2)
expected = bounds
self.assertArrayEqual(result, expected)
def test_numpy_complex(self):
points = np.arange(12).reshape(4, 3)
bounds = np.arange(24).reshape(4, 3, 2)
coord = AuxCoord(points, bounds=bounds)
result = AuxCoordFactory._nd_bounds(coord, (3, 2), 5)
expected = bounds.transpose((1, 0, 2)).reshape(1, 1, 3, 4, 1, 2)
self.assertArrayEqual(result, expected)
def test_lazy_simple(self):
raw_points = np.arange(12).reshape(4, 3)
points = as_lazy_data(raw_points, raw_points.shape)
raw_bounds = np.arange(24).reshape(4, 3, 2)
bounds = as_lazy_data(raw_bounds, raw_bounds.shape)
coord = AuxCoord(points, bounds=bounds)
self.assertTrue(is_lazy_data(coord.core_bounds()))
result = AuxCoordFactory._nd_bounds(coord, (0, 1), 2)
# Check we haven't triggered the loading of the coordinate values.
self.assertTrue(is_lazy_data(coord.core_bounds()))
self.assertTrue(is_lazy_data(result))
expected = raw_bounds
self.assertArrayEqual(result, expected)
def test_lazy_complex(self):
raw_points = np.arange(12).reshape(4, 3)
points = as_lazy_data(raw_points, raw_points.shape)
raw_bounds = np.arange(24).reshape(4, 3, 2)
bounds = as_lazy_data(raw_bounds, raw_bounds.shape)
coord = AuxCoord(points, bounds=bounds)
self.assertTrue(is_lazy_data(coord.core_bounds()))
result = AuxCoordFactory._nd_bounds(coord, (3, 2), 5)
# Check we haven't triggered the loading of the coordinate values.
self.assertTrue(is_lazy_data(coord.core_bounds()))
self.assertTrue(is_lazy_data(result))
expected = raw_bounds.transpose((1, 0, 2)).reshape(1, 1, 3, 4, 1, 2)
self.assertArrayEqual(result, expected)
@tests.skip_data
class Test_lazy_aux_coords(tests.IrisTest):
def setUp(self):
path = tests.get_data_path(['NetCDF', 'testing',
'small_theta_colpex.nc'])
self.cube = iris.load_cube(path, 'air_potential_temperature')
def _check_lazy(self):
coords = self.cube.aux_coords + self.cube.derived_coords
for coord in coords:
self.assertTrue(coord.has_lazy_points())
if coord.has_bounds():
self.assertTrue(coord.has_lazy_bounds())
def test_lazy_coord_loading(self):
# Test that points and bounds arrays stay lazy upon cube loading.
self._check_lazy()
def test_lazy_coord_printing(self):
# Test that points and bounds arrays stay lazy after cube printing.
_ = str(self.cube)
self._check_lazy()
if __name__ == '__main__':
tests.main()
|
LukeC92/iris
|
lib/iris/tests/unit/aux_factory/test_AuxCoordFactory.py
|
Python
|
lgpl-3.0
| 7,082
|
[
"NetCDF"
] |
4df426a5168b9561b29825a1cb796fa5b63ddcf1b906f46a1b5e9d3fa456192c
|
# -*- coding: utf-8 -*-
"""Test sequences for graphiness.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from collections import defaultdict
import heapq
import networkx as nx
__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
                        'Pieter Swart (swart@lanl.gov)',
                        'Dan Schult (dschult@colgate.edu)',
                        'Joel Miller (joel.c.miller.research@gmail.com)',
                        'Ben Edwards',
                        'Brian Cloteaux <brian.cloteaux@nist.gov>'])
__all__ = ['is_graphical',
'is_multigraphical',
'is_pseudographical',
'is_digraphical',
'is_valid_degree_sequence_erdos_gallai',
'is_valid_degree_sequence_havel_hakimi',
'is_valid_degree_sequence', # deprecated
]
def is_graphical(sequence, method='eg'):
"""Returns True if sequence is a valid degree sequence.
A degree sequence is valid if some graph can realize it.
Parameters
----------
sequence : list or iterable container
A sequence of integer node degrees
method : "eg" | "hh"
The method used to validate the degree sequence.
"eg" corresponds to the Erdős-Gallai algorithm, and
"hh" to the Havel-Hakimi algorithm.
Returns
-------
valid : bool
True if the sequence is a valid degree sequence and False if not.
Examples
--------
>>> G = nx.path_graph(4)
>>> sequence = (d for n, d in G.degree())
    >>> nx.is_graphical(sequence)
True
References
----------
Erdős-Gallai
[EG1960]_, [choudum1986]_
Havel-Hakimi
[havel1955]_, [hakimi1962]_, [CL1996]_
"""
if method == 'eg':
valid = is_valid_degree_sequence_erdos_gallai(list(sequence))
elif method == 'hh':
valid = is_valid_degree_sequence_havel_hakimi(list(sequence))
else:
msg = "`method` must be 'eg' or 'hh'"
raise nx.NetworkXException(msg)
return valid
is_valid_degree_sequence = is_graphical
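# Hedged example (added comment): [3, 3, 2, 2, 1, 1] has an even sum and
# passes both validation methods:
#
#     >>> is_graphical([3, 3, 2, 2, 1, 1], method='hh')
#     True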
def _basic_graphical_tests(deg_sequence):
# Sort and perform some simple tests on the sequence
if not nx.utils.is_list_of_ints(deg_sequence):
raise nx.NetworkXUnfeasible
p = len(deg_sequence)
num_degs = [0]*p
dmax, dmin, dsum, n = 0, p, 0, 0
for d in deg_sequence:
# Reject if degree is negative or larger than the sequence length
if d<0 or d>=p:
raise nx.NetworkXUnfeasible
# Process only the non-zero integers
elif d>0:
dmax, dmin, dsum, n = max(dmax,d), min(dmin,d), dsum+d, n+1
num_degs[d] += 1
# Reject sequence if it has odd sum or is oversaturated
if dsum%2 or dsum>n*(n-1):
raise nx.NetworkXUnfeasible
return dmax,dmin,dsum,n,num_degs
def is_valid_degree_sequence_havel_hakimi(deg_sequence):
r"""Returns True if deg_sequence can be realized by a simple graph.
The validation proceeds using the Havel-Hakimi theorem.
Worst-case run time is: O(s) where s is the sum of the sequence.
Parameters
----------
deg_sequence : list
A list of integers where each element specifies the degree of a node
in a graph.
Returns
-------
valid : bool
True if deg_sequence is graphical and False if not.
Notes
-----
The ZZ condition says that for the sequence d if
.. math::
        |d| \geq \frac{(\max(d) + \min(d) + 1)^2}{4\min(d)}
then d is graphical. This was shown in Theorem 6 in [1]_.
References
----------
.. [1] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
[havel1955]_, [hakimi1962]_, [CL1996]_
"""
try:
dmax,dmin,dsum,n,num_degs = _basic_graphical_tests(deg_sequence)
except nx.NetworkXUnfeasible:
return False
# Accept if sequence has no non-zero degrees or passes the ZZ condition
if n==0 or 4*dmin*n >= (dmax+dmin+1) * (dmax+dmin+1):
return True
modstubs = [0]*(dmax+1)
# Successively reduce degree sequence by removing the maximum degree
while n > 0:
# Retrieve the maximum degree in the sequence
while num_degs[dmax] == 0:
            dmax -= 1
# If there are not enough stubs to connect to, then the sequence is
# not graphical
if dmax > n-1:
return False
# Remove largest stub in list
num_degs[dmax], n = num_degs[dmax]-1, n-1
# Reduce the next dmax largest stubs
mslen = 0
k = dmax
for i in range(dmax):
while num_degs[k] == 0:
k -= 1
num_degs[k], n = num_degs[k]-1, n-1
if k > 1:
modstubs[mslen] = k-1
mslen += 1
# Add back to the list any non-zero stubs that were removed
for i in range(mslen):
stub = modstubs[i]
num_degs[stub], n = num_degs[stub]+1, n+1
return True
def is_valid_degree_sequence_erdos_gallai(deg_sequence):
r"""Returns True if deg_sequence can be realized by a simple graph.
The validation is done using the Erdős-Gallai theorem [EG1960]_.
Parameters
----------
deg_sequence : list
A list of integers
Returns
-------
valid : bool
True if deg_sequence is graphical and False if not.
Notes
-----
This implementation uses an equivalent form of the Erdős-Gallai criterion.
Worst-case run time is: O(n) where n is the length of the sequence.
Specifically, a sequence d is graphical if and only if the
sum of the sequence is even and for all strong indices k in the sequence,
.. math::
        \sum_{i=1}^{k} d_i \leq k(k-1) + \sum_{j=k+1}^{n} \min(d_j,k)
             = k(n-1) - ( k \sum_{j=0}^{k-1} n_j - \sum_{j=0}^{k-1} j n_j )
A strong index k is any index where `d_k \geq k` and the value `n_j` is the
number of occurrences of j in d. The maximal strong index is called the
Durfee index.
This particular rearrangement comes from the proof of Theorem 3 in [2]_.
The ZZ condition says that for the sequence d if
.. math::
        |d| \geq \frac{(\max(d) + \min(d) + 1)^2}{4\min(d)}
then d is graphical. This was shown in Theorem 6 in [2]_.
References
----------
.. [1] A. Tripathi and S. Vijay. "A note on a theorem of Erdős & Gallai",
Discrete Mathematics, 265, pp. 417-420 (2003).
.. [2] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
[EG1960]_, [choudum1986]_
"""
try:
dmax,dmin,dsum,n,num_degs = _basic_graphical_tests(deg_sequence)
except nx.NetworkXUnfeasible:
return False
# Accept if sequence has no non-zero degrees or passes the ZZ condition
if n==0 or 4*dmin*n >= (dmax+dmin+1) * (dmax+dmin+1):
return True
# Perform the EG checks using the reformulation of Zverovich and Zverovich
k, sum_deg, sum_nj, sum_jnj = 0, 0, 0, 0
for dk in range(dmax, dmin-1, -1):
if dk < k+1: # Check if already past Durfee index
return True
if num_degs[dk] > 0:
run_size = num_degs[dk] # Process a run of identical-valued degrees
if dk < k+run_size: # Check if end of run is past Durfee index
run_size = dk-k # Adjust back to Durfee index
sum_deg += run_size * dk
for v in range(run_size):
sum_nj += num_degs[k+v]
sum_jnj += (k+v) * num_degs[k+v]
k += run_size
if sum_deg > k*(n-1) - k*sum_nj + sum_jnj:
return False
return True
def is_multigraphical(sequence):
"""Returns True if some multigraph can realize the sequence.
Parameters
----------
    sequence : list
A list of integers
Returns
-------
valid : bool
        True if the sequence is a multigraphic degree sequence and False if not.
Notes
-----
The worst-case run time is O(n) where n is the length of the sequence.
References
----------
.. [1] S. L. Hakimi. "On the realizability of a set of integers as
degrees of the vertices of a linear graph", J. SIAM, 10, pp. 496-506
(1962).
"""
deg_sequence = list(sequence)
if not nx.utils.is_list_of_ints(deg_sequence):
return False
dsum, dmax = 0, 0
for d in deg_sequence:
if d<0:
return False
dsum, dmax = dsum+d, max(dmax,d)
if dsum%2 or dsum<2*dmax:
return False
return True
def is_pseudographical(sequence):
"""Returns True if some pseudograph can realize the sequence.
Every nonnegative integer sequence with an even sum is pseudographical
(see [1]_).
Parameters
----------
sequence : list or iterable container
A sequence of integer node degrees
Returns
-------
valid : bool
True if the sequence is a pseudographic degree sequence and False if not.
Notes
-----
The worst-case run time is O(n) where n is the length of the sequence.
References
----------
.. [1] F. Boesch and F. Harary. "Line removal algorithms for graphs
and their degree lists", IEEE Trans. Circuits and Systems, CAS-23(12),
pp. 778-782 (1976).
"""
s = list(sequence)
if not nx.utils.is_list_of_ints(s):
return False
return sum(s)%2 == 0 and min(s) >= 0
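# Hedged example (added comment): is_pseudographical([5, 3]) is True
# (even sum, all entries nonnegative) even though no simple graph on two
# nodes realizes it.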
def is_digraphical(in_sequence, out_sequence):
r"""Returns True if some directed graph can realize the in- and out-degree
sequences.
Parameters
----------
in_sequence : list or iterable container
A sequence of integer node in-degrees
out_sequence : list or iterable container
A sequence of integer node out-degrees
Returns
-------
valid : bool
        True if the in- and out-degree sequences are digraphical, False if not.
Notes
-----
This algorithm is from Kleitman and Wang [1]_.
The worst case runtime is O(s * log n) where s and n are the sum and length
of the sequences respectively.
References
----------
.. [1] D.J. Kleitman and D.L. Wang
Algorithms for Constructing Graphs and Digraphs with Given Valences
and Factors, Discrete Mathematics, 6(1), pp. 79-88 (1973)
"""
in_deg_sequence = list(in_sequence)
out_deg_sequence = list(out_sequence)
if not nx.utils.is_list_of_ints(in_deg_sequence):
return False
if not nx.utils.is_list_of_ints(out_deg_sequence):
return False
# Process the sequences and form two heaps to store degree pairs with
# either zero or non-zero out degrees
sumin, sumout, nin, nout = 0, 0, len(in_deg_sequence), len(out_deg_sequence)
maxn = max(nin, nout)
maxin = 0
if maxn==0:
return True
stubheap, zeroheap = [ ], [ ]
for n in range(maxn):
in_deg, out_deg = 0, 0
if n<nout:
out_deg = out_deg_sequence[n]
if n<nin:
in_deg = in_deg_sequence[n]
if in_deg<0 or out_deg<0:
return False
sumin, sumout, maxin = sumin+in_deg, sumout+out_deg, max(maxin, in_deg)
if in_deg > 0:
stubheap.append((-1*out_deg, -1*in_deg))
elif out_deg > 0:
zeroheap.append(-1*out_deg)
if sumin != sumout:
return False
heapq.heapify(stubheap)
heapq.heapify(zeroheap)
modstubs = [(0,0)]*(maxin+1)
# Successively reduce degree sequence by removing the maximum out degree
while stubheap:
# Take the first value in the sequence with non-zero in degree
(freeout, freein) = heapq.heappop( stubheap )
freein *= -1
if freein > len(stubheap)+len(zeroheap):
return False
# Attach out stubs to the nodes with the most in stubs
mslen = 0
for i in range(freein):
if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0]):
stubout = heapq.heappop(zeroheap)
stubin = 0
else:
(stubout, stubin) = heapq.heappop(stubheap)
if stubout == 0:
return False
# Check if target is now totally connected
if stubout+1<0 or stubin<0:
modstubs[mslen] = (stubout+1, stubin)
mslen += 1
# Add back the nodes to the heap that still have available stubs
for i in range(mslen):
stub = modstubs[i]
if stub[1] < 0:
heapq.heappush(stubheap, stub)
else:
heapq.heappush(zeroheap, stub[0])
if freeout<0:
heapq.heappush(zeroheap, freeout)
return True
|
jcurbelo/networkx
|
networkx/algorithms/graphical.py
|
Python
|
bsd-3-clause
| 12,997
|
[
"Brian"
] |
0c54bc2a3fe009a6a93f8e9990e632bab6ccb0ffb9b96b19b5c177324e915275
|
# -*- Mode: Python; coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2006 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
""" Simple Base64 ICookieFile implementation """
import binascii
import logging
import os
from zope.interface import implementer
from stoqlib.lib.interfaces import CookieError, ICookieFile
log = logging.getLogger(__name__)
@implementer(ICookieFile)
class Base64CookieFile(object):
def __init__(self, filename):
self._filename = filename
def get(self):
if not os.path.exists(self._filename):
raise CookieError("%s does not exist" % self._filename)
cookiedata = open(self._filename).read()
        if ':' not in cookiedata:
log.info("invalid cookie file")
raise CookieError("%s is invalid" % self._filename)
data = cookiedata.split(":", 1)
try:
return (unicode(data[0]), unicode(binascii.a2b_base64(data[1])))
except binascii.Error:
raise CookieError("invalid format")
def clear(self):
try:
os.remove(self._filename)
log.info("Removed cookie %s" % self._filename)
except OSError as e:
log.info("Could not remove file %s: %r" % (self._filename, e))
def store(self, username, password):
if not username:
raise CookieError("a username is required")
try:
fd = open(self._filename, "w")
except IOError as e:
raise CookieError("Could open file %s: %r" % (
self._filename, e))
# obfuscate password to avoid it being easily identified when
# editing file on screen. this is *NOT* encryption!
fd.write("%s:%s" % (username, binascii.b2a_base64(password or '')))
fd.close()
log.info("Saved cookie %s" % self._filename)
|
andrebellafronte/stoq
|
stoqlib/lib/cookie.py
|
Python
|
gpl-2.0
| 2,633
|
[
"VisIt"
] |
3ca7e31e18cd3fbb219bf7a11ef3639330efde0d7bbad9361d550e1a683b10ed
|
import gen_utils
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
import module_utils
import vtk
EMODES = {
1:'Point seeded regions',
2:'Cell seeded regions',
3:'Specified regions',
4:'Largest region',
5:'All regions',
6:'Closest point region'
}
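# Hedged note (added comment): the module stores extraction_mode zero-based,
# so a stored value of 3 selects EMODES[4], 'Largest region' (see
# logic_to_config/config_to_logic below).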
class polyDataConnect(ScriptedConfigModuleMixin, ModuleBase):
def __init__(self, module_manager):
# call parent constructor
ModuleBase.__init__(self, module_manager)
self._polyDataConnect = vtk.vtkPolyDataConnectivityFilter()
# we're not going to use this feature just yet
self._polyDataConnect.ScalarConnectivityOff()
#
self._polyDataConnect.SetExtractionModeToPointSeededRegions()
module_utils.setup_vtk_object_progress(self, self._polyDataConnect,
'Finding connected surfaces')
# default is point seeded regions (we store zero-based)
self._config.extraction_mode = 0
self._config.colour_regions = 0
config_list = [
('Extraction mode:', 'extraction_mode', 'base:int',
'choice',
'What kind of connected regions should be extracted.',
[EMODES[i] for i in range(1,7)]),
('Colour regions:', 'colour_regions', 'base:int',
'checkbox',
'Should connected regions be coloured differently.')
]
# and the mixin constructor
ScriptedConfigModuleMixin.__init__(
self, config_list,
{'Module (self)' : self,
'vtkPolyDataConnectivityFilter' : self._polyDataConnect})
# we'll use this to keep a binding (reference) to the passed object
self._input_points = None
# this will be our internal list of points
self._seedIds = []
self.sync_module_logic_with_config()
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
self.set_input(0, None)
# don't forget to call the close() method of the vtkPipeline mixin
ScriptedConfigModuleMixin.close(self)
ModuleBase.close(self)
# get rid of our reference
del self._polyDataConnect
def get_input_descriptions(self):
return ('vtkPolyData', 'Seed points')
def set_input(self, idx, inputStream):
if idx == 0:
# will work for None and not-None
self._polyDataConnect.SetInput(inputStream)
else:
self._input_points = inputStream
def get_output_descriptions(self):
return (self._polyDataConnect.GetOutput().GetClassName(),)
def get_output(self, idx):
return self._polyDataConnect.GetOutput()
def logic_to_config(self):
# extractionmodes in vtkPolyDataCF start at 1
# we store it as 0-based
emode = self._polyDataConnect.GetExtractionMode()
self._config.extraction_mode = emode - 1
self._config.colour_regions = \
self._polyDataConnect.GetColorRegions()
def config_to_logic(self):
# extractionmodes in vtkPolyDataCF start at 1
# we store it as 0-based
self._polyDataConnect.SetExtractionMode(
self._config.extraction_mode + 1)
self._polyDataConnect.SetColorRegions(
self._config.colour_regions)
def execute_module(self):
if self._polyDataConnect.GetExtractionMode() == 1:
self._sync_pdc_to_input_points()
self._polyDataConnect.Update()
def _sync_pdc_to_input_points(self):
# extract a list from the input points
temp_list = []
if self._input_points and self._polyDataConnect.GetInput():
for i in self._input_points:
id = self._polyDataConnect.GetInput().FindPoint(i['world'])
                if id >= 0:
temp_list.append(id)
if temp_list != self._seedIds:
self._seedIds = temp_list
# I'm hoping this clears the list
self._polyDataConnect.InitializeSeedList()
for seedId in self._seedIds:
self._polyDataConnect.AddSeed(seedId)
print "adding %d" % (seedId)
|
nagyistoce/devide
|
modules/filters/polyDataConnect.py
|
Python
|
bsd-3-clause
| 4,397
|
[
"VTK"
] |
0432087a3be184a133ebd9e743e7f9eb316e37c372970a2a5000ff844f0c0c7f
|
#!/usr/bin/env python
#os imports
import os
from sys import stdin,argv
import sys
from optparse import OptionParser
#Title:
#script to reformat fasta ID names.
#############################################################################
#functions
def reformat_fasta_name(filename, databse, out):
"this function re-write a file as a fasta file but with altered names"
f= open(out, 'w')
f_in = open(fasta, "r")
name = []
count = 0
for seq_record in SeqIO.parse(filename, "fasta"):
count = count+1
old_to_new_name = "%s\tseqID%d_1\n" % (seq_record.id, count)
name.append(old_to_new_name)
        #remove the read prefix to get unique names
        # underscore _1 implies an abundance of 1 for swarm
seq_record.id = "seqID%d_1" % (count)
seq_record.description = ""
SeqIO.write(seq_record, f, "fasta")
name_out = open(databse, "w")
#file to keep track of the original names if we need them
for i in name:
name_out.write(i)
name_out.close()
f.close()
f_in.close()
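# Hedged illustration (added comment): each line of the mapping file
# written above pairs the original ID with the new one, tab-separated,
# e.g. "OLD_READ_001\tseqID1_1" (the original ID is hypothetical).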
#################################################################################################
if "-v" in sys.argv or "--version" in sys.argv:
print "v0.0.1"
sys.exit(0)
usage = """Use as follows:
$ python complete....py -f seq.fasta -d database.out -o out.fasta
script to reformat fasta names.
# names end with an
underscore _1, which implies an abundance of 1 for swarm
requires Biopython
"""
parser = OptionParser(usage=usage)
parser.add_option("-f","--fasta", dest="fasta", default=None,
help="fasta file to have names altered")
parser.add_option("-d", "--databse", dest="databse", default=None,
help="outfile to keep track of old and new ID names",
metavar="FILE")
parser.add_option("-o", "--out", dest="out", default=None,
help="output filename")
(options, args) = parser.parse_args()
fasta = options.fasta
databse = options.databse
out = options.out
#run the program
#biopython imports
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
reformat_fasta_name(fasta, databse, out)
|
widdowquinn/THAPBI
|
Phyt_ITS_identifying_pipeline/Python_ITS_scripts/completely_rename_ID.py
|
Python
|
mit
| 2,265
|
[
"Biopython"
] |
9a77c7208f2cc838501ea4a64673df5046e1ecdb4c3d417b08c46a39a98d27ff
|