repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
yarikoptic/pystatsmodels | statsmodels/sandbox/distributions/try_pot.py | 5 | 2283 | # -*- coding: utf-8 -*-
"""
Created on Wed May 04 06:09:18 2011
@author: josef
"""
import numpy as np
def mean_residual_life(x, frac=None, alpha=0.05):
    '''empirical mean residual life or expected shortfall

    For each threshold (by default every order statistic of ``x`` except the
    largest) this returns the mean of the observations strictly above the
    threshold together with the standard error of that mean and a normal
    confidence interval.

    Parameters
    ----------
    x : array_like, 1-D
        sample of observations
    frac : scalar or array_like, optional
        fraction(s) of the sorted sample to use as thresholds. If None, all
        order statistics are used. A scalar is treated like a one-element
        list; with fewer than two thresholds an empty (0, 6) array is
        returned. (frac handling is still incomplete, see Notes)
    alpha : float
        intended significance level; currently not used because the critical
        value 1.96 is hard-coded (see todo below)

    Returns
    -------
    res : ndarray
        columns: number of observations above threshold, threshold,
        conditional mean, std. error of the mean, lower and upper
        confidence limit of the conditional mean

    Notes
    -----
    todo: check formula for std of mean
    doesn't include case for all observations
    last observations std is zero
    vectorize loop using cumsum
    '''
    axis = 0  # searchsorted is 1d only
    x = np.asarray(x)
    nobs = x.shape[axis]
    xsorted = np.sort(x, axis=axis)
    if frac is None:
        xthreshold = xsorted
    else:
        # atleast_1d: a scalar frac used to raise
        # "TypeError: object of type 'numpy.float64' has no len()" below
        xthreshold = np.atleast_1d(xsorted[np.floor(nobs * frac).astype(int)])
    #use searchsorted instead of simple index in case of ties
    xlargerindex = np.searchsorted(xsorted, xthreshold, side='right')
    #replace loop with cumsum ?
    result = []
    for i in range(len(xthreshold) - 1):
        k_ind = xlargerindex[i]
        # slice the *sorted* sample: k_ind indexes into xsorted, so slicing
        # the raw (possibly unsorted) x would average the wrong observations
        rmean = xsorted[k_ind:].mean()
        rstd = xsorted[k_ind:].std()  #this doesn't work for last observations, nans
        rmstd = rstd / np.sqrt(nobs - k_ind)  #std error of mean, check formula
        result.append((k_ind, xthreshold[i], rmean, rmstd))
    # reshape keeps the array 2-D even when no thresholds produced a row
    res = np.array(result).reshape(-1, 4)
    crit = 1.96  # todo: without loading stats, crit = -stats.t.ppf(0.05)
    # confidence interval centered on the conditional mean (column 2);
    # previously it was (incorrectly) centered on the threshold column
    confint = res[:, 2:3] + crit * res[:, -1:] * np.array([[-1, 1]])
    return np.column_stack((res, confint))
expected_shortfall = mean_residual_life #alias
# ad-hoc smoke check (Python-2 script): compares the function's conditional
# means against a brute-force list comprehension over the raw sample
if __name__ == "__main__":
    rvs = np.random.standard_t(5, size= 10)
    res = mean_residual_life(rvs)
    print res
    rmean = [rvs[i:].mean() for i in range(len(rvs))]
    print res[:,2] - rmean[1:]
# doctest-style session notes kept from the original author, documenting the
# still-open problems with the ``frac`` argument (scalar frac raised TypeError)
'''
>>> mean_residual_life(rvs, frac= 0.5)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "E:\Josef\eclipsegworkspace\statsmodels-josef-experimental-030\scikits\statsmodels\sandbox\distributions\try_pot.py", line 35, in mean_residual_life
for i in range(len(xthreshold)-1):
TypeError: object of type 'numpy.float64' has no len()
>>> mean_residual_life(rvs, frac= [0.5])
array([[ 1. , -1.16904459, 0.35165016, 0.41090978, -1.97442776,
-0.36366142],
[ 1. , -1.16904459, 0.35165016, 0.41090978, -1.97442776,
-0.36366142],
[ 1. , -1.1690445
'''
| bsd-3-clause | 7131197cd7ebe67e4dc1a93738974f68 | 30.273973 | 155 | 0.619799 | 3.023841 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/examples/l1_demo/demo.py | 3 | 13517 | from optparse import OptionParser
import statsmodels.api as sm
import scipy as sp
from scipy import linalg
from scipy import stats
import pdb
# pdb.set_trace()
docstr = """
Demonstrates l1 regularization for likelihood models.
Use different models by setting mode = mnlogit, logit, or probit.
Examples
-------
$ python demo.py --get_l1_slsqp_results logit
>>> import demo
>>> demo.run_demo('logit')
The Story
---------
The maximum likelihood (ML) solution works well when the number of data
points is large and the noise is small. When the ML solution starts
"breaking", the regularized solution should do better.
The l1 Solvers
--------------
The solvers are slower than standard Newton, and sometimes have
convergence issues Nonetheless, the final solution makes sense and
is often better than the ML solution.
The standard l1 solver is fmin_slsqp and is included with scipy. It
sometimes has trouble verifying convergence when the data size is
large.
The l1_cvxopt_cp solver is part of CVXOPT and this package needs to be
installed separately. It works well even for larger data sizes.
"""
def main():
    """
    Provides a CLI for the demo.

    Builds an OptionParser with one option per tunable parameter of
    :func:`run_demo`, requires exactly one positional argument -- the model
    mode ('logit', 'mnlogit', or 'probit') -- and dispatches to
    :func:`run_demo` with the parsed options as keyword arguments.
    """
    usage = "usage: %prog [options] mode"
    usage += '\n'+docstr
    parser = OptionParser(usage=usage)
    # base_alpha
    parser.add_option("-a", "--base_alpha",
            help="Size of regularization param (the param actully used will "\
                    "automatically scale with data size in this demo) "\
                    "[default: %default]",
            dest='base_alpha', action='store', type='float', default=0.01)
    # num_samples
    parser.add_option("-N", "--num_samples",
            help="Number of data points to generate for fit "\
                    "[default: %default]",
            dest='N', action='store', type='int', default=500)
    # get_l1_slsqp_results
    parser.add_option("--get_l1_slsqp_results",
            help="Do an l1 fit using slsqp. [default: %default]", \
            action="store_true",dest='get_l1_slsqp_results', default=False)
    # get_l1_cvxopt_results
    parser.add_option("--get_l1_cvxopt_results",
            help="Do an l1 fit using cvxopt. [default: %default]", \
            action="store_true",dest='get_l1_cvxopt_results', default=False)
    # num_nonconst_covariates
    parser.add_option("--num_nonconst_covariates",
            help="Number of covariates that are not constant "\
                    "(a constant will be prepended) [default: %default]",
            dest='num_nonconst_covariates', action='store',
            type='int', default=10)
    # noise_level
    parser.add_option("--noise_level",
            help="Level of the noise relative to signal [default: %default]",
            dest='noise_level', action='store', type='float',
            default=0.2)
    # cor_length
    parser.add_option("--cor_length",
            help="Correlation length of the (Gaussian) independent variables"\
                    "[default: %default]",
            dest='cor_length', action='store', type='float',
            default=2)
    # num_zero_params
    parser.add_option("--num_zero_params",
            help="Number of parameters equal to zero for every target in "\
                    "logistic regression examples. [default: %default]",
            dest='num_zero_params', action='store', type='int',
            default=8)
    # num_targets
    parser.add_option("-J", "--num_targets",
            help="Number of choices for the endogenous response in "\
                    "multinomial logit example [default: %default]",
            dest='num_targets', action='store', type='int', default=3)
    # print_summaries
    parser.add_option("-s", "--print_summaries",
            help="Print the full fit summary. [default: %default]", \
            action="store_true",dest='print_summaries', default=False)
    # save_arrays
    parser.add_option("--save_arrays",
            help="Save exog/endog/true_params to disk for future use. "\
                    "[default: %default]",
            action="store_true",dest='save_arrays', default=False)
    # load_old_arrays
    parser.add_option("--load_old_arrays",
            help="Load exog/endog/true_params arrays from disk. "\
                    "[default: %default]",
            action="store_true",dest='load_old_arrays', default=False)
    (options, args) = parser.parse_args()
    # exactly one positional argument (the mode) is required
    assert len(args) == 1
    mode = args[0].lower()
    # the option dest names were chosen to match run_demo's keywords 1:1
    run_demo(mode, **options.__dict__)
def run_demo(mode, base_alpha=0.01, N=500, get_l1_slsqp_results=False,
        get_l1_cvxopt_results=False, num_nonconst_covariates=10,
        noise_level=0.2, cor_length=2, num_zero_params=8, num_targets=3,
        print_summaries=False, save_arrays=False, load_old_arrays=False):
    """
    Run the demo and print results.

    Generates correlated exog, draws true parameters (with some forced to
    zero), simulates endog for the chosen model, fits by maximum likelihood
    and optionally by the l1 solvers, then prints a summary string.

    Parameters
    ----------
    mode : String
        either 'logit', 'mnlogit', or 'probit'
    base_alpha : Float
        Size of regularization param (the param actually used will
        automatically scale with data size in this demo)
    N : Integer
        Number of data points to generate for fit
    get_l1_slsqp_results : boolean,
        Do an l1 fit using slsqp.
    get_l1_cvxopt_results : boolean
        Do an l1 fit using cvxopt
    num_nonconst_covariates : Integer
        Number of covariates that are not constant
        (a constant will be prepended)
    noise_level : Float (non-negative)
        Level of the noise relative to signal
    cor_length : Float (non-negative)
        Correlation length of the (Gaussian) independent variables
    num_zero_params : Integer
        Number of parameters equal to zero for every target in logistic
        regression examples.
    num_targets : Integer
        Number of choices for the endogenous response in multinomial logit
        example
    print_summaries : Boolean
        print the full fit summary.
    save_arrays : Boolean
        Save exog/endog/true_params to disk for future use.
    load_old_arrays : Boolean
        Load exog/endog/true_params arrays from disk.
    """
    # binary models estimate a single parameter vector, i.e. 2 "targets"
    if mode != 'mnlogit':
        print "Setting num_targets to 2 since mode != 'mnlogit'"
        num_targets = 2
    models = {
        'logit': sm.Logit, 'mnlogit': sm.MNLogit, 'probit': sm.Probit}
    # logit and mnlogit share the same endog simulator
    endog_funcs = {
        'logit': get_logit_endog, 'mnlogit': get_logit_endog,
        'probit': get_probit_endog}
    # The regularization parameter
    # Here we scale it with N for simplicity. In practice, you should
    # use cross validation to pick alpha
    alpha = base_alpha * N * sp.ones((num_nonconst_covariates+1, num_targets-1))
    alpha[0,:] = 0 # Don't regularize the intercept
    #### Make the data and model
    exog = get_exog(N, num_nonconst_covariates, cor_length)
    exog = sm.add_constant(exog)
    true_params = sp.rand(num_nonconst_covariates+1, num_targets-1)
    if num_zero_params:
        # zero out the trailing coefficients so the l1 fit has something to find
        true_params[-num_zero_params:, :] = 0
    endog = endog_funcs[mode](true_params, exog, noise_level)
    # optionally persist/restore the generated arrays for reproducible runs
    endog, exog, true_params = save_andor_load_arrays(
        endog, exog, true_params, save_arrays, load_old_arrays)
    model = models[mode](endog, exog)
    #### Get the results and print
    results = run_solvers(model, true_params, alpha,
        get_l1_slsqp_results, get_l1_cvxopt_results, print_summaries)
    summary_str = get_summary_str(results, true_params, get_l1_slsqp_results,
        get_l1_cvxopt_results, print_summaries)
    print summary_str
def run_solvers(model, true_params, alpha, get_l1_slsqp_results,
        get_l1_cvxopt_results, print_summaries):
    """
    Fit the model by maximum likelihood and, when requested, by the l1
    penalized solvers, returning every fit in a dict keyed by solver name.

    Works the same for any l1 penalized likelihood model.
    """
    fits = {}
    #### Train the models
    # The plain Newton ML fit always runs and seeds the l1 solvers
    ml_fit = model.fit(method='newton')
    fits['results_ML'] = ml_fit
    warm_start = ml_fit.params.ravel(order='F')
    if get_l1_slsqp_results:
        fits['results_l1_slsqp'] = model.fit_regularized(
            method='l1', alpha=alpha, maxiter=1000,
            start_params=warm_start, retall=True)
    if get_l1_cvxopt_results:
        fits['results_l1_cvxopt_cp'] = model.fit_regularized(
            method='l1_cvxopt_cp', alpha=alpha, maxiter=50,
            start_params=warm_start, retall=True, feastol=1e-5)
    return fits
def get_summary_str(results, true_params, get_l1_slsqp_results,
        get_l1_cvxopt_results, print_summaries):
    """
    Gets a string summarizing the results.

    Builds (in order): a short RMS-error section for every fit present in
    ``results``, the true parameters, and then either the full summary
    tables (when ``print_summaries``) or just the estimated parameters.

    Parameters
    ----------
    results : dict
        output of :func:`run_solvers`; must contain 'results_ML' and,
        depending on the flags, 'results_l1_slsqp' / 'results_l1_cvxopt_cp'
    true_params : array
        parameters used to simulate the data, for the RMSE comparison
    get_l1_slsqp_results, get_l1_cvxopt_results, print_summaries : bool
        which sections to include
    """
    #### Extract specific results
    results_ML = results['results_ML']
    RMSE_ML = get_RMSE(results_ML, true_params)
    if get_l1_slsqp_results:
        results_l1_slsqp = results['results_l1_slsqp']
    if get_l1_cvxopt_results:
        results_l1_cvxopt_cp = results['results_l1_cvxopt_cp']
    #### Format summaries
    # Short summary
    print_str = '\n\n=========== Short Error Summary ============'
    print_str += '\n\n The maximum likelihood fit RMS error = %.4f'%RMSE_ML
    if get_l1_slsqp_results:
        RMSE_l1_slsqp = get_RMSE(results_l1_slsqp, true_params)
        print_str += '\n The l1_slsqp fit RMS error = %.4f'%RMSE_l1_slsqp
    if get_l1_cvxopt_results:
        RMSE_l1_cvxopt_cp = get_RMSE(results_l1_cvxopt_cp, true_params)
        print_str += '\n The l1_cvxopt_cp fit RMS error = %.4f'%RMSE_l1_cvxopt_cp
    # Parameters
    print_str += '\n\n\n============== Parameters ================='
    print_str += "\n\nTrue parameters: \n%s"%true_params
    # Full summary
    if print_summaries:
        print_str += '\n' + results_ML.summary().as_text()
        if get_l1_slsqp_results:
            print_str += '\n' + results_l1_slsqp.summary().as_text()
        if get_l1_cvxopt_results:
            print_str += '\n' + results_l1_cvxopt_cp.summary().as_text()
    else:
        print_str += '\n\nThe maximum likelihood params are \n%s'%results_ML.params
        if get_l1_slsqp_results:
            print_str += '\n\nThe l1_slsqp params are \n%s'%results_l1_slsqp.params
        if get_l1_cvxopt_results:
            print_str += '\n\nThe l1_cvxopt_cp params are \n%s'%\
                    results_l1_cvxopt_cp.params
    # Return
    return print_str
def save_andor_load_arrays(
        endog, exog, true_params, save_arrays, load_old_arrays):
    """Optionally persist and/or restore the three demo arrays.

    When ``save_arrays`` is set, write endog/exog/true_params to fixed
    .npy files in the working directory; when ``load_old_arrays`` is set,
    read them back (overriding the passed-in values). Returns the
    (endog, exog, true_params) triple either way.
    """
    if save_arrays:
        for fname, arr in (('endog.npy', endog),
                           ('exog.npy', exog),
                           ('true_params.npy', true_params)):
            sp.save(fname, arr)
    if load_old_arrays:
        endog = sp.load('endog.npy')
        exog = sp.load('exog.npy')
        true_params = sp.load('true_params.npy')
    return endog, exog, true_params
def get_RMSE(results, true_params):
    """
    Gets the (normalized) root mean square error.

    Parameters
    ----------
    results : fit results instance
        must expose ``params``; they are reshaped to ``true_params.shape``
    true_params : ndarray
        parameters used to simulate the data

    Returns
    -------
    float
        ||params - true_params|| / ||true_params|| (Frobenius norms)

    Notes
    -----
    Uses ``numpy`` directly: the ``scipy.sqrt`` alias this relied on was a
    re-exported NumPy function that has been removed from SciPy.
    """
    diff = results.params.reshape(true_params.shape) - true_params
    raw_RMSE = np.sqrt((diff ** 2).sum())
    param_norm = np.sqrt((true_params ** 2).sum())
    return raw_RMSE / param_norm
def get_logit_endog(true_params, exog, noise_level):
    """
    Gets an endogenous response that is consistent with the true_params,
    perturbed by noise at noise_level.

    Parameters
    ----------
    true_params : ndarray, shape (k, J-1)
        coefficients for each non-reference target
    exog : ndarray, shape (N, k)
        design matrix
    noise_level : float
        scale of the Gaussian noise added to the linear predictor

    Returns
    -------
    endog : ndarray, shape (N,)
        simulated class labels in {0, ..., J-1}

    Notes
    -----
    Fixes of the original version: the noise array was computed but never
    applied (contradicting this docstring); it is now added to the linear
    predictor.  Uses ``numpy`` instead of the removed ``scipy`` aliases and
    ``range`` instead of the Python-2-only ``xrange``.
    """
    N = exog.shape[0]
    ### Create the probability of entering the different classes,
    ### given exog and true_params
    Xdotparams = np.dot(exog, true_params)
    noise = noise_level * np.random.randn(*Xdotparams.shape)
    # reference class gets exp(0) = 1; noise perturbs the linear predictor
    eXB = np.column_stack((np.ones(len(Xdotparams)), np.exp(Xdotparams + noise)))
    class_probabilities = eXB / eXB.sum(1)[:, None]
    ### Create the endog by inverse-cdf sampling per observation
    cdf = class_probabilities.cumsum(axis=1)
    endog = np.zeros(N)
    for i in range(N):
        endog[i] = np.searchsorted(cdf[i, :], np.random.rand())
    return endog
def get_probit_endog(true_params, exog, noise_level):
    """
    Gets an endogenous response that is consistent with the true_params,
    perturbed by noise at noise_level.

    Parameters
    ----------
    true_params : ndarray, shape (k, J-1)
        coefficients
    exog : ndarray, shape (N, k)
        design matrix
    noise_level : float
        scale of the Gaussian noise added to the linear predictor

    Returns
    -------
    endog : ndarray, shape (N,)
        simulated class labels

    Notes
    -----
    P(y = 0) = Phi(-x'b), hence the negated linear predictor below.
    Fixes of the original version: the noise array was computed but never
    applied; the private ``stats.norm._cdf`` is replaced by the public
    ``stats.norm.cdf``; ``range`` replaces the Python-2-only ``xrange`` and
    ``numpy`` replaces the removed ``scipy`` aliases.
    """
    N = exog.shape[0]
    ### Create the probability of entering the different classes,
    ### given exog and true_params
    Xdotparams = np.dot(exog, true_params)
    noise = noise_level * np.random.randn(*Xdotparams.shape)
    ### Create the endog: cdf row i holds P(y_i = 0) (cumulatively, per target)
    cdf = stats.norm.cdf(-(Xdotparams + noise))
    endog = np.zeros(N)
    for i in range(N):
        endog[i] = np.searchsorted(cdf[i, :], np.random.rand())
    return endog
def get_exog(N, num_nonconst_covariates, cor_length):
    """
    Returns an exog array with correlations determined by cor_length.

    The covariance matrix of exog will have (asymptotically, as
    :math:'N\\to\\inf')
    .. math:: Cov[i,j] = \\exp(-|i-j| / cor_length)

    Higher cor_length makes the problem more ill-posed, and easier to screw
    up with noise.
    BEWARE:  With very long correlation lengths, you often get a singular KKT
    matrix (during the l1_cvxopt_cp fit)

    Parameters
    ----------
    N : int
        number of observations (rows)
    num_nonconst_covariates : int
        number of covariates (columns)
    cor_length : float
        correlation length; 0 means independent columns

    Notes
    -----
    Uses ``numpy`` instead of the removed ``scipy`` aliases and ``range``
    instead of the Python-2-only ``xrange``; the covariance matrix is built
    with broadcasting rather than a row loop.  ``float(cor_length)`` guards
    against integer division for integer arguments.
    """
    ## Create the noiseless exog
    uncorrelated_exog = np.random.randn(N, num_nonconst_covariates)
    if cor_length == 0:
        exog = uncorrelated_exog
    else:
        # Cov[i, j] = exp(-|i - j| / cor_length), vectorized over both axes
        j = np.arange(num_nonconst_covariates)
        cov_matrix = np.exp(-np.abs(j[:, None] - j) / float(cor_length))
        chol = linalg.cholesky(cov_matrix)  # cov_matrix = np.dot(chol.T, chol)
        exog = np.dot(uncorrelated_exog, chol)
    ## Return
    return exog
# script entry point; see the module-level ``docstr`` for usage examples
if __name__ == '__main__':
    main()
| bsd-3-clause | 6ca53a0bae4d22236bd45be554894b30 | 37.076056 | 83 | 0.620256 | 3.466786 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/datasets/grunfeld/data.py | 3 | 2696 | """Grunfeld (1950) Investment Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """This is the Grunfeld (1950) Investment Data.
The source for the data was the original 11-firm data set from Grunfeld's Ph.D.
thesis recreated by Kleiber and Zeileis (2008) "The Grunfeld Data at 50".
The data can be found here.
http://statmath.wu-wien.ac.at/~zeileis/grunfeld/
For a note on the many versions of the Grunfeld data circulating see:
http://www.stanford.edu/~clint/bench/grunfeld.htm
"""
DESCRSHORT = """Grunfeld (1950) Investment Data for 11 U.S. Firms."""
DESCRLONG = DESCRSHORT
NOTE = """Number of observations - 220 (20 years for 11 firms)
Number of variables - 5
Variables name definitions::
invest - Gross investment in 1947 dollars
value - Market value as of Dec. 31 in 1947 dollars
capital - Stock of plant and equipment in 1947 dollars
firm - General Motors, US Steel, General Electric, Chrysler,
Atlantic Refining, IBM, Union Oil, Westinghouse, Goodyear,
Diamond Match, American Steel
year - 1935 - 1954
Note that raw_data has firm expanded to dummy variables, since it is a
string categorical variable.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.tools import categorical
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
    """
    Loads the Grunfeld data and returns a Dataset class.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.

    Notes
    -----
    raw_data has the firm variable expanded to dummy variables for each
    firm (ie., there is no reference dummy)
    """
    recarr = _get_data()
    dataset = du.process_recarray(recarr, endog_idx=0, stack=False)
    # expand the string-categorical 'firm' column into dummies
    dataset.raw_data = categorical(recarr, col='firm', drop=True)
    return dataset
def load_pandas():
    """
    Loads the Grunfeld data and returns a Dataset class.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.

    Notes
    -----
    raw_data has the firm variable expanded to dummy variables for each
    firm (ie., there is no reference dummy)
    """
    from pandas import DataFrame
    recarr = _get_data()
    # expand the string-categorical 'firm' column into dummies
    dummied = categorical(recarr, col='firm', drop=True)
    dataset = du.process_recarray_pandas(recarr, endog_idx=0)
    dataset.raw_data = DataFrame(dummied)
    return dataset
def _get_data():
    """Read the raw grunfeld.csv (shipped next to this module) into a recarray.

    The file handle is now closed deterministically via a context manager;
    the original left it open.
    """
    filepath = dirname(abspath(__file__))
    with open(filepath + '/grunfeld.csv', 'rb') as f:
        data = recfromtxt(f, delimiter=",",
                          names=True, dtype="f8,f8,f8,a17,f8")
    return data
| bsd-3-clause | 009ff21023652e3644e1d80681cc34e4 | 28.955556 | 79 | 0.671736 | 3.378446 | false | false | false | false |
yarikoptic/pystatsmodels | examples/example_rlm.py | 2 | 3080 | """
Robust Linear Models
Notes
-----
The syntax for the arguments will be shortened to accept string arguments
in the future.
"""
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
#Estimating RLM
#--------------
# Load data: the stack-loss plant data; a constant column is appended
data = sm.datasets.stackloss.load()
data.exog = sm.add_constant(data.exog)
# Huber's T norm with the (default) median absolute deviation scaling
huber_t = sm.RLM(data.endog, data.exog, M=sm.robust.norms.HuberT())
hub_results = huber_t.fit()
print hub_results.params
print hub_results.bse
varnames = ['var_%d' % i for i in range(len(hub_results.params))]
print hub_results.summary(yname='y', xname=varnames)
# Huber's T norm with 'H2' covariance matrix
hub_results2 = huber_t.fit(cov="H2")
print hub_results2.params
print hub_results2.bse
# Andrew's Wave norm with Huber's Proposal 2 scaling and 'H3' covariance matrix
andrew_mod = sm.RLM(data.endog, data.exog, M=sm.robust.norms.AndrewWave())
andrew_results = andrew_mod.fit(scale_est=sm.robust.scale.HuberScale(),
        cov='H3')
print andrew_results.params
# See ``help(sm.RLM.fit)`` for more options and ``module sm.robust.scale`` for
# scale options
#Comparing OLS and RLM
#---------------------
#Artificial data
#^^^^^^^^^^^^^^^
nsample = 50
x1 = np.linspace(0, 20, nsample)
# design: linear term, quadratic term, constant
X = np.c_[x1, (x1 - 5)**2, np.ones(nsample)]
sig = 0.3 # smaller error variance makes OLS<->RLM contrast bigger
beta = [0.5, -0.0, 5.]
y_true2 = np.dot(X, beta)
y2 = y_true2 + sig * 1. * np.random.normal(size=nsample)
y2[[39, 41, 43, 45, 48]] -= 5 # add some outliers (10% of nsample)
#Example: quadratic function with linear truth
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Note that the quadratic term in OLS regression will capture outlier effects.
res = sm.OLS(y2, X).fit()
print res.params
print res.bse
print res.predict
# Estimate RLM: the robust fit should be much less affected by the outliers
resrlm = sm.RLM(y2, X).fit()
print resrlm.params
print resrlm.bse
# Draw a plot to compare OLS estimates to the robust estimates
plt.figure();
plt.plot(x1, y2, 'o', x1, y_true2, 'b-');
prstd, iv_l, iv_u = wls_prediction_std(res);
plt.plot(x1, res.fittedvalues, 'r-');
plt.plot(x1, iv_u, 'r--');
plt.plot(x1, iv_l, 'r--');
plt.plot(x1, resrlm.fittedvalues, 'g.-');
#@savefig rlm_ols_0.png
plt.title('blue: true, red: OLS, green: RLM');
#Example: linear function with linear truth
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Fit a new OLS model using only the linear term and the constant
X2 = X[:, [0, 2]]
res2 = sm.OLS(y2, X2).fit()
print res2.params
print res2.bse
# Estimate RLM on the reduced design
resrlm2 = sm.RLM(y2, X2).fit()
print resrlm2.params
print resrlm2.bse
# Draw a plot to compare OLS estimates to the robust estimates
prstd, iv_l, iv_u = wls_prediction_std(res2)
plt.figure();
plt.plot(x1, y2, 'o', x1, y_true2, 'b-');
plt.plot(x1, res2.fittedvalues, 'r-');
plt.plot(x1, iv_u, 'r--');
plt.plot(x1, iv_l, 'r--');
plt.plot(x1, resrlm2.fittedvalues, 'g.-');
#@savefig rlm_ols_1.png
plt.title('blue: true, red: OLS, green: RLM');
| bsd-3-clause | 7bc1c78c2ed8830be8b1ccc8aca647db | 28.615385 | 79 | 0.661688 | 2.652885 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/sandbox/survival2.py | 35 | 17924 | #Kaplan-Meier Estimator
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
from scipy import stats
from statsmodels.iolib.table import SimpleTable
class KaplanMeier(object):
"""
KaplanMeier(...)
KaplanMeier(data, endog, exog=None, censoring=None)
Create an object of class KaplanMeier for estimating
Kaplan-Meier survival curves.
Parameters
----------
data: array_like
An array, with observations in each row, and
variables in the columns
endog: index (starting at zero) of the column
containing the endogenous variable (time)
exog: index of the column containing the exogenous
variable (must be categorical). If exog = None, this
is equivalent to a single survival curve
censoring: index of the column containing an indicator
of whether an observation is an event, or a censored
observation, with 0 for censored, and 1 for an event
Attributes
-----------
censorings: List of censorings associated with each unique
time, at each value of exog
events: List of the number of events at each unique time
for each value of exog
results: List of arrays containing estimates of the value
value of the survival function and its standard error
at each unique time, for each value of exog
ts: List of unique times for each value of exog
Methods
-------
fit: Calculate the Kaplan-Meier estimates of the survival
function and its standard error at each time, for each
value of exog
plot: Plot the survival curves using matplotlib.pyplot
summary: Display the results of fit in a table. Gives results
for all (including censored) times
test_diff: Test for difference between survival curves
Examples
--------
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from statsmodels.sandbox.survival2 import KaplanMeier
>>> dta = sm.datasets.strikes.load()
>>> dta = dta.values()[-1]
>>> dta[range(5),:]
array([[ 7.00000000e+00, 1.13800000e-02],
[ 9.00000000e+00, 1.13800000e-02],
[ 1.30000000e+01, 1.13800000e-02],
[ 1.40000000e+01, 1.13800000e-02],
[ 2.60000000e+01, 1.13800000e-02]])
>>> km = KaplanMeier(dta,0)
>>> km.fit()
>>> km.plot()
Doing
>>> km.summary()
will display a table of the estimated survival and standard errors
for each time. The first few lines are
Kaplan-Meier Curve
=====================================
Time Survival Std. Err
-------------------------------------
1.0 0.983870967742 0.0159984306572
2.0 0.91935483871 0.0345807888235
3.0 0.854838709677 0.0447374942184
4.0 0.838709677419 0.0467104592871
5.0 0.822580645161 0.0485169952543
Doing
>>> plt.show()
will plot the survival curve
Mutliple survival curves:
>>> km2 = KaplanMeier(dta,0,exog=1)
>>> km2.fit()
km2 will estimate a survival curve for each value of industrial
production, the column of dta with index one (1).
With censoring:
>>> censoring = np.ones_like(dta[:,0])
>>> censoring[dta[:,0] > 80] = 0
>>> dta = np.c_[dta,censoring]
>>> dta[range(5),:]
array([[ 7.00000000e+00, 1.13800000e-02, 1.00000000e+00],
[ 9.00000000e+00, 1.13800000e-02, 1.00000000e+00],
[ 1.30000000e+01, 1.13800000e-02, 1.00000000e+00],
[ 1.40000000e+01, 1.13800000e-02, 1.00000000e+00],
[ 2.60000000e+01, 1.13800000e-02, 1.00000000e+00]])
>>> km3 = KaplanMeier(dta,0,exog=1,censoring=2)
>>> km3.fit()
Test for difference of survival curves
>>> log_rank = km3.test_diff([0.0645,-0.03957])
The zeroth element of log_rank is the chi-square test statistic
for the difference between the survival curves for exog = 0.0645
and exog = -0.03957, the index one element is the degrees of freedom for
the test, and the index two element is the p-value for the test
Groups with nan names
>>> groups = np.ones_like(dta[:,1])
>>> groups = groups.astype('S4')
>>> groups[dta[:,1] > 0] = 'high'
>>> groups[dta[:,1] <= 0] = 'low'
>>> dta = dta.astype('S4')
>>> dta[:,1] = groups
>>> dta[range(5),:]
array([['7.0', 'high', '1.0'],
['9.0', 'high', '1.0'],
['13.0', 'high', '1.0'],
['14.0', 'high', '1.0'],
['26.0', 'high', '1.0']],
dtype='|S4')
>>> km4 = KaplanMeier(dta,0,exog=1,censoring=2)
>>> km4.fit()
"""
    def __init__(self, data, endog, exog=None, censoring=None):
        """Select the relevant columns from ``data`` and drop rows with nans.

        After construction, self.endog / self.exog / self.censoring hold the
        *column positions within self.data* (0, 1, 2 as applicable), not the
        caller's original column indices.
        """
        self.exog = exog
        self.censoring = censoring
        cols = [endog]
        self.endog = 0
        if exog != None:
            cols.append(exog)
            self.exog = 1
        if censoring != None:
            cols.append(censoring)
            if exog != None:
                self.censoring = 2
            else:
                self.censoring = 1
        # keep only the columns of interest, in (time, exog, censoring) order
        data = data[:,cols]
        if data.dtype == float or data.dtype == int:
            # numeric array: drop any row containing a nan in one shot
            self.data = data[~np.isnan(data).any(1)]
        else:
            # string/object array (e.g. group labels): nan-filter the time and
            # censoring columns separately, keeping the three vectors aligned.
            # The exog column is not checked for nans (it may be non-numeric).
            t = (data[:,self.endog]).astype(float)
            if exog != None:
                evec = data[:,self.exog]
                evec = evec[~np.isnan(t)]
            if censoring != None:
                cvec = (data[:,self.censoring]).astype(float)
                cvec = cvec[~np.isnan(t)]
            t = t[~np.isnan(t)]
            if censoring != None:
                # second pass: drop rows whose censoring indicator is nan
                t = t[~np.isnan(cvec)]
                if exog != None:
                    evec = evec[~np.isnan(cvec)]
                cvec = cvec[~np.isnan(cvec)]
            cols = [t]
            if exog != None:
                cols.append(evec)
            if censoring != None:
                cols.append(cvec)
            # NOTE(review): np.array(cols) on mixed numeric/string vectors
            # yields a string dtype array; downstream code re-casts columns
            # to float as needed -- verify for non-numeric exog labels.
            data = (np.array(cols)).transpose()
            self.data = data
def fit(self):
"""
Calculate the Kaplan-Meier estimator of the survival function
"""
self.results = []
self.ts = []
self.censorings = []
self.event = []
if self.exog == None:
self.fitting_proc(self.data)
else:
groups = np.unique(self.data[:,self.exog])
self.groups = groups
for g in groups:
group = self.data[self.data[:,self.exog] == g]
self.fitting_proc(group)
def plot(self):
"""
Plot the estimated survival curves. After using this method
do
plt.show()
to display the plot
"""
plt.figure()
if self.exog == None:
self.plotting_proc(0)
else:
for g in range(len(self.groups)):
self.plotting_proc(g)
plt.ylim(ymax=1.05)
plt.ylabel('Survival')
plt.xlabel('Time')
def summary(self):
"""
Print a set of tables containing the estimates of the survival
function, and its standard errors
"""
if self.exog == None:
self.summary_proc(0)
else:
for g in range(len(self.groups)):
self.summary_proc(g)
    def fitting_proc(self, group):
        """
        For internal use.

        Compute the Kaplan-Meier product-limit estimate and Greenwood
        standard errors for one group of observations, appending to
        self.results, self.ts, self.event (and self.censorings when a
        censoring indicator is present).
        """
        # event times cast to ints so they can serve as bincount bins
        t = ((group[:,self.endog]).astype(float)).astype(int)
        if self.censoring == None:
            events = np.bincount(t)
            t = np.unique(t)
            # NOTE(review): np.bincount returns a 1-d array, yet it is indexed
            # here with two indices ([:, list]); modern numpy raises
            # IndexError on this.  Presumably events[list(t)] was intended --
            # verify against the numpy version this code targeted.
            events = events[:,list(t)]
            events = events.astype(float)
            eventsSum = np.cumsum(events)
            # prepend 0 so eventsSum[:-1] is "events strictly before time i"
            eventsSum = np.r_[0,eventsSum]
            n = len(group) - eventsSum[:-1]
        else:
            censoring = ((group[:,self.censoring]).astype(float)).astype(int)
            # flip 0/1: 1 marks a censored observation below
            reverseCensoring = -1*(censoring - 1)
            # weighted bincounts: events and censorings per time point
            events = np.bincount(t,censoring)
            censored = np.bincount(t,reverseCensoring)
            t = np.unique(t)
            # NOTE(review): same suspicious 2-d indexing of 1-d bincount
            # results as in the uncensored branch above
            censored = censored[:,list(t)]
            censored = censored.astype(float)
            censoredSum = np.cumsum(censored)
            censoredSum = np.r_[0,censoredSum]
            events = events[:,list(t)]
            events = events.astype(float)
            eventsSum = np.cumsum(events)
            eventsSum = np.r_[0,eventsSum]
            # risk set: subjects not yet failed and not yet censored
            n = len(group) - eventsSum[:-1] - censoredSum[:-1]
            (self.censorings).append(censored)
        # product-limit estimator S(t) = prod (1 - d_i / n_i)
        survival = np.cumprod(1-events/n)
        # Greenwood's formula for the variance of S(t)
        var = ((survival*survival) *
               np.cumsum(events/(n*(n-events))))
        se = np.sqrt(var)
        (self.results).append(np.array([survival,se]))
        (self.ts).append(t)
        (self.event).append(events)
    def plotting_proc(self, g):
        """
        For internal use.

        Draw the step-function survival curve for group index ``g``; censored
        times are marked with short vertical ticks on the curve.
        """
        survival = self.results[g][0]
        t = self.ts[g]
        e = (self.event)[g]
        if self.censoring != None:
            c = self.censorings[g]
            # tick marks at times where at least one observation was censored
            csurvival = survival[c != 0]
            ct = t[c != 0]
            if len(ct) != 0:
                plt.vlines(ct,csurvival+0.02,csurvival-0.02)
        # duplicate each event time/value to build the step function
        x = np.repeat(t[e != 0], 2)
        y = np.repeat(survival[e != 0], 2)
        # the curve starts at S(0) = 1; extend to the last observed time when
        # the final time point is censored rather than an event
        if self.ts[g][-1] in t[e != 0]:
            x = np.r_[0,x]
            y = np.r_[1,1,y[:-1]]
        else:
            x = np.r_[0,x,self.ts[g][-1]]
            y = np.r_[1,1,y]
        plt.plot(x,y)
    def summary_proc(self, g):
        """
        For internal use.

        Print a SimpleTable of (time, survival, std. err) for group index
        ``g``, titled with the exog value when grouping is active.
        """
        if self.exog != None:
            myTitle = ('exog = ' + str(self.groups[g]) + '\n')
        else:
            myTitle = "Kaplan-Meier Curve"
        # rows: one per unique time; columns: survival estimate, std. error
        table = np.transpose(self.results[g])
        table = np.c_[np.transpose(self.ts[g]),table]
        table = SimpleTable(table, headers=['Time','Survival','Std. Err'],
                            title = myTitle)
        print(table)
    def test_diff(self, groups, rho=None, weight=None):
        """
        test_diff(groups, rho=0)

        Test for difference between survival curves

        Parameters
        ----------
        groups: A list of the values for exog to test for difference.
        tests the null hypothesis that the survival curves for all
        values of exog in groups are equal

        rho: compute the test statistic with weight S(t)^rho, where
        S(t) is the pooled estimate for the Kaplan-Meier survival function.
        If rho = 0, this is the logrank test, if rho = 1, this is the
        Peto and Peto modification to the Gehan-Wilcoxon test.

        weight: User specified function that accepts as its sole argument
        an array of times, and returns an array of weights for each time
        to be used in the test

        Returns
        -------
        An array whose zeroth element is the chi-square test statistic for
        the global null hypothesis, that all survival curves are equal,
        the index one element is degrees of freedom for the test, and the
        index two element is the p-value for the test.

        Examples
        --------

        >>> import statsmodels.api as sm
        >>> import matplotlib.pyplot as plt
        >>> import numpy as np
        >>> from statsmodels.sandbox.survival2 import KaplanMeier
        >>> dta = sm.datasets.strikes.load()
        >>> dta = dta.values()[-1]
        >>> censoring = np.ones_like(dta[:,0])
        >>> censoring[dta[:,0] > 80] = 0
        >>> dta = np.c_[dta,censoring]
        >>> km = KaplanMeier(dta,0,exog=1,censoring=2)
        >>> km.fit()

        Test for difference of survival curves

        >>> log_rank = km.test_diff([0.0645,-0.03957])

        The zeroth element of log_rank is the chi-square test statistic
        for the difference between the survival curves using the log rank test
        for exog = 0.0645 and exog = -0.03957, the index one element
        is the degrees of freedom for the test, and the index two element
        is the p-value for the test

        >>> wilcoxon = km.test_diff([0.0645,-0.03957], rho=1)

        wilcoxon is the equivalent information as log_rank, but for the
        Peto and Peto modification to the Gehan-Wilcoxon test.

        User specified weight functions

        >>> log_rank = km.test_diff([0.0645,-0.03957], weight=np.ones_like)

        This is equivalent to the log rank test

        More than two groups

        >>> log_rank = km.test_diff([0.0645,-0.03957,0.01138])

        The test can be performed with arbitrarily many groups, so long as
        they are all in the column exog
        """
        groups = np.asarray(groups)
        if self.exog == None:
            raise ValueError("Need an exogenous variable for logrank test")

        elif (np.in1d(groups,self.groups)).all():
            # restrict the data to the requested groups only
            data = self.data[np.in1d(self.data[:,self.exog],groups)]
            t = ((data[:,self.endog]).astype(float)).astype(int)
            tind = np.unique(t)
            # per-group accumulators: risk sets, pooled risk sets, pooled
            # events, and weighted observed-minus-expected sums
            NK = []
            N = []
            D = []
            Z = []
            if rho != None and weight != None:
                raise ValueError("Must use either rho or weights, not both")

            elif rho != None:
                # weights from the pooled Kaplan-Meier estimate, lagged by one
                s = KaplanMeier(data,self.endog,censoring=self.censoring)
                s.fit()
                s = (s.results[0][0]) ** (rho)
                s = np.r_[1,s[:-1]]

            elif weight != None:
                s = weight(tind)

            else:
                # unweighted: the ordinary logrank test
                s = np.ones_like(tind)

            if self.censoring == None:
                for g in groups:
                    # dk: events in group g per time, d: pooled events per time
                    dk = np.bincount((t[data[:,self.exog] == g]))
                    d = np.bincount(t)
                    # pad the group counts so both bincounts cover max(tind)
                    if np.max(tind) != len(dk):
                        dif = np.max(tind) - len(dk) + 1
                        dk = np.r_[dk,[0]*dif]
                    # NOTE(review): 2-d style indexing of 1-d bincount output,
                    # as in fitting_proc; modern numpy raises IndexError here
                    dk = dk[:,list(tind)]
                    d = d[:,list(tind)]
                    dk = dk.astype(float)
                    d = d.astype(float)
                    dkSum = np.cumsum(dk)
                    dSum = np.cumsum(d)
                    dkSum = np.r_[0,dkSum]
                    dSum = np.r_[0,dSum]
                    # risk sets: subjects still at risk just before each time
                    nk = len(data[data[:,self.exog] == g]) - dkSum[:-1]
                    n = len(data) - dSum[:-1]
                    # drop times with risk set <= 1 (zero variance there)
                    d = d[n>1]
                    dk = dk[n>1]
                    nk = nk[n>1]
                    n = n[n>1]
                    # NOTE(review): s is filtered with the *already filtered*
                    # n, and shrinks cumulatively across the group loop --
                    # verify this is intended rather than masking with the
                    # original-length mask once outside the loop
                    s = s[n>1]
                    # expected events in group g under the null
                    ek = (nk * d)/(n)
                    Z.append(np.sum(s * (dk - ek)))
                    NK.append(nk)
                    N.append(n)
                    D.append(d)

            else:
                for g in groups:
                    censoring = ((data[:,self.censoring]).astype(float)).astype(int)
                    # flip 0/1: 1 marks a censored observation below
                    reverseCensoring = -1*(censoring - 1)
                    censored = np.bincount(t,reverseCensoring)
                    ck = np.bincount((t[data[:,self.exog] == g]),
                                     reverseCensoring[data[:,self.exog] == g])
                    dk = np.bincount((t[data[:,self.exog] == g]),
                                     censoring[data[:,self.exog] == g])
                    d = np.bincount(t,censoring)
                    if np.max(tind) != len(dk):
                        dif = np.max(tind) - len(dk) + 1
                        dk = np.r_[dk,[0]*dif]
                        ck = np.r_[ck,[0]*dif]
                    # NOTE(review): same suspicious 2-d indexing of 1-d
                    # bincount output as in the uncensored branch
                    dk = dk[:,list(tind)]
                    ck = ck[:,list(tind)]
                    d = d[:,list(tind)]
                    dk = dk.astype(float)
                    d = d.astype(float)
                    ck = ck.astype(float)
                    dkSum = np.cumsum(dk)
                    dSum = np.cumsum(d)
                    ck = np.cumsum(ck)
                    ck = np.r_[0,ck]
                    dkSum = np.r_[0,dkSum]
                    dSum = np.r_[0,dSum]
                    censored = censored[:,list(tind)]
                    censored = censored.astype(float)
                    censoredSum = np.cumsum(censored)
                    censoredSum = np.r_[0,censoredSum]
                    # risk sets net of both prior events and prior censorings
                    nk = (len(data[data[:,self.exog] == g]) - dkSum[:-1]
                          - ck[:-1])
                    n = len(data) - dSum[:-1] - censoredSum[:-1]
                    d = d[n>1]
                    dk = dk[n>1]
                    nk = nk[n>1]
                    n = n[n>1]
                    # NOTE(review): same cumulative re-filtering of s as above
                    s = s[n>1]
                    ek = (nk * d)/(n)
                    Z.append(np.sum(s * (dk - ek)))
                    NK.append(nk)
                    N.append(n)
                    D.append(d)

            Z = np.array(Z)
            N = np.array(N)
            D = np.array(D)
            NK = np.array(NK)
            # covariance of the weighted observed-minus-expected vector;
            # off-diagonal entries first, then the diagonal is overwritten
            sigma = -1 * np.dot((NK/N) * ((N - D)/(N - 1)) * D
                                * np.array([(s ** 2)]*len(D))
                                ,np.transpose(NK/N))
            np.fill_diagonal(sigma, np.diagonal(np.dot((NK/N)
                                                * ((N - D)/(N - 1)) * D
                                                * np.array([(s ** 2)]*len(D))
                                                ,np.transpose(1 - (NK/N)))))
            # quadratic form Z' pinv(sigma) Z ~ chi-square(len(groups) - 1)
            chisq = np.dot(np.transpose(Z),np.dot(la.pinv(sigma), Z))
            df = len(groups) - 1
            return np.array([chisq, df, stats.chi2.sf(chisq,df)])

        else:
            raise ValueError("groups must be in column exog")
| bsd-3-clause | 52ec49ecb1078ededdcf77e362f01971 | 34.91984 | 84 | 0.483765 | 3.637175 | false | true | false | false |
yarikoptic/pystatsmodels | statsmodels/sandbox/regression/penalized.py | 4 | 12034 | # -*- coding: utf-8 -*-
"""linear model with Theil prior probabilistic restrictions, generalized Ridge
Created on Tue Dec 20 00:10:10 2011
Author: Josef Perktold
License: BSD-3
open issues
* selection of smoothing factor, strength of prior, cross validation
* GLS, does this really work this way
* None of inherited results have been checked yet,
I'm not sure if any need to be adjusted or if only interpretation changes
One question is which results are based on likelihood (residuals) and which
are based on "posterior" as for example bse and cov_params
* helper functions to construct priors?
* increasing penalization for ordered regressors, e.g. polynomials
* compare with random/mixed effects/coefficient, like estimated priors
there is something fishy with the result instance, some things, e.g.
normalized_cov_params, don't look like they update correctly as we
search over lambda -> some stale state again ?
I added df_model to result class using the hatmatrix, but df_model is defined
in model instance not in result instance. -> not clear where refactoring should
occur. df_resid doesn't get updated correctly.
problem with definition of df_model, it has 1 subtracted for constant
"""
import numpy as np
import statsmodels.base.model as base
from statsmodels.regression.linear_model import OLS, GLS, RegressionResults
def atleast_2dcols(x):
    """Return `x` as an ndarray with at least two dimensions.

    A 1-d input is promoted to a single column, i.e. shape (n,) -> (n, 1).
    Anything else is passed through unchanged (after `np.asarray`).
    """
    arr = np.asarray(x)
    return arr[:, None] if arr.ndim == 1 else arr
class TheilGLS(GLS):
    '''GLS with probabilistic (stochastic) linear restrictions, Theil estimator

    Combines the sample information with an informative Gaussian prior on
    linear combinations of the parameters,

        r_matrix * beta = q_matrix + v,    v ~ N(0, sigma_prior)

    essentially Bayes estimation with an informative prior; generalized
    Ridge regression is the special case of an identity ``r_matrix``.

    note: I'm making up the GLS part, might work only for OLS

    Parameters
    ----------
    endog : array_like
        response variable
    exog : array_like
        design matrix of explanatory variables
    r_matrix : array_like
        restriction matrix of the prior, shape (n_restrictions, k_vars)
    q_matrix : array_like, optional
        prior mean for ``r_matrix * beta``; converted to a column array.
        NOTE(review): if this stays None, `fit` will fail in the dot
        product — callers are expected to provide it.
    sigma_prior : array_like or scalar, optional
        prior covariance; a scalar is expanded to a multiple of the
        identity.  If None, an identity matrix is used.
    sigma : array_like, optional
        error covariance, passed through to GLS
    '''

    def __init__(self, endog, exog, r_matrix, q_matrix=None, sigma_prior=None,
                 sigma=None):
        self.r_matrix = np.asarray(r_matrix)
        self.q_matrix = atleast_2dcols(q_matrix)
        if sigma_prior is None:
            # default: identity prior covariance; previously `None * np.eye`
            # raised a TypeError for the documented default
            sigma_prior = np.eye(self.r_matrix.shape[0])
        elif np.size(sigma_prior) == 1:
            sigma_prior = sigma_prior * np.eye(self.r_matrix.shape[0])  #no numerical shortcuts
        self.sigma_prior = sigma_prior
        self.sigma_prior_inv = np.linalg.pinv(sigma_prior)  #or inv
        # name the class explicitly: super(self.__class__, self) causes
        # infinite recursion as soon as this class is subclassed
        super(TheilGLS, self).__init__(endog, exog, sigma=sigma)

    def fit(self, lambd=1.):
        """Estimate the parameters with penalization strength `lambd`.

        `lambd` scales the prior precision: lambd=0 reproduces (G)LS and
        larger lambd means a stronger prior (it is not a variance).

        Returns
        -------
        TheilRegressionResults instance
        """
        #this does duplicate transformation, but I need resid not wresid
        res_gls = GLS(self.endog, self.exog, sigma=self.sigma).fit()
        self.res_gls = res_gls
        sigma2_e = res_gls.mse_resid

        r_matrix = self.r_matrix
        q_matrix = self.q_matrix
        sigma_prior_inv = self.sigma_prior_inv
        x = self.wexog
        y = self.wendog[:,None]
        #why are sigma2_e * lambd multiplied, not ratio?
        #larger lambd -> stronger prior (it's not the variance)
        xpx = np.dot(x.T, x) + \
              sigma2_e * lambd * np.dot(r_matrix.T, np.dot(sigma_prior_inv, r_matrix))
        xpy = np.dot(x.T, y) + \
              sigma2_e * lambd * np.dot(r_matrix.T, np.dot(sigma_prior_inv, q_matrix))
        #xpy = xpy[:,None]
        xpxi = np.linalg.pinv(xpx)
        params = np.dot(xpxi, xpy)    #or solve
        params = np.squeeze(params)
        self.normalized_cov_params = xpxi    #why attach it to self, i.e. model?

        lfit = TheilRegressionResults(self, params,
                       normalized_cov_params=xpxi)
        lfit.penalization_factor = lambd
        return lfit

    def fit_minic(self):
        """Search for the penalization factor `lambd` minimizing aicc.

        Returns the minimizing lambd found by a simplex search started at 1.
        """
        #this doesn't make sense, since number of parameters stays unchanged
        #need leave-one-out, gcv; or some penalization for weak priors
        def get_ic(lambd):
            #return self.fit(lambd).bic #+lambd #+ 1./lambd #added 1/lambd for checking
            #return self.fit(lambd).gcv()
            #return self.fit(lambd).cv()
            return self.fit(lambd).aicc()

        from scipy import optimize
        lambd = optimize.fmin(get_ic, 1.)
        return lambd
#TODO:
#I need the hatmatrix in the model if I want to do iterative fitting, e.g. GCV
#move to model or use it from a results instance inside the model,
# each call to fit returns results instance
class TheilRegressionResults(RegressionResults):
    '''Results class for TheilGLS.

    Adds hat-matrix based diagnostics (effective degrees of freedom,
    cross-validation and information criteria) to the inherited
    RegressionResults.  Inherited statistics have not been checked for the
    penalized case — see the module docstring.
    '''

    #cache
    def hatmatrix_diag(self):
        '''Diagonal of the hat (projection) matrix.

        diag(X xpxi X')

        where xpxi = (X'X + lambd * R' sigma_prior^{-1} R)^{-1} is the
        penalized normal-equations inverse stored on the model.

        Notes
        -----
        uses wexog, so this includes weights or sigma - check this case

        not clear whether I need to multiply by sigmahalf, i.e.

        (W^{-0.5} X) (X' W X)^{-1} (W^{-0.5} X)' or
        (W X) (X' W X)^{-1} (W X)'

        projection y_hat = H y or in terms of transformed variables (W^{-0.5} y)

        might be wrong for WLS and GLS case
        '''
        # NOTE(review): reads normalized_cov_params from the *model*, not
        # from this results instance — the result attribute looked stale
        # when searching over lambd (see module docstring)
        xpxi = self.model.normalized_cov_params
        #something fishy with self.normalized_cov_params in result, doesn't update
        #print self.model.wexog.shape, np.dot(xpxi, self.model.wexog.T).shape
        return (self.model.wexog * np.dot(xpxi, self.model.wexog.T).T).sum(1)

    def hatmatrix_trace(self):
        '''Trace of the hat matrix, tr(H) = effective model degrees of freedom.'''
        return self.hatmatrix_diag().sum()

    #this doesn't update df_resid
    @property #needs to be property or attribute (no call)
    def df_model(self):
        '''Effective model degrees of freedom, tr(H); overrides the count-based
        df_model of the linear model.'''
        return self.hatmatrix_trace()

    #Note: mse_resid uses df_resid not nobs-k_vars, which might differ if df_model, tr(H), is used
    #in paper for gcv ess/nobs is used instead of mse_resid

    def gcv(self):
        '''Generalized cross-validation criterion, mse / (1 - tr(H)/nobs)**2.'''
        return self.mse_resid / (1. - self.hatmatrix_trace() / self.nobs)**2

    def cv(self):
        '''Leave-one-out cross-validation (PRESS statistic divided by nobs).'''
        return ((self.resid / (1. - self.hatmatrix_diag()))**2).sum() / self.nobs

    def aicc(self):
        '''Corrected AIC using the effective degrees of freedom tr(H).'''
        aic = np.log(self.mse_resid) + 1
        aic += 2 * (1. + self.hatmatrix_trace()) / (self.nobs - self.hatmatrix_trace() -2)
        return aic
#contrast/restriction matrices, temporary location
def coef_restriction_meandiff(n_coeffs, n_vars=None, position=0):
    """Restriction matrix contrasting each coefficient with the group mean.

    Each row of the (n_coeffs, n_coeffs) contrast is ``e_i - 1/n_coeffs``,
    i.e. the deviation of coefficient i from the average of the block.
    If `n_vars` is given, the contrast is embedded at column `position`
    inside a zero matrix with `n_vars` columns.
    """
    contrast = np.eye(n_coeffs) - 1. / n_coeffs
    if n_vars is None:
        return contrast
    embedded = np.zeros((n_coeffs, n_vars))
    embedded[:, position:position + n_coeffs] = contrast
    return embedded
def coef_restriction_diffbase(n_coeffs, n_vars=None, position=0, base_idx=0):
    """Restriction matrix contrasting each coefficient with a base coefficient.

    Each of the n_coeffs - 1 rows is ``e_base - e_i`` for i != base_idx.
    If `n_vars` is given, the contrast is embedded at column `position`
    inside a zero matrix with `n_vars` columns.
    """
    reduced = -np.eye(n_coeffs)  #make all rows, drop the base row later
    reduced[:, base_idx] = 1
    # list() is required: `del` on a bare range object fails on Python 3
    keep = list(range(n_coeffs))
    del keep[base_idx]
    reduced = np.take(reduced, keep, axis=0)

    if n_vars is None:
        return reduced
    else:
        full = np.zeros((n_coeffs-1, n_vars))
        full[:, position:position+n_coeffs] = reduced
        return full
def next_odd(d):
    """Return `d` if it is odd, otherwise the next larger (odd) integer."""
    return d if d % 2 else d + 1
def coef_restriction_diffseq(n_coeffs, degree=1, n_vars=None, position=0, base_idx=0):
    """Restriction matrix penalizing sequential differences of coefficients.

    For ``degree == 1`` each row is a first difference (-1, 1) of adjacent
    coefficients; for higher degrees central difference weights are used.
    If `n_vars` is given, the restriction is embedded at column `position`
    inside a zero matrix with `n_vars` columns.

    Notes
    -----
    `base_idx` is currently unused.
    For degree > 1 this relies on ``scipy.misc.central_diff_weights``,
    which has been removed from recent SciPy versions — TODO confirm an
    alternative before relying on higher degrees.
    """
    #check boundaries, returns "valid" ?
    if degree == 1:
        diff_coeffs = [-1, 1]
        n_points = 2
    elif degree > 1:
        from scipy import misc
        n_points = next_odd(degree + 1)  #next odd integer after degree+1
        diff_coeffs = misc.central_diff_weights(n_points, ndiv=degree)
    else:
        raise ValueError('degree must be a positive integer')

    dff = np.concatenate((diff_coeffs, np.zeros(n_coeffs - len(diff_coeffs))))
    from scipy import linalg
    reduced = linalg.toeplitz(dff, np.zeros(n_coeffs - len(diff_coeffs) + 1)).T
    #reduced = np.kron(np.eye(n_coeffs-n_points), diff_coeffs)

    if n_vars is None:
        return reduced
    else:
        # use the actual number of restriction rows; the previous
        # hard-coded n_coeffs - 1 was only correct for degree == 1 and
        # raised a broadcast error for higher degrees
        full = np.zeros((reduced.shape[0], n_vars))
        full[:, position:position+n_coeffs] = reduced
        return full
##
## R = np.c_[np.zeros((n_groups, k_vars-1)), np.eye(n_groups)]
## r = np.zeros(n_groups)
## R = np.c_[np.zeros((n_groups-1, k_vars)),
## np.eye(n_groups-1)-1./n_groups * np.ones((n_groups-1, n_groups-1))]
# Demo script (Python 2 print syntax): Theil/Ridge estimation examples.
if __name__ == '__main__':
    import numpy as np
    import statsmodels.api as sm
    examples = [2]
    # NOTE(review): the second seed call overrides the first
    np.random.seed(765367)
    np.random.seed(97653679)
    # example 1: cubic polynomial with a weak prior on the last coefficient
    nsample = 100
    x = np.linspace(0,10, nsample)
    X = sm.add_constant(np.column_stack((x, x**2, (x/5.)**3)), prepend=True)
    beta = np.array([10, 1, 0.1, 0.5])
    y = np.dot(X, beta) + np.random.normal(size=nsample)
    res_ols = sm.OLS(y, X).fit()
    R = [[0, 0, 0 , 1]]
    r = [0] #, 0, 0 , 0]
    lambd = 1 #1e-4
    mod = TheilGLS(y, X, r_matrix=R, q_matrix=r, sigma_prior=lambd)
    res = mod.fit()
    print res_ols.params
    print res.params
    #example 2
    #I need more flexible penalization in example, the penalization should
    #get stronger for higher order terms
    #np.random.seed(1)
    nobs = 200
    k_vars = 10
    k_true = 6
    sig_e = 0.25 #0.5
    x = np.linspace(-2,2, nobs)
    #X = sm.add_constant(np.column_stack((x, x**2, (x/5.)**3)), prepend=True)
    X = (x/x.max())[:,None]**np.arange(k_vars)
    beta = np.zeros(k_vars)
    beta[:k_true] = np.array([1, -2, 0.5, 1.5, -0.1, 0.1])[:k_true]
    y_true = np.dot(X, beta)
    y = y_true + sig_e * np.random.normal(size=nobs)
    res_ols = sm.OLS(y, X).fit()
    #R = np.c_[np.zeros((k_vars-4, 4)), np.eye(k_vars-4)] # has two large true coefficients penalized
    not_penalized = 4
    R = np.c_[np.zeros((k_vars-not_penalized, not_penalized)), np.eye(k_vars-not_penalized)]
    #increasingly strong penalization
    # NOTE(review): this second assignment replaces the eye-based R above
    R = np.c_[np.zeros((k_vars-not_penalized, not_penalized)), np.diag((1+2*np.arange(k_vars-not_penalized)))]
    r = np.zeros(k_vars-not_penalized)
## R = -coef_restriction_diffseq(6, 1, n_vars=10, position=4) #doesn't make sense for polynomial
## R = np.vstack((R, np.zeros(R.shape[1])))
## R[-1,-1] = 1
    r = np.zeros(R.shape[0])
    lambd = 2 #1e-4
    mod = TheilGLS(y, X, r_matrix=R, q_matrix=r, sigma_prior=lambd)
    res = mod.fit()
    print res_ols.params
    print res.params
    res_bic = mod.fit_minic() #this will just return zero
    res = mod.fit(res_bic)
    print res_bic
    # profile the fit over a grid of penalization factors
    for lambd in np.linspace(0, 80, 21):
        res_l = mod.fit(lambd)
        #print lambd, res_l.params[-2:], res_l.bic, res_l.bic + 1./lambd, res.df_model
        print lambd, res_l.params[-2:], res_l.bic, res.df_model, np.trace(res.normalized_cov_params)
    import matplotlib.pyplot as plt
    plt.figure()
    plt.plot(beta, 'k-o', label='true')
    plt.plot(res_ols.params, '-o', label='ols')
    plt.plot(res.params, '-o', label='theil')
    plt.legend()
    plt.title('Polynomial fitting: estimated coefficients')
    plt.figure()
    plt.plot(y, 'o')
    plt.plot(y_true, 'k-', label='true')
    plt.plot(res_ols.fittedvalues, '-', label='ols')
    plt.plot(res.fittedvalues, '-', label='theil')
    plt.legend()
    plt.title('Polynomial fitting: fitted values')
    #plt.show()
    if 3 in examples:
        #example 3
        # panel with random group intercepts, shrinking them towards equality
        nobs = 600
        nobs_i = 20
        n_groups = nobs // nobs_i
        k_vars = 3
        from statsmodels.sandbox.panel.random_panel import PanelSample
        dgp = PanelSample(nobs, k_vars, n_groups)
        dgp.group_means = 2 + np.random.randn(n_groups) #add random intercept
        print 'seed', dgp.seed
        y = dgp.generate_panel()
        X = np.column_stack((dgp.exog[:,1:],
                           dgp.groups[:,None] == np.arange(n_groups)))
        res_ols = sm.OLS(y, X).fit()
        R = np.c_[np.zeros((n_groups, k_vars-1)), np.eye(n_groups)]
        r = np.zeros(n_groups)
        R = np.c_[np.zeros((n_groups-1, k_vars)),
                  np.eye(n_groups-1)-1./n_groups * np.ones((n_groups-1, n_groups-1))]
        r = np.zeros(n_groups-1)
        R[:, k_vars-1] = -1
        lambd = 1 #1e-4
        mod = TheilGLS(y, X, r_matrix=R, q_matrix=r, sigma_prior=lambd)
        res = mod.fit()
        print res.params
        params_l = []
        for lambd in np.linspace(0, 20, 21):
            params_l.append(mod.fit(5.*lambd).params)
        params_l = np.array(params_l)
        plt.figure()
        plt.plot(params_l.T)
        plt.title('Panel Data with random intercept: shrinkage to being equal')
        plt.xlabel('parameter index')
        plt.figure()
        plt.plot(params_l[:,k_vars:])
        plt.title('Panel Data with random intercept: shrinkage to being equal')
        plt.xlabel('strength of prior')
        #plt.show()
| bsd-3-clause | b8ccc613ab41c7ed13b0aa17e0ac537b | 32.151515 | 110 | 0.609855 | 3.077749 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/miscmodels/nonlinls.py | 3 | 9316 | '''Non-linear least squares
Author: Josef Perktold based on scipy.optimize.curve_fit
'''
import numpy as np
from scipy import optimize
from statsmodels.base.model import Model
class Results(object):
    """Minimal results container (placeholder).

    Attributes are attached dynamically by ``NonlinearLS.fit``; most
    results from RegressionResults can be used here.
    """
    pass
##def getjaccov(retval, n):
## '''calculate something and raw covariance matrix from return of optimize.leastsq
##
## I cannot figure out how to recover the Jacobian, or whether it is even
## possible
##
## this is a partial copy of scipy.optimize.leastsq
## '''
## info = retval[-1]
## #n = len(x0) #nparams, where do I get this
## cov_x = None
## if info in [1,2,3,4]:
## from numpy.dual import inv
## from numpy.linalg import LinAlgError
## perm = np.take(np.eye(n), retval[1]['ipvt']-1,0)
## r = np.triu(np.transpose(retval[1]['fjac'])[:n,:])
## R = np.dot(r, perm)
## try:
## cov_x = inv(np.dot(np.transpose(R),R))
## except LinAlgError:
## print 'cov_x not available'
## pass
## return r, R, cov_x
##
##def _general_function(params, xdata, ydata, function):
## return function(xdata, *params) - ydata
##
##def _weighted_general_function(params, xdata, ydata, function, weights):
## return weights * (function(xdata, *params) - ydata)
##
class NonlinearLS(Model):  #or subclass a model
    r'''Base class for estimation of a non-linear model with least squares

    This class is supposed to be subclassed, and the subclass has to provide
    a method `_predict` that defines the non-linear function `f(params)` that
    is predicting the endogenous variable. The model is assumed to be

    :math: y = f(params) + error

    and the estimator minimizes the sum of squares of the estimated error.

    :math: min_params \sum (y - f(params))**2

    f has to return the prediction for each observation. Exogenous or
    explanatory variables should be accessed as attributes of the class
    instance, and can be given as arguments when the instance is created.

    Warning:
    Weights are not correctly handled yet in the results statistics,
    but included when estimating the parameters.

    similar to scipy.optimize.curve_fit
    API difference: params are array_like not split up, need n_params information

    includes now weights similar to curve_fit
    no general sigma yet (OLS and WLS, but no GLS)

    This is currently holding on to intermediate results that are not
    necessary but useful for testing.

    Fit returns an instance of RegressionResults; in contrast to the linear
    model, results in this case are based on a local approximation:
    y = f(X, params) is replaced by y = grad * params where grad is the
    Gradient or Jacobian with the shape (nobs, nparams). See for example
    Greene.

    Examples
    --------

    class Myfunc(NonlinearLS):

        def _predict(self, params):
            x = self.exog
            a, b, c = params
            return a*np.exp(-b*x) + c

    If we have data (y, x), we can create an instance and fit it with

    mymod = Myfunc(y, x)
    myres = mymod.fit(nparams=3)

    and use the non-linear regression results, for example

    myres.params
    myres.bse
    myres.tvalues
    '''

    #NOTE: This needs to call super for data checking
    def __init__(self, endog=None, exog=None, weights=None, sigma=None,
                 missing='none'):
        self.endog = endog
        self.exog = exog
        if sigma is not None:
            sigma = np.asarray(sigma)
            if sigma.ndim < 2:
                self.sigma = sigma
                self.weights = 1. / sigma
            else:
                raise ValueError('correlated errors are not handled yet')
        else:
            self.sigma = None
            # honor explicit weights; previously the `weights` argument
            # was accepted but silently ignored
            self.weights = weights

    def predict(self, exog, params=None):
        """Predicted values for `params`; `exog` is ignored, subclasses
        read their explanatory data from instance attributes."""
        #copied from GLS, Model has different signature
        return self._predict(params)

    def _predict(self, params):
        """Abstract prediction function f(params); implemented by subclass."""
        pass

    def start_value(self):
        """Optional starting values for the optimization; subclasses may
        override.  Returns None by default."""
        return None

    def geterrors(self, params, weights=None):
        """Return (weighted) residuals ``endog - f(params)``.

        If `weights` is None, the instance weights are used when available.
        """
        if weights is None:
            if self.weights is None:
                return self.endog - self._predict(params)
            else:
                weights = self.weights
        return weights * (self.endog - self._predict(params))

    def errorsumsquares(self, params):
        """Sum of squared (weighted) residuals at `params`."""
        return (self.geterrors(params)**2).sum()

    def fit(self, start_value=None, nparams=None, **kw):
        """Estimate parameters by non-linear least squares.

        Parameters
        ----------
        start_value : array_like, optional
            starting values; if None, ``self.start_value()`` is tried, and
            failing that ``0.1 * np.ones(nparams)`` is used
        nparams : int, optional
            number of parameters, only needed for default start values
        kw : extra keyword arguments passed to scipy.optimize.leastsq

        Returns
        -------
        RegressionResults instance based on the local linear (Jacobian)
        approximation at the solution.

        Raises
        ------
        ValueError
            if no start value information is available
        RuntimeError
            if the optimizer does not report success
        """
        if start_value is not None:
            p0 = start_value
        else:
            #nesting so that start_value is only calculated if it is needed
            p0 = self.start_value()
            if p0 is not None:
                pass
            elif nparams is not None:
                p0 = 0.1 * np.ones(nparams)
            else:
                # note the separating space: the original concatenation
                # produced "foroptimization"
                raise ValueError('need information about start values for '
                                 'optimization')

        func = self.geterrors
        res = optimize.leastsq(func, p0, full_output=1, **kw)
        (popt, pcov, infodict, errmsg, ier) = res
        if ier not in [1, 2, 3, 4]:
            msg = "Optimal parameters not found: " + errmsg
            raise RuntimeError(msg)

        err = infodict['fvec']

        ydata = self.endog
        if (len(ydata) > len(p0)) and pcov is not None:
            #this can use the returned errors instead of recalculating
            s_sq = (err**2).sum() / (len(ydata) - len(p0))
            pcov = pcov * s_sq
        else:
            pcov = None

        self.df_resid = len(ydata) - len(p0)
        self.df_model = len(p0)
        fitres = Results()
        fitres.params = popt
        fitres.pcov = pcov
        fitres.rawres = res
        self.wendog = self.endog  #add weights
        self.wexog = self.jac_predict(popt)
        pinv_wexog = np.linalg.pinv(self.wexog)
        self.normalized_cov_params = np.dot(pinv_wexog,
                                            np.transpose(pinv_wexog))

        #TODO: check effect of `weights` on result statistics
        #I think they are correctly included in cov_params
        #maybe not anymore, I'm not using pcov of leastsq
        #direct calculation with jac_predict misses the weights

        from statsmodels.regression import RegressionResults
        beta = popt
        lfit = RegressionResults(self, beta,
                       normalized_cov_params=self.normalized_cov_params)
        lfit.fitres = fitres  #mainly for testing
        self._results = lfit
        return lfit

    def fit_minimal(self, start_value, **kw):
        '''minimal fitting with no extra calculations

        `kw` is forwarded to scipy.optimize.leastsq.  The original
        signature omitted ``**kw`` and therefore raised NameError.
        '''
        func = self.geterrors
        res = optimize.leastsq(func, start_value, full_output=0, **kw)
        return res

    def fit_random(self, ntries=10, rvs_generator=None, nparams=None):
        '''fit with random starting values

        this could be replaced with a global fitter

        NOTE(review): if `nparams` is None, ``self.nparams`` must have
        been set elsewhere — the base class never sets it.
        '''
        if nparams is None:
            nparams = self.nparams
        if rvs_generator is None:
            rvs = np.random.uniform(low=-10, high=10, size=(ntries, nparams))
        else:
            rvs = rvs_generator(size=(ntries, nparams))

        results = np.array([np.r_[self.fit_minimal(rv), rv] for rv in rvs])
        #select best results and check how many solutions are within 1e-6 of best
        #not sure what leastsq returns
        return results

    def jac_predict(self, params):
        '''jacobian of prediction function using complex step derivative

        This assumes that the predict function does not use complex
        variables but is designed to do so.
        '''
        from statsmodels.tools.numdiff import approx_fprime_cs
        jaccs_err = approx_fprime_cs(params, self._predict)
        return jaccs_err
class Myfunc(NonlinearLS):
    """Example model for NonlinearLS: y = a * exp(-b * x) + c."""

    def _predict(self, params):
        a, b, c = params
        return a * np.exp(-b * self.exog) + c
# Demo (Python 2 print syntax): compares leastsq, NonlinearLS and curve_fit
# on the same exponential-decay model.
if __name__ == '__main__':

    def func0(x, a, b, c):
        # curve_fit-style signature: unpacked parameters
        return a*np.exp(-b*x) + c

    def func(params, x):
        # leastsq-style signature: parameter vector first
        a, b, c = params
        return a*np.exp(-b*x) + c

    def error(params, x, y):
        return y - func(params, x)

    def error2(params, x, y):
        return (y - func(params, x))**2

    x = np.linspace(0,4,50)
    params = np.array([2.5, 1.3, 0.5])
    y0 = func(params, x)
    y = y0 + 0.2*np.random.normal(size=len(x))

    res = optimize.leastsq(error, params, args=(x, y), full_output=True)
## r, R, c = getjaccov(res[1:], 3)

    mod = Myfunc(y, x)
    resmy = mod.fit(nparams=3)

    cf_params, cf_pcov = optimize.curve_fit(func0, x, y)
    cf_bse = np.sqrt(np.diag(cf_pcov))
    # the three estimates below should agree closely
    print res[0]
    print cf_params
    print resmy.params
    print cf_bse
    print resmy.bse
| bsd-3-clause | 807804cbc44729be414a4b9a4d4f1a5c | 29.148867 | 96 | 0.598755 | 3.686585 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/robust/norms.py | 3 | 19696 | import numpy as np
#TODO: add plots to weighting functions for online docs.
class RobustNorm(object):
    """
    Abstract parent class for the norms used in robust regression.

    Subclasses used by statsmodels.RLM implement the criterion function
    `rho`, its derivative `psi` (the influence function), the IRLS weight
    function `weights` (= psi(z)/z), and `psi_deriv`.

    Parameters
    ----------
    None :
        Some subclasses have optional tuning constants.

    References
    ----------
    PJ Huber. 'Robust Statistics' John Wiley and Sons, Inc., New York, 1981.

    DC Montgomery, EA Peck. 'Introduction to Linear Regression Analysis',
        John Wiley and Sons, Inc., New York, 2001.

    R Venables, B Ripley. 'Modern Applied Statistics in S'
        Springer, New York, 2002.

    See Also
    --------
    statsmodels.rlm for more information on how the estimators are used
    and the inputs for the methods of RobustNorm and subclasses.

    Notes
    -----
    Currently only M-estimators are available.
    """

    def rho(self, z):
        """
        The robust criterion estimator function (-2 loglike used in the
        M-estimator).  Abstract method.
        """
        raise NotImplementedError

    def psi(self, z):
        """
        Derivative of rho, a.k.a. the influence function.  Abstract method.
        """
        raise NotImplementedError

    def weights(self, z):
        """
        IRLS weight function, psi(z) / z.  Abstract method.
        """
        raise NotImplementedError

    def psi_deriv(self, z):
        """
        Derivative of psi, used for the robust covariance matrix.
        Abstract method.
        """
        raise NotImplementedError

    def __call__(self, z):
        """
        Calling the norm evaluates rho(z).
        """
        return self.rho(z)
class LeastSquares(RobustNorm):
    """
    Least squares rho for M-estimation and its derived functions.

    See also
    --------
    statsmodels.robust.norms.RobustNorm
    """

    def rho(self, z):
        """Criterion function, rho(z) = 0.5 * z**2."""
        return 0.5 * z**2

    def psi(self, z):
        """Influence function, psi(z) = z (identity)."""
        return np.asarray(z)

    def weights(self, z):
        """IRLS weights: identically 1 for least squares."""
        arr = np.asarray(z)
        return np.ones(arr.shape, np.float64)

    def psi_deriv(self, z):
        """Derivative of psi: identically 1 (expects an ndarray input).
        Used to estimate the robust covariance matrix."""
        return np.ones(z.shape, np.float64)
class HuberT(RobustNorm):
    """
    Huber's T for M estimation.

    Parameters
    ----------
    t : float, optional
        The tuning constant for Huber's t function. The default value is
        1.345.

    See also
    --------
    statsmodels.robust.norms.RobustNorm
    """

    def __init__(self, t=1.345):
        self.t = t

    def _subset(self, z):
        """Boolean mask selecting the quadratic region |z| <= t."""
        return np.less_equal(np.fabs(np.asarray(z)), self.t)

    def rho(self, z):
        """
        rho(z) = 0.5*z**2           for |z| <= t
        rho(z) = t*|z| - 0.5*t**2   for |z| > t
        """
        z = np.asarray(z)
        inside = self._subset(z)
        linear = np.fabs(z) * self.t - 0.5 * self.t**2
        return inside * 0.5 * z**2 + (1 - inside) * linear

    def psi(self, z):
        """
        psi(z) = z           for |z| <= t
        psi(z) = t*sign(z)   for |z| > t
        """
        z = np.asarray(z)
        inside = self._subset(z)
        return inside * z + (1 - inside) * self.t * np.sign(z)

    def weights(self, z):
        """
        weights(z) = 1       for |z| <= t
        weights(z) = t/|z|   for |z| > t
        """
        z = np.asarray(z)
        inside = self._subset(z)
        absz = np.fabs(z)
        absz[inside] = 1.0  # dummy divisor: these entries get weight 1
        return inside + (1 - inside) * self.t / absz

    def psi_deriv(self, z):
        """Derivative of psi: indicator of |z| <= t (boolean array).
        Used to estimate the robust covariance matrix."""
        return np.fabs(z) <= self.t
#TODO: untested, but looks right. RamsayE not available in R or SAS?
class RamsayE(RobustNorm):
    """
    Ramsay's Ea for M estimation.

    Parameters
    ----------
    a : float, optional
        The tuning constant for Ramsay's Ea function. The default value is
        0.3.

    See also
    --------
    statsmodels.robust.norms.RobustNorm
    """

    def __init__(self, a = .3):
        self.a = a

    def rho(self, z):
        """
        The robust criterion function for Ramsay's Ea.

        rho(z) = a**-2 * (1 - exp(-a*|z|)*(1 + a*|z|))
        """
        z = np.asarray(z)
        return (1 - np.exp(-self.a * np.fabs(z)) *
                (1 + self.a * np.fabs(z))) / self.a**2

    def psi(self, z):
        """
        The psi function (derivative of rho).

        psi(z) = z*exp(-a*|z|)
        """
        z = np.asarray(z)
        return z * np.exp(-self.a * np.fabs(z))

    def weights(self, z):
        """
        IRLS weighting function, psi(z)/z.

        weights(z) = exp(-a*|z|)
        """
        z = np.asarray(z)
        return np.exp(-self.a * np.fabs(z))

    def psi_deriv(self, z):
        """
        The derivative of psi, used to estimate the robust covariance matrix.

        psi_deriv(z) = exp(-a*|z|) * (1 - a*|z|)

        This closed form is algebraically identical to the previous
        expression ``exp(-a|z|) + z**2*exp(-a|z|)*(-a)/|z|`` but does not
        divide by |z|, so it returns the correct value 1 at z = 0 instead
        of nan.
        """
        az = self.a * np.fabs(z)
        return (1 - az) * np.exp(-az)
class AndrewWave(RobustNorm):
    """
    Andrew's wave for M estimation.

    Parameters
    ----------
    a : float, optional
        The tuning constant for Andrew's Wave function. The default value is
        1.339.

    See also
    --------
    statsmodels.robust.norms.RobustNorm
    """

    def __init__(self, a = 1.339):
        self.a = a

    def _subset(self, z):
        """
        Andrew's wave is defined piecewise over the range of z;
        it redescends to zero for |z| > a*pi.
        """
        z = np.asarray(z)
        return np.less_equal(np.fabs(z), self.a * np.pi)

    def rho(self, z):
        """
        rho(z) = a*(1-cos(z/a))   for |z| <= a*pi
        rho(z) = 2*a              for |z| > a*pi
        """
        a = self.a
        z = np.asarray(z)
        test = self._subset(z)
        return (test * a * (1 - np.cos(z / a)) +
                (1 - test) * 2 * a)

    def psi(self, z):
        """
        psi(z) = sin(z/a)   for |z| <= a*pi
        psi(z) = 0          for |z| > a*pi
        """
        a = self.a
        z = np.asarray(z)
        test = self._subset(z)
        return test * np.sin(z / a)

    def weights(self, z):
        """
        weights(z) = sin(z/a)/(z/a)   for |z| <= a*pi
        weights(z) = 0                for |z| > a*pi

        The removable singularity at z == 0 is filled with the limit
        sin(x)/x -> 1; the naive formula returned nan there.
        """
        a = self.a
        z = np.asarray(z)
        test = self._subset(z)
        ratio = z / a
        safe = np.where(ratio == 0, 1., ratio)
        return test * np.where(ratio == 0, 1., np.sin(safe) / safe)

    def psi_deriv(self, z):
        """
        Derivative of psi: cos(z/a)/a on |z| <= a*pi, 0 outside.
        Used to estimate the robust covariance matrix.
        """
        test = self._subset(z)
        return test * np.cos(z / self.a) / self.a
#TODO: this is untested
class TrimmedMean(RobustNorm):
    """
    Trimmed mean function for M-estimation.

    Parameters
    ----------
    c : float, optional
        The tuning constant; observations with |z| > c are trimmed
        (receive zero weight).  The default value is 2.0.

    See also
    --------
    statsmodels.robust.norms.RobustNorm
    """

    def __init__(self, c=2.):
        self.c = c

    def _subset(self, z):
        """Boolean mask selecting the retained region |z| <= c."""
        return np.less_equal(np.fabs(np.asarray(z)), self.c)

    def rho(self, z):
        """rho(z) = 0.5*z**2 for |z| <= c, else 0."""
        z = np.asarray(z)
        return self._subset(z) * z**2 * 0.5

    def psi(self, z):
        """psi(z) = z for |z| <= c, else 0."""
        z = np.asarray(z)
        return self._subset(z) * z

    def weights(self, z):
        """weights(z) = 1 for |z| <= c, else 0 (boolean mask)."""
        return self._subset(np.asarray(z))

    def psi_deriv(self, z):
        """Derivative of psi: indicator of |z| <= c.
        Used to estimate the robust covariance matrix."""
        return self._subset(z)
class Hampel(RobustNorm):
    """
    Hampel's three-part redescending function for M-estimation.

    Parameters
    ----------
    a : float, optional
    b : float, optional
    c : float, optional
        The tuning constants for Hampel's function. The default values are
        a, b, c = 2, 4, 8.

    See also
    --------
    statsmodels.robust.norms.RobustNorm
    """

    def __init__(self, a = 2., b = 4., c = 8.):
        self.a = a
        self.b = b
        self.c = c

    def _subset(self, z):
        """
        Hampel's function is defined piecewise over the range of z:
        t1: |z| <= a,  t2: a < |z| <= b,  t3: b < |z| <= c
        """
        z = np.fabs(np.asarray(z))
        t1 = np.less_equal(z, self.a)
        t2 = np.less_equal(z, self.b) * np.greater(z, self.a)
        t3 = np.less_equal(z, self.c) * np.greater(z, self.b)
        return t1, t2, t3

    def rho(self, z):
        """
        The robust criterion function for Hampel's estimator.

        rho(z) = z**2 / 2                                  for |z| <= a
        rho(z) = a*|z| - a**2/2                            for a < |z| <= b
        rho(z) = a*(c*|z| - |z|**2/2)/(c-b) + const        for b < |z| <= c
        rho(z) = a*(b + c - a)/2                           for |z| > c

        This is the integral of `psi`; the branch constant and the tail
        value are chosen so rho is continuous at a, b and c.  (The previous
        version used the constant ``-7*a**2/6`` and tail ``a*(b+c-a)`` with
        a sign error in the indicator ``(1 - t1 + t2 + t3)``, which made
        rho discontinuous and wrong off the first branch.)
        """
        z = np.fabs(z)
        a = self.a; b = self.b; c = self.c
        t1, t2, t3 = self._subset(z)
        # constant making the t3 branch continuous at |z| == b
        const3 = a * b - a**2 * 0.5 - a * (c * b - b**2 * 0.5) / (c - b)
        v = (t1 * z**2 * 0.5 +
             t2 * (a * z - a**2 * 0.5) +
             t3 * (a * (c * z - z**2 * 0.5) / (c - b) + const3) +
             (1 - t1 - t2 - t3) * a * (b + c - a) * 0.5)
        return v

    def psi(self, z):
        """
        The psi function (derivative of rho).

        psi(z) = z                        for |z| <= a
        psi(z) = a*sign(z)                for a < |z| <= b
        psi(z) = a*sign(z)*(c-|z|)/(c-b)  for b < |z| <= c
        psi(z) = 0                        for |z| > c
        """
        z = np.asarray(z)
        a = self.a; b = self.b; c = self.c
        t1, t2, t3 = self._subset(z)
        s = np.sign(z)
        az = np.fabs(z)
        # the sign factor is applied exactly once per term; the previous
        # version multiplied the middle branches by `s` twice, making psi
        # even (psi(-3) == +a) instead of odd on (a, b]
        return (t1 * z +
                t2 * a * s +
                t3 * a * s * (c - az) / (c - b))

    def weights(self, z):
        """
        IRLS weighting function, psi(z)/z.

        weights(z) = 1                      for |z| <= a
        weights(z) = a/|z|                  for a < |z| <= b
        weights(z) = a*(c-|z|)/(|z|*(c-b))  for b < |z| <= c
        weights(z) = 0                      for |z| > c
        """
        z = np.asarray(z)
        a = self.a; b = self.b; c = self.c
        t1, t2, t3 = self._subset(z)
        az = np.fabs(z)
        # guard |z| == 0 up front instead of patching nans afterwards;
        # the weight there is 1 from the t1 branch
        safe = np.where(az == 0, 1.0, az)
        return (t1 +
                t2 * a / safe +
                t3 * a * (c - az) / (safe * (c - b)))

    def psi_deriv(self, z):
        """
        Derivative of psi, used to estimate the robust covariance matrix.

        psi_deriv(z) = 1          for |z| <= a
        psi_deriv(z) = 0          for a < |z| <= b and |z| > c
        psi_deriv(z) = -a/(c-b)   for b < |z| <= c

        The slope on the redescending branch is negative; the previous
        expression had the wrong sign and produced nan at z == 0.
        """
        t1, t2, t3 = self._subset(z)
        return t1 - t3 * self.a / (self.c - self.b)
class TukeyBiweight(RobustNorm):
    """
    Tukey's biweight function for M-estimation.

    Parameters
    ----------
    c : float, optional
        The tuning constant for Tukey's Biweight. The default value is
        c = 4.685.

    Notes
    -----
    Tukey's biweight is sometimes called bisquare.  Here `rho` is shifted
    by a constant: it is negative on |z| <= c and 0 outside.
    """

    def __init__(self, c=4.685):
        self.c = c

    def _subset(self, z):
        """Boolean mask selecting |z| <= c."""
        return np.less_equal(np.fabs(np.asarray(z)), self.c)

    def rho(self, z):
        """
        rho(z) = -(1 - (z/c)**2)**3 * c**2/6.   for |z| <= c
        rho(z) = 0                              for |z| > c
        """
        inside = self._subset(z)
        t = 1 - (z / self.c)**2
        return -t**3 * inside * self.c**2 / 6.

    def psi(self, z):
        """
        psi(z) = z*(1 - (z/c)**2)**2   for |z| <= c
        psi(z) = 0                     for |z| > c
        """
        z = np.asarray(z)
        inside = self._subset(z)
        t = 1 - (z / self.c)**2
        return z * t**2 * inside

    def weights(self, z):
        """
        weights(z) = (1 - (z/c)**2)**2   for |z| <= c
        weights(z) = 0                   for |z| > c
        """
        inside = self._subset(z)
        return (1 - (z / self.c)**2)**2 * inside

    def psi_deriv(self, z):
        """Derivative of psi; used to estimate the robust covariance matrix."""
        inside = self._subset(z)
        t = 1 - (z / self.c)**2
        return inside * (t**2 - (4 * z**2 / self.c**2) * t)
def estimate_location(a, scale, norm=None, axis=0, initial=None,
                      maxiter=30, tol=1.0e-06):
    """
    M-estimator of location using `norm` and a current estimator of scale.

    This iteratively finds a solution to

    norm.psi((a-mu)/scale).sum() == 0

    Parameters
    ----------
    a : array
        Array over which the location parameter is to be estimated
    scale : array
        Scale parameter to be used in M-estimator
    norm : RobustNorm, optional
        Robust norm used in the M-estimator.  The default is HuberT().
    axis : int, optional
        Axis along which to estimate the location parameter.  The default is 0.
    initial : array, optional
        Initial condition for the location parameter.  Default is None, which
        uses the median of a.
    maxiter : int, optional
        Maximum number of iterations.  The default is 30.
    tol : float, optional
        Tolerance for convergence.  The default is 1e-06.

    Returns
    -------
    mu : array
        Estimate of location

    Raises
    ------
    ValueError
        If the iteration does not converge within `maxiter` steps.
    """
    if norm is None:
        norm = HuberT()
    if initial is None:
        mu = np.median(a, axis)
    else:
        mu = initial
    for _ in range(maxiter):  # '_' avoids shadowing the builtin `iter`
        W = norm.weights((a - mu) / scale)
        nmu = np.sum(W * a, axis) / np.sum(W, axis)
        # np.alltrue was removed in NumPy 2.0; np.all is the replacement
        if np.all(np.less(np.fabs(mu - nmu), scale * tol)):
            return nmu
        mu = nmu
    raise ValueError("location estimator failed to converge in %d iterations"
                     % maxiter)
| bsd-3-clause | 8128919ac92397db89b56018b27892e0 | 22.09027 | 79 | 0.457098 | 3.745911 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/sandbox/examples/example_maxent.py | 6 | 1273 | """
This is an example of using scipy.maxentropy to solve Jaynes' dice problem
See Golan, Judge, and Miller Section 2.3
"""
from scipy import maxentropy
import numpy as np
samplespace = [1., 2., 3., 4., 5., 6.]
def sump(x):
    """Feature function f0: indicator that `x` is an admissible outcome
    (enforces that the fitted p sums to a proper probability)."""
    found = x in samplespace
    return found
def meanp(x):
return np.mean(x)
# Set the constraints
# 1) We have a proper probability
# 2) The mean is equal to...
F = [sump, meanp]
model = maxentropy.model(F, samplespace)
# set the desired feature expectations
K = np.ones((5,2))
K[:,1] = [2.,3.,3.5,4.,5.]
model.verbose = False
for i in range(K.shape[0]):
model.fit(K[i])
# Output the distribution
print "\nFitted model parameters are:\n" + str(model.params)
print "\nFitted distribution is:"
p = model.probdist()
for j in range(len(model.samplespace)):
x = model.samplespace[j]
print "y = %-15s\tx = %-15s" %(str(K[i,1])+":",str(x) + ":") + \
" p(x) = "+str(p[j])
# Now show how well the constraints are satisfied:
print
print "Desired constraints:"
print "\tsum_{i}p_{i}= 1"
print "\tE[X] = %-15s" % str(K[i,1])
print
print "Actual expectations under the fitted model:"
print "\tsum_{i}p_{i} =", np.sum(p)
print "\tE[X] = " + str(np.sum(p*np.arange(1,7)))
| bsd-3-clause | eabe52655ab86ccb21019cfbd9d671e5 | 24.46 | 74 | 0.604085 | 2.873589 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/tsa/tests/results/arima111_results.py | 35 | 44167 | import numpy as np
llf = np.array([-241.75576160303])
nobs = np.array([ 202])
k = np.array([ 4])
k_exog = np.array([ 1])
sigma = np.array([ .79987660416529])
chi2 = np.array([ 342.91413339514])
df_model = np.array([ 2])
k_ar = np.array([ 1])
k_ma = np.array([ 1])
params = np.array([ .88084748605315,
.93989719451385,
-.7709851377434,
.79987660416529])
cov_params = np.array([ .15020189867396,
-.01642122563089,
.01456018801049,
-.00156750041014,
-.01642122563089,
.0032067778715,
-.00350387326241,
.00059634328354,
.01456018801049,
-.00350387326241,
.00480028434835,
-.00068065418463,
-.00156750041014,
.00059634328354,
-.00068065418463,
.00029322997097]).reshape(4,4)
xb = np.array([ .88084751367569,
.88084751367569,
.65303039550781,
.55365419387817,
.45908725261688,
.42810925841331,
.37837743759155,
.37686342000961,
.35719576478004,
.3220648765564,
.31943875551224,
.30907514691353,
.30120712518692,
.31383177638054,
.29652059078217,
.30856171250343,
.30095273256302,
.29171526432037,
.31331890821457,
.30463594198227,
.31990340352058,
.30126947164536,
.29703867435455,
.29884466528893,
.31037190556526,
.30912432074547,
.32505416870117,
.31537705659866,
.33494210243225,
.37874156236649,
.37366089224815,
.40859284996986,
.37640652060509,
.37692713737488,
.39422073960304,
.40755322575569,
.43472331762314,
.43878075480461,
.47569087147713,
.48725643754005,
.49617394804955,
.53683114051819,
.55128628015518,
.56243091821671,
.58791494369507,
.60756206512451,
.58892780542374,
.59145200252533,
.59339815378189,
.54422444105148,
.55698639154434,
.53304374217987,
.51458370685577,
.50035130977631,
.48937830328941,
.49780988693237,
.52120143175125,
.62369203567505,
.6182547211647,
.76608312129974,
.84627467393875,
.92499214410782,
.96879118680954,
1.0870156288147,
1.1105998754501,
1.0274360179901,
1.013991355896,
.98673474788666,
.96571969985962,
.84817039966583,
.85888928174973,
.86715340614319,
.85663330554962,
.93297851085663,
.90738350152969,
.88765007257462,
.92311006784439,
.96734017133713,
1.0690053701401,
1.1473876237869,
1.1740373373032,
1.3128218650818,
1.4704967737198,
1.5582785606384,
1.7273052930832,
1.8745132684708,
1.7853132486343,
1.7841064929962,
1.850741147995,
1.800768494606,
1.8466963768005,
1.7976499795914,
1.6078149080276,
1.3938897848129,
1.5498898029327,
1.3492304086685,
1.059396147728,
1.0217411518097,
1.0096007585526,
1.0002405643463,
1.0436969995499,
1.0603114366531,
1.0055546760559,
.99712115526199,
.92305397987366,
.9841884970665,
.92997401952744,
.90506774187088,
.9872123003006,
.61137217283249,
.65943044424057,
.67959040403366,
.77959072589874,
.87357920408249,
.91226226091385,
.95897603034973,
.96120971441269,
.99671375751495,
1.0409790277481,
1.0919979810715,
1.1144404411316,
1.2330915927887,
1.2401138544083,
1.161071896553,
1.3028255701065,
1.2938764095306,
1.3207612037659,
1.5610725879669,
1.4760913848877,
1.258552312851,
1.2090681791306,
1.1540271043777,
1.12848341465,
1.1087870597839,
1.0936040878296,
1.0987877845764,
1.0858948230743,
1.0590622425079,
.98770052194595,
1.0002481937408,
.94235575199127,
.93150353431702,
.97381073236465,
.9726470708847,
.98864215612411,
1.0347559452057,
.98585307598114,
.96503925323486,
.9996662735939,
1.0601476430893,
1.022319316864,
1.043828368187,
1.0604115724564,
.95495897531509,
.87365657091141,
.91232192516327,
.84078407287598,
.73495537042618,
.78849309682846,
.77909576892853,
.78874284029007,
.8637443780899,
.8540056347847,
.94784545898438,
.98641014099121,
1.0837067365646,
1.1925053596497,
1.1750392913818,
1.2460317611694,
1.1487410068512,
1.1075156927109,
.94060403108597,
.7950227856636,
.93615245819092,
.89293897151947,
.94407802820206,
1.0172899961472,
.93860250711441,
.86104601621628,
.91948908567429,
.99833220243454,
1.008442401886,
1.1175880432129,
1.2017351388931,
1.1483734846115,
1.2761443853378,
1.188849568367,
1.7296310663223,
1.4202431440353,
1.3675138950348,
1.445098400116,
1.031960606575,
1.1313284635544,
1.3214453458786,
1.3112732172012,
1.367110490799,
1.674845457077,
1.5979281663895,
2.064112663269,
1.3536450862885,
.30015936493874,
.36831066012383,
.64060544967651])
y = np.array([np.nan,
29.860847473145,
29.803030014038,
29.903654098511,
29.82908821106,
29.968111038208,
29.928377151489,
30.126863479614,
30.197195053101,
30.132064819336,
30.23943901062,
30.289073944092,
30.341207504272,
30.523830413818,
30.516519546509,
30.68856048584,
30.740953445435,
30.771715164185,
31.003318786621,
31.054636001587,
31.25990486145,
31.251270294189,
31.317039489746,
31.418846130371,
31.590372085571,
31.689123153687,
31.905054092407,
31.965375900269,
32.214942932129,
32.658740997314,
32.823661804199,
33.258590698242,
33.27640914917,
33.47692489624,
33.7942237854,
34.107555389404,
34.534721374512,
34.83878326416,
35.375694274902,
35.787254333496,
36.196174621582,
36.83683013916,
37.3512840271,
37.86243057251,
38.487915039063,
39.107563018799,
39.488929748535,
39.991455078125,
40.49340057373,
40.644222259521,
41.156986236572,
41.433044433594,
41.714584350586,
42.000350952148,
42.289379119873,
42.697811126709,
43.221202850342,
44.323692321777,
44.818256378174,
46.366081237793,
47.64627456665,
49.024990081787,
50.26879119873,
52.087017059326,
53.410598754883,
54.027435302734,
55.01399230957,
55.886737823486,
56.765720367432,
56.948169708252,
57.858890533447,
58.767154693604,
59.556632995605,
60.93297958374,
61.707382202148,
62.487648010254,
63.623111724854,
64.867340087891,
66.569007873535,
68.247383117676,
69.674034118652,
71.912818908691,
74.470497131348,
76.758277893066,
79.72730255127,
82.774513244629,
84.385314941406,
86.484100341797,
89.050735473633,
90.900764465332,
93.346694946289,
95.197654724121,
96.007820129395,
96.393890380859,
99.04988861084,
99.449226379395,
98.959396362305,
99.821746826172,
100.80960083008,
101.80024719238,
103.1436920166,
104.36031341553,
105.10555267334,
106.09712219238,
106.62305450439,
107.98419189453,
108.62997436523,
109.40506744385,
110.88721466064,
109.31137084961,
110.15943145752,
110.87958526611,
112.17959594727,
113.57357788086,
114.71226501465,
115.95897674561,
116.9612121582,
118.1967086792,
119.54097747803,
120.99199676514,
122.31443786621,
124.33309173584,
125.74011230469,
126.56107330322,
128.80282592773,
130.19386291504,
131.82075500488,
134.96105957031,
136.17608642578,
136.35855102539,
137.40905761719,
138.35401916504,
139.42848205566,
140.50877380371,
141.59359741211,
142.79878234863,
143.88589477539,
144.85906982422,
145.48770141602,
146.60025024414,
147.24235534668,
148.13150024414,
149.37380981445,
150.3726348877,
151.48864746094,
152.83476257324,
153.58586120605,
154.46504211426,
155.69966125488,
157.16015625,
158.0223236084,
159.24382019043,
160.46040344238,
160.85494995117,
161.27365112305,
162.41232299805,
162.84078979492,
162.93495178223,
163.98849487305,
164.67909240723,
165.48873901367,
166.76373291016,
167.55400085449,
169.0478515625,
170.2864074707,
171.98370361328,
173.89250183105,
175.07502746582,
176.84603881836,
177.54873657227,
178.50750732422,
178.5406036377,
178.49502563477,
180.23616027832,
180.89294433594,
182.14407348633,
183.61729431152,
184.13859558105,
184.56105041504,
185.81948852539,
187.29833984375,
188.40843200684,
190.21759033203,
192.00173950195,
192.9483795166,
195.07614135742,
195.88883972168,
200.92962646484,
200.82023620605,
202.06750488281,
204.1450958252,
202.93196105957,
204.70533752441,
207.24143981934,
208.6492767334,
210.50010681152,
214.16984558105,
215.59492492676,
220.67411804199,
218.24264526367,
212.47415161133,
213.03932189941,
215.10960388184])
resid = np.array([np.nan,
-.71084743738174,
-.45302960276604,
-.5336537361145,
-.28908717632294,
-.41811093688011,
-.17837668955326,
-.28686326742172,
-.38719645142555,
-.21206425130367,
-.25943928956985,
-.24907378852367,
-.13120894134045,
-.3038315474987,
-.13652075827122,
-.24856032431126,
-.26095372438431,
-.0817142650485,
-.25331944227219,
-.11463540792465,
-.30990317463875,
-.2312697917223,
-.19703827798367,
-.13884480297565,
-.21037344634533,
-.10912357270718,
-.25505447387695,
-.08537751436234,
.06505750864744,
-.20873957872391,
.02633681893349,
-.35858979821205,
-.1764095723629,
-.07692407816648,
-.09422151744366,
-.00755552388728,
-.13472028076649,
.06121923774481,
-.07569316774607,
-.08725491166115,
.10382451862097,
-.03683112934232,
-.05128625407815,
.03757134452462,
.0120835499838,
-.20756052434444,
-.08892779797316,
-.09145200997591,
-.3934012055397,
-.04422445222735,
-.25698333978653,
-.23304453492165,
-.21458448469639,
-.2003520578146,
-.08937677741051,
.00219011562876,
.47879853844643,
-.12369203567505,
.78174299001694,
.43391767144203,
.4537245631218,
.27500861883163,
.73120957612991,
.21298357844353,
-.41059911251068,
-.02743596211076,
-.11398979276419,
-.08673703670502,
-.66572046279907,
.05183110013604,
.04111221805215,
-.06715416908264,
.44336593151093,
-.13297925889492,
-.1073842421174,
.21235218644142,
.27689066529274,
.63265830278397,
.53099316358566,
.25261387228966,
.92596107721329,
1.0871796607971,
.72950023412704,
1.2417244911194,
1.1726962327957,
-.17451636493206,
.31468516588211,
.71589350700378,
.04926039651036,
.59923303127289,
.05330519750714,
-.79764997959137,
-1.0078164339066,
1.1061102151871,
-.94989138841629,
-1.5492273569107,
-.15939457714558,
-.02174116671085,
-.00960071571171,
.29975482821465,
.15630762279034,
-.2603160738945,
-.00555467186496,
-.3971226811409,
.37694907188416,
-.28419154882431,
-.12997098267078,
.49493381381035,
-2.1872169971466,
.18863087892532,
.04056651890278,
.52041417360306,
.52040469646454,
.22642692923546,
.28773468732834,
.0410239957273,
.2387872338295,
.30328929424286,
.35902243852615,
.20799747109413,
.78556102514267,
.16690990328789,
-.34011232852936,
.93892657756805,
.0971682742238,
.30612966418266,
1.5792326927185,
-.26106956601143,
-1.0760822296143,
-.15856145322323,
-.2090682387352,
-.05402099713683,
-.02849259786308,
-.00878097955137,
.10639289021492,
.00121826829854,
-.08589478582144,
-.35906526446342,
.11230555176735,
-.30025118589401,
-.04236188530922,
.26849341392517,
.02618926763535,
.12735903263092,
.31136092543602,
-.23475293815136,
-.08585914969444,
.23495768010616,
.40034285187721,
-.1601537913084,
.17767761647701,
.15616858005524,
-.56041151285172,
-.45495894551277,
.2263495028019,
-.41232195496559,
-.64078712463379,
.26504465937614,
-.08849616348743,
.02090725488961,
.41125410795212,
-.06374131888151,
.54600352048874,
.25215145945549,
.61358070373535,
.71629631519318,
.00749156065285,
.52497291564941,
-.44604399800301,
-.14874097704887,
-.90750348567963,
-.84061318635941,
.80498331785202,
-.23615552484989,
.30705797672272,
.45593112707138,
-.41729912161827,
-.43860253691673,
.33895090222359,
.48052009940147,
.10165861994028,
.69156980514526,
.58240884542465,
-.20173519849777,
.85162657499313,
-.37615045905113,
3.3111503124237,
-1.5296341180801,
-.12024004757404,
.63248610496521,
-2.2451014518738,
.64205056428909,
1.2146645784378,
.09655395895243,
.48372489213943,
1.9948890209198,
-.17284658551216,
3.0150785446167,
-3.7851057052612,
-6.0686569213867,
.19684991240501,
1.4296782016754,
1.2753949165344])
yr = np.array([np.nan,
-.71084743738174,
-.45302960276604,
-.5336537361145,
-.28908717632294,
-.41811093688011,
-.17837668955326,
-.28686326742172,
-.38719645142555,
-.21206425130367,
-.25943928956985,
-.24907378852367,
-.13120894134045,
-.3038315474987,
-.13652075827122,
-.24856032431126,
-.26095372438431,
-.0817142650485,
-.25331944227219,
-.11463540792465,
-.30990317463875,
-.2312697917223,
-.19703827798367,
-.13884480297565,
-.21037344634533,
-.10912357270718,
-.25505447387695,
-.08537751436234,
.06505750864744,
-.20873957872391,
.02633681893349,
-.35858979821205,
-.1764095723629,
-.07692407816648,
-.09422151744366,
-.00755552388728,
-.13472028076649,
.06121923774481,
-.07569316774607,
-.08725491166115,
.10382451862097,
-.03683112934232,
-.05128625407815,
.03757134452462,
.0120835499838,
-.20756052434444,
-.08892779797316,
-.09145200997591,
-.3934012055397,
-.04422445222735,
-.25698333978653,
-.23304453492165,
-.21458448469639,
-.2003520578146,
-.08937677741051,
.00219011562876,
.47879853844643,
-.12369203567505,
.78174299001694,
.43391767144203,
.4537245631218,
.27500861883163,
.73120957612991,
.21298357844353,
-.41059911251068,
-.02743596211076,
-.11398979276419,
-.08673703670502,
-.66572046279907,
.05183110013604,
.04111221805215,
-.06715416908264,
.44336593151093,
-.13297925889492,
-.1073842421174,
.21235218644142,
.27689066529274,
.63265830278397,
.53099316358566,
.25261387228966,
.92596107721329,
1.0871796607971,
.72950023412704,
1.2417244911194,
1.1726962327957,
-.17451636493206,
.31468516588211,
.71589350700378,
.04926039651036,
.59923303127289,
.05330519750714,
-.79764997959137,
-1.0078164339066,
1.1061102151871,
-.94989138841629,
-1.5492273569107,
-.15939457714558,
-.02174116671085,
-.00960071571171,
.29975482821465,
.15630762279034,
-.2603160738945,
-.00555467186496,
-.3971226811409,
.37694907188416,
-.28419154882431,
-.12997098267078,
.49493381381035,
-2.1872169971466,
.18863087892532,
.04056651890278,
.52041417360306,
.52040469646454,
.22642692923546,
.28773468732834,
.0410239957273,
.2387872338295,
.30328929424286,
.35902243852615,
.20799747109413,
.78556102514267,
.16690990328789,
-.34011232852936,
.93892657756805,
.0971682742238,
.30612966418266,
1.5792326927185,
-.26106956601143,
-1.0760822296143,
-.15856145322323,
-.2090682387352,
-.05402099713683,
-.02849259786308,
-.00878097955137,
.10639289021492,
.00121826829854,
-.08589478582144,
-.35906526446342,
.11230555176735,
-.30025118589401,
-.04236188530922,
.26849341392517,
.02618926763535,
.12735903263092,
.31136092543602,
-.23475293815136,
-.08585914969444,
.23495768010616,
.40034285187721,
-.1601537913084,
.17767761647701,
.15616858005524,
-.56041151285172,
-.45495894551277,
.2263495028019,
-.41232195496559,
-.64078712463379,
.26504465937614,
-.08849616348743,
.02090725488961,
.41125410795212,
-.06374131888151,
.54600352048874,
.25215145945549,
.61358070373535,
.71629631519318,
.00749156065285,
.52497291564941,
-.44604399800301,
-.14874097704887,
-.90750348567963,
-.84061318635941,
.80498331785202,
-.23615552484989,
.30705797672272,
.45593112707138,
-.41729912161827,
-.43860253691673,
.33895090222359,
.48052009940147,
.10165861994028,
.69156980514526,
.58240884542465,
-.20173519849777,
.85162657499313,
-.37615045905113,
3.3111503124237,
-1.5296341180801,
-.12024004757404,
.63248610496521,
-2.2451014518738,
.64205056428909,
1.2146645784378,
.09655395895243,
.48372489213943,
1.9948890209198,
-.17284658551216,
3.0150785446167,
-3.7851057052612,
-6.0686569213867,
.19684991240501,
1.4296782016754,
1.2753949165344])
mse = np.array([ .7963672876358,
.7963672876358,
.71457105875015,
.67959600687027,
.66207146644592,
.65259438753128,
.64725720882416,
.644182741642,
.64238852262497,
.64133352041245,
.64071041345596,
.6403414607048,
.64012265205383,
.63999271392822,
.63991558551788,
.63986974954605,
.63984251022339,
.63982629776001,
.6398167014122,
.6398109793663,
.63980758190155,
.63980555534363,
.63980436325073,
.639803647995,
.63980323076248,
.63980293273926,
.63980281352997,
.63980269432068,
.63980263471603,
.63980263471603,
.63980263471603,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139,
.63980257511139])
stdp = np.array([ .88084751367569,
.88084751367569,
.65303039550781,
.55365419387817,
.45908725261688,
.42810925841331,
.37837743759155,
.37686342000961,
.35719576478004,
.3220648765564,
.31943875551224,
.30907514691353,
.30120712518692,
.31383177638054,
.29652059078217,
.30856171250343,
.30095273256302,
.29171526432037,
.31331890821457,
.30463594198227,
.31990340352058,
.30126947164536,
.29703867435455,
.29884466528893,
.31037190556526,
.30912432074547,
.32505416870117,
.31537705659866,
.33494210243225,
.37874156236649,
.37366089224815,
.40859284996986,
.37640652060509,
.37692713737488,
.39422073960304,
.40755322575569,
.43472331762314,
.43878075480461,
.47569087147713,
.48725643754005,
.49617394804955,
.53683114051819,
.55128628015518,
.56243091821671,
.58791494369507,
.60756206512451,
.58892780542374,
.59145200252533,
.59339815378189,
.54422444105148,
.55698639154434,
.53304374217987,
.51458370685577,
.50035130977631,
.48937830328941,
.49780988693237,
.52120143175125,
.62369203567505,
.6182547211647,
.76608312129974,
.84627467393875,
.92499214410782,
.96879118680954,
1.0870156288147,
1.1105998754501,
1.0274360179901,
1.013991355896,
.98673474788666,
.96571969985962,
.84817039966583,
.85888928174973,
.86715340614319,
.85663330554962,
.93297851085663,
.90738350152969,
.88765007257462,
.92311006784439,
.96734017133713,
1.0690053701401,
1.1473876237869,
1.1740373373032,
1.3128218650818,
1.4704967737198,
1.5582785606384,
1.7273052930832,
1.8745132684708,
1.7853132486343,
1.7841064929962,
1.850741147995,
1.800768494606,
1.8466963768005,
1.7976499795914,
1.6078149080276,
1.3938897848129,
1.5498898029327,
1.3492304086685,
1.059396147728,
1.0217411518097,
1.0096007585526,
1.0002405643463,
1.0436969995499,
1.0603114366531,
1.0055546760559,
.99712115526199,
.92305397987366,
.9841884970665,
.92997401952744,
.90506774187088,
.9872123003006,
.61137217283249,
.65943044424057,
.67959040403366,
.77959072589874,
.87357920408249,
.91226226091385,
.95897603034973,
.96120971441269,
.99671375751495,
1.0409790277481,
1.0919979810715,
1.1144404411316,
1.2330915927887,
1.2401138544083,
1.161071896553,
1.3028255701065,
1.2938764095306,
1.3207612037659,
1.5610725879669,
1.4760913848877,
1.258552312851,
1.2090681791306,
1.1540271043777,
1.12848341465,
1.1087870597839,
1.0936040878296,
1.0987877845764,
1.0858948230743,
1.0590622425079,
.98770052194595,
1.0002481937408,
.94235575199127,
.93150353431702,
.97381073236465,
.9726470708847,
.98864215612411,
1.0347559452057,
.98585307598114,
.96503925323486,
.9996662735939,
1.0601476430893,
1.022319316864,
1.043828368187,
1.0604115724564,
.95495897531509,
.87365657091141,
.91232192516327,
.84078407287598,
.73495537042618,
.78849309682846,
.77909576892853,
.78874284029007,
.8637443780899,
.8540056347847,
.94784545898438,
.98641014099121,
1.0837067365646,
1.1925053596497,
1.1750392913818,
1.2460317611694,
1.1487410068512,
1.1075156927109,
.94060403108597,
.7950227856636,
.93615245819092,
.89293897151947,
.94407802820206,
1.0172899961472,
.93860250711441,
.86104601621628,
.91948908567429,
.99833220243454,
1.008442401886,
1.1175880432129,
1.2017351388931,
1.1483734846115,
1.2761443853378,
1.188849568367,
1.7296310663223,
1.4202431440353,
1.3675138950348,
1.445098400116,
1.031960606575,
1.1313284635544,
1.3214453458786,
1.3112732172012,
1.367110490799,
1.674845457077,
1.5979281663895,
2.064112663269,
1.3536450862885,
.30015936493874,
.36831066012383,
.64060544967651])
icstats = np.array([ 202,
np.nan,
-241.75576160303,
4,
491.51152320605,
504.74459399566])
class Bunch(dict):
def __init__(self, **kw):
dict.__init__(self, kw)
self.__dict__ = self
results = Bunch(llf=llf, nobs=nobs, k=k, k_exog=k_exog, sigma=sigma, chi2=chi2, df_model=df_model, k_ar=k_ar, k_ma=k_ma, params=params, cov_params=cov_params, xb=xb, y=y, resid=resid, yr=yr, mse=mse, stdp=stdp, icstats=icstats, )
| bsd-3-clause | fbfd2756a96ed98fa7a522a30805cbe2 | 33.478532 | 229 | 0.400978 | 3.831945 | false | false | false | false |
yarikoptic/pystatsmodels | examples/example_wls.py | 2 | 2809 | """Weighted Least Squares
"""
import numpy as np
from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.iolib.table import (SimpleTable, default_txt_fmt)
np.random.seed(1024)
# WLS Estimation
# --------------
# Artificial data: Heteroscedasticity 2 groups
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Model assumptions:
#
# * Misspecificaion: true model is quadratic, estimate only linear
# * Independent noise/error term
# * Two groups for error variance, low and high variance groups
nsample = 50
x = np.linspace(0, 20, nsample)
X = np.c_[x, (x - 5)**2, np.ones(nsample)]
beta = [0.5, -0.01, 5.]
sig = 0.5
w = np.ones(nsample)
w[nsample * 6 / 10:] = 3
y_true = np.dot(X, beta)
e = np.random.normal(size=nsample)
y = y_true + sig * w * e
X = X[:, [0, 2]]
#WLS knowing the true variance ratio of heteroscedasticity
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
mod_wls = sm.WLS(y, X, weights=1. / w)
res_wls = mod_wls.fit()
print res_wls.summary()
#OLS vs. WLS
#-----------
# Estimate an OLS model for comparison
res_ols = sm.OLS(y, X).fit()
# Compare the estimated parameters in WLS and OLS
print res_ols.params
print res_wls.params
# Compare the WLS standard errors to heteroscedasticity corrected OLS standard
# errors:
se = np.vstack([[res_wls.bse], [res_ols.bse], [res_ols.HC0_se],
[res_ols.HC1_se], [res_ols.HC2_se], [res_ols.HC3_se]])
se = np.round(se, 4)
colnames = 'x1', 'const'
rownames = 'WLS', 'OLS', 'OLS_HC0', 'OLS_HC1', 'OLS_HC3', 'OLS_HC3'
tabl = SimpleTable(se, colnames, rownames, txt_fmt=default_txt_fmt)
print tabl
# Calculate OLS prediction interval
covb = res_ols.cov_params()
prediction_var = res_ols.mse_resid + (X * np.dot(covb, X.T).T).sum(1)
prediction_std = np.sqrt(prediction_var)
tppf = stats.t.ppf(0.975, res_ols.df_resid)
# Draw a plot to compare predicted values in WLS and OLS:
prstd, iv_l, iv_u = wls_prediction_std(res_wls)
plt.figure();
plt.plot(x, y, 'o', x, y_true, 'b-');
plt.plot(x, res_ols.fittedvalues, 'r--');
plt.plot(x, res_ols.fittedvalues + tppf * prediction_std, 'r--');
plt.plot(x, res_ols.fittedvalues - tppf * prediction_std, 'r--');
plt.plot(x, res_wls.fittedvalues, 'g--.');
plt.plot(x, iv_u, 'g--');
plt.plot(x, iv_l, 'g--');
#@savefig wls_ols_0.png
plt.title('blue: true, red: OLS, green: WLS');
# Feasible Weighted Least Squares (2-stage FWLS)
# ----------------------------------------------
resid1 = res_ols.resid[w == 1.]
var1 = resid1.var(ddof=int(res_ols.df_model) + 1)
resid2 = res_ols.resid[w != 1.]
var2 = resid2.var(ddof=int(res_ols.df_model) + 1)
w_est = w.copy()
w_est[w != 1.] = np.sqrt(var2) / np.sqrt(var1)
res_fwls = sm.WLS(y, X, 1. / w_est).fit()
print res_fwls.summary()
| bsd-3-clause | a6a01de18b895ef01fcb1ec34d62ef74 | 30.211111 | 79 | 0.632609 | 2.519283 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/examples/ex_generic_mle_tdist.py | 3 | 39633 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 28 08:28:04 2010
Author: josef-pktd
"""
import numpy as np
from scipy import stats, special, optimize
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
#redefine some shortcuts
# Local aliases for frequently used functions/constants: keeps the
# log-likelihood expressions below short and saves attribute lookups.
np_log = np.log
np_pi = np.pi
sps_gamln = special.gammaln
def maxabs(arr1, arr2):
    """Return the largest absolute elementwise difference between arr1 and arr2."""
    diff = np.asarray(arr1) - np.asarray(arr2)
    return np.abs(diff).max()
def maxabsrel(arr1, arr2):
    """Return the largest absolute relative deviation of arr2 from arr1."""
    rel_dev = np.asarray(arr2) / np.asarray(arr1) - 1
    return np.abs(rel_dev).max()
#global
# Module-level list collecting every parameter vector the optimizer tries;
# MyT.nloglikeobs appends to it so the example script can inspect the search.
store_params = []
class MyT(GenericLikelihoodModel):
    '''Maximum Likelihood Estimation of Linear Model with t-distributed errors

    This is an example for generic MLE, structured analogously to
    discretemod.Poisson but with t-distributed instead of Poisson errors.

    Except for defining the negative log-likelihood method, all
    methods and results are generic. Gradients and Hessian
    and all resulting statistics are based on numerical
    differentiation.

    '''

    def loglike(self, params):
        '''Total log-likelihood: negated sum of the per-observation
        negative log-likelihood contributions.'''
        return -self.nloglikeobs(params).sum(0)

    def nloglikeobs(self, params):
        """
        Negative loglikelihood, per observation, of the linear model with
        t-distributed errors.

        Parameters
        ----------
        params : array-like
            Model parameters: regression coefficients, followed by the
            degrees of freedom and the scale of the t distribution.  If
            ``self.fixed_params`` is set, `params` contains only the free
            parameters and is expanded internally.

        Returns
        -------
        ndarray
            The negative log likelihood of each observation evaluated at
            `params`.
        """
        # keep a trace of every parameter vector tried by the optimizer
        # (module-level diagnostic used by the example script)
        store_params.append(params)
        if self.fixed_params is not None:
            params = self.expandparams(params)

        beta = params[:-2]
        df = params[-2]
        scale = params[-1]
        loc = np.dot(self.exog, beta)
        endog = self.endog
        x = (endog - loc)/scale
        # next part is stats.t._logpdf, plus a -log(scale) Jacobian term for
        # the location-scale transformation
        lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)
        lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)
        lPx -= np_log(scale)  # correction for scale
        return -lPx
#Example:
# Simulate a linear model with t(5)-distributed errors and fit it with the
# generic MLE class defined above.
np.random.seed(98765678)
nobs = 1000
nvars = 6
df = 5
rvs = np.random.randn(nobs, nvars-1)
data_exog = sm.add_constant(rvs, prepend=False)
xbeta = 0.9 + 0.1*rvs.sum(1)
data_endog = xbeta + 0.1*np.random.standard_t(df, size=nobs)
print data_endog.var()
# OLS fit for comparison and for generating starting values
res_ols = sm.OLS(data_endog, data_exog).fit()
print res_ols.scale
print np.sqrt(res_ols.scale)
print res_ols.params
# method-of-moments estimate of df from the excess kurtosis of the residuals
kurt = stats.kurtosis(res_ols.resid)
df_fromkurt = 6./kurt + 4
print stats.t.stats(df_fromkurt, moments='mvsk')
print stats.t.stats(df, moments='mvsk')
# build starting values: OLS betas, kurtosis-based df, OLS residual std
modp = MyT(data_endog, data_exog)
start_value = 0.1*np.ones(data_exog.shape[1]+2)
#start_value = np.zeros(data_exog.shape[1]+2)
#start_value[:nvars] = sm.OLS(data_endog, data_exog).fit().params
start_value[:nvars] = res_ols.params
start_value[-2] = df_fromkurt #10
start_value[-1] = np.sqrt(res_ols.scale) #0.5
modp.start_params = start_value
#adding fixed parameters
# fixdf holds NaN for free parameters and the fixed value otherwise;
# with fixone=1 the df parameter would be pinned at 100
fixdf = np.nan * np.zeros(modp.start_params.shape)
fixdf[-2] = 100
fixone = 0
if fixone:
    modp.fixed_params = fixdf
    modp.fixed_paramsmask = np.isnan(fixdf)
    modp.start_params = modp.start_params[modp.fixed_paramsmask]
else:
    modp.fixed_params = None
    modp.fixed_paramsmask = None
# fit by Nelder-Mead, then refine with Newton from the Nelder-Mead solution
resp = modp.fit(start_params = modp.start_params, disp=1, method='nm')#'newton')
#resp = modp.fit(start_params = modp.start_params, disp=1, method='newton')
print '\nestimation results t-dist'
print resp.params
print resp.bse
resp2 = modp.fit(start_params = resp.params, method='Newton')
print 'using Newton'
print resp2.params
print resp2.bse
# numerical Hessian at the starting values, for inspection
from statsmodels.tools.numdiff import approx_fprime, approx_hess
hb=-approx_hess(modp.start_params, modp.loglike, epsilon=-1e-4)
tmp = modp.loglike(modp.start_params)
print tmp.shape
#np.linalg.eigh(np.linalg.inv(hb))[0]
# range of parameter values visited by the optimizer (see store_params)
pp=np.array(store_params)
print pp.min(0)
print pp.max(0)
##################### Example: Pareto
# estimating scale doesn't work yet, a bug somewhere ?
# fit_ks works well, but no bse or other result statistics yet
#import for kstest based estimation
#should be replaced
import statsmodels.sandbox.distributions.sppatch
class MyPareto(GenericLikelihoodModel):
    '''Maximum Likelihood Estimation pareto distribution

    first version: iid case, with constant parameters

    The full parameter vector is ``(b, loc, scale)`` where ``b`` is the
    Pareto shape parameter.  ``endog`` holds the iid sample.  When
    ``fixed_params`` is set (array with numbers for fixed parameters and
    nan for free ones), the optimizer only sees the free subset and
    ``expandparams`` rebuilds the full vector.
    '''
    #copied from stats.distribution
    def pdf(self, x, b):
        # standardized Pareto density with shape b; only valid on x >= 1
        return b * x**(-b-1)
    def loglike(self, params):
        # joint log-likelihood: negate the sum of per-observation
        # negative log-likelihood contributions
        return -self.nloglikeobs(params).sum(0)
    def nloglikeobs(self, params):
        '''negative log-likelihood contribution of each observation

        ``params`` is ``(b, loc, scale)``; if ``fixed_params`` is set,
        ``params`` contains only the free parameters and is expanded to the
        full vector first.
        '''
        #print params.shape
        if not self.fixed_params is None:
            #print 'using fixed'
            params = self.expandparams(params)
        b = params[0]
        loc = params[1]
        scale = params[2]
        #loc = np.dot(self.exog, beta)
        endog = self.endog
        # standardize: support of the Pareto density is x >= 1
        x = (endog - loc)/scale
        logpdf = np_log(b) - (b+1.)*np_log(x) #use np_log(1 + x) for Pareto II
        logpdf -= np.log(scale)
        #lb = loc + scale
        #logpdf[endog<lb] = -inf
        #import pdb; pdb.set_trace()
        # out-of-support observations get a large finite penalty instead of
        # -inf (commented-out alternative) so the optimizer keeps working
        logpdf[x<1] = -10000 #-np.inf
        return -logpdf
    def fit_ks(self):
        '''fit Pareto with nested optimization

        originally published on stackoverflow
        this doesn't trim lower values during ks optimization

        Outer loop: minimize the Kolmogorov-Smirnov statistic over ``loc``;
        inner loop: ML fit of the remaining parameters with ``loc`` frozen
        via ``fixed_params``.  Returns the tuple ``(b, loc, scale)``.
        '''
        rvs = self.endog
        rvsmin = rvs.min()
        # all-nan means "estimate everything"; loc gets overwritten below
        fixdf = np.nan * np.ones(3)
        self.fixed_params = fixdf
        self.fixed_paramsmask = np.isnan(fixdf)
        def pareto_ks(loc, rvs):
            # KS distance of the ML fit for a candidate loc
            #start_scale = rvs.min() - loc # not used yet
            #est = self.fit_fr(rvs, 1., frozen=[np.nan, loc, np.nan])
            self.fixed_params[1] = loc
            est = self.fit(start_params=self.start_params[self.fixed_paramsmask]).params
            #est = self.fit(start_params=self.start_params, method='nm').params
            args = (est[0], loc, est[1])
            return stats.kstest(rvs,'pareto',args)[0]
        # start the loc search below the sample minimum
        locest = optimize.fmin(pareto_ks, rvsmin - 1.5, (rvs,))
        est = stats.pareto.fit_fr(rvs, 0., frozen=[np.nan, locest, np.nan])
        args = (est[0], locest[0], est[1])
        return args
    def fit_ks1_trim(self):
        '''fit Pareto with nested optimization

        originally published on stackoverflow

        Candidate ``loc`` values are taken from the order statistics of the
        sorted sample; the lower tail is trimmed away and the trim point is
        chosen to minimize the KS statistic.  Returns ``(b, loc, scale)``.
        '''
        self.nobs = self.endog.shape[0]
        rvs = np.sort(self.endog)
        rvsmin = rvs.min()
        def pareto_ks(loc, rvs):
            # KS distance of the frozen-loc ML fit
            #start_scale = rvs.min() - loc # not used yet
            est = stats.pareto.fit_fr(rvs, frozen=[np.nan, loc, np.nan])
            args = (est[0], loc, est[1])
            return stats.kstest(rvs,'pareto',args)[0]
        #locest = optimize.fmin(pareto_ks, rvsmin*0.7, (rvs,))
        # keep at least 10 observations; never trim past the 95% quantile
        maxind = min(np.floor(self.nobs*0.95).astype(int), self.nobs-10)
        res = []
        for trimidx in range(self.nobs//2, maxind):
            xmin = loc = rvs[trimidx]
            # tiny offset keeps the smallest retained point inside the support
            res.append([trimidx, pareto_ks(loc-1e-10, rvs[trimidx:])])
        res = np.array(res)
        bestidx = res[np.argmin(res[:,1]),0].astype(int)
        print bestidx
        locest = rvs[bestidx]
        est = stats.pareto.fit_fr(rvs[bestidx:], 1., frozen=[np.nan, locest, np.nan])
        args = (est[0], locest, est[1])
        return args
    def fit_ks1(self):
        '''fit Pareto with nested optimization

        originally published on stackoverflow

        Like ``fit_ks`` but the inner ML fit uses ``stats.pareto.fit_fr``
        directly instead of this model's ``fit``.  Returns
        ``(b, loc, scale)``.
        '''
        rvs = self.endog
        rvsmin = rvs.min()
        def pareto_ks(loc, rvs):
            # KS distance of the frozen-loc ML fit
            #start_scale = rvs.min() - loc # not used yet
            est = stats.pareto.fit_fr(rvs, 1., frozen=[np.nan, loc, np.nan])
            args = (est[0], loc, est[1])
            return stats.kstest(rvs,'pareto',args)[0]
        #locest = optimize.fmin(pareto_ks, rvsmin*0.7, (rvs,))
        # start the loc search below the sample minimum
        locest = optimize.fmin(pareto_ks, rvsmin - 1.5, (rvs,))
        est = stats.pareto.fit_fr(rvs, 1., frozen=[np.nan, locest, np.nan])
        args = (est[0], locest[0], est[1])
        return args
#y = stats.pareto.rvs(1, loc=10, scale=2, size=nobs)
# simulate an iid Pareto sample: shape 1, loc 0, scale 2
y = stats.pareto.rvs(1, loc=0, scale=2, size=nobs)
par_start_params = np.array([1., 9., 2.])
mod_par = MyPareto(y)
mod_par.start_params = np.array([1., 10., 2.])
# second assignment overrides the first: deliberately poor loc start value
mod_par.start_params = np.array([1., -9., 2.])
mod_par.fixed_params = None
# nan marks a parameter as free; a number fixes it at that value
fixdf = np.nan * np.ones(mod_par.start_params.shape)
fixdf[1] = 9.9
#fixdf[2] = 2.
# toggle: 1 = hold loc fixed at 9.9, 0 = estimate all three parameters
fixone = 0
if fixone:
    mod_par.fixed_params = fixdf
    mod_par.fixed_paramsmask = np.isnan(fixdf)
    # optimizer only sees the free parameters
    mod_par.start_params = mod_par.start_params[mod_par.fixed_paramsmask]
    mod_par.df_model = 2
    mod_par.df_resid = mod_par.endog.shape[0] - mod_par.df_model
    mod_par.data.xnames = ['shape', 'scale']
else:
    mod_par.fixed_params = None
    mod_par.fixed_paramsmask = None
    mod_par.df_model = 3
    mod_par.df_resid = mod_par.endog.shape[0] - mod_par.df_model
    mod_par.data.xnames = ['shape', 'loc', 'scale']
# ML fit with Nelder-Mead, then the nested KS-based fit for comparison
res_par = mod_par.fit(start_params=mod_par.start_params, method='nm', maxfun=10000, maxiter=5000)
#res_par2 = mod_par.fit(start_params=res_par.params, method='newton', maxfun=10000, maxiter=5000)
res_parks = mod_par.fit_ks1()
print res_par.params
#print res_par2.params
print res_parks
# for a Pareto fit, loc + scale should be close to the sample minimum
print res_par.params[1:].sum(), sum(res_parks[1:]), mod_par.endog.min()
#start new model, so we don't get two result instances with the same model instance
mod_par = MyPareto(y)
# this run holds loc fixed (fixdf has loc = 9.9 from above)
mod_par.fixed_params = fixdf
mod_par.fixed_paramsmask = np.isnan(fixdf)
mod_par.df_model = mod_par.fixed_paramsmask.sum()
mod_par.df_resid = mod_par.endog.shape[0] - mod_par.df_model
#mod_par.data.xnames = np.array(['shape', 'loc', 'scale'])[mod_par.fixed_paramsmask].tolist() # works also
# keep only the names of the free parameters
mod_par.data.xnames = [name for (name, incl) in zip(['shape', 'loc', 'scale'], mod_par.fixed_paramsmask) if incl]
res_par3 = mod_par.start_params = par_start_params[mod_par.fixed_paramsmask]
res5 = mod_par.fit(start_params=mod_par.start_params)
##res_parks2 = mod_par.fit_ks()
##
##res_parkst = mod_par.fit_ks1_trim()
##print res_parkst
print res5.summary()
print res5.t_test([[1,0]])
'''
C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
please delete it from your matplotlibrc file
warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
0.0686702747648
0.0164150896481
0.128121386381
[ 0.10370428 0.09921315 0.09676723 0.10457413 0.10201618 0.89964496]
(array(0.0), array(1.4552599885729831), array(0.0), array(2.5072143354058238))
(array(0.0), array(1.6666666666666667), array(0.0), array(6.0))
repr(start_params) array([ 0.10370428, 0.09921315, 0.09676723, 0.10457413, 0.10201618,
0.89964496, 6.39309417, 0.12812139])
Optimization terminated successfully.
Current function value: -679.951339
Iterations: 398
Function evaluations: 609
estimation results t-dist
[ 0.10400826 0.10111893 0.09725133 0.10507788 0.10086163 0.8996041
4.72131318 0.09825355]
[ 0.00365493 0.00356149 0.00349329 0.00362333 0.003732 0.00362716
0.7232824 0.00388829]
repr(start_params) array([ 0.10400826, 0.10111893, 0.09725133, 0.10507788, 0.10086163,
0.8996041 , 4.72131318, 0.09825355])
Optimization terminated successfully.
Current function value: -679.950443
Iterations 3
using Newton
[ 0.10395383 0.10106762 0.09720665 0.10503384 0.10080599 0.89954546
4.70918964 0.09815885]
[ 0.00365299 0.00355968 0.00349147 0.00362166 0.00373015 0.00362533
0.72014031 0.00388434]
()
[ 0.09992709 0.09786601 0.09387356 0.10229919 0.09756623 0.85466272
4.60459182 0.09661986]
[ 0.11308292 0.10828401 0.1028508 0.11268895 0.10934726 0.94462721
7.15412655 0.13452746]
repr(start_params) array([ 1., 2.])
Warning: Maximum number of function evaluations has been exceeded.
>>> res_par.params
array([ 7.42705803e+152, 2.17339053e+153])
>>> mod_par.loglike(mod_p.start_params)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'mod_p' is not defined
>>> mod_par.loglike(mod_par.start_params)
-1085.1993430947232
>>> np.log(mod_par.pdf(mod_par.start_params))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: pdf() takes exactly 3 arguments (2 given)
>>> np.log(mod_par.pdf(*mod_par.start_params))
0.69314718055994529
>>> mod_par.loglike(*mod_par.start_params)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: loglike() takes exactly 2 arguments (3 given)
>>> mod_par.loglike(mod_par.start_params)
-1085.1993430947232
>>> np.log(stats.pareto.pdf(y[0],*mod_par.start_params))
-4.6414308627431353
>>> mod_par.loglike(mod_par.start_params)
-1085.1993430947232
>>> mod_par.nloglikeobs(mod_par.start_params)[0]
0.29377232943845044
>>> mod_par.start_params
array([ 1., 2.])
>>> np.log(stats.pareto.pdf(y[0],1,9.5,2))
-1.2806918394368461
>>> mod_par.fixed_params= None
>>> mod_par.nloglikeobs(np.array([1., 10., 2.]))[0]
0.087533156771285828
>>> y[0]
12.182956907488885
>>> mod_para.endog[0]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'mod_para' is not defined
>>> mod_par.endog[0]
12.182956907488885
>>> np.log(stats.pareto.pdf(y[0],1,10,2))
-0.86821349410251702
>>> np.log(stats.pareto.pdf(y[0],1.,10.,2.))
-0.86821349410251702
>>> stats.pareto.pdf(y[0],1.,10.,2.)
0.41970067762301644
>>> mod_par.loglikeobs(np.array([1., 10., 2.]))[0]
-0.087533156771285828
>>>
'''
'''
>>> mod_par.nloglikeobs(np.array([1., 10., 2.]))[0]
0.86821349410251691
>>> np.log(stats.pareto.pdf(y,1.,10.,2.)).sum()
-2627.9403758026938
'''
#'''
#C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
# please delete it from your matplotlibrc file
# warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
#0.0686702747648
#0.0164150896481
#0.128121386381
#[ 0.10370428 0.09921315 0.09676723 0.10457413 0.10201618 0.89964496]
#(array(0.0), array(1.4552599885729827), array(0.0), array(2.5072143354058203))
#(array(0.0), array(1.6666666666666667), array(0.0), array(6.0))
#repr(start_params) array([ 0.10370428, 0.09921315, 0.09676723, 0.10457413, 0.10201618,
# 0.89964496, 6.39309417, 0.12812139])
#Optimization terminated successfully.
# Current function value: -679.951339
# Iterations: 398
# Function evaluations: 609
#
#estimation results t-dist
#[ 0.10400826 0.10111893 0.09725133 0.10507788 0.10086163 0.8996041
# 4.72131318 0.09825355]
#[ 0.00365493 0.00356149 0.00349329 0.00362333 0.003732 0.00362716
# 0.72325227 0.00388822]
#repr(start_params) array([ 0.10400826, 0.10111893, 0.09725133, 0.10507788, 0.10086163,
# 0.8996041 , 4.72131318, 0.09825355])
#Optimization terminated successfully.
# Current function value: -679.950443
# Iterations 3
#using Newton
#[ 0.10395383 0.10106762 0.09720665 0.10503384 0.10080599 0.89954546
# 4.70918964 0.09815885]
#[ 0.00365299 0.00355968 0.00349147 0.00362166 0.00373015 0.00362533
# 0.72014669 0.00388436]
#()
#[ 0.09992709 0.09786601 0.09387356 0.10229919 0.09756623 0.85466272
# 4.60459182 0.09661986]
#[ 0.11308292 0.10828401 0.1028508 0.11268895 0.10934726 0.94462721
# 7.15412655 0.13452746]
#repr(start_params) array([ 1., 2.])
#Warning: Maximum number of function evaluations has been exceeded.
#repr(start_params) array([ 3.06504406e+302, 3.29325579e+303])
#Traceback (most recent call last):
# File "C:\Josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\examples\ex_generic_mle_tdist.py", line 222, in <module>
# res_par2 = mod_par.fit(start_params=res_par.params, method='newton', maxfun=10000, maxiter=5000)
# File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 547, in fit
# disp=disp, callback=callback, **kwargs)
# File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 262, in fit
# newparams = oldparams - np.dot(np.linalg.inv(H),
# File "C:\Programs\Python25\lib\site-packages\numpy\linalg\linalg.py", line 423, in inv
# return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
# File "C:\Programs\Python25\lib\site-packages\numpy\linalg\linalg.py", line 306, in solve
# raise LinAlgError, 'Singular matrix'
#numpy.linalg.linalg.LinAlgError: Singular matrix
#
#>>> mod_par.fixed_params
#array([ NaN, 10., NaN])
#>>> mod_par.start_params
#array([ 1., 2.])
#>>> np.source(stats.pareto.fit_fr)
#In file: c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\stats\distributions_patch.py
#
#def fit_fr(self, data, *args, **kwds):
# '''estimate distribution parameters by MLE taking some parameters as fixed
#
# Parameters
# ----------
# data : array, 1d
# data for which the distribution parameters are estimated,
# args : list ? check
# starting values for optimization
# kwds :
#
# - 'frozen' : array_like
# values for frozen distribution parameters and, for elements with
# np.nan, the corresponding parameter will be estimated
#
# Returns
# -------
# argest : array
# estimated parameters
#
#
# Examples
# --------
# generate random sample
# >>> np.random.seed(12345)
# >>> x = stats.gamma.rvs(2.5, loc=0, scale=1.2, size=200)
#
# estimate all parameters
# >>> stats.gamma.fit(x)
# array([ 2.0243194 , 0.20395655, 1.44411371])
# >>> stats.gamma.fit_fr(x, frozen=[np.nan, np.nan, np.nan])
# array([ 2.0243194 , 0.20395655, 1.44411371])
#
# keep loc fixed, estimate shape and scale parameters
# >>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, np.nan])
# array([ 2.45603985, 1.27333105])
#
# keep loc and scale fixed, estimate shape parameter
# >>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, 1.0])
# array([ 3.00048828])
# >>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, 1.2])
# array([ 2.57792969])
#
# estimate only scale parameter for fixed shape and loc
# >>> stats.gamma.fit_fr(x, frozen=[2.5, 0.0, np.nan])
# array([ 1.25087891])
#
# Notes
# -----
# self is an instance of a distribution class. This can be attached to
# scipy.stats.distributions.rv_continuous
#
# *Todo*
#
# * check if docstring is correct
# * more input checking, args is list ? might also apply to current fit method
#
# '''
# loc0, scale0 = map(kwds.get, ['loc', 'scale'],[0.0, 1.0])
# Narg = len(args)
#
# if Narg == 0 and hasattr(self, '_fitstart'):
# x0 = self._fitstart(data)
# elif Narg > self.numargs:
# raise ValueError, "Too many input arguments."
# else:
# args += (1.0,)*(self.numargs-Narg)
# # location and scale are at the end
# x0 = args + (loc0, scale0)
#
# if 'frozen' in kwds:
# frmask = np.array(kwds['frozen'])
# if len(frmask) != self.numargs+2:
# raise ValueError, "Incorrect number of frozen arguments."
# else:
# # keep starting values for not frozen parameters
# x0 = np.array(x0)[np.isnan(frmask)]
# else:
# frmask = None
#
# #print x0
# #print frmask
# return optimize.fmin(self.nnlf_fr, x0,
# args=(np.ravel(data), frmask), disp=0)
#
#>>> stats.pareto.fit_fr(y, 1., frozen=[np.nan, loc, np.nan])
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
#NameError: name 'loc' is not defined
#
#>>> stats.pareto.fit_fr(y, 1., frozen=[np.nan, 10., np.nan])
#array([ 1.0346268 , 2.00184808])
#>>> stats.pareto.fit_fr(y, (1.,2), frozen=[np.nan, 10., np.nan])
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\stats\distributions_patch.py", line 273, in fit_fr
# x0 = np.array(x0)[np.isnan(frmask)]
#ValueError: setting an array element with a sequence.
#
#>>> stats.pareto.fit_fr(y, [1.,2], frozen=[np.nan, 10., np.nan])
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\stats\distributions_patch.py", line 273, in fit_fr
# x0 = np.array(x0)[np.isnan(frmask)]
#ValueError: setting an array element with a sequence.
#
#>>> stats.pareto.fit_fr(y, frozen=[np.nan, 10., np.nan])
#array([ 1.03463526, 2.00184809])
#>>> stats.pareto.pdf(y, 1.03463526, 10, 2.00184809).sum()
#173.33947284555239
#>>> mod_par(1.03463526, 10, 2.00184809)
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
#TypeError: 'MyPareto' object is not callable
#
#>>> mod_par.loglike(1.03463526, 10, 2.00184809)
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
#TypeError: loglike() takes exactly 2 arguments (4 given)
#
#>>> mod_par.loglike((1.03463526, 10, 2.00184809))
#-962.21623668859741
#>>> np.log(stats.pareto.pdf(y, 1.03463526, 10, 2.00184809)).sum()
#-inf
#>>> np.log(stats.pareto.pdf(y, 1.03463526, 9, 2.00184809)).sum()
#-3074.5947476137271
#>>> np.log(stats.pareto.pdf(y, 1.03463526, 10., 2.00184809)).sum()
#-inf
#>>> np.log(stats.pareto.pdf(y, 1.03463526, 9.9, 2.00184809)).sum()
#-2677.3867091635661
#>>> y.min()
#12.001848089426717
#>>> np.log(stats.pareto.pdf(y, 1.03463526, loc=9.9, scale=2.00184809)).sum()
#-2677.3867091635661
#>>> np.log(stats.pareto.pdf(y, 1.03463526, loc=10., scale=2.00184809)).sum()
#-inf
#>>> stats.pareto.logpdf(y, 1.03463526, loc=10., scale=2.00184809).sum()
#-inf
#>>> stats.pareto.logpdf(y, 1.03463526, loc=9.99, scale=2.00184809).sum()
#-2631.6120098202355
#>>> mod_par.loglike((1.03463526, 9.99, 2.00184809))
#-963.2513896113644
#>>> maxabs(y, mod_par.endog)
#0.0
#>>> np.source(stats.pareto.logpdf)
#In file: C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6579.win32\Programs\Python25\Lib\site-packages\scipy\stats\distributions.py
#
# def logpdf(self, x, *args, **kwds):
# """
# Log of the probability density function at x of the given RV.
#
# This uses more numerically accurate calculation if available.
#
# Parameters
# ----------
# x : array-like
# quantiles
# arg1, arg2, arg3,... : array-like
# The shape parameter(s) for the distribution (see docstring of the
# instance object for more information)
# loc : array-like, optional
# location parameter (default=0)
# scale : array-like, optional
# scale parameter (default=1)
#
# Returns
# -------
# logpdf : array-like
# Log of the probability density function evaluated at x
#
# """
# loc,scale=map(kwds.get,['loc','scale'])
# args, loc, scale = self._fix_loc_scale(args, loc, scale)
# x,loc,scale = map(arr,(x,loc,scale))
# args = tuple(map(arr,args))
# x = arr((x-loc)*1.0/scale)
# cond0 = self._argcheck(*args) & (scale > 0)
# cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
# cond = cond0 & cond1
# output = empty(shape(cond),'d')
# output.fill(NINF)
# putmask(output,(1-cond0)*array(cond1,bool),self.badvalue)
# goodargs = argsreduce(cond, *((x,)+args+(scale,)))
# scale, goodargs = goodargs[-1], goodargs[:-1]
# place(output,cond,self._logpdf(*goodargs) - log(scale))
# if output.ndim == 0:
# return output[()]
# return output
#
#>>> np.source(stats.pareto._logpdf)
#In file: C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6579.win32\Programs\Python25\Lib\site-packages\scipy\stats\distributions.py
#
# def _logpdf(self, x, *args):
# return log(self._pdf(x, *args))
#
#>>> np.source(stats.pareto._pdf)
#In file: C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6579.win32\Programs\Python25\Lib\site-packages\scipy\stats\distributions.py
#
# def _pdf(self, x, b):
# return b * x**(-b-1)
#
#>>> stats.pareto.a
#1.0
#>>> (1-loc)/scale
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
#NameError: name 'loc' is not defined
#
#>>> b, loc, scale = (1.03463526, 9.99, 2.00184809)
#>>> (1-loc)/scale
#-4.4908502522786327
#>>> (x-loc)/scale == 1
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
#NameError: name 'x' is not defined
#
#>>> (lb-loc)/scale == 1
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
#NameError: name 'lb' is not defined
#
#>>> lb = scale + loc
#>>> lb
#11.991848090000001
#>>> (lb-loc)/scale == 1
#False
#>>> (lb-loc)/scale
#1.0000000000000004
#>>>
#'''
'''
repr(start_params) array([ 1., 10., 2.])
Optimization terminated successfully.
Current function value: 2626.436870
Iterations: 102
Function evaluations: 210
Optimization terminated successfully.
Current function value: 0.016555
Iterations: 16
Function evaluations: 35
[ 1.03482659 10.00737039 1.9944777 ]
(1.0596088578825995, 9.9043376069230007, 2.0975104813987118)
>>> 9.9043376069230007 + 2.0975104813987118
12.001848088321712
>>> y.min()
12.001848089426717
'''
'''
C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
please delete it from your matplotlibrc file
warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
0.0686702747648
0.0164150896481
0.128121386381
[ 0.10370428 0.09921315 0.09676723 0.10457413 0.10201618 0.89964496]
(array(0.0), array(1.4552599885729829), array(0.0), array(2.5072143354058221))
(array(0.0), array(1.6666666666666667), array(0.0), array(6.0))
repr(start_params) array([ 0.10370428, 0.09921315, 0.09676723, 0.10457413, 0.10201618,
0.89964496, 6.39309417, 0.12812139])
Optimization terminated successfully.
Current function value: -679.951339
Iterations: 398
Function evaluations: 609
estimation results t-dist
[ 0.10400826 0.10111893 0.09725133 0.10507788 0.10086163 0.8996041
4.72131318 0.09825355]
[ 0.00365493 0.00356149 0.00349329 0.00362333 0.003732 0.00362716
0.72329352 0.00388832]
repr(start_params) array([ 0.10400826, 0.10111893, 0.09725133, 0.10507788, 0.10086163,
0.8996041 , 4.72131318, 0.09825355])
Optimization terminated successfully.
Current function value: -679.950443
Iterations 3
using Newton
[ 0.10395383 0.10106762 0.09720665 0.10503384 0.10080599 0.89954546
4.70918964 0.09815885]
[ 0.00365299 0.00355968 0.00349147 0.00362166 0.00373015 0.00362533
0.7201488 0.00388437]
()
[ 0.09992709 0.09786601 0.09387356 0.10229919 0.09756623 0.85466272
4.60459182 0.09661986]
[ 0.11308292 0.10828401 0.1028508 0.11268895 0.10934726 0.94462721
7.15412655 0.13452746]
repr(start_params) array([ 1., 9., 2.])
Optimization terminated successfully.
Current function value: 2636.129089
Iterations: 147
Function evaluations: 279
Optimization terminated successfully.
Current function value: 0.016555
Iterations: 16
Function evaluations: 35
[ 0.84856418 10.2197801 1.78206799]
(1.0596088578825995, 9.9043376069230007, 2.0975104813987118)
12.0018480891 12.0018480883 12.0018480894
repr(start_params) array([ 1., 2.])
Warning: Desired error not necessarily achieveddue to precision loss
Current function value: 2643.549907
Iterations: 2
Function evaluations: 13
Gradient evaluations: 12
>>> res_parks2 = mod_par.fit_ks()
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2642.465273
Iterations: 92
Function evaluations: 172
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2636.639863
Iterations: 73
Function evaluations: 136
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2631.568778
Iterations: 75
Function evaluations: 133
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2627.821044
Iterations: 75
Function evaluations: 135
repr(start_params) array([ 1., 2.])
Warning: Maximum number of function evaluations has been exceeded.
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2631.568778
Iterations: 75
Function evaluations: 133
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.431596
Iterations: 58
Function evaluations: 109
repr(start_params) array([ 1., 2.])
Warning: Maximum number of function evaluations has been exceeded.
repr(start_params) array([ 1., 2.])
Warning: Maximum number of function evaluations has been exceeded.
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.737426
Iterations: 60
Function evaluations: 109
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2627.821044
Iterations: 75
Function evaluations: 135
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.471666
Iterations: 48
Function evaluations: 94
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2627.196314
Iterations: 66
Function evaluations: 119
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.578538
Iterations: 56
Function evaluations: 103
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.471666
Iterations: 48
Function evaluations: 94
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.651702
Iterations: 67
Function evaluations: 122
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.737426
Iterations: 60
Function evaluations: 109
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.613505
Iterations: 73
Function evaluations: 141
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.578538
Iterations: 56
Function evaluations: 103
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.632218
Iterations: 64
Function evaluations: 119
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.651702
Iterations: 67
Function evaluations: 122
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.622789
Iterations: 63
Function evaluations: 114
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.613505
Iterations: 73
Function evaluations: 141
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.627465
Iterations: 59
Function evaluations: 109
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.632218
Iterations: 64
Function evaluations: 119
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.625104
Iterations: 59
Function evaluations: 108
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.629829
Iterations: 66
Function evaluations: 118
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.632218
Iterations: 64
Function evaluations: 119
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.632218
Iterations: 64
Function evaluations: 119
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.628642
Iterations: 67
Function evaluations: 122
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.631023
Iterations: 68
Function evaluations: 129
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.630430
Iterations: 57
Function evaluations: 108
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.629598
Iterations: 60
Function evaluations: 112
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.630430
Iterations: 57
Function evaluations: 108
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.630130
Iterations: 65
Function evaluations: 122
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.629536
Iterations: 62
Function evaluations: 111
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.630130
Iterations: 65
Function evaluations: 122
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.629984
Iterations: 67
Function evaluations: 123
Optimization terminated successfully.
Current function value: 0.016560
Iterations: 18
Function evaluations: 38
>>> res_parks2
(1.0592352626264809, 9.9051580457572399, 2.0966900385041591)
>>> res_parks
(1.0596088578825995, 9.9043376069230007, 2.0975104813987118)
>>> res_par.params
array([ 0.84856418, 10.2197801 , 1.78206799])
>>> np.sqrt(np.diag(mod_par.hessian(res_par.params)))
array([ NaN, NaN, NaN])
>>> mod_par.hessian(res_par.params
... )
array([[ NaN, NaN, NaN],
[ NaN, NaN, NaN],
[ NaN, NaN, NaN]])
>>> mod_par.hessian(res_parks)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 533, in hessian
return approx_hess(params, self.loglike)[0] #need options for hess (epsilon)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\regression\numdiff.py", line 118, in approx_hess
xh = x + h
TypeError: can only concatenate tuple (not "float") to tuple
>>> mod_par.hessian(np.array(res_parks))
array([[ NaN, NaN, NaN],
[ NaN, NaN, NaN],
[ NaN, NaN, NaN]])
>>> mod_par.fixed_params
array([ NaN, 9.90510677, NaN])
>>> mod_par.fixed_params=None
>>> mod_par.hessian(np.array(res_parks))
array([[-890.48553491, NaN, NaN],
[ NaN, NaN, NaN],
[ NaN, NaN, NaN]])
>>> mod_par.loglike(np.array(res_parks))
-2626.6322080820569
>>> mod_par.bsejac
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\decorators.py", line 85, in __get__
_cachedval = self.fget(obj)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 592, in bsejac
return np.sqrt(np.diag(self.covjac))
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\decorators.py", line 85, in __get__
_cachedval = self.fget(obj)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 574, in covjac
jacv = self.jacv
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\decorators.py", line 85, in __get__
_cachedval = self.fget(obj)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 557, in jacv
return self.jac(self._results.params)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 530, in jac
return approx_fprime1(params, self.loglikeobs, **kwds)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\regression\numdiff.py", line 80, in approx_fprime1
f0 = f(*((xk,)+args))
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 522, in loglikeobs
return -self.nloglikeobs(params)
File "C:\Josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\examples\ex_generic_mle_tdist.py", line 184, in nloglikeobs
scale = params[2]
IndexError: index out of bounds
>>> hasattr(self, 'start_params')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'self' is not defined
>>> hasattr(mod_par, 'start_params')
True
>>> mod_par.start_params
array([ 1., 2.])
>>> stats.pareto.stats(1., 9., 2., moments='mvsk')
(array(1.#INF), array(1.#INF), array(1.#QNAN), array(1.#QNAN))
>>> stats.pareto.stats(1., 8., 2., moments='mvsk')
(array(1.#INF), array(1.#INF), array(1.#QNAN), array(1.#QNAN))
>>> stats.pareto.stats(1., 8., 1., moments='mvsk')
(array(1.#INF), array(1.#INF), array(1.#QNAN), array(1.#QNAN))
>>> stats.pareto.stats(1., moments='mvsk')
(array(1.#INF), array(1.#INF), array(1.#QNAN), array(1.#QNAN))
>>> stats.pareto.stats(0.5., moments='mvsk')
File "<stdin>", line 1
stats.pareto.stats(0.5., moments='mvsk')
^
SyntaxError: invalid syntax
>>> stats.pareto.stats(0.5, moments='mvsk')
(array(1.#INF), array(1.#INF), array(1.#QNAN), array(1.#QNAN))
>>> stats.pareto.stats(2, moments='mvsk')
(array(2.0), array(1.#INF), array(1.#QNAN), array(1.#QNAN))
>>> stats.pareto.stats(10, moments='mvsk')
(array(1.1111111111111112), array(0.015432098765432098), array(2.8110568859997356), array(14.828571428571429))
>>> stats.pareto.rvs(10, size=10)
array([ 1.07716265, 1.18977526, 1.07093 , 1.05157081, 1.15991232,
1.31015589, 1.06675107, 1.08082475, 1.19501243, 1.34967158])
>>> r = stats.pareto.rvs(10, size=1000)
>>> plt
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'plt' is not defined
>>> import matplotlib.pyplot as plt
>>> plt.hist(r)
(array([962, 32, 3, 2, 0, 0, 0, 0, 0, 1]), array([ 1.00013046, 1.3968991 , 1.79366773, 2.19043637, 2.587205 ,
2.98397364, 3.38074227, 3.77751091, 4.17427955, 4.57104818,
4.96781682]), <a list of 10 Patch objects>)
>>> plt.show()
'''
| bsd-3-clause | d38cb1c1bbfa390d53cd5e06b9508159 | 35.26075 | 167 | 0.658542 | 2.877169 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/tools/parallel.py | 3 | 1838 | '''Parallel utility function using joblib
copied from https://github.com/mne-tools/mne-python
Author: Alexandre Gramfort <gramfort@nmr.mgh.harvard.edu>
License: Simplified BSD
changes for statsmodels (Josef Perktold)
- try import from joblib directly, (doesn't import all of sklearn)
'''
def parallel_func(func, n_jobs, verbose=5):
    """Return parallel instance with delayed function

    Util function to use joblib only if available; degrades gracefully to
    serial execution when joblib cannot be imported.

    Parameters
    ----------
    func: callable
        A function
    n_jobs: int
        Number of jobs to run in parallel; -1 means use all CPU cores
    verbose: int
        Verbosity level

    Returns
    -------
    parallel: instance of joblib.Parallel or list
        The parallel object
    my_func: callable
        func if not parallel or delayed(func)
    n_jobs: int
        Number of jobs >= 0

    Examples
    --------
    >>> from math import sqrt
    >>> from statsmodels.tools.parallel import parallel_func
    >>> parallel, p_func, n_jobs = parallel_func(sqrt, n_jobs=-1, verbose=0)
    >>> print n_jobs
    >>> parallel(p_func(i**2) for i in range(10))
    """
    try:
        # prefer the standalone joblib package; fall back to the copy
        # bundled with scikit-learn
        try:
            from joblib import Parallel, delayed
        except ImportError:
            from sklearn.externals.joblib import Parallel, delayed
        parallel = Parallel(n_jobs, verbose=verbose)
        my_func = delayed(func)
        if n_jobs == -1:
            # resolve -1 ("all cores") to the actual CPU count
            try:
                import multiprocessing
                n_jobs = multiprocessing.cpu_count()
            except (ImportError, NotImplementedError):
                print "multiprocessing not installed. Cannot run in parallel."
                n_jobs = 1
    except ImportError:
        # no joblib at all: run serially, with ``parallel`` being plain
        # ``list`` and ``my_func`` the undecorated function
        print "joblib not installed. Cannot run in parallel."
        n_jobs = 1
        my_func = func
        parallel = list
    return parallel, my_func, n_jobs
| bsd-3-clause | e242c7789a3a528df7103b06f08c2f5f | 26.432836 | 78 | 0.615343 | 4.284382 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/sandbox/regression/tools.py | 6 | 13069 | '''gradient/Jacobian of normal and t loglikelihood
use chain rule
normal derivative wrt mu, sigma and beta
new version: loc-scale distributions, derivative wrt loc, scale
also includes "standardized" t distribution (for use in GARCH)
TODO:
* use sympy for derivative of loglike wrt shape parameters
it works for df of t distribution dlog(gamma(a))da = polygamma(0,a) check
polygamma is available in scipy.special
* get loc-scale example to work with mean = X*b
* write some full unit test examples
A: josef-pktd
'''
import numpy as np
from scipy import special
from scipy.special import gammaln
def norm_lls(y, params):
'''normal loglikelihood given observations and mean mu and variance sigma2
Parameters
----------
y : array, 1d
normally distributed random variable
params: array, (nobs, 2)
array of mean, variance (mu, sigma2) with observations in rows
Returns
-------
lls : array
contribution to loglikelihood for each observation
'''
mu, sigma2 = params.T
lls = -0.5*(np.log(2*np.pi) + np.log(sigma2) + (y-mu)**2/sigma2)
return lls
def norm_lls_grad(y, params):
'''Jacobian of normal loglikelihood wrt mean mu and variance sigma2
Parameters
----------
y : array, 1d
normally distributed random variable
params: array, (nobs, 2)
array of mean, variance (mu, sigma2) with observations in rows
Returns
-------
grad : array (nobs, 2)
derivative of loglikelihood for each observation wrt mean in first
column, and wrt variance in second column
Notes
-----
this is actually the derivative wrt sigma not sigma**2, but evaluated
with parameter sigma2 = sigma**2
'''
mu, sigma2 = params.T
dllsdmu = (y-mu)/sigma2
dllsdsigma2 = ((y-mu)**2/sigma2 - 1)/np.sqrt(sigma2)
return np.column_stack((dllsdmu, dllsdsigma2))
def mean_grad(x, beta):
'''gradient/Jacobian for d (x*beta)/ d beta
'''
return x
def normgrad(y, x, params):
'''Jacobian of normal loglikelihood wrt mean mu and variance sigma2
Parameters
----------
y : array, 1d
normally distributed random variable with mean x*beta, and variance sigma2
x : array, 2d
explanatory variables, observation in rows, variables in columns
params: array_like, (nvars + 1)
array of coefficients and variance (beta, sigma2)
Returns
-------
grad : array (nobs, 2)
derivative of loglikelihood for each observation wrt mean in first
column, and wrt scale (sigma) in second column
assume params = (beta, sigma2)
Notes
-----
TODO: for heteroscedasticity need sigma to be a 1d array
'''
beta = params[:-1]
sigma2 = params[-1]*np.ones((len(y),1))
dmudbeta = mean_grad(x, beta)
mu = np.dot(x, beta)
#print beta, sigma2
params2 = np.column_stack((mu,sigma2))
dllsdms = norm_lls_grad(y,params2)
grad = np.column_stack((dllsdms[:,:1]*dmudbeta, dllsdms[:,:1]))
return grad
def tstd_lls(y, params, df):
'''t loglikelihood given observations and mean mu and variance sigma2 = 1
Parameters
----------
y : array, 1d
normally distributed random variable
params: array, (nobs, 2)
array of mean, variance (mu, sigma2) with observations in rows
df : integer
degrees of freedom of the t distribution
Returns
-------
lls : array
contribution to loglikelihood for each observation
Notes
-----
parameterized for garch
'''
mu, sigma2 = params.T
df = df*1.0
#lls = gammaln((df+1)/2.) - gammaln(df/2.) - 0.5*np.log((df-2)*np.pi)
#lls -= (df+1)/2. * np.log(1. + (y-mu)**2/(df-2.)/sigma2) + 0.5 * np.log(sigma2)
lls = gammaln((df+1)/2.) - gammaln(df/2.) - 0.5*np.log((df-2)*np.pi)
lls -= (df+1)/2. * np.log(1. + (y-mu)**2/(df-2)/sigma2) + 0.5 * np.log(sigma2)
return lls
def norm_dlldy(y):
'''derivative of log pdf of standard normal with respect to y
'''
return -y
def ts_dlldy(y, df):
'''derivative of log pdf of standardized (?) t with respect to y
Notes
-----
parameterized for garch, with mean 0 and variance 1
'''
#(df+1)/2. / (1 + y**2/(df-2.)) * 2.*y/(df-2.)
#return -(df+1)/(df-2.) / (1 + y**2/(df-2.)) * y
return -(df+1)/(df) / (1 + y**2/(df)) * y
def tstd_pdf(x, df):
'''pdf for standardized (not standard) t distribution, variance is one
'''
r = np.array(df*1.0)
Px = np.exp(special.gammaln((r+1)/2.)-special.gammaln(r/2.))/np.sqrt((r-2)*pi)
Px /= (1+(x**2)/(r-2))**((r+1)/2.)
return Px
def ts_lls(y, params, df):
'''t loglikelihood given observations and mean mu and variance sigma2 = 1
Parameters
----------
y : array, 1d
normally distributed random variable
params: array, (nobs, 2)
array of mean, variance (mu, sigma2) with observations in rows
df : integer
degrees of freedom of the t distribution
Returns
-------
lls : array
contribution to loglikelihood for each observation
Notes
-----
parameterized for garch
normalized/rescaled so that sigma2 is the variance
>>> df = 10; sigma = 1.
>>> stats.t.stats(df, loc=0., scale=sigma.*np.sqrt((df-2.)/df))
(array(0.0), array(1.0))
>>> sigma = np.sqrt(2.)
>>> stats.t.stats(df, loc=0., scale=sigma*np.sqrt((df-2.)/df))
(array(0.0), array(2.0))
'''
print y, params, df
mu, sigma2 = params.T
df = df*1.0
#lls = gammaln((df+1)/2.) - gammaln(df/2.) - 0.5*np.log((df-2)*np.pi)
#lls -= (df+1)/2. * np.log(1. + (y-mu)**2/(df-2.)/sigma2) + 0.5 * np.log(sigma2)
lls = gammaln((df+1)/2.) - gammaln(df/2.) - 0.5*np.log((df)*np.pi)
lls -= (df+1.)/2. * np.log(1. + (y-mu)**2/(df)/sigma2) + 0.5 * np.log(sigma2)
return lls
def ts_dlldy(y, df):
'''derivative of log pdf of standard t with respect to y
Parameters
----------
y : array_like
data points of random variable at which loglike is evaluated
df : array_like
degrees of freedom,shape parameters of log-likelihood function
of t distribution
Returns
-------
dlldy : array
derivative of loglikelihood wrt random variable y evaluated at the
points given in y
Notes
-----
with mean 0 and scale 1, but variance is df/(df-2)
'''
df = df*1.
#(df+1)/2. / (1 + y**2/(df-2.)) * 2.*y/(df-2.)
#return -(df+1)/(df-2.) / (1 + y**2/(df-2.)) * y
return -(df+1)/(df) / (1 + y**2/(df)) * y
def tstd_dlldy(y, df):
'''derivative of log pdf of standardized t with respect to y
Parameters
----------
y : array_like
data points of random variable at which loglike is evaluated
df : array_like
degrees of freedom,shape parameters of log-likelihood function
of t distribution
Returns
-------
dlldy : array
derivative of loglikelihood wrt random variable y evaluated at the
points given in y
Notes
-----
parameterized for garch, standardized to variance=1
'''
#(df+1)/2. / (1 + y**2/(df-2.)) * 2.*y/(df-2.)
return -(df+1)/(df-2.) / (1 + y**2/(df-2.)) * y
#return (df+1)/(df) / (1 + y**2/(df)) * y
def locscale_grad(y, loc, scale, dlldy, *args):
'''derivative of log-likelihood with respect to location and scale
Parameters
----------
y : array_like
data points of random variable at which loglike is evaluated
loc : float
location parameter of distribution
scale : float
scale parameter of distribution
dlldy : function
derivative of loglikelihood fuction wrt. random variable x
args : array_like
shape parameters of log-likelihood function
Returns
-------
dlldloc : array
derivative of loglikelihood wrt location evaluated at the
points given in y
dlldscale : array
derivative of loglikelihood wrt scale evaluated at the
points given in y
'''
yst = (y-loc)/scale #ystandardized
dlldloc = -dlldy(yst, *args) / scale
dlldscale = -1./scale - dlldy(yst, *args) * (y-loc)/scale**2
return dlldloc, dlldscale
if __name__ == '__main__':
verbose = 0
if verbose:
sig = 0.1
beta = np.ones(2)
rvs = np.random.randn(10,3)
x = rvs[:,1:]
y = np.dot(x,beta) + sig*rvs[:,0]
params = [1,1,1]
print normgrad(y, x, params)
dllfdbeta = (y-np.dot(x, beta))[:,None]*x #for sigma = 1
print dllfdbeta
print locscale_grad(y, np.dot(x, beta), 1, norm_dlldy)
print (y-np.dot(x, beta))
from scipy import stats, misc
def llt(y,loc,scale,df):
return np.log(stats.t.pdf(y, df, loc=loc, scale=scale))
def lltloc(loc,y,scale,df):
return np.log(stats.t.pdf(y, df, loc=loc, scale=scale))
def lltscale(scale,y,loc,df):
return np.log(stats.t.pdf(y, df, loc=loc, scale=scale))
def llnorm(y,loc,scale):
return np.log(stats.norm.pdf(y, loc=loc, scale=scale))
def llnormloc(loc,y,scale):
return np.log(stats.norm.pdf(y, loc=loc, scale=scale))
def llnormscale(scale,y,loc):
return np.log(stats.norm.pdf(y, loc=loc, scale=scale))
if verbose:
print '\ngradient of t'
print misc.derivative(llt, 1, dx=1e-6, n=1, args=(0,1,10), order=3)
print 't ', locscale_grad(1, 0, 1, tstd_dlldy, 10)
print 'ts', locscale_grad(1, 0, 1, ts_dlldy, 10)
print misc.derivative(llt, 1.5, dx=1e-10, n=1, args=(0,1,20), order=3),
print 'ts', locscale_grad(1.5, 0, 1, ts_dlldy, 20)
print misc.derivative(llt, 1.5, dx=1e-10, n=1, args=(0,2,20), order=3),
print 'ts', locscale_grad(1.5, 0, 2, ts_dlldy, 20)
print misc.derivative(llt, 1.5, dx=1e-10, n=1, args=(1,2,20), order=3),
print 'ts', locscale_grad(1.5, 1, 2, ts_dlldy, 20)
print misc.derivative(lltloc, 1, dx=1e-10, n=1, args=(1.5,2,20), order=3),
print misc.derivative(lltscale, 2, dx=1e-10, n=1, args=(1.5,1,20), order=3)
y,loc,scale,df = 1.5, 1, 2, 20
print 'ts', locscale_grad(y,loc,scale, ts_dlldy, 20)
print misc.derivative(lltloc, loc, dx=1e-10, n=1, args=(y,scale,df), order=3),
print misc.derivative(lltscale, scale, dx=1e-10, n=1, args=(y,loc,df), order=3)
print '\ngradient of norm'
print misc.derivative(llnorm, 1, dx=1e-6, n=1, args=(0,1), order=3)
print locscale_grad(1, 0, 1, norm_dlldy)
y,loc,scale = 1.5, 1, 2
print 'ts', locscale_grad(y,loc,scale, norm_dlldy)
print misc.derivative(llnormloc, loc, dx=1e-10, n=1, args=(y,scale), order=3),
print misc.derivative(llnormscale, scale, dx=1e-10, n=1, args=(y,loc), order=3)
y,loc,scale = 1.5, 0, 1
print 'ts', locscale_grad(y,loc,scale, norm_dlldy)
print misc.derivative(llnormloc, loc, dx=1e-10, n=1, args=(y,scale), order=3),
print misc.derivative(llnormscale, scale, dx=1e-10, n=1, args=(y,loc), order=3)
#print 'still something wrong with handling of scale and variance'
#looks ok now
print '\nloglike of t'
print tstd_lls(1, np.array([0,1]), 100), llt(1,0,1,100), 'differently standardized'
print tstd_lls(1, np.array([0,1]), 10), llt(1,0,1,10), 'differently standardized'
print ts_lls(1, np.array([0,1]), 10), llt(1,0,1,10)
print tstd_lls(1, np.array([0,1.*10./8.]), 10), llt(1.,0,1.,10)
print ts_lls(1, np.array([0,1]), 100), llt(1,0,1,100)
print tstd_lls(1, np.array([0,1]), 10), llt(1,0,1.*np.sqrt(8/10.),10)
from numpy.testing import assert_almost_equal
params =[(0, 1), (1.,1.), (0.,2.), ( 1., 2.)]
yt = np.linspace(-2.,2.,11)
for loc,scale in params:
dlldlo = misc.derivative(llnormloc, loc, dx=1e-10, n=1, args=(yt,scale), order=3)
dlldsc = misc.derivative(llnormscale, scale, dx=1e-10, n=1, args=(yt,loc), order=3)
gr = locscale_grad(yt, loc, scale, norm_dlldy)
assert_almost_equal(dlldlo, gr[0], 5, err_msg='deriv loc')
assert_almost_equal(dlldsc, gr[1], 5, err_msg='deriv scale')
for df in [3, 10, 100]:
for loc,scale in params:
dlldlo = misc.derivative(lltloc, loc, dx=1e-10, n=1, args=(yt,scale,df), order=3)
dlldsc = misc.derivative(lltscale, scale, dx=1e-10, n=1, args=(yt,loc,df), order=3)
gr = locscale_grad(yt, loc, scale, ts_dlldy, df)
assert_almost_equal(dlldlo, gr[0], 4, err_msg='deriv loc')
assert_almost_equal(dlldsc, gr[1], 4, err_msg='deriv scale')
assert_almost_equal(ts_lls(yt, np.array([loc, scale**2]), df),
llt(yt,loc,scale,df), 5,
err_msg='loglike')
assert_almost_equal(tstd_lls(yt, np.array([loc, scale**2]), df),
llt(yt,loc,scale*np.sqrt((df-2.)/df),df), 5,
err_msg='loglike')
| bsd-3-clause | ea65577f9c78d9838e6f180f097c2456 | 31.591022 | 95 | 0.587038 | 3.061373 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/sandbox/tsa/examples/example_var.py | 36 | 1218 | """
Look at some macro plots, then do some VARs and IRFs.
"""
import numpy as np
import statsmodels.api as sm
import scikits.timeseries as ts
import scikits.timeseries.lib.plotlib as tplt
from matplotlib import pyplot as plt
data = sm.datasets.macrodata.load()
data = data.data
### Create Timeseries Representations of a few vars
dates = ts.date_array(start_date=ts.Date('Q', year=1959, quarter=1),
end_date=ts.Date('Q', year=2009, quarter=3))
ts_data = data[['realgdp','realcons','cpi']].view(float).reshape(-1,3)
ts_data = np.column_stack((ts_data, (1 - data['unemp']/100) * data['pop']))
ts_series = ts.time_series(ts_data, dates)
fig = tplt.tsfigure()
fsp = fig.add_tsplot(221)
fsp.tsplot(ts_series[:,0],'-')
fsp.set_title("Real GDP")
fsp = fig.add_tsplot(222)
fsp.tsplot(ts_series[:,1],'r-')
fsp.set_title("Real Consumption")
fsp = fig.add_tsplot(223)
fsp.tsplot(ts_series[:,2],'g-')
fsp.set_title("CPI")
fsp = fig.add_tsplot(224)
fsp.tsplot(ts_series[:,3],'y-')
fsp.set_title("Employment")
# Plot real GDP
#plt.subplot(221)
#plt.plot(data['realgdp'])
#plt.title("Real GDP")
# Plot employment
#plt.subplot(222)
# Plot cpi
#plt.subplot(223)
# Plot real consumption
#plt.subplot(224)
#plt.show()
| bsd-3-clause | f25410803ba2cdbd647124a012046f54 | 21.145455 | 75 | 0.690476 | 2.490798 | false | false | true | false |
yarikoptic/pystatsmodels | statsmodels/tsa/tests/results/arima111_css_results.py | 35 | 44167 | import numpy as np
llf = np.array([-242.06033399744])
nobs = np.array([ 202])
k = np.array([ 4])
k_exog = np.array([ 1])
sigma = np.array([ .80201496146073])
chi2 = np.array([ 348.43324197088])
df_model = np.array([ 2])
k_ar = np.array([ 1])
k_ma = np.array([ 1])
params = np.array([ .82960638524364,
.93479332833705,
-.75728342544279,
.64322799840686])
cov_params = np.array([ .14317811930738,
-.01646077810033,
.01510986837498,
-.00280799533479,
-.01646077810033,
.00321032468661,
-.00353027620719,
.00097645385252,
.01510986837498,
-.00353027620719,
.00484312817753,
-.00112050648944,
-.00280799533479,
.00097645385252,
-.00112050648944,
.0007715609499]).reshape(4,4)
xb = np.array([ .82960641384125,
.82960641384125,
.697261095047,
.61113905906677,
.51607495546341,
.47362637519836,
.41342103481293,
.40238001942635,
.37454023957253,
.33222004771233,
.32514902949333,
.31093680858612,
.30019253492355,
.31159669160843,
.29182952642441,
.30349296331406,
.29457464814186,
.28427124023438,
.30664679408073,
.29696446657181,
.31270903348923,
.29268020391464,
.28816330432892,
.29006817936897,
.30216124653816,
.30066826939583,
.31728908419609,
.30679926276207,
.3272570669651,
.37292611598969,
.36668366193771,
.40278288722038,
.36799272894859,
.36827209591866,
.38623574376106,
.39983862638474,
.42789059877396,
.43138384819031,
.46953064203262,
.48066720366478,
.48910140991211,
.53098994493484,
.54496067762375,
.55554050207138,
.58130383491516,
.60081332921982,
.58008605241776,
.58214038610458,
.58369606733322,
.53162068128586,
.54543834924698,
.52040082216263,
.50143963098526,
.48708060383797,
.47620677947998,
.48572361469269,
.51068127155304,
.61833620071411,
.61110657453537,
.76539021730423,
.84672522544861,
.92606955766678,
.96840506792068,
1.0892199277878,
1.1097067594528,
1.0187155008316,
1.0030621290207,
.97345739603043,
.95103752613068,
.82755368947983,
.84054774045944,
.85038793087006,
.84008830785751,
.92104357481003,
.89359468221664,
.87280809879303,
.91032028198242,
.95647835731506,
1.0624366998672,
1.1426770687103,
1.1679404973984,
1.311328291893,
1.473167181015,
1.5602221488953,
1.7326545715332,
1.8809853792191,
1.7803012132645,
1.7750589847565,
1.8420933485031,
1.7863517999649,
1.8328944444656,
1.7793855667114,
1.5791050195694,
1.3564316034317,
1.5250737667084,
1.3155146837234,
1.014811873436,
.98235523700714,
.97552710771561,
.97035628557205,
1.0196926593781,
1.0393049716949,
.98315137624741,
.97613000869751,
.89980864524841,
.96626943349838,
.91009211540222,
.88530200719833,
.97303456068039,
.57794612646103,
.63377332687378,
.65829831361771,
.76562696695328,
.86465454101563,
.90414637327194,
.95180231332779,
.95238989591599,
.98833626508713,
1.0333099365234,
1.0851185321808,
1.1066001653671,
1.2293750047684,
1.233595252037,
1.1480363607407,
1.2962552309036,
1.2842413187027,
1.3106474876404,
1.5614050626755,
1.4672855138779,
1.2362524271011,
1.1855486631393,
1.1294020414352,
1.1046353578568,
1.0858771800995,
1.0716745853424,
1.0786685943604,
1.0662157535553,
1.0390332937241,
.96519494056702,
.9802839756012,
.92070508003235,
.91108840703964,
.95705932378769,
.95637094974518,
.97360169887543,
1.0221517086029,
.9701629281044,
.94854199886322,
.98542231321335,
1.048855304718,
1.0081344842911,
1.0305507183075,
1.0475262403488,
.93612504005432,
.85176283121109,
.89438372850418,
.820152759552,
.71068543195724,
.76979607343674,
.76130604743958,
.77262878417969,
.85220617055893,
.84146595001221,
.93983960151672,
.97883212566376,
1.0793634653091,
1.1909983158112,
1.1690304279327,
1.2411522865295,
1.1360056400299,
1.0918840169907,
.9164656996727,
.76586949825287,
.918093085289,
.87360894680023,
.92867678403854,
1.00588285923,
.92233866453171,
.84132260084152,
.90422683954239,
.9873673915863,
.99707210063934,
1.1109310388565,
1.1971517801285,
1.138188958168,
1.2710473537445,
1.1763968467712,
1.7437561750412,
1.4101150035858,
1.3527159690857,
1.4335050582886,
.99765706062317,
1.1067585945129,
1.3086627721786,
1.2968333959579,
1.3547962903976,
1.6768488883972,
1.5905654430389,
2.0774590969086,
1.3218278884888,
.21813294291496,
.30750840902328,
.60612773895264])
y = np.array([np.nan,
29.809606552124,
29.847261428833,
29.961139678955,
29.886075973511,
30.013628005981,
29.96342086792,
30.152379989624,
30.214540481567,
30.142219543457,
30.245149612427,
30.290935516357,
30.3401927948,
30.521595001221,
30.511829376221,
30.683492660522,
30.734575271606,
30.764270782471,
30.996646881104,
31.046964645386,
31.252710342407,
31.242681503296,
31.308164596558,
31.410068511963,
31.582162857056,
31.680667877197,
31.897289276123,
31.956798553467,
32.207256317139,
32.652923583984,
32.8166847229,
33.252780914307,
33.267993927002,
33.468269348145,
33.786235809326,
34.099838256836,
34.527889251709,
34.831386566162,
35.369533538818,
35.780666351318,
36.189102172852,
36.830989837646,
37.344959259033,
37.855541229248,
38.481304168701,
39.100814819336,
39.480087280273,
39.9821434021,
40.483695983887,
40.631618499756,
41.145435333252,
41.420402526855,
41.701438903809,
41.987079620361,
42.276206970215,
42.685726165771,
43.210681915283,
44.318336486816,
44.811107635498,
46.365386962891,
47.646724700928,
49.026069641113,
50.268405914307,
52.089218139648,
53.409706115723,
54.018714904785,
55.003063201904,
55.873458862305,
56.751037597656,
56.927551269531,
57.840549468994,
58.750389099121,
59.540088653564,
60.921043395996,
61.693592071533,
62.472805023193,
63.610321044922,
64.856483459473,
66.562438964844,
68.24267578125,
69.667938232422,
71.911323547363,
74.473167419434,
76.760215759277,
79.732650756836,
82.780990600586,
84.380302429199,
86.475059509277,
89.042091369629,
90.886352539063,
93.332893371582,
95.179389953613,
95.979103088379,
96.356430053711,
99.02507019043,
99.415512084961,
98.914810180664,
99.782356262207,
100.7755279541,
101.770362854,
103.11968994141,
104.33930969238,
105.083152771,
106.07612609863,
106.59980773926,
107.96627044678,
108.61009216309,
109.38529968262,
110.87303924561,
109.27794647217,
110.13377380371,
110.85829162598,
112.16562652588,
113.56465148926,
114.70414733887,
115.95180511475,
116.95239257813,
118.188331604,
119.53330993652,
120.98512268066,
122.30659484863,
124.3293762207,
125.73359680176,
126.54803466797,
128.79624938965,
130.18423461914,
131.81065368652,
134.96139526367,
136.16728210449,
136.33625793457,
137.38554382324,
138.32939147949,
139.40463256836,
140.48587036133,
141.57167053223,
142.77867126465,
143.86622619629,
144.83903503418,
145.46519470215,
146.58029174805,
147.220703125,
148.11108398438,
149.35705566406,
150.35636901855,
151.47360229492,
152.82215881348,
153.5701751709,
154.44854736328,
155.68542480469,
157.14886474609,
158.00813293457,
159.23054504395,
160.44752502441,
160.83612060547,
161.25175476074,
162.39437866211,
162.82015991211,
162.91067504883,
163.96978759766,
164.66130065918,
165.47262573242,
166.75219726563,
167.54145812988,
169.03984069824,
170.27883911133,
171.9793548584,
173.89099121094,
175.06903076172,
176.84115600586,
177.5359954834,
178.49188232422,
178.5164642334,
178.46586608887,
180.21809387207,
180.8736114502,
182.12867736816,
183.60589599609,
184.12232971191,
184.54132080078,
185.80421447754,
187.28736877441,
188.39706420898,
190.2109375,
191.99716186523,
192.93818664551,
195.07104492188,
195.8763885498,
200.94375610352,
200.81010437012,
202.05271911621,
204.13349914551,
202.89764404297,
204.68077087402,
207.22866821289,
208.63482666016,
210.48779296875,
214.17184448242,
215.58755493164,
220.68745422363,
218.21083068848,
212.39213562012,
212.978515625,
215.07511901855])
resid = np.array([np.nan,
-.6596063375473,
-.49726036190987,
-.5911386013031,
-.34607490897179,
-.46362805366516,
-.21342028677464,
-.31237986683846,
-.40454092621803,
-.22221945226192,
-.26514956355095,
-.2509354352951,
-.13019436597824,
-.30159646272659,
-.1318296790123,
-.24349159002304,
-.25457563996315,
-.07427024841309,
-.24664734303951,
-.10696394741535,
-.30270880460739,
-.22268049418926,
-.18816292285919,
-.13006833195686,
-.20216277241707,
-.10066751390696,
-.24728938937187,
-.07679972797632,
.07274255156517,
-.20292413234711,
.03331403434277,
-.35277983546257,
-.16799576580524,
-.06826904416084,
-.08623649924994,
.00015908146452,
-.12788754701614,
.06861615926027,
-.06953293830156,
-.08066567778587,
.11089706420898,
-.03098993562162,
-.04496069997549,
.04446176066995,
.01869462057948,
-.20081178843975,
-.08008606731892,
-.08214038610458,
-.38369914889336,
-.03162068501115,
-.24543529748917,
-.22040157020092,
-.20144037902355,
-.18708138167858,
-.07620526105165,
.01427639275789,
.48931872844696,
-.11833623051643,
.78889113664627,
.43461054563522,
.45327401161194,
.27393117547035,
.73159569501877,
.21077930927277,
-.40970605611801,
-.01871551014483,
-.10306061804295,
-.0734596773982,
-.65103828907013,
.0724478662014,
.05945380032063,
-.05038867890835,
.45991089940071,
-.12104434520006,
-.09359546005726,
.22719417512417,
.28968048095703,
.64352011680603,
.53756183385849,
.25732442736626,
.93205803632736,
1.0886732339859,
.72682982683182,
1.2397809028625,
1.1673469543457,
-.18098846077919,
.31969723105431,
.72494095563889,
.05790812522173,
.61364978551865,
.06710703670979,
-.77938556671143,
-.97910648584366,
1.1435683965683,
-.92507529258728,
-1.5155116319656,
-.11481033265591,
.01764474436641,
.02447287365794,
.32963913679123,
.18031190335751,
-.23930950462818,
.01684862375259,
-.37613153457642,
.40019443631172,
-.2662724852562,
-.11008904129267,
.51469951868057,
-2.1730391979218,
.22205695509911,
.06622361391783,
.54170626401901,
.53436845541,
.2353515625,
.29585054516792,
.04819770529866,
.24760706722736,
.31166675686836,
.36669155955315,
.21487690508366,
.79340130090714,
.17062658071518,
-.33359375596046,
.95196217298508,
.10373862832785,
.31576481461525,
1.589346408844,
-.26140204071999,
-1.0672763586044,
-.13626158237457,
-.18554861843586,
-.02939598634839,
-.00464448658749,
.01412893645465,
.1283223181963,
.02133745700121,
-.06621573865414,
-.33903631567955,
.13481116294861,
-.28028702735901,
-.02071117423475,
.28890857100487,
.04294065013528,
.14363515377045,
.32640132308006,
-.22214868664742,
-.0701690018177,
.25145494937897,
.41458681225777,
-.14886146783829,
.19186246395111,
.16944620013237,
-.54752624034882,
-.43612506985664,
.2482432872057,
-.39438369870186,
-.62015581130981,
.28931456804276,
-.06979911774397,
.03869699314237,
.4273681640625,
-.05220314115286,
.55854320526123,
.26015737652779,
.62115871906281,
.72063958644867,
.00899865385145,
.53098171949387,
-.44116449356079,
-.13600566983223,
-.89187180995941,
-.81647485494614,
.83413660526276,
-.21809615194798,
.32638800144196,
.47133237123489,
-.4058920443058,
-.42233863472939,
.35867437720299,
.49578228592873,
.11262346804142,
.70294010639191,
.58906590938568,
-.19715182483196,
.86181098222733,
-.37105345726013,
3.3236031532288,
-1.543759226799,
-.11011194437742,
.64728397130966,
-2.2335081100464,
.67635416984558,
1.2392344474792,
.10933646559715,
.49816474318504,
2.0072033405304,
-.17484994232655,
3.0224411487579,
-3.7984521389008,
-6.0368394851685,
.27887633442879,
1.4904805421829,
1.3098726272583])
yr = np.array([np.nan,
-.6596063375473,
-.49726036190987,
-.5911386013031,
-.34607490897179,
-.46362805366516,
-.21342028677464,
-.31237986683846,
-.40454092621803,
-.22221945226192,
-.26514956355095,
-.2509354352951,
-.13019436597824,
-.30159646272659,
-.1318296790123,
-.24349159002304,
-.25457563996315,
-.07427024841309,
-.24664734303951,
-.10696394741535,
-.30270880460739,
-.22268049418926,
-.18816292285919,
-.13006833195686,
-.20216277241707,
-.10066751390696,
-.24728938937187,
-.07679972797632,
.07274255156517,
-.20292413234711,
.03331403434277,
-.35277983546257,
-.16799576580524,
-.06826904416084,
-.08623649924994,
.00015908146452,
-.12788754701614,
.06861615926027,
-.06953293830156,
-.08066567778587,
.11089706420898,
-.03098993562162,
-.04496069997549,
.04446176066995,
.01869462057948,
-.20081178843975,
-.08008606731892,
-.08214038610458,
-.38369914889336,
-.03162068501115,
-.24543529748917,
-.22040157020092,
-.20144037902355,
-.18708138167858,
-.07620526105165,
.01427639275789,
.48931872844696,
-.11833623051643,
.78889113664627,
.43461054563522,
.45327401161194,
.27393117547035,
.73159569501877,
.21077930927277,
-.40970605611801,
-.01871551014483,
-.10306061804295,
-.0734596773982,
-.65103828907013,
.0724478662014,
.05945380032063,
-.05038867890835,
.45991089940071,
-.12104434520006,
-.09359546005726,
.22719417512417,
.28968048095703,
.64352011680603,
.53756183385849,
.25732442736626,
.93205803632736,
1.0886732339859,
.72682982683182,
1.2397809028625,
1.1673469543457,
-.18098846077919,
.31969723105431,
.72494095563889,
.05790812522173,
.61364978551865,
.06710703670979,
-.77938556671143,
-.97910648584366,
1.1435683965683,
-.92507529258728,
-1.5155116319656,
-.11481033265591,
.01764474436641,
.02447287365794,
.32963913679123,
.18031190335751,
-.23930950462818,
.01684862375259,
-.37613153457642,
.40019443631172,
-.2662724852562,
-.11008904129267,
.51469951868057,
-2.1730391979218,
.22205695509911,
.06622361391783,
.54170626401901,
.53436845541,
.2353515625,
.29585054516792,
.04819770529866,
.24760706722736,
.31166675686836,
.36669155955315,
.21487690508366,
.79340130090714,
.17062658071518,
-.33359375596046,
.95196217298508,
.10373862832785,
.31576481461525,
1.589346408844,
-.26140204071999,
-1.0672763586044,
-.13626158237457,
-.18554861843586,
-.02939598634839,
-.00464448658749,
.01412893645465,
.1283223181963,
.02133745700121,
-.06621573865414,
-.33903631567955,
.13481116294861,
-.28028702735901,
-.02071117423475,
.28890857100487,
.04294065013528,
.14363515377045,
.32640132308006,
-.22214868664742,
-.0701690018177,
.25145494937897,
.41458681225777,
-.14886146783829,
.19186246395111,
.16944620013237,
-.54752624034882,
-.43612506985664,
.2482432872057,
-.39438369870186,
-.62015581130981,
.28931456804276,
-.06979911774397,
.03869699314237,
.4273681640625,
-.05220314115286,
.55854320526123,
.26015737652779,
.62115871906281,
.72063958644867,
.00899865385145,
.53098171949387,
-.44116449356079,
-.13600566983223,
-.89187180995941,
-.81647485494614,
.83413660526276,
-.21809615194798,
.32638800144196,
.47133237123489,
-.4058920443058,
-.42233863472939,
.35867437720299,
.49578228592873,
.11262346804142,
.70294010639191,
.58906590938568,
-.19715182483196,
.86181098222733,
-.37105345726013,
3.3236031532288,
-1.543759226799,
-.11011194437742,
.64728397130966,
-2.2335081100464,
.67635416984558,
1.2392344474792,
.10933646559715,
.49816474318504,
2.0072033405304,
-.17484994232655,
3.0224411487579,
-3.7984521389008,
-6.0368394851685,
.27887633442879,
1.4904805421829,
1.3098726272583])
mse = np.array([ 1.0121052265167,
.66349595785141,
.65449619293213,
.64957880973816,
.64683443307877,
.64528465270996,
.64440369606018,
.64390099048615,
.64361357688904,
.64344894886017,
.64335465431213,
.64330065250397,
.64326965808868,
.64325189590454,
.64324170351028,
.6432358622551,
.64323252439499,
.64323055744171,
.64322948455811,
.64322882890701,
.64322847127914,
.64322829246521,
.64322817325592,
.64322811365128,
.64322805404663,
.64322805404663,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199])
stdp = np.array([ .82960641384125,
.82960641384125,
.697261095047,
.61113905906677,
.51607495546341,
.47362637519836,
.41342103481293,
.40238001942635,
.37454023957253,
.33222004771233,
.32514902949333,
.31093680858612,
.30019253492355,
.31159669160843,
.29182952642441,
.30349296331406,
.29457464814186,
.28427124023438,
.30664679408073,
.29696446657181,
.31270903348923,
.29268020391464,
.28816330432892,
.29006817936897,
.30216124653816,
.30066826939583,
.31728908419609,
.30679926276207,
.3272570669651,
.37292611598969,
.36668366193771,
.40278288722038,
.36799272894859,
.36827209591866,
.38623574376106,
.39983862638474,
.42789059877396,
.43138384819031,
.46953064203262,
.48066720366478,
.48910140991211,
.53098994493484,
.54496067762375,
.55554050207138,
.58130383491516,
.60081332921982,
.58008605241776,
.58214038610458,
.58369606733322,
.53162068128586,
.54543834924698,
.52040082216263,
.50143963098526,
.48708060383797,
.47620677947998,
.48572361469269,
.51068127155304,
.61833620071411,
.61110657453537,
.76539021730423,
.84672522544861,
.92606955766678,
.96840506792068,
1.0892199277878,
1.1097067594528,
1.0187155008316,
1.0030621290207,
.97345739603043,
.95103752613068,
.82755368947983,
.84054774045944,
.85038793087006,
.84008830785751,
.92104357481003,
.89359468221664,
.87280809879303,
.91032028198242,
.95647835731506,
1.0624366998672,
1.1426770687103,
1.1679404973984,
1.311328291893,
1.473167181015,
1.5602221488953,
1.7326545715332,
1.8809853792191,
1.7803012132645,
1.7750589847565,
1.8420933485031,
1.7863517999649,
1.8328944444656,
1.7793855667114,
1.5791050195694,
1.3564316034317,
1.5250737667084,
1.3155146837234,
1.014811873436,
.98235523700714,
.97552710771561,
.97035628557205,
1.0196926593781,
1.0393049716949,
.98315137624741,
.97613000869751,
.89980864524841,
.96626943349838,
.91009211540222,
.88530200719833,
.97303456068039,
.57794612646103,
.63377332687378,
.65829831361771,
.76562696695328,
.86465454101563,
.90414637327194,
.95180231332779,
.95238989591599,
.98833626508713,
1.0333099365234,
1.0851185321808,
1.1066001653671,
1.2293750047684,
1.233595252037,
1.1480363607407,
1.2962552309036,
1.2842413187027,
1.3106474876404,
1.5614050626755,
1.4672855138779,
1.2362524271011,
1.1855486631393,
1.1294020414352,
1.1046353578568,
1.0858771800995,
1.0716745853424,
1.0786685943604,
1.0662157535553,
1.0390332937241,
.96519494056702,
.9802839756012,
.92070508003235,
.91108840703964,
.95705932378769,
.95637094974518,
.97360169887543,
1.0221517086029,
.9701629281044,
.94854199886322,
.98542231321335,
1.048855304718,
1.0081344842911,
1.0305507183075,
1.0475262403488,
.93612504005432,
.85176283121109,
.89438372850418,
.820152759552,
.71068543195724,
.76979607343674,
.76130604743958,
.77262878417969,
.85220617055893,
.84146595001221,
.93983960151672,
.97883212566376,
1.0793634653091,
1.1909983158112,
1.1690304279327,
1.2411522865295,
1.1360056400299,
1.0918840169907,
.9164656996727,
.76586949825287,
.918093085289,
.87360894680023,
.92867678403854,
1.00588285923,
.92233866453171,
.84132260084152,
.90422683954239,
.9873673915863,
.99707210063934,
1.1109310388565,
1.1971517801285,
1.138188958168,
1.2710473537445,
1.1763968467712,
1.7437561750412,
1.4101150035858,
1.3527159690857,
1.4335050582886,
.99765706062317,
1.1067585945129,
1.3086627721786,
1.2968333959579,
1.3547962903976,
1.6768488883972,
1.5905654430389,
2.0774590969086,
1.3218278884888,
.21813294291496,
.30750840902328,
.60612773895264])
icstats = np.array([ 202,
np.nan,
-242.06033399744,
4,
492.12066799488,
505.35373878448])
class Bunch(dict):
    """Dictionary subclass whose entries are also reachable as attributes."""

    def __init__(self, **kw):
        # Fill the dict from the keyword mapping, then alias the instance
        # __dict__ to the dict itself so ``obj.key`` mirrors ``obj['key']``.
        super(Bunch, self).__init__(kw)
        self.__dict__ = self
# Bundle every reference result array defined above into one
# attribute-accessible container consumed by the ARIMA regression tests.
results = Bunch(llf=llf, nobs=nobs, k=k, k_exog=k_exog, sigma=sigma, chi2=chi2, df_model=df_model, k_ar=k_ar, k_ma=k_ma, params=params, cov_params=cov_params, xb=xb, y=y, resid=resid, yr=yr, mse=mse, stdp=stdp, icstats=icstats, )
| bsd-3-clause | acee6294eb035e11edce5cac636ecd48 | 33.478532 | 229 | 0.401431 | 3.829287 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/tsa/tests/results/arima211nc_results.py | 35 | 36847 | import numpy as np
llf = np.array([-241.25977940638])
nobs = np.array([ 202])
k = np.array([ 4])
k_exog = np.array([ 1])
sigma = np.array([ .79533686587485])
chi2 = np.array([ 48655.961417345])
df_model = np.array([ 3])
k_ar = np.array([ 2])
k_ma = np.array([ 1])
params = np.array([ 1.1870704073154,
-.19095698898571,
-.90853757573555,
.79533686587485])
cov_params = np.array([ .00204336743511,
-.00177522179187,
-.00165894353702,
-.00031352141782,
-.00177522179187,
.00157376214003,
.00132907629148,
.00030367391511,
-.00165894353702,
.00132907629148,
.00210988984438,
.00024199988464,
-.00031352141782,
.00030367391511,
.00024199988464,
.00027937875185]).reshape(4,4)
xb = np.array([ 0,
0,
.11248598247766,
.14283391833305,
.0800810828805,
.12544548511505,
.07541109621525,
.1297073662281,
.10287435352802,
.06303016841412,
.09501431882381,
.08120259642601,
.07862555980682,
.10874316096306,
.06787430495024,
.10527064651251,
.08142036944628,
.07337106764317,
.11828763782978,
.08380854874849,
.11801292747259,
.07338324189186,
.0842502862215,
.09106454998255,
.10832596570253,
.09570593386889,
.1236881390214,
.09362822026014,
.13587079942226,
.19111332297325,
.14459040760994,
.21043147146702,
.12866979837418,
.16308072209358,
.19356986880302,
.20215991139412,
.23782986402512,
.22326464951038,
.28485587239265,
.27474755048752,
.28465977311134,
.34938132762909,
.3421268761158,
.35463020205498,
.39384591579437,
.41037485003471,
.36968034505844,
.39875456690788,
.40607318282127,
.32915702462196,
.40012913942337,
.35161358118057,
.34572568535805,
.34037715196609,
.3355179131031,
.35895752906799,
.38901025056839,
.53648668527603,
.43572762608528,
.69034379720688,
.69410443305969,
.76356476545334,
.77972346544266,
.95276647806168,
.9030898809433,
.76722019910812,
.84191131591797,
.82463103532791,
.82802563905716,
.66399103403091,
.79665386676788,
.80260843038559,
.78016436100006,
.91813576221466,
.80874294042587,
.80483394861221,
.8848432302475,
.92809981107712,
1.0597171783447,
1.1029140949249,
1.0864543914795,
1.3046631813049,
1.4528053998947,
1.4744025468826,
1.6993381977081,
1.816978096962,
1.5705223083496,
1.6871707439423,
1.8281806707382,
1.7127912044525,
1.8617957830429,
1.7624272108078,
1.5169456005096,
1.3543643951416,
1.8122490644455,
1.3362231254578,
1.0437293052673,
1.2371381521225,
1.2306576967239,
1.2056746482849,
1.2665351629257,
1.2366921901703,
1.1172571182251,
1.1408381462097,
1.0126565694809,
1.1675561666489,
1.0074961185455,
1.0045058727264,
1.1498116254807,
.44306626915932,
.85451871156693,
.81856834888458,
.94427144527435,
.99084824323654,
.95836746692657,
.994897544384,
.95328682661057,
1.0093784332275,
1.0500040054321,
1.0956697463989,
1.090208530426,
1.2714649438858,
1.1823015213013,
1.0575052499771,
1.373840212822,
1.2371203899384,
1.3022859096527,
1.6853868961334,
1.3395566940308,
1.0802086591721,
1.2114092111588,
1.1690926551819,
1.1775953769684,
1.1662193536758,
1.1558910608292,
1.1743551492691,
1.1441857814789,
1.1080147027969,
1.0106881856918,
1.0909667015076,
.97610247135162,
1.0038343667984,
1.0743995904922,
1.0255174636841,
1.0471519231796,
1.1034165620804,
.97707790136337,
.9856236577034,
1.0578545331955,
1.1219012737274,
1.0026258230209,
1.0733016729355,
1.0802255868912,
.89154416322708,
.85378932952881,
.98660898208618,
.82558387517929,
.71030122041702,
.88567733764648,
.80868631601334,
.82387971878052,
.92999804019928,
.83861750364304,
.99909782409668,
.97461491823196,
1.1019765138626,
1.1970175504684,
1.0780508518219,
1.2238110303879,
1.0100719928741,
1.0434579849243,
.81277370452881,
.72809249162674,
1.0880596637726,
.87798285484314,
.99824965000153,
1.0677480697632,
.86986482143402,
.81499886512756,
.97921711206436,
1.0504562854767,
.99342101812363,
1.1660186052322,
1.208247423172,
1.0516448020935,
1.3215674161911,
1.0694575309753,
2.0531799793243,
1.0617904663086,
1.2885792255402,
1.4795436859131,
.73947989940643,
1.290878534317,
1.506583571434,
1.3157633543015,
1.424609541893,
1.8879710435867,
1.4916514158249,
2.3532779216766,
.77780252695084,
-.27798706293106,
.7862361073494,
1.1202166080475])
y = np.array([np.nan,
28.979999542236,
29.26248550415,
29.492834091187,
29.450082778931,
29.665447235107,
29.625410079956,
29.879707336426,
29.942874908447,
29.873029708862,
30.015014648438,
30.06120300293,
30.118625640869,
30.318742752075,
30.287874221802,
30.485269546509,
30.521421432495,
30.553371429443,
30.808288574219,
30.833808898926,
31.058013916016,
31.023384094238,
31.104249954224,
31.211065292358,
31.388326644897,
31.475704193115,
31.703687667847,
31.743627548218,
32.015869140625,
32.471111297607,
32.594593048096,
33.060428619385,
33.028671264648,
33.263080596924,
33.593570709229,
33.902160644531,
34.337829589844,
34.623264312744,
35.184856414795,
35.574745178223,
35.984661102295,
36.649379730225,
37.142127990723,
37.654628753662,
38.293846130371,
38.910373687744,
39.269680023193,
39.798755645752,
40.306076049805,
40.42915725708,
41.00012588501,
41.251613616943,
41.545726776123,
41.840377807617,
42.135517120361,
42.558959960938,
43.089012145996,
44.236488342285,
44.635726928711,
46.290340423584,
47.494102478027,
48.863563537598,
50.079723358154,
51.952766418457,
53.203090667725,
53.767219543457,
54.841911315918,
55.724632263184,
56.628025054932,
56.763988494873,
57.796653747559,
58.702610015869,
59.480163574219,
60.91813659668,
61.608741760254,
62.404830932617,
63.584842681885,
64.828102111816,
66.559715270996,
68.202911376953,
69.586456298828,
71.904663085938,
74.45280456543,
76.67440032959,
79.699340820313,
82.716979980469,
84.170516967773,
86.387168884277,
89.028175354004,
90.812789916992,
93.361793518066,
95.16242980957,
95.916946411133,
96.354362487793,
99.31224822998,
99.436218261719,
98.943733215332,
100.03713989258,
101.03066253662,
102.00567626953,
103.36653137207,
104.5366973877,
105.21725463867,
106.24083709717,
106.71265411377,
108.1675567627,
108.70749664307,
109.50450897217,
111.04981231689,
109.14306640625,
110.35451507568,
111.01856231689,
112.34427642822,
113.6908416748,
114.7583694458,
115.99489593506,
116.95328521729,
118.20937347412,
119.55000305176,
120.9956741333,
122.29020690918,
124.37145996094,
125.68230438232,
126.45750427246,
128.87384033203,
130.13711547852,
131.80229187012,
135.08538818359,
136.03955078125,
136.18022155762,
137.4114074707,
138.36909484863,
139.47760009766,
140.56620788574,
141.65588378906,
142.87435913086,
143.94418334961,
144.90802001953,
145.51068115234,
146.69097900391,
147.27610778809,
148.2038269043,
149.47439575195,
150.4255065918,
151.5471496582,
152.90342712402,
153.57708740234,
154.4856262207,
155.75785827637,
157.22190856934,
158.00262451172,
159.2733001709,
160.48022460938,
160.79153442383,
161.25378417969,
162.4866027832,
162.82557678223,
162.9102935791,
164.08567810059,
164.70867919922,
165.52388000488,
166.82998657227,
167.53861999512,
169.09910583496,
170.27461242676,
172.00196838379,
173.89701843262,
174.97804260254,
176.82382202148,
177.41006469727,
178.44345092773,
178.41278076172,
178.42808532715,
180.38806152344,
180.87797546387,
182.1982421875,
183.66775512695,
184.06985473633,
184.51499938965,
185.87921142578,
187.35046386719,
188.3934173584,
190.26602172852,
192.00825500488,
192.85165405273,
195.12156677246,
195.76945495605,
201.25317382813,
200.4617767334,
201.98857116699,
204.17953491211,
202.63948059082,
204.86488342285,
207.42657470703,
208.65376281738,
210.55760192871,
214.38296508789,
215.48864746094,
220.96327209473,
217.66680908203,
211.89601135254,
213.45724487305,
215.58921813965])
resid = np.array([np.nan,
.17000007629395,
.08751478046179,
-.12283346056938,
.08991899341345,
-.11544716358185,
.12458966672421,
-.03970721364021,
-.13287504017353,
.04697044193745,
-.03501485288143,
-.02120122499764,
.09137260913849,
-.09874293208122,
.09212554246187,
-.04526927694678,
-.04142136126757,
.13662992417812,
-.0582881718874,
.10619198530912,
-.10801269859076,
-.00338354869746,
.01575009897351,
.06893529742956,
-.00832748971879,
.10429482907057,
-.05368844047189,
.13637132942677,
.26412883400917,
-.02111134678125,
.2554073035717,
-.16042841970921,
.07132714986801,
.13692232966423,
.1064293757081,
.19783779978752,
.0621731877327,
.27673536539078,
.11514183133841,
.12525399029255,
.31533870100975,
.15061867237091,
.15787313878536,
.24537208676338,
.20615255832672,
-.01037331111729,
.13031965494156,
.10124543309212,
-.20607624948025,
.1708429902792,
-.10012608766556,
-.05161434784532,
-.04572645947337,
-.04037792980671,
.06448362022638,
.14104247093201,
.61098974943161,
-.03648666664958,
.96427005529404,
.50965696573257,
.60589480400085,
.43643599748611,
.9202772974968,
.34723278880119,
-.20308908820152,
.23277981579304,
.05809023976326,
.07536666095257,
-.52802640199661,
.23601049184799,
.1033476293087,
-.00260917330161,
.51983487606049,
-.11813650280237,
-.00874368380755,
.29516834020615,
.31515756249428,
.67189866304398,
.54028129577637,
.29708743095398,
1.0135440826416,
1.095338344574,
.74719160795212,
1.3256005048752,
1.2006633281708,
-.11698111891747,
.52947622537613,
.81282931566238,
.07182084023952,
.68721032142639,
.03820572793484,
-.7624272108078,
-.91694712638855,
1.1456356048584,
-1.2122505903244,
-1.5362200737,
-.14372782409191,
-.23713812232018,
-.2306577116251,
.09432080388069,
-.06653053313494,
-.43669676780701,
-.11725706607103,
-.54083967208862,
.28734645247459,
-.467559248209,
-.20749309659004,
.39549562335014,
-2.3498160839081,
.3569367825985,
-.15452179312706,
.38143622875214,
.35572397708893,
.1091578528285,
.24162948131561,
.00510244909674,
.24671010673046,
.29062458872795,
.34999752044678,
.20432561635971,
.80979299545288,
.12853652238846,
-.28230002522469,
1.042493224144,
.02615367434919,
.36288577318192,
1.5977079868317,
-.38538381457329,
-.93954759836197,
.01978221163154,
-.21140915155411,
-.06908652186394,
-.07760456204414,
-.06621328741312,
.0441059358418,
-.07434900850058,
-.14418575167656,
-.40801778435707,
.08931794017553,
-.39096972346306,
-.07610860466957,
.19616261124611,
-.07439963519573,
.07448863238096,
.25285106897354,
-.30341354012489,
-.07708399742842,
.21437329053879,
.34215462207794,
-.22190742194653,
.19737112522125,
.12669529020786,
-.58022564649582,
-.3915441930294,
.24621678888798,
-.48660898208618,
-.6255869269371,
.28969877958298,
-.18568041920662,
-.00868325773627,
.37611722946167,
-.12999498844147,
.5613916516304,
.20089910924435,
.6253759264946,
.69802659749985,
.00297940592282,
.621961414814,
-.42382326722145,
-.01007199659944,
-.84344571828842,
-.71278285980225,
.87191361188889,
-.38806268572807,
.32201409339905,
.40175950527191,
-.4677571952343,
-.36986482143402,
.38499811291695,
.42079201340675,
.04953457415104,
.70659118890762,
.53397834300995,
-.20824746787548,
.94835525751114,
-.42157354950905,
3.4305424690247,
-1.8531830310822,
.23821261525154,
.71142077445984,
-2.2795467376709,
.93453133106232,
1.0551145076752,
-.08858433365822,
.47923478484154,
1.9373899698257,
-.38597220182419,
3.1213552951813,
-4.0742712020874,
-5.4928140640259,
.77499634027481,
1.0117527246475,
.79578375816345])
yr = np.array([np.nan,
.17000007629395,
.08751478046179,
-.12283346056938,
.08991899341345,
-.11544716358185,
.12458966672421,
-.03970721364021,
-.13287504017353,
.04697044193745,
-.03501485288143,
-.02120122499764,
.09137260913849,
-.09874293208122,
.09212554246187,
-.04526927694678,
-.04142136126757,
.13662992417812,
-.0582881718874,
.10619198530912,
-.10801269859076,
-.00338354869746,
.01575009897351,
.06893529742956,
-.00832748971879,
.10429482907057,
-.05368844047189,
.13637132942677,
.26412883400917,
-.02111134678125,
.2554073035717,
-.16042841970921,
.07132714986801,
.13692232966423,
.1064293757081,
.19783779978752,
.0621731877327,
.27673536539078,
.11514183133841,
.12525399029255,
.31533870100975,
.15061867237091,
.15787313878536,
.24537208676338,
.20615255832672,
-.01037331111729,
.13031965494156,
.10124543309212,
-.20607624948025,
.1708429902792,
-.10012608766556,
-.05161434784532,
-.04572645947337,
-.04037792980671,
.06448362022638,
.14104247093201,
.61098974943161,
-.03648666664958,
.96427005529404,
.50965696573257,
.60589480400085,
.43643599748611,
.9202772974968,
.34723278880119,
-.20308908820152,
.23277981579304,
.05809023976326,
.07536666095257,
-.52802640199661,
.23601049184799,
.1033476293087,
-.00260917330161,
.51983487606049,
-.11813650280237,
-.00874368380755,
.29516834020615,
.31515756249428,
.67189866304398,
.54028129577637,
.29708743095398,
1.0135440826416,
1.095338344574,
.74719160795212,
1.3256005048752,
1.2006633281708,
-.11698111891747,
.52947622537613,
.81282931566238,
.07182084023952,
.68721032142639,
.03820572793484,
-.7624272108078,
-.91694712638855,
1.1456356048584,
-1.2122505903244,
-1.5362200737,
-.14372782409191,
-.23713812232018,
-.2306577116251,
.09432080388069,
-.06653053313494,
-.43669676780701,
-.11725706607103,
-.54083967208862,
.28734645247459,
-.467559248209,
-.20749309659004,
.39549562335014,
-2.3498160839081,
.3569367825985,
-.15452179312706,
.38143622875214,
.35572397708893,
.1091578528285,
.24162948131561,
.00510244909674,
.24671010673046,
.29062458872795,
.34999752044678,
.20432561635971,
.80979299545288,
.12853652238846,
-.28230002522469,
1.042493224144,
.02615367434919,
.36288577318192,
1.5977079868317,
-.38538381457329,
-.93954759836197,
.01978221163154,
-.21140915155411,
-.06908652186394,
-.07760456204414,
-.06621328741312,
.0441059358418,
-.07434900850058,
-.14418575167656,
-.40801778435707,
.08931794017553,
-.39096972346306,
-.07610860466957,
.19616261124611,
-.07439963519573,
.07448863238096,
.25285106897354,
-.30341354012489,
-.07708399742842,
.21437329053879,
.34215462207794,
-.22190742194653,
.19737112522125,
.12669529020786,
-.58022564649582,
-.3915441930294,
.24621678888798,
-.48660898208618,
-.6255869269371,
.28969877958298,
-.18568041920662,
-.00868325773627,
.37611722946167,
-.12999498844147,
.5613916516304,
.20089910924435,
.6253759264946,
.69802659749985,
.00297940592282,
.621961414814,
-.42382326722145,
-.01007199659944,
-.84344571828842,
-.71278285980225,
.87191361188889,
-.38806268572807,
.32201409339905,
.40175950527191,
-.4677571952343,
-.36986482143402,
.38499811291695,
.42079201340675,
.04953457415104,
.70659118890762,
.53397834300995,
-.20824746787548,
.94835525751114,
-.42157354950905,
3.4305424690247,
-1.8531830310822,
.23821261525154,
.71142077445984,
-2.2795467376709,
.93453133106232,
1.0551145076752,
-.08858433365822,
.47923478484154,
1.9373899698257,
-.38597220182419,
3.1213552951813,
-4.0742712020874,
-5.4928140640259,
.77499634027481,
1.0117527246475,
.79578375816345])
mse = np.array([ 1.4402351379395,
1.4402351379395,
.80966705083847,
.74677377939224,
.71241801977158,
.69108927249908,
.67678099870682,
.66667699813843,
.6592805981636,
.6537224650383,
.64946305751801,
.64614951610565,
.64354157447815,
.64147007465363,
.639812707901,
.63847899436951,
.63740062713623,
.63652545213699,
.63581293821335,
.63523155450821,
.63475602865219,
.63436657190323,
.63404709100723,
.63378477096558,
.63356912136078,
.63339179754257,
.63324582576752,
.63312560319901,
.63302659988403,
.63294500112534,
.63287770748138,
.63282227516174,
.63277649879456,
.63273876905441,
.63270765542984,
.63268196582794,
.63266080617905,
.63264334201813,
.63262891769409,
.63261699676514,
.63260716199875,
.63259905576706,
.63259238004684,
.63258683681488,
.63258230686188,
.63257849216461,
.63257539272308,
.63257282972336,
.63257074356079,
.63256901502609,
.63256752490997,
.63256633281708,
.63256537914276,
.63256454467773,
.63256388902664,
.63256335258484,
.63256287574768,
.63256251811981,
.63256222009659,
.63256192207336,
.63256174325943,
.6325615644455,
.63256138563156,
.63256126642227,
.63256120681763,
.63256108760834,
.63256102800369,
.63256096839905,
.63256096839905,
.6325609087944,
.63256084918976,
.63256084918976,
.63256084918976,
.63256078958511,
.63256078958511,
.63256078958511,
.63256078958511,
.63256078958511,
.63256078958511,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047,
.63256072998047])
icstats = np.array([ 202,
np.nan,
-241.25977940638,
4,
490.51955881276,
503.75262960236])
class Bunch(dict):
    """A dict that additionally exposes each key as an attribute."""

    def __init__(self, **kw):
        # Populate from the keyword mapping; pointing __dict__ at the
        # instance makes attribute and item access interchangeable.
        super(Bunch, self).__init__(kw)
        self.__dict__ = self
# Bundle every reference result array defined above into one
# attribute-accessible container consumed by the ARIMA regression tests.
results = Bunch(llf=llf, nobs=nobs, k=k, k_exog=k_exog, sigma=sigma, chi2=chi2, df_model=df_model, k_ar=k_ar, k_ma=k_ma, params=params, cov_params=cov_params, xb=xb, y=y, resid=resid, yr=yr, mse=mse, icstats=icstats, )
| bsd-3-clause | dd5fda91562c6151b5a22216ec0c8f9e | 33.212628 | 218 | 0.403126 | 3.808869 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/datasets/cpunish/data.py | 3 | 2548 | """US Capital Punishment dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = __doc__
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """Number of state executions in 1997"""
DESCRLONG = """This data describes the number of times capital punishment is implemented
at the state level for the year 1997. The outcome variable is the number of
executions. There were executions in 17 states.
Included in the data are explanatory variables for median per capita income
in dollars, the percent of the population classified as living in poverty,
the percent of Black citizens in the population, the rate of violent
crimes per 100,000 residents for 1996, a dummy variable indicating
whether the state is in the South, and (an estimate of) the proportion
of the population with a college degree of some kind.
"""
NOTE = """
Number of Observations - 17
Number of Variables - 7
Variable name definitions::
EXECUTIONS - Executions in 1996
INCOME - Median per capita income in 1996 dollars
PERPOVERTY - Percent of the population classified as living in poverty
PERBLACK - Percent of black citizens in the population
VC100k96 - Rate of violent crimes per 100,000 residents for 1996
SOUTH - SOUTH == 1 indicates a state in the South
DEGREE - An estimate of the proportion of the state population with a
college degree of some kind
State names are included in the data file, though not returned by load.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
    """
    Load the cpunish data and return a Dataset class.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    # EXECUTIONS (column 0) is the endogenous variable; everything else
    # becomes the exogenous design, converted to float.
    return du.process_recarray(_get_data(), endog_idx=0, dtype=float)
def load_pandas():
    """
    Load the cpunish data and return a Dataset class.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    # Same split as load(), but the Dataset wraps pandas objects.
    raw = _get_data()
    return du.process_recarray_pandas(raw, endog_idx=0, dtype=float)
def _get_data():
    """Read cpunish.csv (located next to this module) into a recarray.

    Column 0 (the state name) is skipped; the remaining seven numeric
    columns are read as floats with field names taken from the header row.
    """
    filepath = dirname(abspath(__file__))
    # Use a context manager so the file handle is closed deterministically;
    # the previous version left the handle open until garbage collection.
    with open(filepath + '/cpunish.csv', 'rb') as f:
        data = recfromtxt(f, delimiter=",", names=True, dtype=float,
                          usecols=(1, 2, 3, 4, 5, 6, 7))
    return data
| bsd-3-clause | d5f92df7b4e92ce0abdff6f9720424f0 | 31.666667 | 90 | 0.706829 | 3.65043 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/sandbox/stats/tests/__init__.py | 218 | 6354 | '''
Econometrics for a Datarich Environment
=======================================
Introduction
------------
In many cases we are performing statistical analysis when many observed variables are
available, when we are in a data rich environment. Machine learning has a wide variety
of tools for dimension reduction and penalization when there are many varibles compared
to the number of observation. Chemometrics has a long tradition of using Partial Least
Squares, NIPALS and similar in these cases. In econometrics the same problem shows up
when there are either many possible regressors, many (weak) instruments or when there are
a large number of moment conditions in GMM.
This section is intended to collect some models and tools in this area that are relevant
for the statistical analysis and econometrics.
Covariance Matrices
===================
Several methods are available to reduce the small sample noise in estimated covariance
matrices with many variable.
Some applications:
weighting matrix with many moments,
covariance matrix for portfolio choice
Dimension Reduction
===================
Principal Component and Partial Least Squares try to extract the important low dimensional
factors from the data with many variables.
Regression with many regressors
===============================
Factor models, selection of regressors and shrinkage and penalization are used to improve
the statistical properties, when the presence of too many regressors leads to over-fitting
and too noisy small sample estimators and statistics.
Regression with many moments or many instruments
================================================
The same tools apply and can be used in these two cases.
e.g. Tychonov regularization of weighting matrix in GMM, similar to Ridge regression, the
weighting matrix can be shrunk towards the identity matrix.
Simplest case will be part of GMM. I don't know how much will be standalone
functions.
Intended Content
================
PLS
---
what should be available in class?
Factormodel and supporting helper functions
-------------------------------------------
PCA based
~~~~~~~~~
First version based PCA on Stock/Watson and Bai/Ng, and recent papers on the
selection of the number of factors. Not sure about Forni et al. in approach.
Basic support of this needs additional results for PCA, error covariance matrix
of data on reduced factors, required for criteria in Bai/Ng.
Selection criteria based on eigenvalue cutoffs.
Paper on PCA and structural breaks. Could add additional results during
find_nfact to test for parameter stability. I haven't read the paper yet.
Idea: for forecasting, use up to h-step ahead endogenous variables to directly
get the forecasts.
Asymptotic results and distribution: not too much idea yet.
Standard OLS results are conditional on factors, paper by Haerdle (abstract
seems to suggest that this is ok, Park 2009).
Simulation: add function to simulate DGP of Bai/Ng and recent extension.
Sensitivity of selection criteria to heteroscedasticity and autocorrelation.
Bai, J. & Ng, S., 2002. Determining the Number of Factors in
Approximate Factor Models. Econometrica, 70(1), pp.191-221.
Kapetanios, G., 2010. A Testing Procedure for Determining the Number
of Factors in Approximate Factor Models With Large Datasets. Journal
of Business and Economic Statistics, 28(3), pp.397-409.
Onatski, A., 2010. Determining the Number of Factors from Empirical
Distribution of Eigenvalues. Review of Economics and Statistics,
92(4), pp.1004-1016.
Alessi, L., Barigozzi, M. & Capasso, M., 2010. Improved penalization
for determining the number of factors in approximate factor models.
Statistics & Probability Letters, 80(23-24), pp.1806-1813.
Breitung, J. & Eickmeier, S., Testing for structural breaks in dynamic
factor models. Journal of Econometrics, In Press, Accepted Manuscript.
Available at:
http://www.sciencedirect.com/science/article/B6VC0-51G3W92-1/2/f45ce2332443374fd770e42e5a68ddb4
[Accessed November 15, 2010].
Croux, C., Renault, E. & Werker, B., 2004. Dynamic factor models.
Journal of Econometrics, 119(2), pp.223-230.
Forni, M. et al., 2009. Opening the Black Box: Structural Factor
Models with Large Cross Sections. Econometric Theory, 25(05),
pp.1319-1347.
Forni, M. et al., 2000. The Generalized Dynamic-Factor Model:
Identification and Estimation. Review of Economics and Statistics,
82(4), pp.540-554.
Forni, M. & Lippi, M., The general dynamic factor model: One-sided
representation results. Journal of Econometrics, In Press, Accepted
Manuscript. Available at:
http://www.sciencedirect.com/science/article/B6VC0-51FNPJN-1/2/4fcdd0cfb66e3050ff5d19bf2752ed19
[Accessed November 15, 2010].
Kapetanios, G., 2010. A Testing Procedure for Determining the Number
of Factors in Approximate Factor Models With Large Datasets. Journal
of Business and Economic Statistics, 28(3), pp.397-409.
Onatski, A., 2010. Determining the Number of Factors from Empirical
Distribution of Eigenvalues. Review of Economics and Statistics,
92(4), pp.1004-1016.
Park, B.U. et al., 2009. Time Series Modelling With Semiparametric
Factor Dynamics. Journal of the American Statistical Association,
104(485), pp.284-298.
other factor algorithm
~~~~~~~~~~~~~~~~~~~~~~
PLS should fit in reasonably well.
Bai/Ng have a recent paper, where they compare LASSO, PCA, and similar, individual
and in combination.
Check how much we can use scikits.learn for this.
miscellaneous
~~~~~~~~~~~~~
Time series modeling of factors for prediction, ARMA, VARMA.
SUR and correlation structure
What about sandwich estimation, robust covariance matrices?
Similarity to Factor-Garch and Go-Garch
Updating: incremental PCA, ...?
TODO next
=========
MVOLS : OLS with multivariate endogenous and identical exogenous variables.
rewrite and expand current varma_process.VAR
PCA : write a class after all, and/or adjust the current donated class
and keep adding required statistics, e.g.
residual variance, projection of X on k-factors, ... updating ?
FactorModelUnivariate : started, does basic principal component regression,
based on standard information criteria, not Bai/Ng adjusted
FactorModelMultivariate : follow pattern for univariate version and use
MVOLS
'''
| bsd-3-clause | a9e0fe95e0e7e82118585fa9e0049469 | 37.509091 | 99 | 0.748033 | 3.766449 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/sandbox/distributions/tests/_est_fit.py | 5 | 2608 | # NOTE: contains only one test, _est_cont_fit, that is renamed so that
# nose doesn't run it
# I put this here for the record and for the case when someone wants to
# verify the quality of fit
# with current parameters: relatively small sample size, default starting values
# Ran 84 tests in 401.797s
# FAILED (failures=15)
import numpy.testing as npt
import numpy as np
from scipy import stats
from distparams import distcont
# this is not a proper statistical test for convergence, but only
# verifies that the estimate and true values don't differ by too much
n_repl1 = 1000 # sample size for first run
n_repl2 = 5000 # sample size for second run, if first run fails
thresh_percent = 0.25 # percent of true parameters for fail cut-off
thresh_min = 0.75 # minimum difference estimate - true to fail test
#distcont = [['genextreme', (3.3184017469423535,)]]
def _est_cont_fit():
    """Yield one ``(check_cont_fit, distname, args)`` case per distribution.

    Checks closeness of the estimated parameters to the true parameters
    for the ``fit`` method of continuous distributions.  Note: slow; some
    distributions do not converge with sample size <= 10000.
    """
    for name, shapes in distcont:
        yield check_cont_fit, name, shapes
def check_cont_fit(distname, arg):
    """Check that ``fit`` roughly recovers the parameters of *distname*.

    Draws ``n_repl1`` samples with shape parameters *arg* (loc=0, scale=1),
    fits with default starting values, and compares against the truth using
    a per-parameter tolerance.  If the first fit misses, the sample is
    enlarged to ``n_repl2`` and the fit is retried before raising.
    """
    distfn = getattr(stats, distname)
    sample = distfn.rvs(size=n_repl1, *arg)
    estimate = distfn.fit(sample)  # start with default values
    true_params = np.hstack([arg, [0.0, 1.0]])

    # Tolerance per parameter: a fraction of the true value, floored at
    # thresh_min; the location entry is scaled by the sample mean instead.
    tol = np.max(np.vstack([true_params * thresh_percent,
                            np.ones(distfn.numargs + 2) * thresh_min]), 0)
    tol[-2] = np.max([np.abs(sample.mean()) * thresh_percent, thresh_min])

    if np.any(np.isnan(estimate)):
        raise AssertionError('nan returned in fit')
    deviation = estimate - true_params
    if np.any((np.abs(deviation) - tol) > 0.0):
        # First fit was off; retry once with the larger sample size.
        sample = np.concatenate([sample,
                                 distfn.rvs(size=n_repl2 - n_repl1, *arg)])
        estimate = distfn.fit(sample)
        deviation = estimate - true_params
        if np.any((np.abs(deviation) - tol) > 0.0):
            msg = 'parameter: %s\n' % str(true_params)
            msg += 'estimated: %s\n' % str(estimate)
            msg += 'diff : %s\n' % str(deviation)
            raise AssertionError('fit not very good in %s\n' % distfn.name + msg)
if __name__ == "__main__":
    # Run this module's checks directly through nose; '-s' disables stdout
    # capture and exit=False keeps the interpreter alive afterwards.
    import nose
    #nose.run(argv=['', __file__])
    nose.runmodule(argv=[__file__,'-s'], exit=False)
| bsd-3-clause | 87d733bfb0e8ae73b9a1300acfe29427 | 36.257143 | 85 | 0.634202 | 3.445178 | false | true | false | false |
yarikoptic/pystatsmodels | statsmodels/sandbox/tests/maketests_mlabwrap.py | 36 | 9022 | '''generate py modules with test cases and results from mlabwrap
currently matlab: princomp, garchar, garchma
'''
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy import array
xo = array([[ -419, -731, -1306, -1294],
[ 6, 529, -200, -437],
[ -27, -833, -6, -564],
[ -304, -273, -502, -739],
[ 1377, -912, 927, 280],
[ -375, -517, -514, 49],
[ 247, -504, 123, -259],
[ 712, 534, -773, 286],
[ 195, -1080, 3256, -178],
[ -854, 75, -706, -1084],
[-1219, -612, -15, -203],
[ 550, -628, -483, -2686],
[ -365, 1376, -1266, 317],
[ -489, 544, -195, 431],
[ -656, 854, 840, -723],
[ 16, -1385, -880, -460],
[ 258, -2252, 96, 54],
[ 2049, -750, -1115, 381],
[ -65, 280, -777, 416],
[ 755, 82, -806, 1027],
[ -39, -170, -2134, 743],
[ -859, 780, 746, -133],
[ 762, 252, -450, -459],
[ -941, -202, 49, -202],
[ -54, 115, 455, 388],
[-1348, 1246, 1430, -480],
[ 229, -535, -1831, 1524],
[ -651, -167, 2116, 483],
[-1249, -1373, 888, -1092],
[ -75, -2162, 486, -496],
[ 2436, -1627, -1069, 162],
[ -63, 560, -601, 587],
[ -60, 1051, -277, 1323],
[ 1329, -1294, 68, 5],
[ 1532, -633, -923, 696],
[ 669, 895, -1762, -375],
[ 1129, -548, 2064, 609],
[ 1320, 573, 2119, 270],
[ -213, -412, -2517, 1685],
[ 73, -979, 1312, -1220],
[-1360, -2107, -237, 1522],
[ -645, 205, -543, -169],
[ -212, 1072, 543, -128],
[ -352, -129, -605, -904],
[ 511, 85, 167, -1914],
[ 1515, 1862, 942, 1622],
[ -465, 623, -495, -89],
[-1396, -979, 1758, 128],
[ -255, -47, 980, 501],
[-1282, -58, -49, -610],
[ -889, -1177, -492, 494],
[ 1415, 1146, 696, -722],
[ 1237, -224, -1609, -64],
[ -528, -1625, 231, 883],
[ -327, 1636, -476, -361],
[ -781, 793, 1882, 234],
[ -506, -561, 1988, -810],
[-1233, 1467, -261, 2164],
[ 53, 1069, 824, 2123],
[-1200, -441, -321, 339],
[ 1606, 298, -995, 1292],
[-1740, -672, -1628, -129],
[-1450, -354, 224, -657],
[-2556, 1006, -706, -1453],
[ -717, -463, 345, -1821],
[ 1056, -38, -420, -455],
[ -523, 565, 425, 1138],
[-1030, -187, 683, 78],
[ -214, -312, -1171, -528],
[ 819, 736, -265, 423],
[ 1339, 351, 1142, 579],
[ -387, -126, -1573, 2346],
[ 969, 2, 327, -134],
[ 163, 227, 90, 2021],
[ 1022, -1076, 174, 304],
[ 1042, 1317, 311, 880],
[ 2018, -840, 295, 2651],
[ -277, 566, 1147, -189],
[ 20, 467, 1262, 263],
[ -663, 1061, -1552, -1159],
[ 1830, 391, 2534, -199],
[ -487, 752, -1061, 351],
[-2138, -556, -367, -457],
[ -868, -411, -559, 726],
[ 1770, 819, -892, -363],
[ 553, -736, -169, -490],
[ 388, -503, 809, -821],
[ -516, -1452, -192, 483],
[ 493, 2904, 1318, 2591],
[ 175, 584, -1001, 1675],
[ 1316, -1596, -460, 1500],
[ 1212, 214, -644, -696],
[ -501, 338, 1197, -841],
[ -587, -469, -1101, 24],
[-1205, 1910, 659, 1232],
[ -150, 398, 594, 394],
[ 34, -663, 235, -334],
[-1580, 647, 239, -351],
[-2177, -345, 1215, -1494],
[ 1923, 329, -152, 1128]])
x = xo/1000.
class HoldIt(object):
def __init__(self, name):
self.name = name
def save(self, what=None, filename=None, header=True, useinstant=True,
comment=None):
if what is None:
what = (i for i in self.__dict__ if i[0] != '_')
if header:
txt = ['import numpy as np\nfrom numpy import array\n\n']
if useinstant:
txt.append('class Holder(object):\n pass\n\n')
else:
txt = []
if useinstant:
txt.append('%s = Holder()' % self.name)
prefix = '%s.' % self.name
else:
prefix = ''
if not comment is None:
txt.append("%scomment = '%s'" % (prefix, comment))
for x in what:
txt.append('%s%s = %s' % (prefix, x, repr(getattr(self,x))))
txt.extend(['','']) #add empty lines at end
if not filename is None:
file(filename, 'a+').write('\n'.join(txt))
return txt
def generate_princomp(xo, filen='testsave.py'):
# import mlabwrap only when run as script
import mlabwrap
from mlabwrap import mlab
np.set_printoptions(precision=14, linewidth=100)
data = HoldIt('data')
data.xo = xo
data.save(filename='testsave.py', comment='generated data, divide by 1000')
res_princomp = HoldIt('princomp1')
res_princomp.coef, res_princomp.factors, res_princomp.values = \
mlab.princomp(x, nout=3)
res_princomp.save(filename=filen, header=False,
comment='mlab.princomp(x, nout=3)')
res_princomp = HoldIt('princomp2')
res_princomp.coef, res_princomp.factors, res_princomp.values = \
mlab.princomp(x[:20,], nout=3)
np.set_printoptions(precision=14, linewidth=100)
res_princomp.save(filename=filen, header=False,
comment='mlab.princomp(x[:20,], nout=3)')
res_princomp = HoldIt('princomp3')
res_princomp.coef, res_princomp.factors, res_princomp.values = \
mlab.princomp(x[:20,]-x[:20,].mean(0), nout=3)
np.set_printoptions(precision=14, linewidth=100)
res_princomp.save(filename=filen, header=False,
comment='mlab.princomp(x[:20,]-x[:20,].mean(0), nout=3)')
def generate_armarep(filen='testsave.py'):
# import mlabwrap only when run as script
import mlabwrap
from mlabwrap import mlab
res_armarep = HoldIt('armarep')
res_armarep.ar = np.array([1., -0.5, +0.8])
res_armarep.ma = np.array([1., -0.6, 0.08])
res_armarep.marep = mlab.garchma(-res_armarep.ar[1:], res_armarep.ma[1:], 20)
res_armarep.arrep = mlab.garchar(-res_armarep.ar[1:], res_armarep.ma[1:], 20)
res_armarep.save(filename=filen, header=False,
comment=("''mlab.garchma(-res_armarep.ar[1:], res_armarep.ma[1:], 20)\n" +
"mlab.garchar(-res_armarep.ar[1:], res_armarep.ma[1:], 20)''"))
def exampletest():
from statsmodels.sandbox import tsa
arrep = tsa.arma_impulse_response(res_armarep.ma, res_armarep.ar, nobs=21)[1:]
marep = tsa.arma_impulse_response(res_armarep.ar, res_armarep.ma, nobs=21)[1:]
assert_array_almost_equal(res_armarep.marep.ravel(), marep, 14)
#difference in sign convention to matlab for AR term
assert_array_almost_equal(-res_armarep.arrep.ravel(), arrep, 14)
if __name__ == '__main__':
import mlabwrap
from mlabwrap import mlab
import savedrvs
xo = savedrvs.rvsdata.xar2
x100 = xo[-100:]/1000.
x1000 = xo/1000.
filen = 'testsavetls.py'
res_pacf = HoldIt('mlpacf')
res_pacf.comment = 'mlab.parcorr(x, [], 2, nout=3)'
res_pacf.pacf100, res_pacf.lags100, res_pacf.bounds100 = \
mlab.parcorr(x100, [], 2, nout=3)
res_pacf.pacf1000, res_pacf.lags1000, res_pacf.bounds1000 = \
mlab.parcorr(x1000, [], 2, nout=3)
res_pacf.save(filename=filen, header=True)
res_acf = HoldIt('mlacf')
res_acf.comment = 'mlab.autocorr(x, [], 2, nout=3)'
res_acf.acf100, res_acf.lags100, res_acf.bounds100 = \
mlab.autocorr(x100, [], 2, nout=3)
res_acf.acf1000, res_acf.lags1000, res_acf.bounds1000 = \
mlab.autocorr(x1000, [], 2, nout=3)
res_acf.save(filename=filen, header=False)
res_ccf = HoldIt('mlccf')
res_ccf.comment = 'mlab.crosscorr(x[4:], x[:-4], [], 2, nout=3)'
res_ccf.ccf100, res_ccf.lags100, res_ccf.bounds100 = \
mlab.crosscorr(x100[4:], x100[:-4], [], 2, nout=3)
res_ccf.ccf1000, res_ccf.lags1000, res_ccf.bounds1000 = \
mlab.crosscorr(x1000[4:], x1000[:-4], [], 2, nout=3)
res_ccf.save(filename=filen, header=False)
res_ywar = HoldIt('mlywar')
res_ywar.comment = "mlab.ar(x100-x100.mean(), 10, 'yw').a.ravel()"
mbaryw = mlab.ar(x100-x100.mean(), 10, 'yw')
res_ywar.arcoef100 = np.array(mbaryw.a.ravel())
mbaryw = mlab.ar(x1000-x1000.mean(), 20, 'yw')
res_ywar.arcoef1000 = np.array(mbaryw.a.ravel())
res_ywar.save(filename=filen, header=False)
| bsd-3-clause | 0376429c335875feee6649ab1b5b10a6 | 35.674797 | 86 | 0.495345 | 2.551471 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/examples/ex_feasible_gls_het_0.py | 3 | 6390 | # -*- coding: utf-8 -*-
"""Examples for linear model with heteroscedasticity estimated by feasible GLS
These are examples to check the results during developement.
The assumptions:
We have a linear model y = X*beta where the variance of an observation depends
on some explanatory variable Z (`exog_var`).
linear_model.WLS estimated the model for a given weight matrix
here we want to estimate also the weight matrix by two step or iterative WLS
Created on Wed Dec 21 12:28:17 2011
Author: Josef Perktold
"""
import numpy as np
from numpy.testing import assert_almost_equal
from statsmodels.regression.linear_model import OLS, WLS, GLS
from statsmodels.regression.feasible_gls import GLSHet, GLSHet2
from statsmodels.tools.tools import add_constant
examples = ['ex1']
if 'ex1' in examples:
nsample = 300 #different pattern last graph with 100 or 200 or 500
sig = 0.5
np.random.seed(9876789) #9876543)
X = np.random.randn(nsample, 3)
X = np.column_stack((np.ones((nsample,1)), X))
beta = [1, 0.5, -0.5, 1.]
y_true2 = np.dot(X, beta)
x1 = np.linspace(0, 1, nsample)
gamma = np.array([1, 3.])
#with slope 3 instead of two, I get negative weights, Not correct
# - was misspecified, but the negative weights are still possible with identity link
#gamma /= gamma.sum() #normalize assuming x1.max is 1
z_true = add_constant(x1)
winv = np.dot(z_true, gamma)
het_params = sig**2 * np.array([1, 3.]) # for squared
sig2_het = sig**2 * winv
weights_dgp = 1/winv
weights_dgp /= weights_dgp.max() #should be already normalized - NOT check normalization
#y2[:nsample*6/10] = y_true2[:nsample*6/10] + sig*1. * np.random.normal(size=nsample*6/10)
z0 = np.zeros(nsample)
z0[(nsample * 5)//10:] = 1 #dummy for 2 halfs of sample
z0 = add_constant(z0)
z1 = add_constant(x1)
noise = np.sqrt(sig2_het) * np.random.normal(size=nsample)
y2 = y_true2 + noise
X2 = X[:,[0,2]] #misspecified, missing regressor in main equation
X2 = X #correctly specigied
res_ols = OLS(y2, X2).fit()
print 'OLS beta estimates'
print res_ols.params
print 'OLS stddev of beta'
print res_ols.bse
print '\nWLS'
mod0 = GLSHet2(y2, X2, exog_var=winv)
res0 = mod0.fit()
print 'new version'
mod1 = GLSHet(y2, X2, exog_var=winv)
res1 = mod1.iterative_fit(2)
print 'WLS beta estimates'
print res1.params
print res0.params
print 'WLS stddev of beta'
print res1.bse
#compare with previous version GLSHet2, refactoring check
#assert_almost_equal(res1.params, np.array([ 0.37642521, 1.51447662]))
#this fails ??? more iterations? different starting weights?
print res1.model.weights/res1.model.weights.max()
#why is the error so small in the estimated weights ?
assert_almost_equal(res1.model.weights/res1.model.weights.max(), weights_dgp, 14)
print 'residual regression params'
print res1.results_residual_regression.params
print 'scale of model ?'
print res1.scale
print 'unweighted residual variance, note unweighted mean is not zero'
print res1.resid.var()
#Note weighted mean is zero:
#(res1.model.weights * res1.resid).mean()
doplots = True #False
if doplots:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(x1, y2, 'o')
plt.plot(x1, y_true2, 'b-', label='true')
plt.plot(x1, res1.fittedvalues, 'r-', label='fwls')
plt.plot(x1, res_ols.fittedvalues, '--', label='ols')
plt.legend()
#the next only works if w has finite support, discrete/categorical
#z = (w[:,None] == [1,4]).astype(float) #dummy variable
#z = (w0[:,None] == np.unique(w0)).astype(float) #dummy variable
#changed z0 contains dummy and constant
mod2 = GLSHet(y2, X2, exog_var=z0)
res2 = mod2.iterative_fit(3)
print res2.params
import statsmodels.api as sm
#z = sm.add_constant(w, prepend=True)
z = sm.add_constant(x1/x1.max())
mod3 = GLSHet(y2, X2, exog_var=z1)#, link=sm.families.links.log())
res3 = mod3.iterative_fit(20)
error_var_3 = res3.mse_resid/res3.model.weights
print res3.params
print "np.array(res3.model.history['ols_params'])"
print np.array(res3.model.history['ols_params'])
print "np.array(res3.model.history['self_params'])"
print np.array(res3.model.history['self_params'])
#Models 2 and 3 are equivalent with different parameterization of Z
print np.unique(res2.model.weights) #for discrete z only, only a few uniques
print np.unique(res3.model.weights)
print res3.summary()
print '\n\nResults of estimation of weights'
print '--------------------------------'
print res3.results_residual_regression.summary()
if doplots:
plt.figure()
plt.plot(x1, y2, 'o')
plt.plot(x1, y_true2, 'b-', label='true')
plt.plot(x1, res1.fittedvalues, '-', label='fwls1')
plt.plot(x1, res2.fittedvalues, '-', label='fwls2')
plt.plot(x1, res3.fittedvalues, '-', label='fwls3')
plt.plot(x1, res_ols.fittedvalues, '--', label='ols')
plt.legend()
plt.figure()
plt.ylim(0, 5)
res_e2 = OLS(noise**2, z).fit()
plt.plot(noise**2, 'bo', alpha=0.5, label='dgp error**2')
plt.plot(res_e2.fittedvalues, lw=2, label='ols for noise**2')
#plt.plot(res3.model.weights, label='GLSHet weights')
plt.plot(error_var_3, lw=2, label='GLSHet error var')
plt.plot(res3.resid**2, 'ro', alpha=0.5, label='resid squared')
#plt.plot(weights_dgp, label='DGP weights')
plt.plot(sig**2 * winv, lw=2, label='DGP error var')
plt.legend()
plt.show()
'''Note these are close but maybe biased because of skewed distribution
>>> res3.mse_resid/res3.model.weights[-10:]
array([ 1.03115871, 1.03268209, 1.03420547, 1.03572885, 1.03725223,
1.03877561, 1.04029899, 1.04182237, 1.04334575, 1.04486913])
>>> res_e2.fittedvalues[-10:]
array([ 1.0401953 , 1.04171386, 1.04323242, 1.04475098, 1.04626954,
1.0477881 , 1.04930666, 1.05082521, 1.05234377, 1.05386233])
>>> sig**2 * w[-10:]
array([ 0.98647295, 0.98797595, 0.98947896, 0.99098196, 0.99248497,
0.99398798, 0.99549098, 0.99699399, 0.99849699, 1. ])
'''
| bsd-3-clause | d64a9fcc5f4274e674cf489e4e53d9cb | 35.936416 | 94 | 0.641158 | 2.966574 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/tsa/tests/results/results_ar.py | 35 | 9626 | import numpy as np
import os
class ARLagResults(object):
"""
Results are from R vars::VARselect for sunspot data.
Comands run were
var_select <- VARselect(SUNACTIVITY, lag.max=16, type=c("const"))
"""
def __init__(self, type="const"):
# order of results is AIC, HQ, SC, FPE
if type == "const":
ic = [6.311751824815273, 6.321813007357017, 6.336872456958734,
551.009492543133547, 5.647615009344886, 5.662706783157502,
5.685295957560077, 283.614444209634655, 5.634199640773091,
5.654322005856580, 5.684440905060013, 279.835333966272003,
5.639415797766900, 5.664568754121261, 5.702217378125553,
281.299267441683185, 5.646102475432464, 5.676286023057697,
5.721464371862848, 283.187210932784524, 5.628416873122441,
5.663631012018546, 5.716339085624555, 278.223839284844701,
5.584204185137150, 5.624448915304128, 5.684686713710994,
266.191975554941564, 5.541163244029505, 5.586438565467356,
5.654206088675081, 254.979353737235556, 5.483155367013447,
5.533461279722170, 5.608758527730753, 240.611088468544949,
5.489939895595428, 5.545276399575022, 5.628103372384465,
242.251199397394288, 5.496713895370946, 5.557080990621412,
5.647437688231713, 243.900349905069504, 5.503539311586831,
5.568936998108170, 5.666823420519329, 245.573823561989144,
5.510365149977393, 5.580793427769605, 5.686209574981622,
247.259396991133599, 5.513740912139918, 5.589199781203001,
5.702145653215877, 248.099655693709479, 5.515627471325321,
5.596116931659277, 5.716592528473011, 248.572915484827206,
5.515935627515806, 5.601455679120634, 5.729461000735226,
248.654927915301300]
self.ic = np.asarray(ic).reshape(4,-1, order='F')
class ARResultsOLS(object):
"""
Results of fitting an AR(9) model to the sunspot data.
Results were taken from Stata using the var command.
"""
def __init__(self, constant=True):
self.avobs = 300.
if constant:
self.params = [ 6.7430535917332, 1.1649421971129, -.40535742259304,
-.16653934246587, .14980629416032, -.09462417064796,
.00491001240749, .0504665930841, -.08635349190816,
.25349103194757]
# These are returned by stata VAR, using the (V)AR scale/sigma
# we return the true OLS bse by default
# the stata residuals can be achived by np.sqrt(np.diag(res1.cov_params()))
self.bse_stata = [2.413485601, .0560359041, .0874490762,
.0900894414, .0899348339, .0900100797,
.0898385666, .0896997939, .0869773089,
.0559505756]
# The below are grom gretl's ARIMA command with conditional maxium likelihood
self.bse_gretl = [2.45474, 0.0569939, 0.0889440, 0.0916295,
0.0914723, 0.0915488, 0.0913744, 0.0912332,
0.0884642, 0.0569071]
self.rmse = 15.1279294937327
self.fpe = 236.4827257929261
self.llf = -1235.559128419549
#NOTE: we use a different definition of these ic than Stata
# but our order selection results agree with R VARselect
# close to Stata for Lutkepohl but we penalize the ic for the trend terms
# self.bic = 8.427186938618863
# self.aic = 8.30372752279699
# self.hqic = 8.353136159250697
#NOTE: predictions were taken from gretl, but agree with Stata
# test predict
#TODO: remove one of the files
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"AROLSConstantPredict.csv")
predictresults = np.loadtxt(filename)
fv = predictresults[:300,0]
pv = predictresults[300:,1]
pv_lb = predictresults[300:,2]
pv_ub = predictresults[300:,3]
pv_se = predictresults[300:,4]
del predictresults
# cases - in sample predict
# n = -1, start = 0 (fitted values)
self.FVOLSnneg1start0 = fv
# n=-1, start=9
self.FVOLSnneg1start9 = fv
# n=-1, start=100
self.FVOLSnneg1start100 = fv[100-9:]
# n = 200, start = 0
self.FVOLSn200start0 = fv[:192]
# n = 200, start = 200
self.FVOLSn200start200 = np.hstack((fv[200-9:],pv[:101-9]))
# n = 200, start = -109 use above
self.FVOLSn200startneg109 = self.FVOLSn200start200
# n = 100, start = 325, post-sample forecasting
self.FVOLSn100start325 = np.hstack((fv[-1],pv))
# n = 301, start = 9
self.FVOLSn301start9 = np.hstack((fv,pv[:2]))
# n = 301, start = 0
self.FVOLSdefault = fv
# n = 4, start = 312
self.FVOLSn4start312 = np.hstack((fv[-1],pv[:8]))
# n = 15, start = 312
self.FVOLSn15start312 = np.hstack((fv[-1],pv[:19]))
elif not constant:
self.params = [1.19582389902985, -0.40591818219637,
-0.15813796884843, 0.16620079925202,
-0.08570200254617, 0.01876298948686,
0.06130211910707, -0.08461507700047,
0.27995084653313]
self.bse_stata = [.055645055, .088579237, .0912031179, .0909032462,
.0911161784, .0908611473, .0907743174, .0880993504,
.0558560278]
self.bse_gretl = [0.0564990, 0.0899386, 0.0926027, 0.0922983,
0.0925145, 0.0922555, 0.0921674, 0.0894513,
0.0567132]
self.rmse = 15.29712618677774
self.sigma = 226.9820074869752
self.llf = -1239.41217278661
# See note above
# self.bic = 8.433861292817106
# self.hqic = 8.367215591385756
# self.aic = 8.322747818577421
self.fpe = 241.0221316614273
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"AROLSNoConstantPredict.csv")
predictresults = np.loadtxt(filename)
fv = predictresults[:300,0]
pv = predictresults[300:,1]
pv_lb = predictresults[300:,2]
pv_ub = predictresults[300:,3]
pv_se = predictresults[300:,4]
del predictresults
# cases - in sample predict
# n = -1, start = 0 (fitted values)
self.FVOLSnneg1start0 = fv
# n=-1, start=9
self.FVOLSnneg1start9 = fv
# n=-1, start=100
self.FVOLSnneg1start100 = fv[100-9:]
# n = 200, start = 0
self.FVOLSn200start0 = fv[:192]
# n = 200, start = 200
self.FVOLSn200start200 = np.hstack((fv[200-9:],pv[:101-9]))
# n = 200, start = -109 use above
self.FVOLSn200startneg109 = self.FVOLSn200start200
# n = 100, start = 325, post-sample forecasting
self.FVOLSn100start325 = np.hstack((fv[-1],pv))
# n = 301, start = 9
self.FVOLSn301start9 = np.hstack((fv,pv[:2]))
# n = 301, start = 0
self.FVOLSdefault = fv
# n = 4, start = 312
self.FVOLSn4start312 = np.hstack((fv[-1],pv[:8]))
# n = 15, start = 312
self.FVOLSn15start312 = np.hstack((fv[-1],pv[:19]))
class ARResultsMLE(object):
"""
Results of fitting an AR(9) model to the sunspot data using exact MLE.
Results were taken from gretl.
"""
def __init__(self, constant=True):
self.avobs = 300
if constant:
# NOTE: Stata's estimated parameters differ from gretl
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"ARMLEConstantPredict.csv")
filename2 = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'results_ar_forecast_mle_dynamic.csv')
predictresults = np.loadtxt(filename, delimiter=",")
year = predictresults[:,0]
pv = predictresults[:,1]
dynamicpv = np.genfromtxt(filename2, delimiter=",", skip_header=1)
# cases - in sample predict
# start = 0 (fitted values)
self.FVMLEdefault = pv[:309]
# start=9
self.FVMLEstart9end308 = pv[9:309]
# start=100, end=309
self.FVMLEstart100end308 = pv[100:309]
# start = 0, end
self.FVMLEstart0end200 = pv[:201]
# n = 200, start = 200
self.FVMLEstart200end334 = pv[200:]
# start = 309, end=334 post-sample forecasting
self.FVMLEstart308end334 = pv[308:]
# end = 310, start = 9
self.FVMLEstart9end309 = pv[9:310]
# end = 301, start = 0
self.FVMLEstart0end301 = pv[:302]
# end = 312, start = 4
self.FVMLEstart4end312 = pv[4:313]
# end = 7, start = 2
self.FVMLEstart2end7 = pv[2:8]
self.fcdyn = dynamicpv[:,0]
self.fcdyn2 = dynamicpv[:,1]
self.fcdyn3 = dynamicpv[:,2]
self.fcdyn4 = dynamicpv[:,3]
else:
pass
| bsd-3-clause | 08fae5bdf524f435559e28955c2a0122 | 43.155963 | 80 | 0.554436 | 3.159173 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/regression/tests/results/results_regression.py | 3 | 8038 | """
Hard-coded results for test_regression
"""
### REGRESSION MODEL RESULTS : OLS, GLS, WLS, AR###
import numpy as np
class Longley(object):
'''
The results for the Longley dataset were obtained from NIST
http://www.itl.nist.gov/div898/strd/general/dataarchive.html
Other results were obtained from Stata
'''
def __init__(self):
self.params = ( 15.0618722713733, -0.358191792925910E-01,
-2.02022980381683, -1.03322686717359, -0.511041056535807E-01,
1829.15146461355, -3482258.63459582)
self.bse = (84.9149257747669, 0.334910077722432E-01,
0.488399681651699, 0.214274163161675, 0.226073200069370,
455.478499142212, 890420.383607373)
self.scale = 92936.0061673238
self.rsquared = 0.995479004577296
self.rsquared_adj = 0.99246501
self.df_model = 6
self.df_resid = 9
self.ess = 184172401.944494
self.ssr = 836424.055505915
self.mse_model = 30695400.3240823
self.mse_resid = 92936.0061673238
self.mse_total = (self.ess + self.ssr) / (self.df_model + self.df_resid)
self.fvalue = 330.285339234588
self.llf = -109.6174
self.aic = 233.2349
self.bic = 238.643
self.pvalues = np.array([ 0.86314083, 0.31268106, 0.00253509,
0.00094437, 0.8262118 , 0.0030368 , 0.0035604 ])
#pvalues from rmodelwrap
self.resid = np.array((267.34003, -94.01394, 46.28717, -410.11462,
309.71459, -249.31122, -164.04896, -13.18036, 14.30477, 455.39409,
-17.26893, -39.05504, -155.54997, -85.67131, 341.93151,
-206.75783))
def conf_int(self): # a method to be consistent with sm
return [(-177.0291,207.1524), (-.111581,.0399428),(-3.125065,
-.9153928),(-1.517948,-.5485049),(-.5625173,.4603083),
(798.7873,2859.515),(-5496529,-1467987)]
HC0_se=(51.22035, 0.02458, 0.38324, 0.14625, 0.15821,
428.38438, 832212)
HC1_se=(68.29380, 0.03277, 0.51099, 0.19499, 0.21094,
571.17917, 1109615)
HC2_se=(67.49208, 0.03653, 0.55334, 0.20522, 0.22324,
617.59295, 1202370)
HC3_se=(91.11939, 0.05562, 0.82213, 0.29879, 0.32491,
922.80784, 1799477)
class LongleyGls(object):
'''
The following results were obtained from running the test script with R.
'''
def __init__(self):
self.params = (6.73894832e-02, -4.74273904e-01, 9.48988771e+04)
self.bse = (1.07033903e-02, 1.53385472e-01, 1.39447723e+04)
self.llf = -121.4294962954981
self.fittedvalues = [59651.8255, 60860.1385, 60226.5336, 61467.1268,
63914.0846, 64561.9553, 64935.9028, 64249.1684, 66010.0426,
66834.7630, 67612.9309, 67018.8998, 68918.7758, 69310.1280,
69181.4207, 70598.8734]
self.resid = [671.174465, 261.861502, -55.533603, -280.126803,
-693.084618, -922.955349, 53.097212, -488.168351, 8.957367,
1022.236970, 556.069099, -505.899787, -263.775842, 253.871965,
149.579309, -47.873374]
self.scale = 542.443043098**2
self.tvalues = [6.296088, -3.092039, 6.805337]
self.pvalues = [2.761673e-05, 8.577197e-03, 1.252284e-05]
self.bic = 253.118790021
self.aic = 250.858992591
class CCardWLS(object):
def __init__(self):
self.params = [-2.6941851611, 158.426977524, -7.24928987289,
60.4487736936, -114.10886935]
self.bse = [3.807306306, 76.39115431, 9.724337321, 58.55088753,
139.6874965]
#NOTE: we compute the scale differently than they do for analytic
#weights
self.scale = 189.0025755829012 ** 2
self.rsquared = .2549143871187359
self.rsquared_adj = .2104316639616448
self.df_model = 4
self.df_resid = 67
self.ess = 818838.8079468152
self.ssr = 2393372.229657007
self.mse_model = 818838.8079468152 / 4
self.mse_resid = 2393372.229657007 / 67
self.mse_total = (self.ess + self.ssr) / 71.
self.fvalue = 5.730638077585917
self.llf = -476.9792946562806
self.aic = 963.95858931256
self.bic = 975.34191990764
# pvalues from R
self.pvalues = [0.4816259843354, 0.0419360764848, 0.4585895209814,
0.3055904431658, 0.4168883565685]
self.resid = [-286.964904785, -128.071563721, -405.860900879,
-20.1363945007, -169.824432373, -82.6842575073,
-283.314300537, -52.1719360352, 433.822174072,
-190.607543945, -118.839683533, -133.97076416,
-85.5728149414, 66.8180847168, -107.571769714,
-149.883285522, -140.972610474, 75.9255981445,
-135.979736328, -415.701263428, 130.080032349,
25.2313785553, 1042.14013672, -75.6622238159,
177.336639404, 315.870544434, -8.72801017761,
240.823760986, 54.6106033325, 65.6312484741,
-40.9218444824, 24.6115856171, -131.971786499,
36.1587944031, 92.5052108765, -136.837036133,
242.73274231, -65.0315093994, 20.1536407471,
-15.8874826431, 27.3513431549, -173.861785889,
-113.121154785, -37.1303443909, 1510.31530762,
582.916931152, -17.8628063202, -132.77381897,
-108.896934509, 12.4665794373, -122.014572144,
-158.986968994, -175.798873901, 405.886505127,
99.3692703247, 85.3450698853, -179.15007019,
-34.1245117188, -33.4909172058, -20.7287139893,
-116.217689514, 53.8837738037, -52.1533050537,
-100.632293701, 34.9342498779, -96.6685943604,
-367.32925415, -40.1300048828, -72.8692245483,
-60.8728256226, -35.9937324524, -222.944747925]
def conf_int(self): # a method to be consistent with sm
return [( -10.2936, 4.90523), ( 5.949595, 310.9044),
(-26.65915, 12.16057), (-56.41929, 177.3168),
(-392.9263, 164.7085)]
class LongleyRTO(object):
def __init__(self):
# Regression Through the Origin model
# from Stata, make sure you force double to replicate
self.params = [-52.993523, .07107319, -.42346599, -.57256869,
-.41420348, 48.417859]
self.bse = [129.5447812, .0301663805, .4177363573, .2789908665,
.3212848136, 17.68947719]
self.scale = 475.1655079819532**2
self.rsquared = .9999670130705958
self.rsquared_adj = .9999472209129532
self.df_model = 6
self.df_resid = 10
self.ess = 68443718827.40025
self.ssr = 2257822.599757476
self.mse_model = 68443718827.40025 / 6
self.mse_resid = 2257822.599757476 / 10
self.mse_total = (self.ess + self.ssr) / 16.
self.fvalue = 50523.39573737409
self.llf = -117.5615983965251
self.aic = 247.123196793
self.bic = 251.758729126
self.pvalues = [0.6911082828354, 0.0402241925699, 0.3346175334102,
0.0672506018552, 0.2263470345100, 0.0209367642585]
self.resid = [279.902740479, -130.324661255, 90.7322845459,
-401.312530518, -440.467681885, -543.54510498,
201.321121216, 215.908889771, 73.0936813354, 913.216918945,
424.824859619, -8.56475830078, -361.329742432,
27.3456058502, 151.28956604, -492.499359131]
def conf_int(self):
return [(-341.6373, 235.6502), ( .0038583, .1382881),
(-1.354241, .5073086), (-1.194199, .0490617),
(-1.130071, .3016637), ( 9.003248, 87.83247)]
| bsd-3-clause | 7148b2425383849980cc9356a254c87b | 45.732558 | 80 | 0.573277 | 2.711876 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/sandbox/examples/thirdparty/ex_ratereturn.py | 4 | 4353 | # -*- coding: utf-8 -*-
"""Playing with correlation of DJ-30 stock returns
this uses pickled data that needs to be created with findow.py
to see graphs, uncomment plt.show()
Created on Sat Jan 30 16:30:18 2010
Author: josef-pktd
"""
import numpy as np
import matplotlib.finance as fin
import matplotlib.pyplot as plt
import datetime as dt
import pandas as pa
import pickle
import statsmodels.api as sm
import statsmodels.sandbox as sb
import statsmodels.sandbox.tools as sbtools
from statsmodels.graphics.correlation import plot_corr, plot_corr_grid
try:
rrdm = pickle.load(file('dj30rr','rb'))
except Exception: #blanket for any unpickling error
print "Error with unpickling, a new pickle file can be created with findow_1"
raise
ticksym = rrdm.columns.tolist()
rr = rrdm.values[1:400]
rrcorr = np.corrcoef(rr, rowvar=0)
plot_corr(rrcorr, xnames=ticksym)
nvars = rrcorr.shape[0]
plt.figure()
plt.hist(rrcorr[np.triu_indices(nvars,1)])
plt.title('Correlation Coefficients')
xreda, facta, evaa, evea = sbtools.pcasvd(rr)
evallcs = (evaa).cumsum()
print evallcs/evallcs[-1]
xred, fact, eva, eve = sbtools.pcasvd(rr, keepdim=4)
pcacorr = np.corrcoef(xred, rowvar=0)
plot_corr(pcacorr, xnames=ticksym, title='Correlation PCA')
resid = rr-xred
residcorr = np.corrcoef(resid, rowvar=0)
plot_corr(residcorr, xnames=ticksym, title='Correlation Residuals')
plt.matshow(residcorr)
plt.imshow(residcorr, cmap=plt.cm.jet, interpolation='nearest',
extent=(0,30,0,30), vmin=-1.0, vmax=1.0)
plt.colorbar()
normcolor = (0,1) #False #True
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
plot_corr(rrcorr, xnames=ticksym, normcolor=normcolor, ax=ax)
ax2 = fig.add_subplot(2,2,3)
#pcacorr = np.corrcoef(xred, rowvar=0)
plot_corr(pcacorr, xnames=ticksym, title='Correlation PCA',
normcolor=normcolor, ax=ax2)
ax3 = fig.add_subplot(2,2,4)
plot_corr(residcorr, xnames=ticksym, title='Correlation Residuals',
normcolor=normcolor, ax=ax3)
import matplotlib as mpl
images = [c for ax in fig.axes for c in ax.get_children() if isinstance(c, mpl.image.AxesImage)]
print images
print ax.get_children()
#cax = fig.add_subplot(2,2,2)
#[0.85, 0.1, 0.075, 0.8]
fig. subplots_adjust(bottom=0.1, right=0.9, top=0.9)
cax = fig.add_axes([0.9, 0.1, 0.025, 0.8])
fig.colorbar(images[0], cax=cax)
fig.savefig('corrmatrixgrid.png', dpi=120)
has_sklearn = True
try:
import sklearn
except ImportError:
has_sklearn = False
print 'sklearn not available'
def cov2corr(cov):
std_ = np.sqrt(np.diag(cov))
corr = cov / np.outer(std_, std_)
return corr
if has_sklearn:
from sklearn.covariance import LedoitWolf, OAS, MCD
lw = LedoitWolf(store_precision=False)
lw.fit(rr, assume_centered=False)
cov_lw = lw.covariance_
corr_lw = cov2corr(cov_lw)
oas = OAS(store_precision=False)
oas.fit(rr, assume_centered=False)
cov_oas = oas.covariance_
corr_oas = cov2corr(cov_oas)
mcd = MCD()#.fit(rr, reweight=None)
mcd.fit(rr, assume_centered=False)
cov_mcd = mcd.covariance_
corr_mcd = cov2corr(cov_mcd)
titles = ['raw correlation', 'lw', 'oas', 'mcd']
normcolor = None
fig = plt.figure()
for i, c in enumerate([rrcorr, corr_lw, corr_oas, corr_mcd]):
#for i, c in enumerate([np.cov(rr, rowvar=0), cov_lw, cov_oas, cov_mcd]):
ax = fig.add_subplot(2,2,i+1)
plot_corr(c, xnames=None, title=titles[i],
normcolor=normcolor, ax=ax)
images = [c for ax in fig.axes for c in ax.get_children() if isinstance(c, mpl.image.AxesImage)]
fig. subplots_adjust(bottom=0.1, right=0.9, top=0.9)
cax = fig.add_axes([0.9, 0.1, 0.025, 0.8])
fig.colorbar(images[0], cax=cax)
corrli = [rrcorr, corr_lw, corr_oas, corr_mcd, pcacorr]
diffssq = np.array([[((ci-cj)**2).sum() for ci in corrli]
for cj in corrli])
diffsabs = np.array([[np.max(np.abs(ci-cj)) for ci in corrli]
for cj in corrli])
print diffssq
print '\nmaxabs'
print diffsabs
fig.savefig('corrmatrix_sklearn.png', dpi=120)
fig2 = plot_corr_grid(corrli+[residcorr], ncols=3,
titles=titles+['pca', 'pca-residual'],
xnames=[], ynames=[])
fig2.savefig('corrmatrix_sklearn_2.png', dpi=120)
#plt.show()
#plt.close('all')
| bsd-3-clause | 8d6faab6e3e932b3c98ed65d86eed32a | 28.815068 | 100 | 0.668504 | 2.769084 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/stats/power.py | 3 | 47401 | # -*- coding: utf-8 -*-
#pylint: disable-msg=W0142
"""Statistical power, solving for nobs, ... - trial version
Created on Sat Jan 12 21:48:06 2013
Author: Josef Perktold
Example
roundtrip - root with respect to all variables
calculated, desired
nobs 33.367204205 33.367204205
effect 0.5 0.5
alpha 0.05 0.05
power 0.8 0.8
TODO:
refactoring
- rename beta -> power, beta (type 2 error is beta = 1-power) DONE
- I think the current implementation can handle any kinds of extra keywords
(except for maybe raising meaningful exceptions
- streamline code, I think internally classes can be merged
how to extend to k-sample tests?
user interface for different tests that map to the same (internal) test class
- sequence of arguments might be inconsistent,
arg and/or kwds so python checks what's required and what can be None.
- templating for docstrings ?
"""
import numpy as np
from scipy import stats, optimize
from statsmodels.tools.rootfinding import brentq_expanding
def ttest_power(effect_size, nobs, alpha, df=None, alternative='two-sided'):
    '''Calculate the power of a t-test.

    Parameters
    ----------
    effect_size : float
        standardized effect size, mean divided by the standard deviation.
    nobs : int or float
        sample size, number of observations.
    alpha : float in interval (0,1)
        significance level, probability of a type I error.
    df : int or float, optional
        degrees of freedom; defaults to ``nobs - 1`` (one-sample t-test).
    alternative : string, 'two-sided' (default), 'larger' or 'smaller'
        whether the power is calculated for a two-sided or one-sided test.

    Returns
    -------
    power : float
        probability of rejecting the Null Hypothesis when the Alternative
        Hypothesis is true.
    '''
    if df is None:
        df = nobs - 1

    two_sided = alternative in ('two-sided', '2s')
    if two_sided:
        tail_alpha = alpha / 2.  # split the significance level over both tails
    elif alternative in ('smaller', 'larger'):
        tail_alpha = alpha
    else:
        raise ValueError("alternative has to be 'two-sided', 'larger' " +
                         "or 'smaller'")

    nc = effect_size * np.sqrt(nobs)  # noncentrality parameter
    power = 0
    if two_sided or alternative == 'larger':
        crit_upp = stats.t.isf(tail_alpha, df)
        # use private scipy methods: the public ones return nan for
        # negative noncentrality, see https://github.com/scipy/scipy/issues/2667
        if np.any(np.isnan(crit_upp)):
            power = np.nan
        else:
            power = stats.nct._sf(crit_upp, df, nc)
    if two_sided or alternative == 'smaller':
        crit_low = stats.t.ppf(tail_alpha, df)
        if np.any(np.isnan(crit_low)):
            power = np.nan
        else:
            power += stats.nct._cdf(crit_low, df, nc)
    return power
def normal_power(effect_size, nobs, alpha, alternative='two-sided', sigma=1.):
    '''Calculate the power of a normally distributed test statistic.

    Parameters
    ----------
    effect_size : float
        standardized effect size, mean divided by the standard deviation.
    nobs : int or float
        sample size, number of observations.
    alpha : float in interval (0,1)
        significance level, probability of a type I error.
    alternative : string, 'two-sided' (default), 'larger' or 'smaller'
        whether the power is calculated for a two-sided or one-sided test.
    sigma : float
        standard deviation of the test statistic, default 1.

    Returns
    -------
    power : float
        probability of rejecting the Null Hypothesis when the Alternative
        Hypothesis is true.
    '''
    if alternative in ('two-sided', '2s'):
        tail_alpha = alpha / 2.  # split the significance level over both tails
    elif alternative in ('smaller', 'larger'):
        tail_alpha = alpha
    else:
        raise ValueError("alternative has to be 'two-sided', 'larger' " +
                         "or 'smaller'")

    # shift of the test statistic under the alternative
    shift = effect_size * np.sqrt(nobs) / sigma
    power = 0
    if alternative in ('two-sided', '2s', 'larger'):
        power = stats.norm.sf(stats.norm.isf(tail_alpha) - shift)
    if alternative in ('two-sided', '2s', 'smaller'):
        power += stats.norm.cdf(stats.norm.ppf(tail_alpha) - shift)
    return power
def ftest_anova_power(effect_size, nobs, alpha, k_groups=2, df=None):
'''power for ftest for one way anova with k equal sized groups
nobs total sample size, sum over all groups
should be general nobs observations, k_groups restrictions ???
'''
df_num = nobs - k_groups
df_denom = k_groups - 1
crit = stats.f.isf(alpha, df_denom, df_num)
pow_ = stats.ncf.sf(crit, df_denom, df_num, effect_size**2 * nobs)
return pow_#, crit
def ftest_power(effect_size, df_num, df_denom, alpha, ncc=1):
'''Calculate the power of a F-test.
Parameters
----------
effect_size : float
standardized effect size, mean divided by the standard deviation.
effect size has to be positive.
df_num : int or float
numerator degrees of freedom.
df_denom : int or float
denominator degrees of freedom.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
ncc : int
degrees of freedom correction for non-centrality parameter.
see Notes
Returns
-------
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
Notes
-----
sample size is given implicitly by df_num
set ncc=0 to match t-test, or f-test in LikelihoodModelResults.
ncc=1 matches the non-centrality parameter in R::pwr::pwr.f2.test
ftest_power with ncc=0 should also be correct for f_test in regression
models, with df_num and d_denom as defined there. (not verified yet)
'''
nc = effect_size**2 * (df_denom + df_num + ncc)
crit = stats.f.isf(alpha, df_denom, df_num)
pow_ = stats.ncf.sf(crit, df_denom, df_num, nc)
return pow_ #, crit, nc
#class based implementation
#--------------------------
class Power(object):
    '''Statistical Power calculations, Base Class

    so far this could all be class methods

    Subclasses implement ``power`` for a specific test; this base class
    provides the generic root-finding in ``solve_power`` and the plotting
    helper ``plot_power``.
    '''

    def __init__(self, **kwds):
        # attach arbitrary keyword arguments as instance attributes
        self.__dict__.update(kwds)
        # used only for instance level start values
        # start values for the fsolve fallback, keyed by parameter name
        self.start_ttp = dict(effect_size=0.01, nobs=10., alpha=0.15,
                              power=0.6, nobs1=10., ratio=1,
                              df_num=10, df_denom=3 # for FTestPower
                              )
        # TODO: nobs1 and ratio are for ttest_ind,
        #      need start_ttp for each test/class separately,
        # possible rootfinding problem for effect_size, starting small seems to
        # work
        from collections import defaultdict
        # bracket/start options for brentq_expanding, keyed by parameter name;
        # missing keys fall back to an empty dict (defaultdict)
        self.start_bqexp = defaultdict(dict)
        for key in ['nobs', 'nobs1', 'df_num', 'df_denom']:
            self.start_bqexp[key] = dict(low=2., start_upp=50.)
        # NOTE: 'df_denom' was set in the loop above as well; this second
        # loop deliberately overrides it with a lower bound of 1.
        for key in ['df_denom']:
            self.start_bqexp[key] = dict(low=1., start_upp=50.)
        for key in ['ratio']:
            self.start_bqexp[key] = dict(low=1e-8, start_upp=2)
        # probability-like parameter: keep strictly inside (0, 1)
        for key in ['alpha']:
            self.start_bqexp[key] = dict(low=1e-12, upp=1 - 1e-12)

    def power(self, *args, **kwds):
        # abstract: subclasses must implement the power calculation
        raise NotImplementedError

    def _power_identity(self, *args, **kwds):
        # residual function for root finding: power(params) - target_power
        power_ = kwds.pop('power')
        return self.power(*args, **kwds) - power_

    def solve_power(self, **kwds):
        '''solve for any one of the parameters of a t-test

        for t-test the keywords are:
            effect_size, nobs, alpha, power

        exactly one needs to be ``None``, all others need numeric values

        *attaches*

        cache_fit_res : list
            Cache of the result of the root finding procedure for the latest
            call to ``solve_power``, mainly for debugging purposes.
            The first element is the success indicator, one if successful.
            The remaining elements contain the return information of the up to
            three solvers that have been tried.

        '''
        #TODO: maybe use explicit kwds,
        # nicer but requires inspect? and not generic across tests
        # I'm duplicating this in the subclass to get informative docstring

        # find the single keyword that was left as None; that is the unknown
        key = [k for k,v in kwds.iteritems() if v is None]
        #print kwds, key;
        if len(key) != 1:
            raise ValueError('need exactly one keyword that is None')
        key = key[0]

        if key == 'power':
            # solving for power is a direct evaluation, no root finding needed
            del kwds['power']
            return self.power(**kwds)

        self._counter = 0
        def func(x):
            # residual as a function of the unknown parameter only;
            # note: mutates the enclosing ``kwds`` dict in place
            kwds[key] = x
            fval = self._power_identity(**kwds)
            self._counter += 1
            #print self._counter,
            # NOTE(review): the counter is incremented on *every* evaluation,
            # not only on NaNs, so the message below slightly overstates
            if self._counter > 500:
                raise RuntimeError('possible endless loop (500 NaNs)')
            # map NaN to +inf so bracketing solvers move away from it
            if np.isnan(fval):
                return np.inf
            else:
                return fval

        #TODO: I'm using the following so I get a warning when start_ttp is not defined
        try:
            start_value = self.start_ttp[key]
        except KeyError:
            start_value = 0.9
            print 'Warning: using default start_value for', key

        fit_kwds = self.start_bqexp[key]
        fit_res = []
        #print vars()
        # solver 1: brentq with expanding bracket
        try:
            val, res = brentq_expanding(func, full_output=True, **fit_kwds)
            failed = False
            fit_res.append(res)
        except ValueError:
            failed = True
            fit_res.append(None)

        success = None
        if (not failed) and res.converged:
            success = 1
        else:
            # try backup
            #TODO: check more cases to make this robust
            # solver 2: fsolve from a fixed start value
            val, infodict, ier, msg = optimize.fsolve(func, start_value,
                                                      full_output=True) #scalar
            #val = optimize.newton(func, start_value) #scalar
            fval = infodict['fvec']
            fit_res.append(infodict)
            if ier == 1 and np.abs(fval) < 1e-4 :
                success = 1
            else:
                #print infodict
                # solver 3: bounded brentq, only valid for parameters that
                # live in the open unit interval
                if key in ['alpha', 'power', 'effect_size']:
                    val, r = optimize.brentq(func, 1e-8, 1-1e-8,
                                             full_output=True) #scalar
                    success = 1 if r.converged else 0
                    fit_res.append(r)
                else:
                    success = 0

        if not success == 1:
            import warnings
            from statsmodels.tools.sm_exceptions import ConvergenceWarning
            warnings.warn('finding solution failed', ConvergenceWarning)

        #attach fit_res, for reading only, should be needed only for debugging
        fit_res.insert(0, success)
        self.cache_fit_res = fit_res
        return val

    def plot_power(self, dep_var='nobs', nobs=None, effect_size=None,
                   alpha=0.05, ax=None, title=None, plt_kwds=None, **kwds):
        '''plot power with number of observations or effect size on x-axis

        Parameters
        ----------
        dep_var : string in ['nobs', 'effect_size', 'alpha']
            This specifies which variable is used for the horizontal axis.
            If dep_var='nobs' (default), then one curve is created for each
            value of ``effect_size``. If dep_var='effect_size' or alpha, then
            one curve is created for each value of ``nobs``.
        nobs : scalar or array_like
            specifies the values of the number of observations in the plot
        effect_size : scalar or array_like
            specifies the values of the effect_size in the plot
        alpha : float or array_like
            The significance level (type I error) used in the power
            calculation. Can only be more than a scalar, if ``dep_var='alpha'``
        ax : None or axis instance
            If ax is None, than a matplotlib figure is created. If ax is a
            matplotlib axis instance, then it is reused, and the plot elements
            are created with it.
        title : string
            title for the axis. Use an empty string, ``''``, to avoid a title.
        plt_kwds : None or dict
            not used yet
        kwds : optional keywords for power function
            These remaining keyword arguments are used as arguments to the
            power function. Many power function support ``alternative`` as a
            keyword argument, two-sample test support ``ratio``.

        Returns
        -------
        fig : matplotlib figure instance

        Notes
        -----
        This works only for classes where the ``power`` method has
        ``effect_size``, ``nobs`` and ``alpha`` as the first three arguments.
        If the second argument is ``nobs1``, then the number of observations
        in the plot are those for the first sample.
        TODO: fix this for FTestPower and GofChisquarePower

        TODO: maybe add line variable, if we want more than nobs and effectsize
        '''
        #if pwr_kwds is None:
        #    pwr_kwds = {}
        from statsmodels.graphics import utils
        from statsmodels.graphics.plottools import rainbow
        fig, ax = utils.create_mpl_ax(ax)
        import matplotlib.pyplot as plt
        colormap = plt.cm.Dark2 #pylint: disable-msg=E1101
        plt_alpha = 1 #0.75
        lw = 2
        if dep_var == 'nobs':
            # one curve per effect size, nobs on the x-axis
            # NOTE(review): the rainbow colors are immediately overwritten by
            # the colormap colors in this branch and the next
            colors = rainbow(len(effect_size))
            colors = [colormap(i) for i in np.linspace(0, 0.9, len(effect_size))]
            for ii, es in enumerate(effect_size):
                power = self.power(es, nobs, alpha, **kwds)
                ax.plot(nobs, power, lw=lw, alpha=plt_alpha,
                        color=colors[ii], label='es=%4.2F' % es)
                xlabel = 'Number of Observations'
        elif dep_var in ['effect size', 'effect_size', 'es']:
            # one curve per sample size, effect size on the x-axis
            colors = rainbow(len(nobs))
            colors = [colormap(i) for i in np.linspace(0, 0.9, len(nobs))]
            for ii, n in enumerate(nobs):
                power = self.power(effect_size, n, alpha, **kwds)
                ax.plot(effect_size, power, lw=lw, alpha=plt_alpha,
                        color=colors[ii], label='N=%4.2F' % n)
                xlabel = 'Effect Size'
        elif dep_var in ['alpha']:
            # experimental nobs as defining separate lines
            colors = rainbow(len(nobs))

            for ii, n in enumerate(nobs):
                power = self.power(effect_size, n, alpha, **kwds)
                ax.plot(alpha, power, lw=lw, alpha=plt_alpha,
                        color=colors[ii], label='N=%4.2F' % n)
                xlabel = 'alpha'
        else:
            raise ValueError('depvar not implemented')

        if title is None:
            title = 'Power of Test'
        ax.set_xlabel(xlabel)
        ax.set_title(title)
        ax.legend(loc='lower right')
        return fig
class TTestPower(Power):
    '''Statistical Power calculations for one sample or paired sample t-test

    '''

    def power(self, effect_size, nobs, alpha, df=None, alternative='two-sided'):
        '''Calculate the power of a t-test for one sample or paired samples.

        Parameters
        ----------
        effect_size : float
            standardized effect size, mean divided by the standard deviation;
            has to be positive.
        nobs : int or float
            sample size, number of observations.
        alpha : float in interval (0,1)
            significance level, e.g. 0.05, the probability of a type I error.
        df : int or float, optional
            degrees of freedom. By default None, which uses the df of the
            one sample or paired ttest, ``df = nobs1 - 1``.
        alternative : string, 'two-sided' (default), 'larger', 'smaller'
            whether power is calculated for a two-sided (default) or a
            one-sided test ('larger' or 'smaller').

        Returns
        -------
        power : float
            Probability that the test correctly rejects the Null Hypothesis
            if the Alternative Hypothesis is true, e.g. 0.8.
        '''
        # thin wrapper around the module-level function
        return ttest_power(effect_size, nobs, alpha, df=df,
                           alternative=alternative)

    # this override exists only to provide explicit keywords and a docstring
    # on top of the generic solver in the base class
    def solve_power(self, effect_size=None, nobs=None, alpha=None, power=None,
                    alternative='two-sided'):
        '''solve for any one parameter of the power of a one sample t-test

        Exactly one of ``effect_size``, ``nobs``, ``alpha`` and ``power``
        needs to be ``None``; all others need numeric values. The missing
        one is solved for.

        This also covers a paired t-test, with the effect size defined in
        terms of the mean difference and ``nobs`` the number of pairs.

        Parameters
        ----------
        effect_size : float
            standardized effect size, mean divided by the standard deviation;
            has to be positive.
        nobs : int or float
            sample size, number of observations.
        alpha : float in interval (0,1)
            significance level, the probability of a type I error.
        power : float in interval (0,1)
            power of the test, one minus the probability of a type II error.
        alternative : string, 'two-sided' (default) or 'one-sided'
            whether the power is calculated for a two-sided (default) or
            one sided test. 'one-sided' assumes we are in the relevant tail.

        Returns
        -------
        value : float
            The value of the parameter that was set to None in the call;
            it solves the power equation given the remaining parameters.

        *attaches*

        cache_fit_res : list
            Cache of the root finding result of the latest call, mainly for
            debugging. First element is the success indicator; the remaining
            elements hold the return information of up to three solvers.

        Notes
        -----
        Root finding uses scipy.optimize: first ``brentq`` with an expanding
        bracket, then ``fsolve`` as a fallback, and finally fixed-bounds
        ``brentq`` for ``alpha``, ``power`` and ``effect_size``. There can
        still be cases where all of these fail.
        '''
        return super(TTestPower, self).solve_power(effect_size=effect_size,
                                                   nobs=nobs,
                                                   alpha=alpha,
                                                   power=power,
                                                   alternative=alternative)
class TTestIndPower(Power):
    '''Statistical Power calculations for t-test for two independent sample

    currently only uses pooled variance

    '''

    def power(self, effect_size, nobs1, alpha, ratio=1, df=None,
              alternative='two-sided'):
        '''Calculate the power of a t-test for two independent sample

        Parameters
        ----------
        effect_size : float
            standardized effect size, difference between the two means
            divided by the standard deviation; has to be positive.
        nobs1 : int or float
            number of observations of sample 1; sample 2 has
            ``nobs2 = nobs1 * ratio`` observations.
        alpha : float in interval (0,1)
            significance level, the probability of a type I error.
        ratio : float
            ratio of the sample 2 size relative to sample 1, default 1.
        df : int or float, optional
            degrees of freedom. By default None, which uses the pooled
            variance df, ``df = (nobs1 - 1 + nobs2 - 1)``.
        alternative : string, 'two-sided' (default), 'larger', 'smaller'
            whether power is calculated for a two-sided (default) or a
            one-sided test ('larger' or 'smaller').

        Returns
        -------
        power : float
            Probability that the test correctly rejects the Null Hypothesis
            if the Alternative Hypothesis is true, e.g. 0.8.
        '''
        nobs2 = nobs1 * ratio
        if df is None:
            # pooled-variance degrees of freedom
            df = (nobs1 - 1 + nobs2 - 1)

        # effective sample size: n1*n2 / (n1 + n2)
        nobs_eff = 1. / (1. / nobs1 + 1. / nobs2)
        return ttest_power(effect_size, nobs_eff, alpha, df=df,
                           alternative=alternative)

    # this override exists only to provide explicit keywords and a docstring
    # on top of the generic solver in the base class
    def solve_power(self, effect_size=None, nobs1=None, alpha=None, power=None,
                    ratio=1., alternative='two-sided'):
        '''solve for any one parameter of the power of a two sample t-test

        Exactly one of ``effect_size``, ``nobs1``, ``alpha``, ``power`` and
        ``ratio`` needs to be ``None``; all others need numeric values. The
        missing one is solved for.

        Parameters
        ----------
        effect_size : float
            standardized effect size, difference between the two means
            divided by the standard deviation; has to be positive.
        nobs1 : int or float
            number of observations of sample 1; sample 2 has
            ``nobs2 = nobs1 * ratio`` observations.
        alpha : float in interval (0,1)
            significance level, the probability of a type I error.
        power : float in interval (0,1)
            power of the test, one minus the probability of a type II error.
        ratio : float
            ratio of the sample 2 size relative to sample 1. Default is 1;
            to solve for ratio it has to be explicitly set to None.
        alternative : string, 'two-sided' (default), 'larger', 'smaller'
            whether power is calculated for a two-sided (default) or a
            one-sided test ('larger' or 'smaller').

        Returns
        -------
        value : float
            The value of the parameter that was set to None in the call;
            it solves the power equation given the remaining parameters.

        Notes
        -----
        Root finding uses scipy.optimize: first ``brentq`` with an expanding
        bracket, then ``fsolve`` as a fallback, and finally fixed-bounds
        ``brentq`` for ``alpha``, ``power`` and ``effect_size``. There can
        still be cases where all of these fail.
        '''
        return super(TTestIndPower, self).solve_power(effect_size=effect_size,
                                                      nobs1=nobs1,
                                                      alpha=alpha,
                                                      power=power,
                                                      ratio=ratio,
                                                      alternative=alternative)
class NormalIndPower(Power):
    '''Statistical Power calculations for z-test for two independent samples.

    currently only uses pooled variance

    '''

    def __init__(self, ddof=0, **kwds):
        # ddof: degrees-of-freedom correction applied to each sample size
        # (e.g. ddof=3 for tests of a correlation coefficient)
        self.ddof = ddof
        super(NormalIndPower, self).__init__(**kwds)

    def power(self, effect_size, nobs1, alpha, ratio=1,
              alternative='two-sided'):
        '''Calculate the power of a z-test for two independent samples.

        Parameters
        ----------
        effect_size : float
            standardized effect size, difference between the two means
            divided by the standard deviation; has to be positive.
        nobs1 : int or float
            number of observations of sample 1; sample 2 has
            ``nobs2 = nobs1 * ratio`` observations. ``ratio`` can be set to
            zero to obtain the power of a one sample test.
        alpha : float in interval (0,1)
            significance level, the probability of a type I error.
        ratio : float
            ratio of the sample 2 size relative to sample 1, default 1.
        alternative : string, 'two-sided' (default), 'larger', 'smaller'
            whether power is calculated for a two-sided (default) or a
            one-sided test ('larger' or 'smaller').

        Returns
        -------
        power : float
            Probability that the test correctly rejects the Null Hypothesis
            if the Alternative Hypothesis is true, e.g. 0.8.
        '''
        ddof = self.ddof  # for correlation, ddof=3
        if ratio > 0:
            nobs2 = nobs1 * ratio
            # effective sample size, equivalent to n1*n2/(n1+n2) after the
            # ddof correction of each sample
            nobs_eff = 1. / (1. / (nobs1 - ddof) + 1. / (nobs2 - ddof))
        else:
            # ratio == 0 signals a one sample test
            nobs_eff = nobs1 - ddof
        return normal_power(effect_size, nobs_eff, alpha,
                            alternative=alternative)

    # this override exists only to provide explicit keywords and a docstring
    # on top of the generic solver in the base class
    def solve_power(self, effect_size=None, nobs1=None, alpha=None, power=None,
                    ratio=1., alternative='two-sided'):
        '''solve for any one parameter of the power of a two sample z-test

        Exactly one of ``effect_size``, ``nobs1``, ``alpha``, ``power`` and
        ``ratio`` needs to be ``None``; all others need numeric values. The
        missing one is solved for.

        Parameters
        ----------
        effect_size : float
            standardized effect size, difference between the two means
            divided by the standard deviation. If ratio=0, this is the
            standardized mean in the one sample test.
        nobs1 : int or float
            number of observations of sample 1; sample 2 has
            ``nobs2 = nobs1 * ratio`` observations. ``ratio`` can be set to
            zero to obtain the power of a one sample test.
        alpha : float in interval (0,1)
            significance level, the probability of a type I error.
        power : float in interval (0,1)
            power of the test, one minus the probability of a type II error.
        ratio : float
            ratio of the sample 2 size relative to sample 1. Default is 1;
            to solve for ratio it has to be explicitly set to None.
        alternative : string, 'two-sided' (default), 'larger', 'smaller'
            whether power is calculated for a two-sided (default) or a
            one-sided test ('larger' or 'smaller').

        Returns
        -------
        value : float
            The value of the parameter that was set to None in the call;
            it solves the power equation given the remaining parameters.

        Notes
        -----
        Root finding uses scipy.optimize: first ``brentq`` with an expanding
        bracket, then ``fsolve`` as a fallback, and finally fixed-bounds
        ``brentq`` for ``alpha``, ``power`` and ``effect_size``. There can
        still be cases where all of these fail.
        '''
        return super(NormalIndPower, self).solve_power(effect_size=effect_size,
                                                       nobs1=nobs1,
                                                       alpha=alpha,
                                                       power=power,
                                                       ratio=ratio,
                                                       alternative=alternative)
class FTestPower(Power):
    '''Statistical Power calculations for generic F-test

    '''

    def power(self, effect_size, df_num, df_denom, alpha, ncc=1):
        '''Calculate the power of a F-test.

        Parameters
        ----------
        effect_size : float
            standardized effect size, mean divided by the standard
            deviation; has to be positive.
        df_num : int or float
            numerator degrees of freedom.
        df_denom : int or float
            denominator degrees of freedom.
        alpha : float in interval (0,1)
            significance level, the probability of a type I error.
        ncc : int
            degrees of freedom correction for the non-centrality parameter;
            see Notes.

        Returns
        -------
        power : float
            Probability that the test correctly rejects the Null Hypothesis
            if the Alternative Hypothesis is true, e.g. 0.8.

        Notes
        -----
        sample size is given implicitly by df_num

        set ncc=0 to match t-test, or f-test in LikelihoodModelResults.
        ncc=1 matches the non-centrality parameter in R::pwr::pwr.f2.test

        ftest_power with ncc=0 should also be correct for f_test in
        regression models, with df_num and d_denom as defined there.
        (not verified yet)
        '''
        # thin wrapper around the module-level function
        return ftest_power(effect_size, df_num, df_denom, alpha, ncc=ncc)

    # this override exists only to provide explicit keywords and a docstring
    # on top of the generic solver in the base class
    def solve_power(self, effect_size=None, df_num=None, df_denom=None,
                    nobs=None, alpha=None, power=None, ncc=1):
        '''solve for any one parameter of the power of a F-test

        Exactly one of ``effect_size``, ``df_num``, ``df_denom``, ``alpha``
        and ``power`` needs to be ``None``; all others need numeric values.
        The missing one is solved for.

        Parameters
        ----------
        effect_size : float
            standardized effect size, mean divided by the standard
            deviation; has to be positive.
        df_num : int or float
            numerator degrees of freedom.
        df_denom : int or float
            denominator degrees of freedom.
        nobs : unused
            NOTE(review): accepted but not forwarded to the solver —
            confirm whether it was intended to set df implicitly.
        alpha : float in interval (0,1)
            significance level, the probability of a type I error.
        power : float in interval (0,1)
            power of the test, one minus the probability of a type II error.
        ncc : int
            degrees of freedom correction for the non-centrality parameter.

        Returns
        -------
        value : float
            The value of the parameter that was set to None in the call;
            it solves the power equation given the remaining parameters.

        Notes
        -----
        Root finding uses scipy.optimize: first ``brentq`` with an expanding
        bracket, then ``fsolve`` as a fallback, and finally fixed-bounds
        ``brentq`` for ``alpha``, ``power`` and ``effect_size``. There can
        still be cases where all of these fail.
        '''
        return super(FTestPower, self).solve_power(effect_size=effect_size,
                                                   df_num=df_num,
                                                   df_denom=df_denom,
                                                   alpha=alpha,
                                                   power=power,
                                                   ncc=ncc)
class FTestAnovaPower(Power):
'''Statistical Power calculations F-test for one factor balanced ANOVA
'''
def power(self, effect_size, nobs, alpha, k_groups=2):
'''Calculate the power of a F-test for one factor ANOVA.
Parameters
----------
effect_size : float
standardized effect size, mean divided by the standard deviation.
effect size has to be positive.
nobs : int or float
sample size, number of observations.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
k_groups : int or float
number of groups in the ANOVA or k-sample comparison. Default is 2.
Returns
-------
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
'''
return ftest_anova_power(effect_size, nobs, alpha, k_groups=k_groups)
#method is only added to have explicit keywords and docstring
def solve_power(self, effect_size=None, nobs=None, alpha=None, power=None,
k_groups=2):
'''solve for any one parameter of the power of a F-test
for the one sample F-test the keywords are:
effect_size, nobs, alpha, power
Exactly one needs to be ``None``, all others need numeric values.
Parameters
----------
effect_size : float
standardized effect size, mean divided by the standard deviation.
effect size has to be positive.
nobs : int or float
sample size, number of observations.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
power : float in interval (0,1)
power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
Returns
-------
value : float
The value of the parameter that was set to None in the call. The
value solves the power equation given the remainding parameters.
Notes
-----
The function uses scipy.optimize for finding the value that satisfies
the power equation. It first uses ``brentq`` with a prior search for
bounds. If this fails to find a root, ``fsolve`` is used. If ``fsolve``
also fails, then, for ``alpha``, ``power`` and ``effect_size``,
``brentq`` with fixed bounds is used. However, there can still be cases
where this fails.
'''
# update start values for root finding
if not k_groups is None:
self.start_ttp['nobs'] = k_groups * 10
self.start_bqexp['nobs'] = dict(low=k_groups * 2,
start_upp=k_groups * 10)
# first attempt at special casing
if effect_size is None:
return self._solve_effect_size(effect_size=effect_size,
nobs=nobs,
alpha=alpha,
k_groups=k_groups,
power=power)
return super(FTestAnovaPower, self).solve_power(effect_size=effect_size,
nobs=nobs,
alpha=alpha,
k_groups=k_groups,
power=power)
def _solve_effect_size(self, effect_size=None, nobs=None, alpha=None,
power=None, k_groups=2):
'''experimental, test failure in solve_power for effect_size
'''
def func(x):
effect_size = x
return self._power_identity(effect_size=effect_size,
nobs=nobs,
alpha=alpha,
k_groups=k_groups,
power=power)
val, r = optimize.brentq(func, 1e-8, 1-1e-8, full_output=True)
if not r.converged:
print r
return val
class GofChisquarePower(Power):
    '''Statistical Power calculations for one sample chisquare test

    '''

    def power(self, effect_size, nobs, alpha, n_bins, ddof=0):
        #alternative='two-sided'):
        '''Calculate the power of a chisquare test for one sample

        Only two-sided alternative is implemented

        Parameters
        ----------
        effect_size : float
            standardized effect size, according to Cohen's definition.
            see :func:`statsmodels.stats.gof.chisquare_effectsize`
        nobs : int or float
            sample size, number of observations.
        alpha : float in interval (0,1)
            significance level, e.g. 0.05, is the probability of a type I
            error, that is wrong rejections if the Null Hypothesis is true.
        n_bins : int
            number of bins or cells in the distribution.
        ddof : int
            delta degrees of freedom for the chisquare distribution,
            default 0.

        Returns
        -------
        power : float
            Power of the test, e.g. 0.8, is one minus the probability of a
            type II error. Power is the probability that the test correctly
            rejects the Null Hypothesis if the Alternative Hypothesis is true.
        '''
        from statsmodels.stats.gof import chisquare_power
        # bug fix: the literal ``ddof=0`` was passed before, silently
        # ignoring a user supplied ddof; the default behavior is unchanged
        return chisquare_power(effect_size, nobs, n_bins, alpha, ddof=ddof)

    # this override exists only to provide explicit keywords and a docstring
    # on top of the generic solver in the base class
    def solve_power(self, effect_size=None, nobs=None, alpha=None,
                    power=None, n_bins=2):
        '''solve for any one parameter of the power of a one sample chisquare-test

        for the one sample chisquare-test the keywords are:
            effect_size, nobs, alpha, power

        Exactly one needs to be ``None``, all others need numeric values.

        n_bins needs to be defined, a default=2 is used.

        Parameters
        ----------
        effect_size : float
            standardized effect size, according to Cohen's definition.
            see :func:`statsmodels.stats.gof.chisquare_effectsize`
        nobs : int or float
            sample size, number of observations.
        alpha : float in interval (0,1)
            significance level, e.g. 0.05, is the probability of a type I
            error, that is wrong rejections if the Null Hypothesis is true.
        power : float in interval (0,1)
            power of the test, e.g. 0.8, is one minus the probability of a
            type II error. Power is the probability that the test correctly
            rejects the Null Hypothesis if the Alternative Hypothesis is true.
        n_bins : int
            number of bins or cells in the distribution

        Returns
        -------
        value : float
            The value of the parameter that was set to None in the call. The
            value solves the power equation given the remaining parameters.

        Notes
        -----
        The function uses scipy.optimize for finding the value that satisfies
        the power equation. It first uses ``brentq`` with a prior search for
        bounds. If this fails to find a root, ``fsolve`` is used. If ``fsolve``
        also fails, then, for ``alpha``, ``power`` and ``effect_size``,
        ``brentq`` with fixed bounds is used. However, there can still be cases
        where this fails.
        '''
        return super(GofChisquarePower, self).solve_power(effect_size=effect_size,
                                                          nobs=nobs,
                                                          n_bins=n_bins,
                                                          alpha=alpha,
                                                          power=power)
class _GofChisquareIndPower(Power):
'''Statistical Power calculations for chisquare goodness-of-fit test
TODO: this is not working yet
for 2sample case need two nobs in function
no one-sided chisquare test, is there one? use normal distribution?
-> drop one-sided options?
'''
def power(self, effect_size, nobs1, alpha, ratio=1,
alternative='two-sided'):
'''Calculate the power of a chisquare for two independent sample
Parameters
----------
effect_size : float
standardize effect size, difference between the two means divided
by the standard deviation. effect size has to be positive.
nobs1 : int or float
number of observations of sample 1. The number of observations of
sample two is ratio times the size of sample 1,
i.e. ``nobs2 = nobs1 * ratio``
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
ratio : float
ratio of the number of observations in sample 2 relative to
sample 1. see description of nobs1
The default for ratio is 1; to solve for ration given the other
arguments it has to be explicitely set to None.
alternative : string, 'two-sided' (default) or 'one-sided'
extra argument to choose whether the power is calculated for a
two-sided (default) or one sided test.
'one-sided' assumes we are in the relevant tail.
Returns
-------
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
'''
from statsmodels.stats.gof import chisquare_power
nobs2 = nobs1*ratio
#equivalent to nobs = n1*n2/(n1+n2)=n1*ratio/(1+ratio)
nobs = 1./ (1. / nobs1 + 1. / nobs2)
return chisquare_power(effect_size, nobs, alpha)
#method is only added to have explicit keywords and docstring
def solve_power(self, effect_size=None, nobs1=None, alpha=None, power=None,
ratio=1., alternative='two-sided'):
'''solve for any one parameter of the power of a two sample z-test
for z-test the keywords are:
effect_size, nobs1, alpha, power, ratio
exactly one needs to be ``None``, all others need numeric values
Parameters
----------
effect_size : float
standardize effect size, difference between the two means divided
by the standard deviation.
nobs1 : int or float
number of observations of sample 1. The number of observations of
sample two is ratio times the size of sample 1,
i.e. ``nobs2 = nobs1 * ratio``
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
power : float in interval (0,1)
power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
ratio : float
ratio of the number of observations in sample 2 relative to
sample 1. see description of nobs1
The default for ratio is 1; to solve for ration given the other
arguments it has to be explicitely set to None.
alternative : string, 'two-sided' (default) or 'one-sided'
extra argument to choose whether the power is calculated for a
two-sided (default) or one sided test.
'one-sided' assumes we are in the relevant tail.
Returns
-------
value : float
The value of the parameter that was set to None in the call. The
value solves the power equation given the remainding parameters.
Notes
-----
The function uses scipy.optimize for finding the value that satisfies
the power equation. It first uses ``brentq`` with a prior search for
bounds. If this fails to find a root, ``fsolve`` is used. If ``fsolve``
also fails, then, for ``alpha``, ``power`` and ``effect_size``,
``brentq`` with fixed bounds is used. However, there can still be cases
where this fails.
'''
return super(_GofChisquareIndPower, self).solve_power(effect_size=effect_size,
nobs1=nobs1,
alpha=alpha,
power=power,
ratio=ratio,
alternative=alternative)
#shortcut functions
tt_solve_power = TTestPower().solve_power
tt_ind_solve_power = TTestIndPower().solve_power
zt_ind_solve_power = NormalIndPower().solve_power
| bsd-3-clause | ba0eb96387632c056cf7ceb9440d1fb3 | 40.799824 | 89 | 0.577203 | 4.337573 | false | true | false | false |
yarikoptic/pystatsmodels | statsmodels/stats/tests/test_corrpsd.py | 3 | 7727 | # -*- coding: utf-8 -*-
"""Tests for findind a positive semi-definite correlation of covariance matrix
Created on Mon May 27 12:07:02 2013
Author: Josef Perktold
"""
import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
from statsmodels.stats.correlation_tools import (
corr_nearest, corr_clipped, cov_nearest)
def norm_f(x, y):
'''Frobenious norm (squared sum) of difference between two arrays
'''
d = ((x - y)**2).sum()
return np.sqrt(d)
class Holder(object):
pass
# R library Matrix results
cov1_r = Holder()
#> nc <- nearPD(pr, conv.tol = 1e-7, keepDiag = TRUE, doDykstra =FALSE, corr=TRUE)
#> cat_items(nc, prefix="cov1_r.")
cov1_r.mat = '''<S4 object of class structure("dpoMatrix", package = "Matrix")>'''
cov1_r.eigenvalues = np.array([
4.197315628646795, 0.7540460243978023, 0.5077608149667492,
0.3801267599652769, 0.1607508970775889, 4.197315628646795e-08
])
cov1_r.corr = '''TRUE'''
cov1_r.normF = 0.0743805226512533
cov1_r.iterations = 11
cov1_r.rel_tol = 8.288594638441735e-08
cov1_r.converged = '''TRUE'''
#> mkarray2(as.matrix(nc$mat), name="cov1_r.mat")
cov1_r.mat = np.array([
1, 0.487968018215892, 0.642651880010906, 0.4906386709070835,
0.6440990530811909, 0.8087111845493985, 0.487968018215892, 1,
0.5141147294352735, 0.2506688108312097, 0.672351311297074,
0.725832055882795, 0.642651880010906, 0.5141147294352735, 1,
0.596827778712154, 0.5821917790519067, 0.7449631633814129,
0.4906386709070835, 0.2506688108312097, 0.596827778712154, 1,
0.729882058012399, 0.772150225146826, 0.6440990530811909,
0.672351311297074, 0.5821917790519067, 0.729882058012399, 1,
0.813191720191944, 0.8087111845493985, 0.725832055882795,
0.7449631633814129, 0.772150225146826, 0.813191720191944, 1
]).reshape(6,6, order='F')
cov_r = Holder()
#nc <- nearPD(pr+0.01*diag(6), conv.tol = 1e-7, keepDiag = TRUE, doDykstra =FALSE, corr=FALSE)
#> cat_items(nc, prefix="cov_r.")
#cov_r.mat = '''<S4 object of class structure("dpoMatrix", package = "Matrix")>'''
cov_r.eigenvalues = np.array([
4.209897516692652, 0.7668341923072066, 0.518956980021938,
0.390838551407132, 0.1734728460460068, 4.209897516692652e-08
])
cov_r.corr = '''FALSE'''
cov_r.normF = 0.0623948693159157
cov_r.iterations = 11
cov_r.rel_tol = 5.83987595937896e-08
cov_r.converged = '''TRUE'''
#> mkarray2(as.matrix(nc$mat), name="cov_r.mat")
cov_r.mat = np.array([
1.01, 0.486207476951913, 0.6428524769306785, 0.4886092840296514,
0.645175579158233, 0.811533860074678, 0.486207476951913, 1.01,
0.514394615153752, 0.2478398278204047, 0.673852495852274,
0.7297661648968664, 0.6428524769306785, 0.514394615153752, 1.01,
0.5971503271420517, 0.582018469844712, 0.7445177382760834,
0.4886092840296514, 0.2478398278204047, 0.5971503271420517, 1.01,
0.73161232298669, 0.7766852947049376, 0.645175579158233,
0.673852495852274, 0.582018469844712, 0.73161232298669, 1.01,
0.8107916469252828, 0.811533860074678, 0.7297661648968664,
0.7445177382760834, 0.7766852947049376, 0.8107916469252828, 1.01
]).reshape(6,6, order='F')
def test_corr_psd():
# test positive definite matrix is unchanged
x = np.array([[1, -0.2, -0.9], [-0.2, 1, -0.2], [-0.9, -0.2, 1]])
y = corr_nearest(x, n_fact=100)
#print np.max(np.abs(x - y))
assert_almost_equal(x, y, decimal=14)
y = corr_clipped(x)
assert_almost_equal(x, y, decimal=14)
y = cov_nearest(x, n_fact=100)
assert_almost_equal(x, y, decimal=14)
x2 = x + 0.001 * np.eye(3)
y = cov_nearest(x2, n_fact=100)
assert_almost_equal(x2, y, decimal=14)
class CheckCorrPSDMixin(object):
def test_nearest(self):
x = self.x
res_r = self.res
y = corr_nearest(x, threshold=1e-7, n_fact=100)
#print np.max(np.abs(x - y))
assert_almost_equal(y, res_r.mat, decimal=3)
d = norm_f(x, y)
assert_allclose(d, res_r.normF, rtol=0.0015)
evals = np.linalg.eigvalsh(y)
#print 'evals', evals / res_r.eigenvalues[::-1] - 1
assert_allclose(evals, res_r.eigenvalues[::-1], rtol=0.003, atol=1e-7)
#print evals[0] / 1e-7 - 1
assert_allclose(evals[0], 1e-7, rtol=1e-6)
def test_clipped(self):
x = self.x
res_r = self.res
y = corr_clipped(x, threshold=1e-7)
#print np.max(np.abs(x - y)), np.max(np.abs((x - y) / y))
assert_almost_equal(y, res_r.mat, decimal=1)
d = norm_f(x, y)
assert_allclose(d, res_r.normF, rtol=0.15)
evals = np.linalg.eigvalsh(y)
assert_allclose(evals, res_r.eigenvalues[::-1], rtol=0.1, atol=1e-7)
assert_allclose(evals[0], 1e-7, rtol=0.02)
def test_cov_nearest(self):
x = self.x
res_r = self.res
y = cov_nearest(x, method='nearest', threshold=1e-7)
#print np.max(np.abs(x - y))
assert_almost_equal(y, res_r.mat, decimal=2)
d = norm_f(x, y)
assert_allclose(d, res_r.normF, rtol=0.0015)
class TestCovPSD(object):
@classmethod
def setup_class(cls):
x = np.array([ 1, 0.477, 0.644, 0.478, 0.651, 0.826,
0.477, 1, 0.516, 0.233, 0.682, 0.75,
0.644, 0.516, 1, 0.599, 0.581, 0.742,
0.478, 0.233, 0.599, 1, 0.741, 0.8,
0.651, 0.682, 0.581, 0.741, 1, 0.798,
0.826, 0.75, 0.742, 0.8, 0.798, 1]).reshape(6,6)
cls.x = x + 0.01 * np.eye(6)
cls.res = cov_r
def test_cov_nearest(self):
x = self.x
res_r = self.res
y = cov_nearest(x, method='nearest')
#print np.max(np.abs(x - y))
assert_almost_equal(y, res_r.mat, decimal=3)
d = norm_f(x, y)
assert_allclose(d, res_r.normF, rtol=0.001)
y = cov_nearest(x, method='clipped')
#print np.max(np.abs(x - y))
assert_almost_equal(y, res_r.mat, decimal=2)
d = norm_f(x, y)
assert_allclose(d, res_r.normF, rtol=0.15)
class TestCorrPSD1(CheckCorrPSDMixin):
@classmethod
def setup_class(cls):
x = np.array([ 1, 0.477, 0.644, 0.478, 0.651, 0.826,
0.477, 1, 0.516, 0.233, 0.682, 0.75,
0.644, 0.516, 1, 0.599, 0.581, 0.742,
0.478, 0.233, 0.599, 1, 0.741, 0.8,
0.651, 0.682, 0.581, 0.741, 1, 0.798,
0.826, 0.75, 0.742, 0.8, 0.798, 1]).reshape(6,6)
cls.x = x
cls.res = cov1_r
def test_corrpsd_threshold():
x = np.array([[1, -0.9, -0.9], [-0.9, 1, -0.9], [-0.9, -0.9, 1]])
#print np.linalg.eigvalsh(x)
for threshold in [0, 1e-15, 1e-10, 1e-6]:
y = corr_nearest(x, n_fact=100, threshold=threshold)
evals = np.linalg.eigvalsh(y)
#print 'evals', evals, threshold
assert_allclose(evals[0], threshold, rtol=1e-6, atol=1e-15)
y = corr_clipped(x, threshold=threshold)
evals = np.linalg.eigvalsh(y)
#print 'evals', evals, threshold
assert_allclose(evals[0], threshold, rtol=0.25, atol=1e-15)
y = cov_nearest(x, method='nearest', n_fact=100, threshold=threshold)
evals = np.linalg.eigvalsh(y)
#print 'evals', evals, threshold
#print evals[0] / threshold - 1
assert_allclose(evals[0], threshold, rtol=1e-6, atol=1e-15)
y = cov_nearest(x, n_fact=100, threshold=threshold)
evals = np.linalg.eigvalsh(y)
#print 'evals', evals, threshold
#print evals[0] / threshold - 1
assert_allclose(evals[0], threshold, rtol=0.25, atol=1e-15)
| bsd-3-clause | fe218c4077d81fc429eaa7c246a8d0c8 | 36.328502 | 95 | 0.603598 | 2.518579 | false | true | false | false |
yarikoptic/pystatsmodels | statsmodels/graphics/tests/test_boxplots.py | 4 | 1261 | import numpy as np
from numpy.testing import dec
from statsmodels.graphics.boxplots import violinplot, beanplot
from statsmodels.datasets import anes96
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except:
have_matplotlib = False
@dec.skipif(not have_matplotlib)
def test_violinplot_beanplot():
"""Test violinplot and beanplot with the same dataset."""
data = anes96.load_pandas()
party_ID = np.arange(7)
labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
"Independent-Independent", "Independent-Republican",
"Weak Republican", "Strong Republican"]
age = [data.exog['age'][data.endog == id] for id in party_ID]
fig = plt.figure()
ax = fig.add_subplot(111)
violinplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
plt.close(fig)
fig = plt.figure()
ax = fig.add_subplot(111)
beanplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
plt.close(fig)
| bsd-3-clause | 4cfae03f27be64d658afb8cb8d8382eb | 28.325581 | 73 | 0.604282 | 3.552113 | false | true | false | false |
yarikoptic/pystatsmodels | statsmodels/tsa/tests/results/arima112nc_css_results.py | 35 | 44167 | import numpy as np
llf = np.array([-239.75290561974])
nobs = np.array([ 202])
k = np.array([ 4])
k_exog = np.array([ 1])
sigma = np.array([ .79291203639424])
chi2 = np.array([ 35036.682546665])
df_model = np.array([ 3])
k_ar = np.array([ 1])
k_ma = np.array([ 2])
params = np.array([ .99954097483478,
-.69022779461512,
-.20477682541104,
.62870949745886])
cov_params = np.array([ .00007344276568,
-.00016074342677,
-.00018478942445,
8.040251506e-06,
-.00016074342677,
.00094099304774,
.00017233777676,
-.0000145011098,
-.00018478942445,
.00017233777676,
.00103352686916,
.00030686101903,
8.040251506e-06,
-.0000145011098,
.00030686101903,
.00067796985496]).reshape(4,4)
xb = np.array([ 0,
0,
.05104803293943,
.06663129478693,
.02164618112147,
.0773858949542,
.02606418170035,
.09391833096743,
.05710592120886,
.03083370067179,
.07319989800453,
.05287836492062,
.05776296555996,
.09105986356735,
.04293738678098,
.09576436132193,
.06068528071046,
.06157376244664,
.11172580718994,
.06527806818485,
.11443704366684,
.05653077363968,
.08205550909042,
.08481238037348,
.10436166077852,
.0875685736537,
.12320486456156,
.08366665989161,
.13979130983353,
.1902572363615,
.1306214183569,
.21803694963455,
.11079790443182,
.17274764180183,
.1937662512064,
.20047917962074,
.24034893512726,
.21783453226089,
.29279819130898,
.26804205775261,
.28678458929062,
.35651323199272,
.33659368753433,
.35760068893433,
.39895334839821,
.41131839156151,
.36645981669426,
.40991494059563,
.41024547815323,
.32657703757286,
.42312324047089,
.34933325648308,
.35912537574768,
.35077446699142,
.34701564908028,
.37364318966866,
.40170526504517,
.56070649623871,
.41915491223335,
.73478156328201,
.67748892307281,
.7744625210762,
.77825599908829,
.97586625814438,
.88692498207092,
.76232481002808,
.87376874685287,
.83281141519547,
.84783887863159,
.66423743963242,
.84904235601425,
.81613594293594,
.80033475160599,
.95782464742661,
.80624777078629,
.83626395463943,
.91873735189438,
.95130664110184,
1.0939226150513,
1.1171194314957,
1.1004731655121,
1.3512066602707,
1.4703129529953,
1.4805699586868,
1.7385860681534,
1.8268398046494,
1.5489361286163,
1.7446503639221,
1.864644408226,
1.7200467586517,
1.9223358631134,
1.775306224823,
1.5392524003983,
1.4067870378494,
1.9366238117218,
1.2984343767166,
1.1080636978149,
1.3500427007675,
1.2837564945221,
1.2670782804489,
1.3347851037979,
1.2857422828674,
1.1625040769577,
1.2111755609512,
1.0548515319824,
1.2553508281708,
1.0327949523926,
1.0740388631821,
1.222040772438,
.40555971860886,
1.0233588218689,
.84209614992142,
1.0186324119568,
1.0319027900696,
.99487775564194,
1.0439211130142,
.98785293102264,
1.0620124340057,
1.0916963815689,
1.1378232240677,
1.1243290901184,
1.3305295705795,
1.1925677061081,
1.0872994661331,
1.4599523544312,
1.2333589792252,
1.3584797382355,
1.7595859766006,
1.3009568452835,
1.1157965660095,
1.2948887348175,
1.2063180208206,
1.2332669496536,
1.2132470607758,
1.2049551010132,
1.2260574102402,
1.1875206232071,
1.1547852754593,
1.0519831180573,
1.1594845056534,
1.0069926977158,
1.0675266981125,
1.1299223899841,
1.0620901584625,
1.0999356508255,
1.1535499095917,
1.0026944875717,
1.0428657531738,
1.1120204925537,
1.1684119701385,
1.0258769989014,
1.1342295408249,
1.1183958053589,
.91313683986664,
.91156214475632,
1.0540328025818,
.84359037876129,
.75758427381516,
.96401190757751,
.83226495981216,
.8759680390358,
.98239886760712,
.85917687416077,
1.0634194612503,
.99442666769028,
1.153311252594,
1.2288066148758,
1.0869039297104,
1.281947016716,
1.0067318677902,
1.1028815507889,
.82448446750641,
.78489726781845,
1.1850204467773,
.86753690242767,
1.0692945718765,
1.1030179262161,
.8791960477829,
.86451041698456,
1.0455346107483,
1.085998415947,
1.0172398090363,
1.2250980138779,
1.2316122055054,
1.062157869339,
1.3991860151291,
1.0520887374878,
2.2203133106232,
.88833123445511,
1.4289729595184,
1.5206423997879,
.68520504236221,
1.4659557342529,
1.5350053310394,
1.3178979158401,
1.4888265132904,
1.9698411226273,
1.4406447410583,
2.517040014267,
.55537897348404,
-.20722626149654,
1.0899519920349,
1.164245724678])
y = np.array([np.nan,
28.979999542236,
29.201047897339,
29.416631698608,
29.391647338867,
29.617385864258,
29.576063156128,
29.84391784668,
29.897106170654,
29.84083366394,
29.993200302124,
30.032876968384,
30.097763061523,
30.3010597229,
30.262937545776,
30.475763320923,
30.500686645508,
30.541572570801,
30.801725387573,
30.815279006958,
31.054437637329,
31.006530761719,
31.102056503296,
31.20481300354,
31.384363174438,
31.467567443848,
31.703205108643,
31.733665466309,
32.019790649414,
32.47025680542,
32.580623626709,
33.068035125732,
33.010799407959,
33.272747039795,
33.593769073486,
33.900478363037,
34.340347290039,
34.617835998535,
35.192798614502,
35.568042755127,
35.986785888672,
36.656513214111,
37.13659286499,
37.657600402832,
38.29895401001,
38.911319732666,
39.266460418701,
39.809917449951,
40.310245513916,
40.426574707031,
41.023120880127,
41.249336242676,
41.559127807617,
41.850772857666,
42.14701461792,
42.573642730713,
43.101707458496,
44.260707855225,
44.619155883789,
46.334781646729,
47.477489471436,
48.874462127686,
50.078254699707,
51.9758644104,
53.186923980713,
53.762325286865,
54.873767852783,
55.732814788818,
56.647838592529,
56.764236450195,
57.849040985107,
58.716136932373,
59.500335693359,
60.957824707031,
61.606246948242,
62.436264038086,
63.61873626709,
64.85131072998,
66.593925476074,
68.21711730957,
69.600471496582,
71.951202392578,
74.470314025879,
76.680564880371,
79.738586425781,
82.726844787598,
84.148933410645,
86.444648742676,
89.064643859863,
90.820045471191,
93.422332763672,
95.175308227539,
95.939254760742,
96.406784057617,
99.436622619629,
99.398429870605,
99.00806427002,
100.15004730225,
101.08376312256,
102.06707763672,
103.43478393555,
104.58574676514,
105.26250457764,
106.31117248535,
106.75485229492,
108.25534820557,
108.73279571533,
109.57403564453,
111.12203979492,
109.10556030273,
110.52336120605,
111.04209136963,
112.41863250732,
113.73190307617,
114.79488372803,
116.04392242432,
116.98785400391,
118.26200866699,
119.59169769287,
121.03782653809,
122.32432556152,
124.4305267334,
125.69256591797,
126.4873046875,
128.95994567871,
130.13334655762,
131.85847473145,
135.15957641602,
136.00096130371,
136.21580505371,
137.49488830566,
138.40631103516,
139.53326416016,
140.61323547363,
141.70495605469,
142.9260559082,
143.98751831055,
144.95478820801,
145.55198669434,
146.7594909668,
147.30699157715,
148.26751708984,
149.52992248535,
150.46208190918,
151.59992980957,
152.95355224609,
153.60270690918,
154.54286193848,
155.81201171875,
157.2684173584,
158.02587890625,
159.33422851563,
160.51838684082,
160.81312561035,
161.31155395508,
162.55403137207,
162.84359741211,
162.95758056641,
164.16400146484,
164.73225402832,
165.57595825195,
166.88238525391,
167.55917358398,
169.16342163086,
170.29443359375,
172.0532989502,
173.92880249023,
174.9868927002,
176.88195800781,
177.40672302246,
178.50286865234,
178.42448425293,
178.48489379883,
180.48501586914,
180.86753845215,
182.26928710938,
183.70301818848,
184.07919311523,
184.56451416016,
185.94552612305,
187.38600158691,
188.41723632813,
190.32510375977,
192.03161621094,
192.8621673584,
195.19918823242,
195.7520904541,
201.42030334473,
200.28833007813,
202.12896728516,
204.22064208984,
202.58520507813,
205.03996276855,
207.45500183105,
208.65589904785,
210.62182617188,
214.46484375,
215.4376373291,
221.12704467773,
217.44438171387,
211.96676635742,
213.76095581055,
215.63323974609])
resid = np.array([np.nan,
.17000007629395,
.14895272254944,
-.04663083329797,
.14835388958454,
-.067387573421,
.17393657565117,
-.00391817837954,
-.08710660785437,
.07916691154242,
-.01320043392479,
.00712300790474,
.11223520338535,
-.08105963468552,
.11706246435642,
-.03576298803091,
-.0206862706691,
.14842723309994,
-.05172633752227,
.12472246587276,
-.104436814785,
.01346892025322,
.01794487424195,
.07518746703863,
-.00436318712309,
.11243218928576,
-.05320516973734,
.14633287489414,
.26020830869675,
-.02025525644422,
.26937630772591,
-.16803389787674,
.08919904381037,
.12725540995598,
.10623299330473,
.1995185315609,
.05965411290526,
.28216546773911,
.10719951242208,
.13195945322514,
.31321388483047,
.14348678290844,
.16340629756451,
.24240159988403,
.20104511082172,
-.01131687406451,
.13354018330574,
.09008505195379,
-.21024852991104,
.1734229773283,
-.12312019616365,
-.04933403432369,
-.05912613496184,
-.05077522993088,
.05298588424921,
.12635681033134,
.59829473495483,
-.06070650368929,
.98084276914597,
.46521919965744,
.62251031398773,
.42553821206093,
.92174476385117,
.32413294911385,
-.18692423403263,
.23767517507076,
.02623280882835,
.06718628853559,
-.54783964157104,
.23576405644417,
.05095916613936,
-.01613672077656,
.49966445565224,
-.1578253954649,
-.00624855514616,
.26373836398125,
.28126338124275,
.64869183301926,
.50607579946518,
.28288212418556,
.99952530860901,
1.0487948656082,
.72968405485153,
1.319433093071,
1.1614154577255,
-.126842841506,
.55106240510941,
.75534957647324,
.03535716608167,
.67995470762253,
-.02233435213566,
-.775306224823,
-.93925392627716,
1.0932129621506,
-1.3366253376007,
-1.4984313249588,
-.20806217193604,
-.35004270076752,
-.28375649452209,
.03291719779372,
-.13478049635887,
-.48574689030647,
-.16250413656235,
-.61117708683014,
.24515150487423,
-.55535387992859,
-.23279193043709,
.32596263289452,
-2.4220454692841,
.39444333314896,
-.32336190342903,
.3579084277153,
.28136301040649,
.06810333579779,
.20511920750141,
-.04392115399241,
.2121440321207,
.23799060285091,
.30830511450768,
.16217225790024,
.77567237615585,
.06947190314531,
-.29256621003151,
1.012699007988,
-.05995841696858,
.36664715409279,
1.5415141582489,
-.45958289504051,
-.90094763040543,
-.01580573618412,
-.29488867521286,
-.10631188750267,
-.13327611982822,
-.11324090510607,
-.00495810527354,
-.12605135142803,
-.18752062320709,
-.45478835701942,
.04802301898599,
-.45948752760887,
-.1069987937808,
.13247023522854,
-.12992236018181,
.0379159450531,
.20006743073463,
-.35354685783386,
-.10270062834024,
.15713113546371,
.28798860311508,
-.26841807365417,
.17411996424198,
.06576746702194,
-.61839586496353,
-.41313683986664,
.18844394385815,
-.55403280258179,
-.6435934305191,
.24241572618484,
-.26401495933533,
-.03226193040609,
.32402887940407,
-.1823958158493,
.54083228111267,
.13657745718956,
.60556417703629,
.64669179916382,
-.02880969829857,
.61310833692551,
-.48195922374725,
-.00673185009509,
-.90286934375763,
-.72449362277985,
.81510883569717,
-.48502343893051,
.33246004581451,
.33071464300156,
-.50302714109421,
-.3791960477829,
.33548650145531,
.35447460412979,
.01399240642786,
.682772397995,
.474898904562,
-.23161220550537,
.93784213066101,
-.49919214844704,
3.4479112625122,
-2.020316362381,
.41167178750038,
.57102704048157,
-2.3206453323364,
.98880618810654,
.8800373673439,
-.11700607091188,
.47710022330284,
1.8731729984283,
-.46784225106239,
3.1723618507385,
-4.2380332946777,
-5.2703905105591,
.70423555374146,
.70803683996201,
.7517546415329])
yr = np.array([np.nan,
.17000007629395,
.14895272254944,
-.04663083329797,
.14835388958454,
-.067387573421,
.17393657565117,
-.00391817837954,
-.08710660785437,
.07916691154242,
-.01320043392479,
.00712300790474,
.11223520338535,
-.08105963468552,
.11706246435642,
-.03576298803091,
-.0206862706691,
.14842723309994,
-.05172633752227,
.12472246587276,
-.104436814785,
.01346892025322,
.01794487424195,
.07518746703863,
-.00436318712309,
.11243218928576,
-.05320516973734,
.14633287489414,
.26020830869675,
-.02025525644422,
.26937630772591,
-.16803389787674,
.08919904381037,
.12725540995598,
.10623299330473,
.1995185315609,
.05965411290526,
.28216546773911,
.10719951242208,
.13195945322514,
.31321388483047,
.14348678290844,
.16340629756451,
.24240159988403,
.20104511082172,
-.01131687406451,
.13354018330574,
.09008505195379,
-.21024852991104,
.1734229773283,
-.12312019616365,
-.04933403432369,
-.05912613496184,
-.05077522993088,
.05298588424921,
.12635681033134,
.59829473495483,
-.06070650368929,
.98084276914597,
.46521919965744,
.62251031398773,
.42553821206093,
.92174476385117,
.32413294911385,
-.18692423403263,
.23767517507076,
.02623280882835,
.06718628853559,
-.54783964157104,
.23576405644417,
.05095916613936,
-.01613672077656,
.49966445565224,
-.1578253954649,
-.00624855514616,
.26373836398125,
.28126338124275,
.64869183301926,
.50607579946518,
.28288212418556,
.99952530860901,
1.0487948656082,
.72968405485153,
1.319433093071,
1.1614154577255,
-.126842841506,
.55106240510941,
.75534957647324,
.03535716608167,
.67995470762253,
-.02233435213566,
-.775306224823,
-.93925392627716,
1.0932129621506,
-1.3366253376007,
-1.4984313249588,
-.20806217193604,
-.35004270076752,
-.28375649452209,
.03291719779372,
-.13478049635887,
-.48574689030647,
-.16250413656235,
-.61117708683014,
.24515150487423,
-.55535387992859,
-.23279193043709,
.32596263289452,
-2.4220454692841,
.39444333314896,
-.32336190342903,
.3579084277153,
.28136301040649,
.06810333579779,
.20511920750141,
-.04392115399241,
.2121440321207,
.23799060285091,
.30830511450768,
.16217225790024,
.77567237615585,
.06947190314531,
-.29256621003151,
1.012699007988,
-.05995841696858,
.36664715409279,
1.5415141582489,
-.45958289504051,
-.90094763040543,
-.01580573618412,
-.29488867521286,
-.10631188750267,
-.13327611982822,
-.11324090510607,
-.00495810527354,
-.12605135142803,
-.18752062320709,
-.45478835701942,
.04802301898599,
-.45948752760887,
-.1069987937808,
.13247023522854,
-.12992236018181,
.0379159450531,
.20006743073463,
-.35354685783386,
-.10270062834024,
.15713113546371,
.28798860311508,
-.26841807365417,
.17411996424198,
.06576746702194,
-.61839586496353,
-.41313683986664,
.18844394385815,
-.55403280258179,
-.6435934305191,
.24241572618484,
-.26401495933533,
-.03226193040609,
.32402887940407,
-.1823958158493,
.54083228111267,
.13657745718956,
.60556417703629,
.64669179916382,
-.02880969829857,
.61310833692551,
-.48195922374725,
-.00673185009509,
-.90286934375763,
-.72449362277985,
.81510883569717,
-.48502343893051,
.33246004581451,
.33071464300156,
-.50302714109421,
-.3791960477829,
.33548650145531,
.35447460412979,
.01399240642786,
.682772397995,
.474898904562,
-.23161220550537,
.93784213066101,
-.49919214844704,
3.4479112625122,
-2.020316362381,
.41167178750038,
.57102704048157,
-2.3206453323364,
.98880618810654,
.8800373673439,
-.11700607091188,
.47710022330284,
1.8731729984283,
-.46784225106239,
3.1723618507385,
-4.2380332946777,
-5.2703905105591,
.70423555374146,
.70803683996201,
.7517546415329])
mse = np.array([ .95459979772568,
.71522510051727,
.63122135400772,
.6314896941185,
.63029319047928,
.63014930486679,
.62988424301147,
.62969470024109,
.62953060865402,
.6293950676918,
.62928181886673,
.62918740510941,
.62910866737366,
.62904292345047,
.62898802757263,
.6289421916008,
.62890386581421,
.62887191772461,
.62884521484375,
.6288229227066,
.62880426645279,
.6287887096405,
.6287756562233,
.62876480817795,
.62875574827194,
.62874811887741,
.62874180078506,
.62873649597168,
.62873202562332,
.62872833013535,
.62872523069382,
.62872266769409,
.62872052192688,
.62871867418289,
.62871718406677,
.62871593236923,
.62871485948563,
.62871396541595,
.62871325016022,
.62871265411377,
.62871211767197,
.62871170043945,
.62871134281158,
.62871104478836,
.62871074676514,
.6287105679512,
.62871038913727,
.62871026992798,
.62871015071869,
.6287100315094,
.62870991230011,
.62870985269547,
.62870979309082,
.62870973348618,
.62870973348618,
.62870967388153,
.62870967388153,
.62870961427689,
.62870961427689,
.62870955467224,
.62870955467224,
.62870955467224,
.62870955467224,
.62870955467224,
.62870955467224,
.62870955467224,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676,
.6287094950676])
stdp = np.array([ 0,
0,
.05104803293943,
.06663129478693,
.02164618112147,
.0773858949542,
.02606418170035,
.09391833096743,
.05710592120886,
.03083370067179,
.07319989800453,
.05287836492062,
.05776296555996,
.09105986356735,
.04293738678098,
.09576436132193,
.06068528071046,
.06157376244664,
.11172580718994,
.06527806818485,
.11443704366684,
.05653077363968,
.08205550909042,
.08481238037348,
.10436166077852,
.0875685736537,
.12320486456156,
.08366665989161,
.13979130983353,
.1902572363615,
.1306214183569,
.21803694963455,
.11079790443182,
.17274764180183,
.1937662512064,
.20047917962074,
.24034893512726,
.21783453226089,
.29279819130898,
.26804205775261,
.28678458929062,
.35651323199272,
.33659368753433,
.35760068893433,
.39895334839821,
.41131839156151,
.36645981669426,
.40991494059563,
.41024547815323,
.32657703757286,
.42312324047089,
.34933325648308,
.35912537574768,
.35077446699142,
.34701564908028,
.37364318966866,
.40170526504517,
.56070649623871,
.41915491223335,
.73478156328201,
.67748892307281,
.7744625210762,
.77825599908829,
.97586625814438,
.88692498207092,
.76232481002808,
.87376874685287,
.83281141519547,
.84783887863159,
.66423743963242,
.84904235601425,
.81613594293594,
.80033475160599,
.95782464742661,
.80624777078629,
.83626395463943,
.91873735189438,
.95130664110184,
1.0939226150513,
1.1171194314957,
1.1004731655121,
1.3512066602707,
1.4703129529953,
1.4805699586868,
1.7385860681534,
1.8268398046494,
1.5489361286163,
1.7446503639221,
1.864644408226,
1.7200467586517,
1.9223358631134,
1.775306224823,
1.5392524003983,
1.4067870378494,
1.9366238117218,
1.2984343767166,
1.1080636978149,
1.3500427007675,
1.2837564945221,
1.2670782804489,
1.3347851037979,
1.2857422828674,
1.1625040769577,
1.2111755609512,
1.0548515319824,
1.2553508281708,
1.0327949523926,
1.0740388631821,
1.222040772438,
.40555971860886,
1.0233588218689,
.84209614992142,
1.0186324119568,
1.0319027900696,
.99487775564194,
1.0439211130142,
.98785293102264,
1.0620124340057,
1.0916963815689,
1.1378232240677,
1.1243290901184,
1.3305295705795,
1.1925677061081,
1.0872994661331,
1.4599523544312,
1.2333589792252,
1.3584797382355,
1.7595859766006,
1.3009568452835,
1.1157965660095,
1.2948887348175,
1.2063180208206,
1.2332669496536,
1.2132470607758,
1.2049551010132,
1.2260574102402,
1.1875206232071,
1.1547852754593,
1.0519831180573,
1.1594845056534,
1.0069926977158,
1.0675266981125,
1.1299223899841,
1.0620901584625,
1.0999356508255,
1.1535499095917,
1.0026944875717,
1.0428657531738,
1.1120204925537,
1.1684119701385,
1.0258769989014,
1.1342295408249,
1.1183958053589,
.91313683986664,
.91156214475632,
1.0540328025818,
.84359037876129,
.75758427381516,
.96401190757751,
.83226495981216,
.8759680390358,
.98239886760712,
.85917687416077,
1.0634194612503,
.99442666769028,
1.153311252594,
1.2288066148758,
1.0869039297104,
1.281947016716,
1.0067318677902,
1.1028815507889,
.82448446750641,
.78489726781845,
1.1850204467773,
.86753690242767,
1.0692945718765,
1.1030179262161,
.8791960477829,
.86451041698456,
1.0455346107483,
1.085998415947,
1.0172398090363,
1.2250980138779,
1.2316122055054,
1.062157869339,
1.3991860151291,
1.0520887374878,
2.2203133106232,
.88833123445511,
1.4289729595184,
1.5206423997879,
.68520504236221,
1.4659557342529,
1.5350053310394,
1.3178979158401,
1.4888265132904,
1.9698411226273,
1.4406447410583,
2.517040014267,
.55537897348404,
-.20722626149654,
1.0899519920349,
1.164245724678])
icstats = np.array([ 202,
np.nan,
-239.75290561974,
4,
487.50581123949,
500.73888202909])
class Bunch(dict):
def __init__(self, **kw):
dict.__init__(self, kw)
self.__dict__ = self
results = Bunch(llf=llf, nobs=nobs, k=k, k_exog=k_exog, sigma=sigma, chi2=chi2, df_model=df_model, k_ar=k_ar, k_ma=k_ma, params=params, cov_params=cov_params, xb=xb, y=y, resid=resid, yr=yr, mse=mse, stdp=stdp, icstats=icstats, )
| bsd-3-clause | 38e1b631a2ad894d093af386f39961a5 | 33.478532 | 229 | 0.396767 | 3.829287 | false | false | false | false |
rootpy/rootpy | rootpy/plotting/contrib/quantiles.py | 3 | 3602 | """
Taken from example by Zhiyi Liu, zhiyil@fnal.gov
here: http://root.cern.ch/phpBB3/viewtopic.php?f=3&t=6865
and converted into Python
"""
from __future__ import absolute_import
import ROOT
from math import sqrt
from array import array
from .. import Graph
from ...extern.six.moves import range
__all__ = [
'qqgraph',
]
def qqgraph(h1, h2, quantiles=None):
"""
Return a Graph of a quantile-quantile (QQ) plot and confidence band
"""
if quantiles is None:
quantiles = max(min(len(h1), len(h2)) / 2, 1)
nq = quantiles
# position where to compute the quantiles in [0, 1]
xq = array('d', [0.] * nq)
# array to contain the quantiles
yq1 = array('d', [0.] * nq)
# array to contain the quantiles
yq2 = array('d', [0.] * nq)
for i in range(nq):
xq[i] = float(i + 1) / nq
h1.GetQuantiles(nq, yq1, xq)
h2.GetQuantiles(nq, yq2, xq)
xq_plus = array('d', [0.] * nq)
xq_minus = array('d', [0.] * nq)
yq2_plus = array('d', [0.] * nq)
yq2_minus = array('d', [0.] * nq)
"""
KS_cv: KS critical value
1.36
KS_cv = -----------
sqrt( N )
Where 1.36 is for alpha = 0.05 (confidence level 1-5%=95%, about 2 sigma)
For 1 sigma (alpha=0.32, CL=68%), the value in the nominator is 0.9561,
it is gotten by GetCriticalValue(1, 1 - 0.68).
Notes
-----
* For 1-sample KS test (data and theoretic), N should be n
* For 2-sample KS test (2 data set), N should be sqrt(m*n/(m+n))!
Here is the case m or n (size of samples) should be effective size
for a histogram
* Critical value here is valid for only for sample size >= 80 (some
references say 35) which means, for example, for a unweighted histogram,
it must have more than 80 (or 35) entries filled and then confidence
band is reliable.
"""
esum1 = effective_sample_size(h1)
esum2 = effective_sample_size(h2)
# one sigma band
KS_cv = (critical_value(1, 1 - 0.68) /
sqrt((esum1 * esum2) / (esum1 + esum2)))
for i in range(nq):
# upper limit
xq_plus[i] = float(xq[i] + KS_cv)
# lower limit
xq_minus[i] = float(xq[i] - KS_cv)
h2.GetQuantiles(nq, yq2_plus, xq_plus)
h2.GetQuantiles(nq, yq2_minus, xq_minus)
yq2_err_plus = array('d', [0.] * nq)
yq2_err_minus = array('d', [0.] * nq)
for i in range(nq):
yq2_err_plus[i] = yq2_plus[i] - yq2[i]
yq2_err_minus[i] = yq2[i] - yq2_minus[i]
# forget the last point, so number of points: (nq - 1)
gr = Graph(nq - 1)
for i in range(nq - 1):
gr[i] = (yq1[i], yq2[i])
# confidence level band
gr.SetPointEYlow(i, yq2_err_minus[i])
gr.SetPointEYhigh(i, yq2_err_plus[i])
return gr
def effective_sample_size(h):
"""
Calculate the effective sample size for a histogram
the same way as ROOT does.
"""
sum = 0
ew = 0
w = 0
for bin in h.bins(overflow=False):
sum += bin.value
ew = bin.error
w += ew * ew
esum = sum * sum / w
return esum
def critical_value(n, p):
"""
This function calculates the critical value given
n and p, and confidence level = 1 - p.
"""
dn = 1
delta = 0.5
res = ROOT.TMath.KolmogorovProb(dn * sqrt(n))
while res > 1.0001 * p or res < 0.9999 * p:
if (res > 1.0001 * p):
dn = dn + delta
if (res < 0.9999 * p):
dn = dn - delta
delta = delta / 2.
res = ROOT.TMath.KolmogorovProb(dn * sqrt(n))
return dn
| bsd-3-clause | 18d1f25931dd5e300dbc305b432cf04b | 25.101449 | 78 | 0.564409 | 2.8542 | false | false | false | false |
rootpy/rootpy | examples/tree/chain_overwrite.py | 7 | 2603 | #!/usr/bin/env python
"""
============================================
Copy a tree chain while overwriting branches
============================================
This is an example showing how to copy a tree chain while overwriting one or
more of its branches with new values.
"""
print(__doc__)
from rootpy.tree import Tree, TreeModel, TreeChain, FloatCol, IntCol
from rootpy.io import root_open
from random import gauss
"""
This first section of code only creates an example tree chain.
"""
class Event(TreeModel):
"""Event model definition"""
x = FloatCol()
y = FloatCol()
z = FloatCol()
i = IntCol()
# first create several example trees in separate files
fnames = ["test_{0:d}.root".format(i) for i in range(5)]
for fname in fnames:
with root_open(fname, "recreate") as f:
tree = Tree("test", model=Event)
# fill the tree
for i in range(100):
tree.x = gauss(.5, 1.)
tree.y = gauss(.3, 2.)
tree.z = gauss(13., 42.)
tree.i = i
tree.fill()
tree.write()
"""
This section below takes the example trees and copies it while overwriting a
branch with new values.
"""
# first define the chain of trees
chain = TreeChain(name="test", files=fnames)
# Now we want to copy the tree above into a new file while overwriting a branch
# First create a new file to save the new tree in:
f_copy = root_open("test_copy.root", "recreate")
# You may not know the entire model of the original tree but only the branches
# you intend to overwrite, so I am not specifying the model=Event below as an
# example of how to deal with this in general:
tree_copy = Tree("test_copy")
# If the original tree was not handed to you through rootpy don't forget to:
# >>> from rootpy import asrootpy
# >>> tree = asrootpy(tree)
# Here we specify the buffer for the new tree to use. We use the same buffer as
# the original tree. This creates all the same branches in the new tree but
# their addresses point to the same memory used by the original tree.
tree_copy.set_buffer(
chain._buffer,
create_branches=True)
# Now loop over the original tree and fill the new tree
for entry in chain:
# Overwrite a branch value. This changes the value that will be written to
# the new tree but leaves the value unchanged in the original tree on disk.
entry.x = 3.141
# "entry" is actually the buffer, which is shared between both trees.
tree_copy.Fill()
# tree_copy is now a copy of tree where the "x" branch has been overwritten
# with new values
tree_copy.Write()
f_copy.Close()
| bsd-3-clause | 2a26b1156f389b01d1bf097fe6f96344 | 30.743902 | 79 | 0.663081 | 3.788937 | false | true | false | false |
rootpy/rootpy | examples/plotting/plot_matplotlib_graph.py | 7 | 2423 | #!/usr/bin/env python
"""
=====================================
Plot a ROOT graph with matplotlib
=====================================
This example demonstrates how a ROOT graph can be styled with simple
attributes and displayed via ROOT or matplotlib.
"""
print(__doc__)
import ROOT
import numpy as np
from rootpy.plotting import Canvas, Graph
from rootpy.plotting.style import get_style, set_style
from rootpy.interactive import wait
import rootpy.plotting.root2matplotlib as rplt
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator, MultipleLocator
# set the random seed
ROOT.gRandom.SetSeed(42)
np.random.seed(42)
# points
x = np.sort(np.random.random(10)) * 3500
y = np.random.random(10)
# set style for ROOT
set_style('ATLAS')
# create graph
graph = Graph(x.shape[0])
for i, (xx, yy) in enumerate(zip(x, y)):
graph.SetPoint(i, xx, yy)
# set visual attributes
graph.linecolor = 'blue'
graph.markercolor = 'blue'
graph.xaxis.SetTitle("E_{T} [GeV]")
graph.yaxis.SetTitle("d#sigma_{jet}/dE_{T,jet} [fb/GeV]")
graph.xaxis.SetRangeUser(0, 3500)
graph.yaxis.SetRangeUser(0, 1)
# plot with ROOT
canvas = Canvas()
graph.Draw("APL")
label = ROOT.TText(0.4, 0.8, "ROOT")
label.SetTextFont(43)
label.SetTextSize(25)
label.SetNDC()
label.Draw()
canvas.Modified()
canvas.Update()
# plot with matplotlib
def plot_with_matplotlib():
fig, axes = plt.subplots()
axes.plot(x, y, 'o-', markeredgewidth=0)
axes.set_xlabel(r"$E_T$ [GeV]",
horizontalalignment="right", x=1, labelpad=20)
axes.set_ylabel(r"$d\sigma_{jet}/dE_{T,jet}$ [fb/GeV]",
horizontalalignment="right", y=1, labelpad=32)
axes.set_xlim(0, 3500)
axes.set_ylim(0, 1)
return fig, axes
# plot without style
fig1, axes1 = plot_with_matplotlib()
axes1.text(0.4, 0.8, 'matplotlib (no style)',
verticalalignment='center', horizontalalignment='center',
transform=axes1.transAxes, fontsize=20)
# plot with ATLAS style
set_style('ATLAS', mpl=True)
fig2, axes2 = plot_with_matplotlib()
axes2.text(0.4, 0.8, 'matplotlib',
verticalalignment='center', horizontalalignment='center',
transform=axes2.transAxes, fontsize=20)
axes2.xaxis.set_minor_locator(AutoMinorLocator())
axes2.yaxis.set_minor_locator(AutoMinorLocator())
if not ROOT.gROOT.IsBatch():
plt.show()
# wait for you to close the canvas before exiting
wait(True)
| bsd-3-clause | 435357524153c214c2bf7a3f179d56b8 | 25.922222 | 68 | 0.680149 | 3.082697 | false | false | false | false |
rootpy/rootpy | rootpy/utils/tests/test_cpp.py | 3 | 1191 | from __future__ import print_function
import sys
from ROOT import MethodProxy
import inspect
from rootpy.utils.cpp import CPPGrammar
from rootpy.utils.extras import iter_ROOT_classes
from nose.plugins.attrib import attr
@attr('slow')
def test_cpp():
i = 0
num_methods = 0
for cls in iter_ROOT_classes():
members = inspect.getmembers(cls)
# filter out those starting with "_" or "operator "
# and non-method members
# also split overloaded methods
methods = {}
for name, func in members:
if name.startswith('_') or name.startswith('operator'):
continue
if not isinstance(func, MethodProxy):
continue
methods[name] = (func, func.func_doc.split('\n'))
for name, (func, sigs) in methods.items():
for sig in sigs:
num_methods += 1
if CPPGrammar.parse_method(sig, silent=False):
i += 1
print("{0} / {1}".format(i, num_methods), end='\r')
sys.stdout.flush()
print("{0} / {1}".format(i, num_methods))
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause | 0097066d9aba4a3a9e15c39579844e63 | 28.775 | 67 | 0.570949 | 4.078767 | false | false | false | false |
rootpy/rootpy | rootpy/tree/chain.py | 3 | 10692 | from __future__ import absolute_import
import multiprocessing
import time
from .. import log; log = log[__name__]
from .. import QROOT
from ..io import root_open, DoesNotExist
from ..utils.extras import humanize_bytes
from ..context import preserve_current_directory
from ..plotting.graph import _GraphBase
from ..extern.six import string_types
from .filtering import EventFilterList
__all__ = [
'TreeChain',
'TreeQueue',
]
class BaseTreeChain(object):
def __init__(self, name,
treebuffer=None,
branches=None,
ignore_branches=None,
events=-1,
onfilechange=None,
read_branches_on_demand=False,
cache=False,
# 30 MB cache by default
cache_size=30000000,
learn_entries=10,
always_read=None,
ignore_unsupported=False,
filters=None):
self._name = name
self._buffer = treebuffer
self._branches = branches
self._ignore_branches = ignore_branches
self._tree = None
self._file = None
self._events = events
self._total_events = 0
self._ignore_unsupported = ignore_unsupported
self._initialized = False
if filters is None:
self._filters = EventFilterList([])
else:
self._filters = filters
if onfilechange is None:
onfilechange = []
self._filechange_hooks = onfilechange
self._read_branches_on_demand = read_branches_on_demand
self._use_cache = cache
self._cache_size = cache_size
self._learn_entries = learn_entries
self.weight = 1.
self.userdata = {}
if not self._rollover():
raise RuntimeError("unable to initialize TreeChain")
if always_read is None:
self._always_read = []
elif isinstance(always_read, string_types):
if '*' in always_read:
always_read = self._tree.glob(always_read)
else:
always_read = [always_read]
self.always_read(always_read)
else:
branches = []
for branch in always_read:
if '*' in branch:
branches += self._tree.glob(branch)
else:
branches.append(branch)
self.always_read(branches)
def __nonzero__(self):
return len(self) > 0
__bool__ = __nonzero__
def _next_file(self):
"""
Override in subclasses
"""
return None
def always_read(self, branches):
self._always_read = branches
self._tree.always_read(branches)
def reset(self):
if self._tree is not None:
self._tree = None
if self._file is not None:
self._file.Close()
self._file = None
def Draw(self, *args, **kwargs):
"""
Loop over subfiles, draw each, and sum the output into a single
histogram.
"""
self.reset()
output = None
while self._rollover():
if output is None:
# Make our own copy of the drawn histogram
output = self._tree.Draw(*args, **kwargs)
if output is not None:
output = output.Clone()
# Make it memory resident (histograms)
if hasattr(output, 'SetDirectory'):
output.SetDirectory(0)
else:
newoutput = self._tree.Draw(*args, **kwargs)
if newoutput is not None:
if isinstance(output, _GraphBase):
output.Append(newoutput)
else: # histogram
output += newoutput
return output
draw = Draw
def __getattr__(self, attr):
try:
return getattr(self._tree, attr)
except AttributeError:
raise AttributeError("{0} instance has no attribute '{1}'".format(
self.__class__.__name__, attr))
def __getitem__(self, item):
return self._tree.__getitem__(item)
def __contains__(self, branch):
return self._tree.__contains__(branch)
def __iter__(self):
passed_events = 0
self.reset()
while self._rollover():
entries = 0
total_entries = float(self._tree.GetEntries())
t1 = time.time()
t2 = t1
for entry in self._tree:
entries += 1
self.userdata = {}
if self._filters(entry):
yield entry
passed_events += 1
if self._events == passed_events:
break
if time.time() - t2 > 60:
entry_rate = int(entries / (time.time() - t1))
log.info(
"{0:d} entr{1} per second. "
"{2:.0f}% done current tree.".format(
entry_rate,
'ies' if entry_rate != 1 else 'y',
100 * entries / total_entries))
t2 = time.time()
if self._events == passed_events:
break
log.info("{0:d} entries per second".format(
int(entries / (time.time() - t1))))
log.info("read {0:d} bytes in {1:d} transactions".format(
self._file.GetBytesRead(),
self._file.GetReadCalls()))
self._total_events += entries
self._filters.finalize()
def _rollover(self):
filename = self._next_file()
if filename is None:
return False
log.info("current file: {0}".format(filename))
try:
with preserve_current_directory():
if self._file is not None:
self._file.Close()
self._file = root_open(filename)
except IOError:
self._file = None
log.warning("could not open file {0} (skipping)".format(filename))
return self._rollover()
try:
self._tree = self._file.Get(self._name)
except DoesNotExist:
log.warning(
"tree {0} does not exist in file {1} (skipping)".format(
self._name, filename))
return self._rollover()
if len(self._tree.GetListOfBranches()) == 0:
log.warning("tree with no branches in file {0} (skipping)".format(
filename))
return self._rollover()
if self._branches is not None:
self._tree.activate(self._branches, exclusive=True)
if self._ignore_branches is not None:
self._tree.deactivate(self._ignore_branches, exclusive=False)
if self._buffer is None:
self._tree.create_buffer(self._ignore_unsupported)
self._buffer = self._tree._buffer
else:
self._tree.set_buffer(
self._buffer,
ignore_missing=True,
transfer_objects=True)
self._buffer = self._tree._buffer
if self._use_cache:
# enable TTreeCache for this tree
log.info(
"enabling a {0} TTreeCache for the current tree "
"({1:d} learning entries)".format(
humanize_bytes(self._cache_size), self._learn_entries))
self._tree.SetCacheSize(self._cache_size)
self._tree.SetCacheLearnEntries(self._learn_entries)
self._tree.read_branches_on_demand = self._read_branches_on_demand
self._tree.always_read(self._always_read)
self.weight = self._tree.GetWeight()
for target, args in self._filechange_hooks:
# run any user-defined functions
target(*args, name=self._name, file=self._file, tree=self._tree)
return True
class TreeChain(BaseTreeChain):
"""
A ROOT.TChain replacement
"""
def __init__(self, name, files, **kwargs):
if isinstance(files, tuple):
files = list(files)
elif not isinstance(files, list):
files = [files]
else:
files = files[:]
if not files:
raise RuntimeError(
"unable to initialize TreeChain: no files")
self._files = files
self.curr_file_idx = 0
super(TreeChain, self).__init__(name, **kwargs)
self._tchain = QROOT.TChain(name)
for filename in self._files:
self._tchain.Add(filename)
def GetEntries(self, *args, **kwargs):
return self._tchain.GetEntries(*args, **kwargs)
def GetEntriesFast(self, *args, **kwargs):
return self._tchain.GetEntriesFast(*args, **kwargs)
def reset(self):
"""
Reset the chain to the first file
Note: not valid when in queue mode
"""
super(TreeChain, self).reset()
self.curr_file_idx = 0
def __len__(self):
return len(self._files)
def _next_file(self):
if self.curr_file_idx >= len(self._files):
return None
filename = self._files[self.curr_file_idx]
nfiles_remaining = len(self._files) - self.curr_file_idx
log.info("{0:d} file{1} remaining".format(
nfiles_remaining,
's' if nfiles_remaining > 1 else ''))
self.curr_file_idx += 1
return filename
class TreeQueue(BaseTreeChain):
"""
A chain of files in a multiprocessing Queue.
Note that asking for the number of files in the queue with len(treequeue)
can be unreliable. Also, methods not overridden by TreeQueue will always be
called on the current tree, so GetEntries will give you the number of
entries in the current tree.
"""
SENTINEL = None
def __init__(self, name, files, **kwargs):
# multiprocessing.queues d.n.e. until one has been created
multiprocessing.Queue()
if not isinstance(files, multiprocessing.queues.Queue):
raise TypeError("files must be a multiprocessing.Queue")
self._files = files
super(TreeQueue, self).__init__(name, **kwargs)
def __len__(self):
# not reliable
return self._files.qsize()
def __nonzero__(self):
# not reliable
return not self._files.empty()
__bool__ = __nonzero__
def _next_file(self):
filename = self._files.get()
if filename == self.SENTINEL:
return None
return filename
| bsd-3-clause | 24da8dd1b849509f3d9ffedd9fe9a6c8 | 32.517241 | 79 | 0.528713 | 4.421836 | false | false | false | false |
rootpy/rootpy | rootpy/stats/pdf.py | 3 | 1145 | from __future__ import absolute_import
import ROOT
from . import log; log = log[__name__]
from .. import QROOT, asrootpy
from ..base import NamedObject
from .value import AbsArg
__all__ = [
'Simultaneous',
'AddPdf',
'ProdPdf',
]
class Simultaneous(NamedObject, AbsArg, QROOT.RooSimultaneous):
_ROOT = QROOT.RooSimultaneous
def __iter__(self):
iterator = self.indexCat().typeIterator()
category = iterator.Next()
while category:
yield asrootpy(category)
category = iterator.Next()
def getPdf(self, category):
if isinstance(category, ROOT.RooCatType):
category = category.GetName()
return asrootpy(super(Simultaneous, self).getPdf(category))
def pdf(self, category):
return self.getPdf(category)
def indexCat(self):
return asrootpy(super(Simultaneous, self).indexCat())
@property
def index_category(self):
return self.indexCat()
class AddPdf(NamedObject, AbsArg, QROOT.RooAddPdf):
_ROOT = QROOT.RooAddPdf
class ProdPdf(NamedObject, AbsArg, QROOT.RooProdPdf):
_ROOT = QROOT.RooProdPdf
| bsd-3-clause | 0214bee32504f13e7685316718940531 | 22.367347 | 67 | 0.655895 | 3.480243 | false | false | false | false |
rootpy/rootpy | rootpy/io/tests/test_pickler.py | 3 | 1942 | """
Tests for the file module.
"""
from rootpy.io import root_open, TemporaryFile
from rootpy.io.pickler import load, dump
from rootpy.plotting import Hist
import random
import tempfile
from nose.tools import assert_equal, assert_true, assert_false
def test_pickler():
hlist = list()
for i in range(10):
hlist.append(Hist(10, 0, 10))
with TemporaryFile() as tmpfile:
dump(hlist, tmpfile)
hlist_out = load(tmpfile)
assert_equal([h.name for h in hlist_out], [h.name for h in hlist])
hdict = dict()
for i in range(100):
hist = Hist(10, 0, 1, type=random.choice('CSIFD'))
hdict[hist.name] = hist
with TemporaryFile() as tmpfile:
rdir = tmpfile.mkdir('pickle')
dump(hdict, rdir)
hdict_out = load(rdir)
assert_equal(len(hdict_out), 100)
for name, hist in hdict_out.items():
assert_equal(name, hist.name)
assert_equal(hist.TYPE, hdict[hist.name].TYPE)
def test_pickler_proxy():
h = Hist(5, 0, 1, name='hist')
f = tempfile.NamedTemporaryFile(suffix='.root')
with root_open(f.name, 'recreate') as outfile:
dump([h], outfile)
class IsCalled(object):
def __init__(self, func):
self.func = func
self.called = False
def __call__(self, path):
if path != '_pickle;1':
self.called = True
return self.func(path)
with root_open(f.name) as infile:
infile.Get = IsCalled(infile.Get)
hlist = load(infile, use_proxy=False)
assert_true(infile.Get.called)
with root_open(f.name) as infile:
infile.Get = IsCalled(infile.Get)
hlist = load(infile, use_proxy=True)
assert_false(infile.Get.called)
assert_equal(hlist[0].name, 'hist')
assert_true(infile.Get.called)
f.close()
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause | 3b1711f7bc8506bdbc7cd18940b2c10b | 25.972222 | 74 | 0.595778 | 3.425044 | false | false | false | false |
rootpy/rootpy | rootpy/plotting/base.py | 3 | 37647 | """
This module contains base classes defining core funcionality
"""
from __future__ import absolute_import
from functools import wraps
import warnings
import sys
from .. import asrootpy, ROOT
from ..decorators import chainable
from ..memory.keepalive import keepalive
from ..extern.six import string_types
__all__ = [
'dim',
'Plottable',
]
def dim(thing):
    """
    Return the dimensionality of *thing*.

    Checks, in order: a ``DIM`` attribute on the object's class, a
    ``__dim__`` method, and finally a ``GetDimension`` method (ROOT's
    convention).

    Raises
    ------
    TypeError
        If none of the above are available on *thing*.
    """
    cls = thing.__class__
    if hasattr(cls, 'DIM'):
        return cls.DIM
    if hasattr(thing, '__dim__'):
        return thing.__dim__()
    if hasattr(thing, 'GetDimension'):
        return thing.GetDimension()
    raise TypeError(
        "Unable to determine dimensionality of "
        "object of type {0}".format(type(thing)))
class Plottable(object):
    """
    This is a mixin to provide additional attributes for plottable classes
    and to override ROOT TAttXXX and Draw methods.
    """
    # rootpy-specific style attributes (beyond ROOT's TAttXXX) and their
    # default values; installed on every instance by _clone_post_init
    EXTRA_ATTRS = {
        'norm': None,
        'drawstyle': '',
        'legendstyle': 'P',
        'integermode': False,
        'visible': True,
        'inlegend': True,
    }
    # mapping of deprecated attribute name -> current name; aliased as
    # properties (which emit a DeprecationWarning) in _clone_post_init
    EXTRA_ATTRS_DEPRECATED = {
        'format': 'drawstyle',
        'intmode': 'integermode',
    }
    # setters that combine several ROOT attributes in one call
    EXTRA_SETTERS = [
        'color',
    ]
    # default ROOT-style decoration applied when constructing a fresh
    # rootpy object (not when wrapping an existing ROOT object)
    # TODO: respect current TStyle
    DEFAULT_DECOR = {
        'markerstyle': 'circle',
        'markercolor': 'black',
        'markersize': 1,
        'fillcolor': 'white',
        'fillstyle': 'hollow',
        'linecolor': 'black',
        'linestyle': 'solid',
        'linewidth': 1,
    }
@classmethod
def _get_attr_depr(cls, depattr, newattr):
def f(self):
warnings.warn(
"`{0}` is deprecated and will be removed in "
"future versions. Use `{1}` instead".format(
depattr, newattr),
DeprecationWarning)
return getattr(self, newattr)
return f
@classmethod
def _set_attr_depr(cls, depattr, newattr):
def f(self, value):
warnings.warn(
"`{0}` is deprecated and will be removed in "
"future versions. Use `{1}` instead".format(
depattr, newattr),
DeprecationWarning)
setattr(self, newattr, value)
return f
    def _post_init(self, **kwargs):
        """
        Hook run after construction; delegates all initialization of the
        rootpy-specific state to ``_clone_post_init``.
        """
        self._clone_post_init(**kwargs)
    def _clone_post_init(self, obj=None, **kwargs):
        """
        Initialize the rootpy-specific state of this object.

        Parameters
        ----------
        obj : Plottable, optional
            Another Plottable instance. obj is used by Clone to properly
            transfer all attributes onto this object. If None (or
            ``self``), attributes are initialized from the class defaults
            and from the wrapped ROOT object's current style.
        kwargs : dict
            Style attributes forwarded to ``decorate``.
        """
        # Initialize the extra attributes
        if obj is None or obj is self:
            # We must be asrootpy-ing a ROOT object
            # or freshly init-ing a rootpy object
            for attr, value in Plottable.EXTRA_ATTRS.items():
                # Use the default value
                setattr(self, attr, value)
        else:
            # copy the extra attributes from the other instance
            for attr, value in Plottable.EXTRA_ATTRS.items():
                setattr(self, attr, getattr(obj, attr))
        # Create aliases from deprecated to current attributes
        # NOTE: these properties are installed on the Plottable class
        # itself (re-done on every init; the result is idempotent)
        for depattr, newattr in Plottable.EXTRA_ATTRS_DEPRECATED.items():
            setattr(Plottable, depattr,
                    property(
                        fget=Plottable._get_attr_depr(depattr, newattr),
                        fset=Plottable._set_attr_depr(depattr, newattr)))
        if obj is None or obj is self:
            # We must be asrootpy-ing a ROOT object
            # or freshly init-ing a rootpy object
            # Initialize style attrs to style of TObject
            if isinstance(self, ROOT.TAttLine):
                self._linecolor = Color(ROOT.TAttLine.GetLineColor(self))
                self._linestyle = LineStyle(ROOT.TAttLine.GetLineStyle(self))
                self._linewidth = ROOT.TAttLine.GetLineWidth(self)
            else: # HistStack
                self._linecolor = Color(Plottable.DEFAULT_DECOR['linecolor'])
                self._linestyle = LineStyle(Plottable.DEFAULT_DECOR['linestyle'])
                self._linewidth = Plottable.DEFAULT_DECOR['linewidth']
            if isinstance(self, ROOT.TAttFill):
                self._fillcolor = Color(ROOT.TAttFill.GetFillColor(self))
                self._fillstyle = FillStyle(ROOT.TAttFill.GetFillStyle(self))
            else: # HistStack
                self._fillcolor = Color(Plottable.DEFAULT_DECOR['fillcolor'])
                self._fillstyle = FillStyle(Plottable.DEFAULT_DECOR['fillstyle'])
            if isinstance(self, ROOT.TAttMarker):
                self._markercolor = Color(ROOT.TAttMarker.GetMarkerColor(self))
                self._markerstyle = MarkerStyle(ROOT.TAttMarker.GetMarkerStyle(self))
                self._markersize = ROOT.TAttMarker.GetMarkerSize(self)
            else: # HistStack
                self._markercolor = Color(Plottable.DEFAULT_DECOR['markercolor'])
                self._markerstyle = MarkerStyle(Plottable.DEFAULT_DECOR['markerstyle'])
                self._markersize = Plottable.DEFAULT_DECOR['markersize']
        if obj is None:
            # Populate defaults if we are not asrootpy-ing existing object
            decor = dict(**Plottable.DEFAULT_DECOR)
            decor.update(Plottable.EXTRA_ATTRS)
            if 'color' in kwargs:
                # a single `color` overrides the individual default colors
                decor.pop('linecolor', None)
                decor.pop('fillcolor', None)
                decor.pop('markercolor', None)
            decor.update(kwargs)
            self.decorate(**decor)
        else:
            # Initialize style attrs to style of the other object
            if isinstance(obj, ROOT.TAttLine):
                self.SetLineColor(obj.GetLineColor())
                self.SetLineStyle(obj.GetLineStyle())
                self.SetLineWidth(obj.GetLineWidth())
            if isinstance(obj, ROOT.TAttFill):
                self.SetFillColor(obj.GetFillColor())
                self.SetFillStyle(obj.GetFillStyle())
            if isinstance(obj, ROOT.TAttMarker):
                self.SetMarkerColor(obj.GetMarkerColor())
                self.SetMarkerStyle(obj.GetMarkerStyle())
                self.SetMarkerSize(obj.GetMarkerSize())
            if kwargs:
                self.decorate(**kwargs)
#TODO: @chainable
def decorate(self, other=None, **kwargs):
"""
Apply style options to a Plottable object.
Returns a reference to self.
"""
if 'color' in kwargs:
incompatible = []
for othercolor in ('linecolor', 'fillcolor', 'markercolor'):
if othercolor in kwargs:
incompatible.append(othercolor)
if incompatible:
raise ValueError(
"Setting both the `color` and the `{0}` attribute{1} "
"is ambiguous. Please set only one.".format(
', '.join(incompatible),
's' if len(incompatible) != 1 else ''))
if other is not None:
decor = other.decorators
if 'color' in kwargs:
decor.pop('linecolor', None)
decor.pop('fillcolor', None)
decor.pop('markercolor', None)
decor.update(kwargs)
kwargs = decor
for key, value in kwargs.items():
if key in Plottable.EXTRA_ATTRS_DEPRECATED:
newkey = Plottable.EXTRA_ATTRS_DEPRECATED[key]
warnings.warn(
"`{0}` is deprecated and will be removed in "
"future versions. Use `{1}` instead".format(
key, newkey),
DeprecationWarning)
key = newkey
if key in Plottable.EXTRA_ATTRS:
setattr(self, key, value)
elif key == 'markerstyle':
self.SetMarkerStyle(value)
elif key == 'markercolor':
self.SetMarkerColor(value)
elif key == 'markersize':
self.SetMarkerSize(value)
elif key == 'fillcolor':
self.SetFillColor(value)
elif key == 'fillstyle':
self.SetFillStyle(value)
elif key == 'linecolor':
self.SetLineColor(value)
elif key == 'linestyle':
self.SetLineStyle(value)
elif key == 'linewidth':
self.SetLineWidth(value)
elif key == 'color':
self.SetColor(value)
else:
raise AttributeError(
"unknown decoration attribute: `{0}`".format(key))
return self
@property
def decorators(self):
return {
"norm": self.norm,
"drawstyle": self.drawstyle,
"legendstyle": self.legendstyle,
"integermode": self.integermode,
"visible": self.visible,
"inlegend": self.inlegend,
"markercolor": self.GetMarkerColor(),
"markerstyle": self.GetMarkerStyle(),
"markersize": self.GetMarkerSize(),
"fillcolor": self.GetFillColor(),
"fillstyle": self.GetFillStyle(),
"linecolor": self.GetLineColor(),
"linestyle": self.GetLineStyle(),
"linewidth": self.GetLineWidth(),
}
def SetLineColor(self, color):
"""
*color* may be any color understood by ROOT or matplotlib.
For full documentation of accepted *color* arguments, see
:class:`rootpy.plotting.style.Color`.
"""
self._linecolor = Color(color)
if isinstance(self, ROOT.TAttLine):
ROOT.TAttLine.SetLineColor(self, self._linecolor('root'))
def GetLineColor(self, mode=None):
"""
*mode* may be 'root', 'mpl', or None to return the ROOT, matplotlib,
or input value.
"""
return self._linecolor(mode)
@property
def linecolor(self):
return self.GetLineColor()
@linecolor.setter
def linecolor(self, color):
self.SetLineColor(color)
def SetLineStyle(self, style):
"""
*style* may be any line style understood by ROOT or matplotlib.
For full documentation of accepted *style* arguments, see
:class:`rootpy.plotting.style.LineStyle`.
"""
self._linestyle = LineStyle(style)
if isinstance(self, ROOT.TAttLine):
ROOT.TAttLine.SetLineStyle(self, self._linestyle('root'))
def GetLineStyle(self, mode=None):
"""
*mode* may be 'root', 'mpl', or None to return the ROOT, matplotlib,
or input value.
"""
return self._linestyle(mode)
@property
def linestyle(self):
return self.GetLineStyle()
@linestyle.setter
def linestyle(self, style):
self.SetLineStyle(style)
def SetLineWidth(self, width):
if isinstance(self, ROOT.TAttLine):
ROOT.TAttLine.SetLineWidth(self, width)
else:
self._linewidth = width
def GetLineWidth(self):
if isinstance(self, ROOT.TAttLine):
return ROOT.TAttLine.GetLineWidth(self)
else:
return self._linewidth
@property
def linewidth(self):
return self.GetLineWidth()
@linewidth.setter
def linewidth(self, width):
self.SetLineWidth(width)
def SetFillColor(self, color):
"""
*color* may be any color understood by ROOT or matplotlib.
For full documentation of accepted *color* arguments, see
:class:`rootpy.plotting.style.Color`.
"""
self._fillcolor = Color(color)
if isinstance(self, ROOT.TAttFill):
ROOT.TAttFill.SetFillColor(self, self._fillcolor('root'))
def GetFillColor(self, mode=None):
"""
*mode* may be 'root', 'mpl', or None to return the ROOT, matplotlib,
or input value.
"""
return self._fillcolor(mode)
@property
def fillcolor(self):
return self.GetFillColor()
@fillcolor.setter
def fillcolor(self, color):
self.SetFillColor(color)
def SetFillStyle(self, style):
"""
*style* may be any fill style understood by ROOT or matplotlib.
For full documentation of accepted *style* arguments, see
:class:`rootpy.plotting.style.FillStyle`.
"""
self._fillstyle = FillStyle(style)
if isinstance(self, ROOT.TAttFill):
ROOT.TAttFill.SetFillStyle(self, self._fillstyle('root'))
def GetFillStyle(self, mode=None):
"""
*mode* may be 'root', 'mpl', or None to return the ROOT, matplotlib,
or input value.
"""
return self._fillstyle(mode)
@property
def fillstyle(self):
return self.GetFillStyle()
@fillstyle.setter
def fillstyle(self, style):
self.SetFillStyle(style)
def SetMarkerColor(self, color):
"""
*color* may be any color understood by ROOT or matplotlib.
For full documentation of accepted *color* arguments, see
:class:`rootpy.plotting.style.Color`.
"""
self._markercolor = Color(color)
if isinstance(self, ROOT.TAttMarker):
ROOT.TAttMarker.SetMarkerColor(self, self._markercolor('root'))
def GetMarkerColor(self, mode=None):
"""
*mode* may be 'root', 'mpl', or None to return the ROOT, matplotlib,
or input value.
"""
return self._markercolor(mode)
@property
def markercolor(self):
return self.GetMarkerColor()
@markercolor.setter
def markercolor(self, color):
self.SetMarkerColor(color)
def SetMarkerStyle(self, style):
"""
*style* may be any marker style understood by ROOT or matplotlib.
For full documentation of accepted *style* arguments, see
:class:`rootpy.plotting.style.MarkerStyle`.
"""
self._markerstyle = MarkerStyle(style)
if isinstance(self, ROOT.TAttMarker):
ROOT.TAttMarker.SetMarkerStyle(self, self._markerstyle('root'))
def GetMarkerStyle(self, mode=None):
"""
*mode* may be 'root', 'mpl', or None to return the ROOT, matplotlib,
or input value.
"""
return self._markerstyle(mode)
@property
def markerstyle(self):
return self.GetMarkerStyle()
@markerstyle.setter
def markerstyle(self, style):
self.SetMarkerStyle(style)
def SetMarkerSize(self, size):
if isinstance(self, ROOT.TAttMarker):
ROOT.TAttMarker.SetMarkerSize(self, size)
else:
self._markersize = size
def GetMarkerSize(self):
if isinstance(self, ROOT.TAttMarker):
return ROOT.TAttMarker.GetMarkerSize(self)
else:
return self._markersize
    @property
    def markersize(self):
        # Pythonic lowercase alias for GetMarkerSize()/SetMarkerSize()
        return self.GetMarkerSize()
    @markersize.setter
    def markersize(self, size):
        self.SetMarkerSize(size)
def SetColor(self, color):
"""
*color* may be any color understood by ROOT or matplotlib.
Set all color attributes with one method call.
For full documentation of accepted *color* arguments, see
:class:`rootpy.plotting.style.Color`.
"""
self.SetFillColor(color)
self.SetLineColor(color)
self.SetMarkerColor(color)
    def GetColor(self):
        # returns (marker, line, fill) colors; note the order differs from
        # the SetColor application order (fill, line, marker)
        return self.GetMarkerColor(), self.GetLineColor(), self.GetFillColor()
    @property
    def color(self):
        # Pythonic alias: reads back a (marker, line, fill) tuple,
        # writes all three color attributes at once
        return self.GetColor()
    @color.setter
    def color(self, color):
        self.SetColor(color)
    @property
    def xaxis(self):
        # rootpy-wrapped view of the ROOT x-axis
        return asrootpy(self.GetXaxis())
    @property
    def yaxis(self):
        # rootpy-wrapped view of the ROOT y-axis
        return asrootpy(self.GetYaxis())
    @property
    def zaxis(self):
        # rootpy-wrapped view of the ROOT z-axis
        return asrootpy(self.GetZaxis())
    def Draw(self, *args, **kwargs):
        """
        Parameters
        ----------
        args : positional arguments
            Positional arguments are passed directly to ROOT's Draw
        kwargs : keyword arguments
            If keyword arguments are present, then a clone is drawn instead
            with DrawCopy, where the name, title, and style attributes are
            taken from ``kwargs``.
        Returns
        -------
        If ``kwargs`` is not empty and a clone is drawn, then the clone is
        returned, otherwise None is returned.
        """
        if kwargs:
            # draw a styled clone instead, leaving this object untouched
            return self.DrawCopy(*args, **kwargs)
        pad = ROOT.gPad
        own_pad = False
        if not pad:
            # no current pad: create a canvas ourselves
            # avoid circular import by delaying import until needed here
            from .canvas import Canvas
            pad = Canvas()
            own_pad = True
        if self.visible:
            if self.drawstyle:
                # prepend the stored draw style to any user-supplied options
                self.__class__.__bases__[-1].Draw(self,
                    " ".join((self.drawstyle, ) + args))
            else:
                self.__class__.__bases__[-1].Draw(self, " ".join(args))
            pad.Modified()
            pad.Update()
        if own_pad:
            # tie the pad's lifetime to this object so it is not garbage
            # collected while the drawing is alive
            keepalive(self, pad)
def DrawCopy(self, *args, **kwargs):
"""
Parameters
----------
args : positional arguments
Positional arguments are passed directly to ROOT's Draw
kwargs : keyword arguments
The name, title, and style attributes of the clone are
taken from ``kwargs``.
Returns
-------
The clone.
"""
copy = self.Clone(**kwargs)
copy.Draw(*args)
return copy
class _StyleContainer(object):
"""
Base class for grouping together an input style with ROOT and matplotlib
styles.
"""
def __init__(self, value, function):
self._input = value
self._root = function(value, 'root')
try:
self._mpl = function(value, 'mpl')
except ValueError:
self._mpl = self._root
def __call__(self, output_type=None):
if not output_type:
output_type = 'input'
return getattr(self, '_' + output_type)
def __repr__(self):
return str(self._input)
##############################
#### Markers #################
# ROOT marker style code -> matplotlib marker character
markerstyles_root2mpl = {
    1: '.', 2: '+', 3: '*', 4: 'o', 5: 'x',
    20: 'o', 21: 's', 22: '^', 23: 'v', 24: 'o',
    25: 's', 26: '^', 27: 'd', 28: '+', 29: '*',
    30: '*', 31: '*', 32: 'v', 33: 'D', 34: '+',
}
# ROOT dot styles 6..19 have no distinct matplotlib equivalent: use a point
markerstyles_root2mpl.update((code, '.') for code in range(6, 20))
# matplotlib marker -> ROOT marker style code
markerstyles_mpl2root = {
    '.': 1,
    ',': 1,
    'o': 4,
    'v': 23,
    '^': 22,
    '<': 23,
    '>': 22,
    '1': 23,
    '2': 22,
    '3': 23,
    '4': 22,
    's': 25,
    'p': 25,
    '*': 3,
    'h': 25,
    'H': 25,
    '+': 2,
    'x': 5,
    'D': 33,
    'd': 27,
    '|': 2,
    '_': 2,
    # matplotlib's integer tick/caret markers all degrade to a ROOT point
    0: 1, # TICKLEFT
    1: 1, # TICKRIGHT
    2: 1, # TICKUP
    3: 1, # TICKDOWN
    4: 1, # CARETLEFT
    5: 1, # CARETRIGHT
    6: 1, # CARETUP
    7: 1, # CARETDOWN
    # NOTE(review): these "no marker" inputs map to the matplotlib point
    # marker '.', not to a ROOT integer code like every other entry --
    # confirm this is intended (convert_markerstyle would return '.' for
    # mode='root')
    'None': '.',
    ' ': '.',
    '': '.',
}
# human-readable marker description -> ROOT marker style code
markerstyles_text2root = {
    "smalldot": 6,
    "mediumdot": 7,
    "largedot": 8,
    "dot": 9,
    "circle": 20,
    "square": 21,
    "triangle": 22,
    "triangleup": 22,
    "triangledown": 23,
    "opencircle": 24,
    "opensquare": 25,
    "opentriangle": 26,
    "opendiamond": 27,
    "diamond": 33,
    "opencross": 28,
    "cross": 34,
    "openstar": 29,
    "fullstar": 30,
    "star": 29,
}
def convert_markerstyle(inputstyle, mode, inputmode=None):
    """
    Convert *inputstyle* to ROOT or matplotlib format.
    Output format is determined by *mode* ('root' or 'mpl'). The *inputstyle*
    may be a ROOT marker style, a matplotlib marker style, or a description
    such as 'star' or 'square'.
    """
    mode = mode.lower()
    if mode not in ('mpl', 'root'):
        raise ValueError("`{0}` is not valid `mode`".format(mode))
    if inputmode is None:
        # guess the input convention from the style itself
        if inputstyle in markerstyles_root2mpl:
            inputmode = 'root'
        elif inputstyle in markerstyles_mpl2root or '$' in str(inputstyle):
            inputmode = 'mpl'
        elif inputstyle in markerstyles_text2root:
            # text description: translate to its ROOT code first
            inputmode = 'root'
            inputstyle = markerstyles_text2root[inputstyle]
        else:
            raise ValueError(
                "`{0}` is not a valid `markerstyle`".format(inputstyle))
    if inputmode == 'root':
        if inputstyle not in markerstyles_root2mpl:
            raise ValueError(
                "`{0}` is not a valid ROOT `markerstyle`".format(
                    inputstyle))
        if mode == 'root':
            return inputstyle
        return markerstyles_root2mpl[inputstyle]
    # matplotlib input
    if '$' in str(inputstyle):
        # math-text marker: ROOT has no analogue, fall back to a point
        return 1 if mode == 'root' else inputstyle
    if inputstyle not in markerstyles_mpl2root:
        raise ValueError(
            "`{0}` is not a valid matplotlib `markerstyle`".format(
                inputstyle))
    if mode == 'mpl':
        return inputstyle
    return markerstyles_mpl2root[inputstyle]
class MarkerStyle(_StyleContainer):
    """
    Container for grouping together ROOT and matplotlib marker styles.
    The *style* argument to the constructor may be a ROOT marker style,
    a matplotlib marker style, or one of the following descriptions:
    """
    # append the recognized text descriptions to the class docstring
    __doc__ = __doc__[:__doc__.rfind('\n') + 1]
    __doc__ += '\n'.join(["    '{0}'".format(x)
                          for x in markerstyles_text2root])
    if sys.version_info[0] < 3:
        # Python 2 leaks the list-comprehension variable into class scope
        del x
    __doc__ += """
    Examples
    --------
    >>> style = MarkerStyle('opentriangle')
    >>> style('root')
    26
    >>> style('mpl')
    '^'
    """
    def __init__(self, style):
        _StyleContainer.__init__(self, style, convert_markerstyle)
##############################
#### Lines ###################
# ROOT line style -> matplotlib line style.  The mapping is lossy:
# ROOT styles 5-10 collapse onto 'dashed'/'dashdot'.
linestyles_root2mpl = {
    1: 'solid',
    2: 'dashed',
    3: 'dotted',
    4: 'dashdot',
    5: 'dashdot',
    6: 'dashdot',
    7: 'dashed',
    8: 'dashdot',
    9: 'dashed',
    10: 'dashdot',
}
# matplotlib line style -> ROOT line style
linestyles_mpl2root = {
    'solid': 1,
    'dashed': 2,
    'dotted': 3,
    'dashdot': 4,
}
# human-readable description -> ROOT line style
linestyles_text2root = {
    'solid': 1,
    'dashed': 2,
    'dotted': 3,
    'dashdot': 4,
    'longdashdot': 5,
    'longdashdotdotdot': 6,
    'longdash': 7,
    'longdashdotdot': 8,
    'verylongdash': 9,
    'verylongdashdot': 10
}
def convert_linestyle(inputstyle, mode, inputmode=None):
    """
    Convert *inputstyle* to ROOT or matplotlib format.
    Output format is determined by *mode* ('root' or 'mpl'). The *inputstyle*
    may be a ROOT line style, a matplotlib line style, or a description
    such as 'solid' or 'dotted'.
    """
    mode = mode.lower()
    if mode not in ('mpl', 'root'):
        raise ValueError(
            "`{0}` is not a valid `mode`".format(mode))
    try:
        # coerce numeric input to an int, clamping to the smallest ROOT style
        inputstyle = max(int(inputstyle), 1)
    except (TypeError, ValueError):
        pass
    if inputmode is None:
        # guess the input convention from the style itself
        if inputstyle in linestyles_root2mpl:
            inputmode = 'root'
        elif inputstyle in linestyles_mpl2root:
            inputmode = 'mpl'
        elif inputstyle in linestyles_text2root:
            # text description: translate to its ROOT code first
            inputmode = 'root'
            inputstyle = linestyles_text2root[inputstyle]
        else:
            raise ValueError(
                "`{0}` is not a valid `linestyle`".format(
                    inputstyle))
    if inputmode == 'root':
        if inputstyle not in linestyles_root2mpl:
            raise ValueError(
                "`{0}` is not a valid ROOT `linestyle`".format(
                    inputstyle))
        if mode == 'root':
            return inputstyle
        return linestyles_root2mpl[inputstyle]
    # matplotlib input
    if inputstyle not in linestyles_mpl2root:
        raise ValueError(
            "`{0}` is not a valid matplotlib `linestyle`".format(
                inputstyle))
    if mode == 'mpl':
        return inputstyle
    return linestyles_mpl2root[inputstyle]
class LineStyle(_StyleContainer):
    """
    Container for grouping together ROOT and matplotlib line styles.
    The *style* argument to the constructor may be a ROOT line style,
    a matplotlib line style, or one of the following descriptions:
    """
    # append the recognized text descriptions to the class docstring
    __doc__ = __doc__[:__doc__.rfind('\n') + 1]
    __doc__ += '\n'.join(["    '{0}'".format(x)
                          for x in linestyles_text2root])
    if sys.version_info[0] < 3:
        # Python 2 leaks the list-comprehension variable into class scope
        del x
    __doc__ += """
    Examples
    --------
    >>> style = LineStyle('verylongdashdot')
    >>> style('root')
    10
    >>> style('mpl')
    'dashdot'
    """
    def __init__(self, style):
        _StyleContainer.__init__(self, style, convert_linestyle)
##############################
#### Fills ###################
# ROOT fill style -> matplotlib hatch pattern (None means no hatching)
fillstyles_root2mpl = {
    0: None,
    1001: None,
    3003: '.',
    3345: '\\',
    3354: '/',
    3006: '|',
    3007: '-',
    3011: '*',
    3012: 'o',
    3013: 'x',
    3019: 'O',
}
# Inverse mapping.  Both 0 (hollow) and 1001 (solid) translate to no hatch,
# so pin the ambiguous None key to the hollow style explicitly.
fillstyles_mpl2root = {hatch: root
                       for root, hatch in fillstyles_root2mpl.items()}
fillstyles_mpl2root[None] = 0
# human-readable description -> ROOT fill style
fillstyles_text2root = {
    'hollow': 0,
    'none': 0,
    'solid': 1001,
}
def convert_fillstyle(inputstyle, mode, inputmode=None):
    """
    Convert *inputstyle* to ROOT or matplotlib format.
    Output format is determined by *mode* ('root' or 'mpl'). The *inputstyle*
    may be a ROOT fill style, a matplotlib hatch style, None, 'none', 'hollow',
    or 'solid'.

    Raises ValueError for an unrecognized *mode* or *inputstyle* (previously
    an empty hatch string raised IndexError instead).
    """
    mode = mode.lower()
    if mode not in ('mpl', 'root'):
        raise ValueError("`{0}` is not a valid `mode`".format(mode))
    if inputmode is None:
        try:
            # an integer input is a ROOT fillstyle
            inputstyle = int(inputstyle)
            inputmode = 'root'
        except (TypeError, ValueError):
            if inputstyle is None:
                inputmode = 'mpl'
            elif inputstyle in fillstyles_text2root:
                inputmode = 'root'
                inputstyle = fillstyles_text2root[inputstyle]
            # guard the subscript: an empty style must raise ValueError
            # below, not IndexError here
            elif inputstyle and inputstyle[0] in fillstyles_mpl2root:
                # matplotlib hatch: only the first character determines the
                # pattern (repetition controls density)
                inputmode = 'mpl'
            else:
                raise ValueError(
                    "`{0}` is not a valid `fillstyle`".format(inputstyle))
    if inputmode == 'root':
        if mode == 'root':
            return inputstyle
        if inputstyle in fillstyles_root2mpl:
            return fillstyles_root2mpl[inputstyle]
        raise ValueError(
            "`{0}` is not a valid `fillstyle`".format(inputstyle))
    else:
        if inputstyle is not None and (
                not inputstyle or inputstyle[0] not in fillstyles_mpl2root):
            raise ValueError(
                "`{0}` is not a valid matplotlib `fillstyle`".format(
                    inputstyle))
        if mode == 'mpl':
            return inputstyle
        if inputstyle is None:
            return fillstyles_mpl2root[inputstyle]
        return fillstyles_mpl2root[inputstyle[0]]
class FillStyle(_StyleContainer):
    """
    Container for grouping together ROOT and matplotlib fill styles.
    The *style* argument to the constructor may be a ROOT fill style,
    a matplotlib fill style, or one of the following descriptions:
    """
    # append the recognized text descriptions to the class docstring
    __doc__ = __doc__[:__doc__.rfind('\n') + 1]
    __doc__ += '\n'.join(["    '{0}'".format(x)
                          for x in fillstyles_text2root])
    if sys.version_info[0] < 3:
        # Python 2 leaks the list-comprehension variable into class scope
        del x
    __doc__ += """
    For an input value of 'solid', the matplotlib hatch value will be set to
    None, which is the same value as for 'hollow'. The root2matplotlib
    functions will all check the ROOT value to see whether to make the fill
    solid or hollow.
    Examples
    --------
    >>> style = FillStyle('hollow')
    >>> style('root')
    0
    >>> print(style('mpl'))
    None
    """
    def __init__(self, style):
        _StyleContainer.__init__(self, style, convert_fillstyle)
##############################
#### Colors ##################
_cnames = {
'r' : '#FF0000', #@IgnorePep8
'g' : '#00FF00',
'b' : '#0000FF',
'c' : '#00BFBF',
'm' : '#BF00BF',
'y' : '#BFBF00',
'k' : '#000000',
'w' : '#FFFFFF',
'aliceblue' : '#F0F8FF',
'antiquewhite' : '#FAEBD7',
'aqua' : '#00FFFF',
'aquamarine' : '#7FFFD4',
'azure' : '#F0FFFF',
'beige' : '#F5F5DC',
'bisque' : '#FFE4C4',
'black' : '#000000',
'blanchedalmond' : '#FFEBCD',
'blue' : '#0000FF',
'blueviolet' : '#8A2BE2',
'brown' : '#A52A2A',
'burlywood' : '#DEB887',
'cadetblue' : '#5F9EA0',
'chartreuse' : '#7FFF00',
'chocolate' : '#D2691E',
'coral' : '#FF7F50',
'cornflowerblue' : '#6495ED',
'cornsilk' : '#FFF8DC',
'crimson' : '#DC143C',
'cyan' : '#00FFFF',
'darkblue' : '#00008B',
'darkcyan' : '#008B8B',
'darkgoldenrod' : '#B8860B',
'darkgray' : '#A9A9A9',
'darkgreen' : '#006400',
'darkkhaki' : '#BDB76B',
'darkmagenta' : '#8B008B',
'darkolivegreen' : '#556B2F',
'darkorange' : '#FF8C00',
'darkorchid' : '#9932CC',
'darkred' : '#8B0000',
'darksalmon' : '#E9967A',
'darkseagreen' : '#8FBC8F',
'darkslateblue' : '#483D8B',
'darkslategray' : '#2F4F4F',
'darkturquoise' : '#00CED1',
'darkviolet' : '#9400D3',
'deeppink' : '#FF1493',
'deepskyblue' : '#00BFFF',
'dimgray' : '#696969',
'dodgerblue' : '#1E90FF',
'firebrick' : '#B22222',
'floralwhite' : '#FFFAF0',
'forestgreen' : '#228B22',
'fuchsia' : '#FF00FF',
'gainsboro' : '#DCDCDC',
'ghostwhite' : '#F8F8FF',
'gold' : '#FFD700',
'goldenrod' : '#DAA520',
'gray' : '#808080',
'green' : '#008000',
'greenyellow' : '#ADFF2F',
'honeydew' : '#F0FFF0',
'hotpink' : '#FF69B4',
'indianred' : '#CD5C5C',
'indigo' : '#4B0082',
'ivory' : '#FFFFF0',
'khaki' : '#F0E68C',
'lavender' : '#E6E6FA',
'lavenderblush' : '#FFF0F5',
'lawngreen' : '#7CFC00',
'lemonchiffon' : '#FFFACD',
'lightblue' : '#ADD8E6',
'lightcoral' : '#F08080',
'lightcyan' : '#E0FFFF',
'lightgoldenrodyellow' : '#FAFAD2',
'lightgreen' : '#90EE90',
'lightgrey' : '#D3D3D3',
'lightpink' : '#FFB6C1',
'lightsalmon' : '#FFA07A',
'lightseagreen' : '#20B2AA',
'lightskyblue' : '#87CEFA',
'lightslategray' : '#778899',
'lightsteelblue' : '#B0C4DE',
'lightyellow' : '#FFFFE0',
'lime' : '#00FF00',
'limegreen' : '#32CD32',
'linen' : '#FAF0E6',
'magenta' : '#FF00FF',
'maroon' : '#800000',
'mediumaquamarine' : '#66CDAA',
'mediumblue' : '#0000CD',
'mediumorchid' : '#BA55D3',
'mediumpurple' : '#9370DB',
'mediumseagreen' : '#3CB371',
'mediumslateblue' : '#7B68EE',
'mediumspringgreen' : '#00FA9A',
'mediumturquoise' : '#48D1CC',
'mediumvioletred' : '#C71585',
'midnightblue' : '#191970',
'mintcream' : '#F5FFFA',
'mistyrose' : '#FFE4E1',
'moccasin' : '#FFE4B5',
'navajowhite' : '#FFDEAD',
'navy' : '#000080',
'oldlace' : '#FDF5E6',
'olive' : '#808000',
'olivedrab' : '#6B8E23',
'orange' : '#FFA500',
'orangered' : '#FF4500',
'orchid' : '#DA70D6',
'palegoldenrod' : '#EEE8AA',
'palegreen' : '#98FB98',
'palevioletred' : '#AFEEEE',
'papayawhip' : '#FFEFD5',
'peachpuff' : '#FFDAB9',
'peru' : '#CD853F',
'pink' : '#FFC0CB',
'plum' : '#DDA0DD',
'powderblue' : '#B0E0E6',
'purple' : '#800080',
'red' : '#FF0000',
'rosybrown' : '#BC8F8F',
'royalblue' : '#4169E1',
'saddlebrown' : '#8B4513',
'salmon' : '#FA8072',
'sandybrown' : '#FAA460',
'seagreen' : '#2E8B57',
'seashell' : '#FFF5EE',
'sienna' : '#A0522D',
'silver' : '#C0C0C0',
'skyblue' : '#87CEEB',
'slateblue' : '#6A5ACD',
'slategray' : '#708090',
'snow' : '#FFFAFA',
'springgreen' : '#00FF7F',
'steelblue' : '#4682B4',
'tan' : '#D2B48C',
'teal' : '#008080',
'thistle' : '#D8BFD8',
'tomato' : '#FF6347',
'turquoise' : '#40E0D0',
'violet' : '#EE82EE',
'wheat' : '#F5DEB3',
'white' : '#FFFFFF',
'whitesmoke' : '#F5F5F5',
'yellow' : '#FFFF00',
'yellowgreen' : '#9ACD32',
}
def convert_color(color, mode):
    """
    Convert *color* to a TColor if *mode='root'* or to (r,g,b) if 'mpl'.
    The *color* argument can be a ROOT TColor or color index, an *RGB*
    or *RGBA* sequence or a string in any of several forms:
        1) a letter from the set 'rgbcmykw'
        2) a hex color string, like '#00FFFF'
        3) a standard name, like 'aqua'
        4) a float, like '0.4', indicating gray on a 0-1 scale
    if *arg* is *RGBA*, the transparency value will be ignored.
    """
    mode = mode.lower()
    if mode not in ('mpl', 'root'):
        raise ValueError(
            "`{0}` is not a valid `mode`".format(mode))
    try:
        # color is an r,g,b tuple
        color = tuple([float(x) for x in color[:3]])
        if max(color) > 1.:
            # components look like 0-255 values: rescale to 0-1
            color = tuple([x / 255. for x in color])
        if mode == 'root':
            return ROOT.TColor.GetColor(*color)
        return color
    except (ValueError, TypeError):
        pass
    if isinstance(color, string_types):
        if color in _cnames:
            # color is a matplotlib letter or an html color name
            color = _cnames[color]
        if color[0] == '#':
            # color is a hex value
            color = color.lstrip('#')
            lv = len(color)
            # split into three equal-width hex fields
            # NOTE(review): assumes lv is 3 or 6; other lengths (e.g. a
            # '#RRGGBBAA' string) are parsed incorrectly -- confirm inputs
            color = tuple(int(color[i:i + lv // 3], 16)
                          for i in range(0, lv, lv // 3))
            if lv == 3:
                # expand shorthand '#abc': 0xA -> 0xAA, etc.
                color = tuple(x * 16 + x for x in color)
            return convert_color(color, mode)
        # color is a shade of gray, i.e. '0.3'
        return convert_color((color, color, color), mode)
    try:
        # color is a TColor
        color = ROOT.TColor(color)
        color = color.GetRed(), color.GetGreen(), color.GetBlue()
        return convert_color(color, mode)
    except (TypeError, ReferenceError):
        pass
    try:
        # color is a ROOT color index
        if color < 0:
            color = 0
        color = ROOT.gROOT.GetColor(color)
        # Protect against the case a histogram with a custom color
        # is saved in a ROOT file
        if not color:
            # Just return black
            color = ROOT.gROOT.GetColor(1)
        color = color.GetRed(), color.GetGreen(), color.GetBlue()
        return convert_color(color, mode)
    except (TypeError, ReferenceError):
        pass
    raise ValueError("'{0!s}' is not a valid `color`".format(color))
class Color(_StyleContainer):
    """
    Container for grouping together ROOT and matplotlib colors.
    The *color* argument to the constructor can be a ROOT TColor or color index.
    If matplotlib is available, it can also accept an *RGB* or *RGBA* sequence,
    or a string in any of several forms:
        1) a letter from the set 'rgbcmykw'
        2) a hex color string, like '#00FFFF'
        3) a standard name, like 'aqua'
        4) a float, like '0.4', indicating gray on a 0-1 scale
    if *color* is *RGBA*, the *A* will simply be discarded.
    Examples
    --------
    >>> color = Color(2)
    >>> color()
    2
    >>> color('mpl')
    (1.0, 0.0, 0.0)
    >>> color = Color('blue')
    >>> color('root')
    4
    >>> color('mpl')
    (0.0, 0.0, 1.0)
    >>> color = Color('0.25')
    >>> color('mpl')
    (0.25, 0.25, 0.25)
    >>> color('root')
    924
    """
    def __init__(self, color):
        # both back-end representations are produced by convert_color
        _StyleContainer.__init__(self, color, convert_color)
# byteplay: CPython assembler/disassembler
# Copyright (C) 2006 Noam Raphael | Version: http://code.google.com/p/byteplay
# Rewritten 2009 Demur Rumed | Version: http://github.com/serprex/byteplay
# Screwed the style over, modified stack logic to be more flexible, updated to Python 3
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
__version__ = '1.0'
__all__ = [
'opmap',
'opname',
'opcodes',
'hasflow',
'stack_effect',
'cmp_op',
'hasarg',
'hasname',
'hasjrel',
'hasjabs',
'hasjump',
'haslocal',
'hascompare',
'hasfree',
'hasconst',
'hascode',
'Opcode',
'SetLineno',
'Label',
'isopcode',
'Code']
from sys import version_info
if version_info < (3, 6):
raise NotImplementedError("Currently only Python versions >3.5 are supported!")
import opcode
from dis import findlabels
from types import CodeType
from enum import Enum
class Opcode(int):
__str__ = __repr__ = lambda s: opname[s]
opmap = {name.replace('+', '_'): Opcode(code) for name, code in opcode.opmap.items()}
opname = {code: name for name, code in opmap.items()}
opcodes = set(opname)
for cmp_op, hasname in opmap.items():
globals()[cmp_op] = hasname
__all__.append(cmp_op)
cmp_op = opcode.cmp_op
hasarg = {x for x in opcodes if x >= opcode.HAVE_ARGUMENT}
hasconst = {Opcode(x) for x in opcode.hasconst}
hasname = {Opcode(x) for x in opcode.hasname}
hasjrel = {Opcode(x) for x in opcode.hasjrel}
hasjabs = {Opcode(x) for x in opcode.hasjabs}
hasjump = hasjabs | hasjrel
haslocal = {Opcode(x) for x in opcode.haslocal}
hascompare = {Opcode(x) for x in opcode.hascompare}
hasfree = {Opcode(x) for x in opcode.hasfree}
hascode = {MAKE_FUNCTION}
STOP_CODE = -1
import dis
# Python 3.6.0 mis-reports the stack effect of CALL_FUNCTION_EX
# (fixed upstream in 3.6.1): patch around it on that version only.
if (3, 6, 0) <= version_info < (3, 6, 1):
    def stack_effect(o, arg):
        if o == CALL_FUNCTION_EX:
            return -2 if arg else -1
        return dis.stack_effect(o, arg)
else:
    stack_effect = dis.stack_effect
# opcodes with non-linear control flow (jumps, block push/pop, returns)
hasflow = hasjump | {
    POP_BLOCK,
    END_FINALLY,
    BREAK_LOOP,
    RETURN_VALUE,
    RAISE_VARARGS,
    STOP_CODE,
    POP_EXCEPT,
    WITH_CLEANUP_START,
    WITH_CLEANUP_FINISH,
    SETUP_ASYNC_WITH}
# opcodes whose presence marks a code object as a native coroutine
coroutine_opcodes = {GET_AWAITABLE, GET_AITER, GET_ANEXT, BEFORE_ASYNC_WITH, SETUP_ASYNC_WITH}
class Label:
    """Opaque marker object used as a jump target in a Code listing."""


class SetLinenoType:
    """Type of the SetLineno pseudo-instruction singleton."""
    def __repr__(self):
        return 'SetLineno'


SetLineno = SetLinenoType()


def isopcode(x):
    """Return True when *x* is a real opcode, not SetLineno or a Label."""
    if x is SetLineno or isinstance(x, Label):
        return False
    return True
# Flags for codeobject.co_flags, taken from Include/code.h, other flags are no longer used
CO_OPTIMIZED = 0x0001
CO_NEWLOCALS = 0x0002
CO_VARARGS = 0x0004            # function accepts *args
CO_VARKEYWORDS = 0x0008        # function accepts **kwargs
CO_NESTED = 0x0010
CO_GENERATOR = 0x0020
CO_NOFREE = 0x0040             # no free or cell variables
CO_COROUTINE = 0x0080          # native coroutine (async def)
CO_ITERABLE_COROUTINE = 0x0100 # generator-based coroutine
CO_ASYNC_GENERATOR = 0x0200
CO_FUTURE_BARRY_AS_BDFL = 0x40000
CO_FUTURE_GENERATOR_STOP = 0x80000  # PEP 479
class Code(object):
"""An object which holds all the information which a Python code object
holds, but in an easy-to-play-with representation
The attributes are:
Affecting action
code - list of 2-tuples: the code
freevars - list of strings: the free vars of the code (those are names
of variables created in outer functions and used in the function)
args - list of strings: the arguments of the code
kwonly - number of keyword only arguments
varargs - boolean: Does args end with a '*args' argument
varkwargs - boolean: Does args end with a '**kwargs' argument
newlocals - boolean: Should a new local namespace be created
(True in functions, False for module and exec code)
force_generator - set CO_GENERATOR in co_flags for generator Code objects without generator-specific code
Python 3.5:
force_coroutine - set CO_COROUTINE in co_flags for coroutine Code objects (native coroutines) without coroutine-specific code
force_iterable_coroutine - set CO_ITERABLE_COROUTINE in co_flags for generator-based coroutine Code objects
future_generator_stop - set CO_FUTURE_GENERATOR_STOP flag (see PEP-479)
Python 3.6:
force_async_generator - set CO_ASYNC_GENERATOR in co_flags
Not affecting action
name - string: the name of the code (co_name)
filename - string: the file name of the code (co_filename)
firstlineno - int: the first line number (co_firstlineno)
docstring - string or None: the docstring (the first item of co_consts,
if it's str)
code is a list of 2-tuples. The first item is an opcode, or SetLineno, or a
Label instance. The second item is the argument, if applicable, or None"""
    def __init__(self, code, freevars, args, kwonly, varargs, varkwargs, newlocals,
                 name, filename, firstlineno, docstring,
                 force_generator=False,
                 *, force_coroutine=False, force_iterable_coroutine=False,
                 force_async_generator=False, future_generator_stop=False):
        """Store the disassembled representation.

        See the class docstring for the meaning of each argument.
        """
        self.code = code
        self.freevars = freevars
        self.args = args
        self.kwonly = kwonly
        self.varargs = varargs
        self.varkwargs = varkwargs
        self.newlocals = newlocals
        self.name = name
        self.filename = filename
        self.firstlineno = firstlineno
        self.docstring = docstring
        self.force_generator = force_generator
        self.force_coroutine = force_coroutine
        self.force_iterable_coroutine = force_iterable_coroutine
        self.force_async_generator = force_async_generator
        self.future_generator_stop = future_generator_stop
@staticmethod
def _findlinestarts(code):
"""Find the offsets in a byte code which are start of lines in the source
Generate pairs offset,lineno as described in Python/compile.c
This is a modified version of dis.findlinestarts, which allows multiplelinestarts
with the same line number"""
lineno = code.co_firstlineno
addr = 0
for byte_incr, line_incr in zip(code.co_lnotab[0::2], code.co_lnotab[1::2]):
if byte_incr:
yield addr, lineno
addr += byte_incr
lineno += line_incr
yield addr, lineno
    @classmethod
    def from_code(cls, co):
        """Disassemble a Python code object into a Code object.

        Returns None (after printing a diagnostic) when *co* cannot be
        represented: overlapping cellvars/freevars or an unexpected
        sub-code block.
        """
        # this representation cannot express names that are both cell and free
        free_cell_isection = set(co.co_cellvars) & set(co.co_freevars)
        if free_cell_isection:
            print(co.co_name + ': has non-empty co.co_cellvars & co.co_freevars', free_cell_isection)
            return None
        co_code = co.co_code
        # map bytecode offsets of jump targets to fresh Label objects
        labels = {addr: Label() for addr in findlabels(co_code)}
        linestarts = dict(cls._findlinestarts(co))
        cellfree = co.co_cellvars + co.co_freevars
        code = []
        extended_arg = 0
        is_generator = False
        is_coroutine = False
        # wordcode (3.6+): each instruction is 2 bytes: (opcode, arg)
        for i in range(0, len(co_code), 2):
            if i in labels:
                code.append((labels[i], None))
            if i in linestarts:
                code.append((SetLineno, linestarts[i]))
            op = Opcode(co_code[i])
            arg = co_code[i+1] | extended_arg
            if op in hascode:
                # MAKE_FUNCTION: recursively disassemble the code-object
                # constant loaded two entries back
                lastop, lastarg = code[-2]
                if lastop != LOAD_CONST:
                    raise ValueError("%s should be preceded by LOAD_CONST" % op)
                sub_code = Code.from_code(lastarg)
                if sub_code is None:
                    print(co.co_name + ': has unexpected subcode block')
                    return None
                code[-2] = (LOAD_CONST, sub_code)
            if op == opcode.EXTENDED_ARG:
                # accumulate the high bits for the next instruction's arg
                extended_arg = arg << 8
            else:
                if op not in hasarg:
                    code.append((op, None))
                    continue
                extended_arg = 0
                # translate the raw argument into its symbolic byteplay form
                byteplay_arg = co.co_consts[arg] if op in hasconst else \
                               co.co_names[arg] if op in hasname else \
                               labels[arg] if op in hasjabs else \
                               labels[i + 2 + arg] if op in hasjrel else \
                               co.co_varnames[arg] if op in haslocal else \
                               cmp_op[arg] if op in hascompare else \
                               cellfree[arg] if op in hasfree else \
                               arg
                code.append((op, byteplay_arg))
            if op == YIELD_VALUE or op == YIELD_FROM:
                is_generator = True
            if op in coroutine_opcodes:
                is_coroutine = True
        varargs = not not co.co_flags & CO_VARARGS
        varkwargs = not not co.co_flags & CO_VARKEYWORDS
        # "force" flags record co_flags bits not implied by the bytecode itself
        force_coroutine = not is_coroutine and (co.co_flags & CO_COROUTINE)
        force_iterable_coroutine = co.co_flags & CO_ITERABLE_COROUTINE
        force_async_generator = co.co_flags & CO_ASYNC_GENERATOR
        is_generator = False if force_async_generator else is_generator
        force_generator = not is_generator and (co.co_flags & CO_GENERATOR)
        # the coroutine/generator flavors are mutually exclusive
        assert not (force_coroutine and force_iterable_coroutine)
        assert not (force_coroutine and force_async_generator)
        assert not (force_iterable_coroutine and force_async_generator)
        future_generator_stop = co.co_flags & CO_FUTURE_GENERATOR_STOP
        return cls(code=code,
                   freevars=co.co_freevars,
                   args=co.co_varnames[:co.co_argcount + varargs + varkwargs + co.co_kwonlyargcount],
                   kwonly=co.co_kwonlyargcount,
                   varargs=varargs,
                   varkwargs=varkwargs,
                   newlocals=not not co.co_flags & CO_NEWLOCALS,
                   name=co.co_name,
                   filename=co.co_filename,
                   firstlineno=co.co_firstlineno,
                   docstring=co.co_consts[0] if co.co_consts and isinstance(co.co_consts[0], str) else None,
                   force_generator=force_generator,
                   force_coroutine=force_coroutine,
                   force_iterable_coroutine=force_iterable_coroutine,
                   force_async_generator=force_async_generator,
                   future_generator_stop=future_generator_stop)
def __eq__(self, other):
try:
if (self.freevars != other.freevars or
self.args != other.args or
self.kwonly != other.kwonly or
self.varargs != other.varargs or
self.varkwargs != other.varkwargs or
self.newlocals != other.newlocals or
self.name != other.name or
self.filename != other.filename or
self.firstlineno != other.firstlineno or
self.docstring != other.docstring or
self.force_generator != other.force_generator or
len(self.code) != len(other.code)):
return False
else:
if (self.force_coroutine != other.force_coroutine or
self.force_iterable_coroutine != other.force_iterable_coroutine or
self.future_generator_stop != other.future_generator_stop or
self.force_async_generator != other.force_async_generator):
return False
# This isn't trivial due to labels
lmap = {}
for (op1, arg1), (op2, arg2) in zip(self.code, other.code):
if isinstance(op1, Label):
if lmap.setdefault(arg1, arg2) is not arg2:
return False
else:
if op1 != op2:
return False
if op1 in hasjump:
if lmap.setdefault(arg1, arg2) is not arg2:
return False
elif arg1 != arg2:
return False
return True
except:
return False
def _compute_stacksize(self, logging=False):
code = self.code
label_pos = {op[0]: pos for pos, op in enumerate(code) if isinstance(op[0], Label)}
# sf_targets are the targets of SETUP_FINALLY opcodes. They are recorded
# because they have special stack behaviour. If an exception was raised
# in the block pushed by a SETUP_FINALLY opcode, the block is popped
# and 3 objects are pushed. On return or continue, the block is popped
# and 2 objects are pushed. If nothing happened, the block is popped by
# a POP_BLOCK opcode and 1 object is pushed by a (LOAD_CONST, None)
# operation
# Our solution is to record the stack state of SETUP_FINALLY targets
# as having 3 objects pushed, which is the maximum. However, to make
# stack recording consistent, the get_next_stacks function will always
# yield the stack state of the target as if 1 object was pushed, but
# this will be corrected in the actual stack recording
sf_targets = {label_pos[arg] for op, arg in code
if (op == SETUP_FINALLY or op == SETUP_WITH or op == SETUP_ASYNC_WITH)}
states = [None] * len(code)
maxsize = 0
class BlockType(Enum):
DEFAULT = 0,
TRY_FINALLY = 1,
TRY_EXCEPT = 2,
LOOP_BODY = 3,
WITH_BLOCK = 4,
EXCEPTION = 5,
SILENCED_EXCEPTION_BLOCK = 6,
class State:
def __init__(self, pos=0, stack=(0,), block_stack=(BlockType.DEFAULT,), log=[]):
self._pos = pos
self._stack = stack
self._block_stack = block_stack
self._log = log
@property
def pos(self):
return self._pos
@property
def stack(self):
return self._stack
@stack.setter
def stack(self, val):
self._stack = val
def newstack(self, n):
if self._stack[-1] < -n:
raise ValueError("Popped a non-existing element at %s %s" %
(self._pos, code[self._pos - 4: self._pos + 3]))
return self._stack[:-1] + (self._stack[-1] + n,)
@property
def block_stack(self):
return self._block_stack
@property
def log(self):
return self._log
def newlog(self, msg):
if not logging:
return None
log_msg = str(self._pos) + ": " + msg
if self._stack:
log_msg += " (on stack: "
log_depth = 2
log_depth = min(log_depth, len(self._stack))
for pos in range(-1, -log_depth, -1):
log_msg += str(self._stack[pos]) + ", "
log_msg += str(self._stack[-log_depth])
log_msg += ")"
else:
log_msg += " (empty stack)"
return [log_msg] + self._log
op = [State()]
while op:
cur_state = op.pop()
o = sum(cur_state.stack)
if o > maxsize:
maxsize = o
o, arg = code[cur_state.pos]
if isinstance(o, Label):
if cur_state.pos in sf_targets:
cur_state.stack = cur_state.newstack(5)
if states[cur_state.pos] is None:
states[cur_state.pos] = cur_state
elif states[cur_state.pos].stack != cur_state.stack:
check_pos = cur_state.pos + 1
while code[check_pos][0] not in hasflow:
check_pos += 1
if code[check_pos][0] not in (RETURN_VALUE, RAISE_VARARGS, STOP_CODE):
if cur_state.pos not in sf_targets:
raise ValueError("Inconsistent code at %s %s %s\n%s" %
(cur_state.pos, cur_state.stack, states[cur_state.pos].stack,
code[cur_state.pos - 5:cur_state.pos + 4]))
else:
# SETUP_FINALLY target inconsistent code!
#
# Since Python 3.2 assigned exception is cleared at the end of
# the except clause (named exception handler).
# To perform this CPython (checked in version 3.4.3) adds special
# bytecode in exception handler which currently breaks 'regularity' of bytecode.
# Exception handler is wrapped in try/finally block and POP_EXCEPT opcode
# is inserted before END_FINALLY, as a result cleanup-finally block is executed outside
# except handler. It's not a bug, as it doesn't cause any problems during execution, but
# it breaks 'regularity' and we can't check inconsistency here. Maybe issue should be
# posted to Python bug tracker.
pass
continue
else:
continue
if o not in (BREAK_LOOP, RETURN_VALUE, RAISE_VARARGS, STOP_CODE):
next_pos = cur_state.pos + 1
if not isopcode(o):
op += State(next_pos, cur_state.stack, cur_state.block_stack, cur_state.log),
elif o not in hasflow:
if o in hasarg and not isinstance(arg, int):
se = stack_effect(o, 0)
else:
se = stack_effect(o, arg)
log = cur_state.newlog("non-flow command (" + str(o) + ", se = " + str(se) + ")")
op += State(next_pos, cur_state.newstack(se), cur_state.block_stack, log),
elif o == FOR_ITER:
inside_for_log = cur_state.newlog("FOR_ITER (+1)")
op += State(label_pos[arg], cur_state.newstack(-1), cur_state.block_stack, cur_state.log),\
State(next_pos, cur_state.newstack(1), cur_state.block_stack, inside_for_log)
elif o in (JUMP_FORWARD, JUMP_ABSOLUTE):
after_jump_log = cur_state.newlog(str(o))
op += State(label_pos[arg], cur_state.stack, cur_state.block_stack, after_jump_log),
elif o in (JUMP_IF_FALSE_OR_POP, JUMP_IF_TRUE_OR_POP):
after_jump_log = cur_state.newlog(str(o) + ", jumped")
log = cur_state.newlog(str(o) + ", not jumped (-1)")
op += State(label_pos[arg], cur_state.stack, cur_state.block_stack, after_jump_log),\
State(next_pos, cur_state.newstack(-1), cur_state.block_stack, log)
elif o in {POP_JUMP_IF_TRUE, POP_JUMP_IF_FALSE}:
after_jump_log = cur_state.newlog(str(o) + ", jumped (-1)")
log = cur_state.newlog(str(o) + ", not jumped (-1)")
op += State(label_pos[arg], cur_state.newstack(-1), cur_state.block_stack, after_jump_log),\
State(next_pos, cur_state.newstack(-1), cur_state.block_stack, log)
elif o == CONTINUE_LOOP:
next_stack, next_block_stack = cur_state.stack, cur_state.block_stack
last_popped_block = None
while next_block_stack[-1] != BlockType.LOOP_BODY:
last_popped_block = next_block_stack[-1]
next_stack, next_block_stack = next_stack[:-1], next_block_stack[:-1]
if next_stack != cur_state.stack:
log = cur_state.newlog("CONTINUE_LOOP, from non-loop block")
else:
log = cur_state.newlog("CONTINUE_LOOP")
jump_to_pos = label_pos[arg]
if last_popped_block == BlockType.WITH_BLOCK:
next_stack = next_stack[:-1] + (next_stack[-1] - 1,)
op += State(jump_to_pos, next_stack, next_block_stack, log),
elif o == SETUP_LOOP:
inside_loop_log = cur_state.newlog("SETUP_LOOP (+block)")
op += State(label_pos[arg], cur_state.stack, cur_state.block_stack, cur_state.log),\
State(next_pos, cur_state.stack + (0,), cur_state.block_stack + (BlockType.LOOP_BODY,), inside_loop_log)
elif o == SETUP_EXCEPT:
inside_except_log = cur_state.newlog("SETUP_EXCEPT, exception (+6, +block)")
inside_try_log = cur_state.newlog("SETUP_EXCEPT, try-block (+block)")
op += State(label_pos[arg], cur_state.stack + (6,), cur_state.block_stack + (BlockType.EXCEPTION,), inside_except_log),\
State(next_pos, cur_state.stack + (0,), cur_state.block_stack + (BlockType.TRY_EXCEPT,), inside_try_log)
elif o == SETUP_FINALLY:
inside_finally_block = cur_state.newlog("SETUP_FINALLY (+1)")
inside_try_log = cur_state.newlog("SETUP_FINALLY try-block (+block)")
op += State(label_pos[arg], cur_state.newstack(1), cur_state.block_stack, inside_finally_block),\
State(next_pos, cur_state.stack + (0,), cur_state.block_stack + (BlockType.TRY_FINALLY,), inside_try_log)
elif o == POP_BLOCK:
log = cur_state.newlog("POP_BLOCK (-block)")
op += State(next_pos, cur_state.stack[:-1], cur_state.block_stack[:-1], log),
elif o == POP_EXCEPT:
log = cur_state.newlog("POP_EXCEPT (-block)")
op += State(next_pos, cur_state.stack[:-1], cur_state.block_stack[:-1], log),
elif o == END_FINALLY:
if cur_state.block_stack[-1] == BlockType.SILENCED_EXCEPTION_BLOCK:
log = cur_state.newlog("END_FINALLY pop silenced exception block (-block)")
op += State(next_pos, cur_state.stack[:-1], cur_state.block_stack[:-1], log),
elif cur_state.block_stack[-1] == BlockType.EXCEPTION:
# Reraise exception
pass
else:
log = cur_state.newlog("END_FINALLY (-6)")
op += State(next_pos, cur_state.newstack(-6), cur_state.block_stack, log),
elif o == SETUP_WITH or o == SETUP_ASYNC_WITH:
inside_with_block = cur_state.newlog("SETUP_WITH, with-block (+1, +block)")
inside_finally_block = cur_state.newlog("SETUP_WITH, finally (+1)")
op += State(label_pos[arg], cur_state.newstack(1), cur_state.block_stack, inside_finally_block),\
State(next_pos, cur_state.stack + (1,), cur_state.block_stack + (BlockType.WITH_BLOCK,), inside_with_block)
elif o == WITH_CLEANUP_START:
# There is special case when 'with' __exit__ function returns True,
# that's the signal to silence exception, in this case additional element is pushed
# and next END_FINALLY command won't reraise exception.
# Emulate this situation on WITH_CLEANUP_START with creating special block which will be
# handled differently by WITH_CLEANUP_FINISH and will cause END_FINALLY not to reraise exception.
log = cur_state.newlog("WITH_CLEANUP_START (+1)")
silenced_exception_log = cur_state.newlog("WITH_CLEANUP_START silenced_exception (+block)")
op += State(next_pos, cur_state.newstack(1), cur_state.block_stack, log),\
State(next_pos, cur_state.newstack(-7) + (9,), cur_state.block_stack + (BlockType.SILENCED_EXCEPTION_BLOCK,), silenced_exception_log)
elif o == WITH_CLEANUP_FINISH:
if cur_state.block_stack[-1] == BlockType.SILENCED_EXCEPTION_BLOCK:
# See comment in WITH_CLEANUP_START handler
log = cur_state.newlog("WITH_CLEANUP_FINISH silenced_exception (-1)")
op += State(next_pos, cur_state.newstack(-1), cur_state.block_stack, log),
else:
log = cur_state.newlog("WITH_CLEANUP_FINISH (-2)")
op += State(next_pos, cur_state.newstack(-2), cur_state.block_stack, log),
else:
raise ValueError("Unhandled opcode %s" % o)
return maxsize + 6 # for exception raise in deepest place
    def to_code(self, from_function=False):
        """Assemble a Python code object from a Code object.

        :param from_function: True when this code object is being assembled
            as a constant inside an enclosing function (sets CO_NESTED).
        :return: a ``CodeType`` object ready for ``exec``/``FunctionType``.
        """
        # the code compiles as a function if it uses fast locals, takes
        # arguments, or explicitly requests a fresh locals namespace
        num_fastnames = sum(1 for op, arg in self.code if isopcode(op) and op in haslocal)
        is_function = self.newlocals or num_fastnames > 0 or len(self.args) > 0
        nested = is_function and from_function
        # set of all opcodes used; op[0] is the opcode of each (op, arg) pair
        co_flags = {op[0] for op in self.code}
        if not self.force_async_generator:
            is_generator = (self.force_generator or
                            (YIELD_VALUE in co_flags or YIELD_FROM in co_flags)
                            )
        else:
            is_generator = False
        no_free = (not self.freevars) and (not co_flags & hasfree)
        is_native_coroutine = bool(self.force_coroutine or (co_flags & coroutine_opcodes))
        # a native coroutine is mutually exclusive with an iterable coroutine
        # and with an async generator
        assert not (is_native_coroutine and self.force_iterable_coroutine)
        assert not (is_native_coroutine and self.force_async_generator)
        # build the CO_* flag word (co_flags is rebound from a set to an int);
        # CO_OPTIMIZED is implied by the absence of the *_NAME opcodes
        co_flags =\
            (not(STORE_NAME in co_flags or LOAD_NAME in co_flags or DELETE_NAME in co_flags)) |\
            (self.newlocals and CO_NEWLOCALS) |\
            (self.varargs and CO_VARARGS) |\
            (self.varkwargs and CO_VARKEYWORDS) |\
            (is_generator and CO_GENERATOR) |\
            (no_free and CO_NOFREE) |\
            (nested and CO_NESTED)
        co_flags |= (is_native_coroutine and CO_COROUTINE) |\
                    (self.force_iterable_coroutine and CO_ITERABLE_COROUTINE) |\
                    (self.future_generator_stop and CO_FUTURE_GENERATOR_STOP) |\
                    (self.force_async_generator and CO_ASYNC_GENERATOR)
        co_consts = [self.docstring]
        co_names = []
        co_varnames = list(self.args)
        co_freevars = tuple(self.freevars)
        # Find all cellvars beforehand for two reasons
        # Need the number of them to construct the numeric arg for ops in hasfree
        # Need to put args which are cells in the beginning of co_cellvars
        cellvars = {arg for op, arg in self.code
                    if isopcode(op) and op in hasfree
                    and arg not in co_freevars}
        co_cellvars = [jumps for jumps in self.args if jumps in cellvars]
        def index(seq, item, eq=True, can_append=True):
            # locate `item` in `seq` (by equality or identity), optionally
            # appending it; returns the item's index in `seq`
            for i, x in enumerate(seq):
                if x == item if eq else x is item:
                    return i
            if can_append:
                seq.append(item)
                return len(seq) - 1
            else:
                raise IndexError("Item not found")
        jumps = []
        label_pos = {}
        lastlineno = self.firstlineno
        lastlinepos = 0
        co_code = bytearray()
        co_lnotab = bytearray()
        for i, (op, arg) in enumerate(self.code):
            if isinstance(op, Label):
                # labels emit no bytecode; just record the current offset
                label_pos[op] = len(co_code)
            elif op is SetLineno:
                # encode the (bytecode delta, line delta) pair into co_lnotab;
                # deltas larger than 255 must be split across entries
                incr_lineno = arg - lastlineno
                incr_pos = len(co_code) - lastlinepos
                lastlineno = arg
                lastlinepos += incr_pos
                if incr_lineno != 0 or incr_pos != 0:
                    while incr_pos > 255:
                        co_lnotab += b"\xFF\0"
                        incr_pos -= 255
                    while incr_lineno > 255:
                        co_lnotab += bytes((incr_pos, 255))
                        incr_pos = 0
                        incr_lineno -= 255
                    if incr_pos or incr_lineno:
                        co_lnotab += bytes((incr_pos, incr_lineno))
            elif op == opcode.EXTENDED_ARG:
                # NOTE(review): marks the next instruction's arg with a high
                # bit so the emitter below produces EXTENDED_ARG prefixes;
                # assumes self.code entries are mutable pairs -- confirm
                self.code[i + 1][1] |= 1 << 32
            else:
                if op in hasconst:
                    if (isinstance(arg, Code) and
                        i + 2 < len(self.code) and
                        self.code[i + 2][0] in hascode):
                        # recursively assemble nested code objects
                        arg = arg.to_code(from_function=is_function)
                        assert arg is not None
                    arg = index(co_consts, arg, 0)
                elif op in hasname:
                    arg = index(co_names, arg)
                elif op in hasjump:
                    # jump target unknown yet: emit an EXTENDED_ARG + op
                    # placeholder and backpatch after the loop
                    jumps.append((len(co_code), arg))
                    co_code += bytes((0x90, 0, op, 0))
                    continue
                elif op in haslocal:
                    arg = index(co_varnames, arg)
                elif op in hascompare:
                    arg = index(cmp_op, arg, can_append=False)
                elif op in hasfree:
                    # free variables come after cellvars in the numeric space
                    try:
                        arg = index(co_freevars, arg, can_append=False) + len(cellvars)
                    except IndexError:
                        arg = index(co_cellvars, arg)
                if arg is None:
                    arg = 0
                # emit EXTENDED_ARG prefixes for args wider than one byte
                if arg > 0xFFFFFF:
                    co_code += (opcode.EXTENDED_ARG | (arg >> 16 & 0xFF00)).to_bytes(2, "little")
                if arg > 0xFFFF:
                    co_code += (opcode.EXTENDED_ARG | (arg >> 8 & 0xFF00)).to_bytes(2, "little")
                if arg > 0xFF:
                    co_code += (opcode.EXTENDED_ARG | (arg & 0xFF00)).to_bytes(2, "little")
                co_code += (op | (arg & 0xFF) << 8).to_bytes(2, "little")
        # backpatch jump targets now that every label offset is known
        for pos, label in jumps:
            jump = label_pos[label]
            if co_code[pos+2] in hasjrel:
                # relative jumps are measured from the end of the placeholder
                jump -= pos + 4
            if jump > 0xFFFF:
                raise NotImplementedError("Multiple EXTENDED_ARG jumps not implemented")
            co_code[pos + 3] = jump & 0xFF
            co_code[pos + 1] = jump >> 8 & 0xFF
        co_argcount = len(self.args) - self.varargs - self.varkwargs - self.kwonly
        co_stacksize = self._compute_stacksize()
        return CodeType(co_argcount, self.kwonly, len(co_varnames), co_stacksize, co_flags,
                        bytes(co_code), tuple(co_consts), tuple(co_names), tuple(co_varnames),
                        self.filename, self.name, self.firstlineno, bytes(co_lnotab), co_freevars,
                        tuple(co_cellvars))
| bsd-3-clause | 7be44803d9895f01d70fd61d897f530a | 43.637255 | 159 | 0.540303 | 3.980889 | false | false | false | false |
rootpy/rootpy | rootpy/logger/formatter.py | 3 | 1941 | """
Provides a ``CustomFormatter`` and ``CustomColoredFormatter`` which are able
to insert ANSI color codes.
"""
from __future__ import absolute_import
import logging
__all__ = [
'CustomFormatter',
'CustomColoredFormatter',
]
# The background is set with 40 plus the number of the color, and the foreground with 30
RED, YELLOW, BLUE, WHITE = 1, 3, 4, 7
# These are the sequences needed to get colored output
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
# str.format template; $RESET/$BOLD are later substituted (or stripped)
FORMAT = "{color}{levelname}$RESET:$BOLD{name}$RESET] {message}"
def insert_seqs(message):
    """Substitute the $RESET/$BOLD placeholders with their ANSI escapes."""
    for placeholder, escape in (("$RESET", RESET_SEQ), ("$BOLD", BOLD_SEQ)):
        message = message.replace(placeholder, escape)
    return message
def remove_seqs(message):
    """Strip the $RESET/$BOLD placeholders from *message*."""
    for placeholder in ("$RESET", "$BOLD"):
        message = message.replace(placeholder, "")
    return message
# map a log level name to its ANSI color index (added to 30 for foreground)
COLORS = {
    'DEBUG'    : BLUE,
    'INFO'     : WHITE,
    'WARNING'  : YELLOW,
    'ERROR'    : RED,
    'CRITICAL' : RED,
}
class CustomFormatter(logging.Formatter):
    """
    Plain-text formatter: renders ``FORMAT`` with the ANSI placeholders
    stripped, using ``str.format`` over the record's attributes.
    """

    def __init__(self, fmt=remove_seqs(FORMAT), datefmt=None):
        super(CustomFormatter, self).__init__(fmt, datefmt)

    def format(self, record):
        # lazily build the interpolated message and the timestamp, exactly
        # as logging.Formatter.format would
        if not hasattr(record, "message"):
            record.message = record.getMessage()
        record.asctime = self.formatTime(record, self.datefmt)
        # the plain formatter supplies an empty color field
        return self._fmt.format(color="", **record.__dict__)
class CustomColoredFormatter(CustomFormatter):
    """
    Colored formatter: prefixes each record with the ANSI escape matching
    its log level (see ``COLORS``) when ``use_color`` is enabled.
    """

    def __init__(self, fmt=insert_seqs(FORMAT), datefmt=None, use_color=True):
        super(CustomColoredFormatter, self).__init__(fmt, datefmt)
        self.use_color = use_color

    def format(self, record):
        # pick the ANSI color escape for this record's level, if any
        colored = self.use_color and record.levelname in COLORS
        if colored:
            record.color = COLOR_SEQ % (30 + COLORS[record.levelname])
        else:
            record.color = ""
        if not hasattr(record, "message"):
            record.message = record.getMessage()
        record.asctime = self.formatTime(record, self.datefmt)
        return self._fmt.format(**record.__dict__)
| bsd-3-clause | 9915ed5d2732bba751a6b995e97e4190 | 29.809524 | 88 | 0.634724 | 3.581181 | false | false | false | false |
rootpy/rootpy | examples/plotting/plot_matplotlib_hist.py | 7 | 2749 | #!/usr/bin/env python
"""
=====================================
Plot a ROOT histogram with matplotlib
=====================================
This example demonstrates how a ROOT histogram can be styled with simple
attributes and displayed via ROOT or matplotlib.
"""
print(__doc__)
import ROOT
import numpy as np
from rootpy.plotting import Hist, HistStack, Legend, Canvas
from rootpy.plotting.style import get_style, set_style
from rootpy.plotting.utils import draw
from rootpy.interactive import wait
import rootpy.plotting.root2matplotlib as rplt
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator, MultipleLocator
# set the style
style = get_style('ATLAS')
style.SetEndErrorSize(3)
set_style(style)
# set the random seed
ROOT.gRandom.SetSeed(42)
np.random.seed(42)
# signal distribution
signal = 126 + 10 * np.random.randn(100)
signal_obs = 126 + 10 * np.random.randn(100)
# create histograms
h1 = Hist(30, 40, 200, title='Background', markersize=0, legendstyle='F')
h2 = h1.Clone(title='Signal')
h3 = h1.Clone(title='Data', drawstyle='E1 X0', legendstyle='LEP')
h3.markersize = 1.2
# fill the histograms with our distributions
h1.FillRandom('landau', 1000)
map(h2.Fill, signal)
h3.FillRandom('landau', 1000)
map(h3.Fill, signal_obs)
# set visual attributes
h1.fillstyle = 'solid'
h1.fillcolor = 'green'
h1.linecolor = 'green'
h1.linewidth = 0
h2.fillstyle = 'solid'
h2.fillcolor = 'red'
h2.linecolor = 'red'
h2.linewidth = 0
stack = HistStack([h1, h2], drawstyle='HIST E1 X0')
# plot with ROOT
canvas = Canvas(width=700, height=500)
draw([stack, h3], xtitle='Mass', ytitle='Events', pad=canvas)
# set the number of expected legend entries
legend = Legend([h1, h2, h3], leftmargin=0.45, margin=0.3)
legend.Draw()
label = ROOT.TText(0.3, 0.8, 'ROOT')
label.SetTextFont(43)
label.SetTextSize(25)
label.SetNDC()
label.Draw()
canvas.Modified()
canvas.Update()
# plot with matplotlib
set_style('ATLAS', mpl=True)
fig = plt.figure(figsize=(7, 5), dpi=100)
axes = plt.axes()
axes.xaxis.set_minor_locator(AutoMinorLocator())
axes.yaxis.set_minor_locator(AutoMinorLocator())
axes.yaxis.set_major_locator(MultipleLocator(20))
rplt.bar(stack, stacked=True, axes=axes)
rplt.errorbar(h3, xerr=False, emptybins=False, axes=axes)
plt.xlabel('Mass', position=(1., 0.), va='bottom', ha='right')
plt.ylabel('Events', position=(0., 1.), va='top', ha='right')
axes.xaxis.set_label_coords(1., -0.20)
axes.yaxis.set_label_coords(-0.18, 1.)
leg = plt.legend()
axes.text(0.3, 0.8, 'matplotlib',
verticalalignment='center', horizontalalignment='center',
transform=axes.transAxes, fontsize=20)
if not ROOT.gROOT.IsBatch():
plt.show()
# wait for you to close the ROOT canvas before exiting
wait(True)
| bsd-3-clause | d67ea756524d27d42e049db00decedb6 | 28.244681 | 73 | 0.708985 | 2.994553 | false | false | false | false |
rootpy/rootpy | rootpy/plotting/utils.py | 3 | 14154 | from __future__ import absolute_import
from math import log
import operator
from .. import ROOT
from .canvas import _PadBase
from .hist import _Hist, Hist, HistStack
from .graph import _Graph1DBase, Graph
from ..context import preserve_current_canvas, do_nothing
from ..extern.six.moves import range
__all__ = [
'draw',
'get_limits',
'get_band',
'canvases_with',
'find_all_primitives',
'tick_length_pixels',
]
def draw(plottables, pad=None, same=False,
         xaxis=None, yaxis=None,
         xtitle=None, ytitle=None,
         xlimits=None, ylimits=None,
         xdivisions=None, ydivisions=None,
         logx=False, logy=False,
         **kwargs):
    """
    Draw a list of histograms, stacks, and/or graphs.

    Parameters
    ----------

    plottables : Hist, Graph, HistStack, or list of such objects
        List of objects to draw.

    pad : Pad or Canvas, optional (default=None)
        The pad to draw onto. If None then use the current global pad.

    same : bool, optional (default=False)
        If True then use 'SAME' draw option for all objects instead of
        all but the first. Use this option if you are drawing onto a pad
        that already holds drawn objects.

    xaxis : TAxis, optional (default=None)
        Use this x-axis or use the x-axis of the first plottable if None.

    yaxis : TAxis, optional (default=None)
        Use this y-axis or use the y-axis of the first plottable if None.

    xtitle : str, optional (default=None)
        Set the x-axis title.

    ytitle : str, optional (default=None)
        Set the y-axis title.

    xlimits : tuple, optional (default=None)
        Set the x-axis limits with a 2-tuple of (min, max)

    ylimits : tuple, optional (default=None)
        Set the y-axis limits with a 2-tuple of (min, max)

    xdivisions : int, optional (default=None)
        Set the number of divisions for the x-axis

    ydivisions : int, optional (default=None)
        Set the number of divisions for the y-axis

    logx : bool, optional (default=False)
        If True, then set the x-axis to log scale.

    logy : bool, optional (default=False)
        If True, then set the y-axis to log scale.

    kwargs : dict
        All extra arguments are passed to get_limits when determining the axis
        limits.

    Returns
    -------

    (xaxis, yaxis), (xmin, xmax, ymin, ymax) : tuple
        The axes and axes bounds.

    See Also
    --------

    get_limits

    """
    # only bother preserving/restoring the current canvas when drawing onto
    # an explicit pad
    context = preserve_current_canvas if pad else do_nothing
    if not isinstance(plottables, (tuple, list)):
        plottables = [plottables]
    elif not plottables:
        raise ValueError("plottables is empty")
    with context():
        if pad is not None:
            pad.cd()
        # get the axes limits
        xmin, xmax, ymin, ymax = get_limits(plottables,
                                            logx=logx, logy=logy,
                                            **kwargs)
        if xlimits is not None:
            xmin, xmax = xlimits
        if ylimits is not None:
            ymin, ymax = ylimits
        if not same:
            # the first object is drawn without 'SAME' and owns the axes
            obj = plottables.pop(0)
            if isinstance(obj, ROOT.THStack):
                obj.SetMinimum(ymin)
                obj.SetMaximum(ymax)
            obj.Draw()
            xaxis = obj.xaxis
            yaxis = obj.yaxis
        # draw the plottables
        for i, obj in enumerate(plottables):
            if i == 0 and isinstance(obj, ROOT.THStack):
                # use SetMin/Max for y-axis
                obj.SetMinimum(ymin)
                obj.SetMaximum(ymax)
                # ROOT: please fix this...
            obj.Draw('SAME')
        # set the axes limits and titles
        if xaxis is not None:
            xaxis.SetLimits(xmin, xmax)
            xaxis.SetRangeUser(xmin, xmax)
            if xtitle is not None:
                xaxis.SetTitle(xtitle)
            if xdivisions is not None:
                xaxis.SetNdivisions(xdivisions)
        if yaxis is not None:
            yaxis.SetLimits(ymin, ymax)
            yaxis.SetRangeUser(ymin, ymax)
            if ytitle is not None:
                yaxis.SetTitle(ytitle)
            if ydivisions is not None:
                yaxis.SetNdivisions(ydivisions)
        if pad is None:
            pad = ROOT.gPad
        pad.SetLogx(bool(logx))
        pad.SetLogy(bool(logy))
        # redraw axes on top
        # axes ticks sometimes get hidden by filled histograms
        pad.RedrawAxis()
    return (xaxis, yaxis), (xmin, xmax, ymin, ymax)
def multiadd(a, b):
    """Element-wise sum of two equal-length sequences, as a list.

    Replaces the previous ``lambda``/``map`` form (PEP 8 E731); returning a
    list (instead of Python 3's one-shot ``map`` iterator) lets callers
    iterate the result more than once.
    """
    return [x + y for x, y in zip(a, b)]


def multisub(a, b):
    """Element-wise difference (``a - b``) of two equal-length sequences, as a list."""
    return [x - y for x, y in zip(a, b)]
def _limits_helper(x1, x2, a, b, snap=False):
"""
Given x1, x2, a, b, where:
x1 - x0 x3 - x2
a = ------- , b = -------
x3 - x0 x3 - x0
determine the points x0 and x3:
x0 x1 x2 x3
|----------|-----------------|--------|
"""
if x2 < x1:
raise ValueError("x2 < x1")
if a + b >= 1:
raise ValueError("a + b >= 1")
if a < 0:
raise ValueError("a < 0")
if b < 0:
raise ValueError("b < 0")
if snap:
if x1 >= 0:
x1 = 0
a = 0
elif x2 <= 0:
x2 = 0
b = 0
if x1 == x2 == 0:
# garbage in garbage out
return 0., 1.
elif x1 == x2:
# garbage in garbage out
return x1 - 1., x1 + 1.
if a == 0 and b == 0:
return x1, x2
elif a == 0:
return x1, (x2 - b * x1) / (1 - b)
elif b == 0:
return (x1 - a * x2) / (1 - a), x2
x0 = ((b / a) * x1 + x2 - (x2 - x1) / (1 - a - b)) / (1 + b / a)
x3 = (x2 - x1) / (1 - a - b) + x0
return x0, x3
def get_limits(plottables,
               xpadding=0,
               ypadding=0.1,
               xerror_in_padding=True,
               yerror_in_padding=True,
               snap=True,
               logx=False,
               logy=False,
               logx_crop_value=1E-5,
               logy_crop_value=1E-5,
               logx_base=10,
               logy_base=10):
    """
    Get the axes limits that should be used for a 1D histogram, graph, or stack
    of histograms.

    Parameters
    ----------

    plottables : Hist, Graph, HistStack, or list of such objects
        The object(s) for which visually pleasing plot boundaries are
        requested.

    xpadding : float or 2-tuple, optional (default=0)
        The horizontal padding as a fraction of the final plot width.

    ypadding : float or 2-tuple, optional (default=0.1)
        The vertical padding as a fraction of the final plot height.

    xerror_in_padding : bool, optional (default=True)
        If False then exclude the x error bars from the calculation of the plot
        width.

    yerror_in_padding : bool, optional (default=True)
        If False then exclude the y error bars from the calculation of the plot
        height.

    snap : bool, optional (default=True)
        Make the minimum or maximum of the vertical range the x-axis depending
        on if the plot maximum and minimum are above or below the x-axis. If
        the plot maximum is above the x-axis while the minimum is below the
        x-axis, then this option will have no effect.

    logx : bool, optional (default=False)
        If True, then the x-axis is log scale.

    logy : bool, optional (default=False)
        If True, then the y-axis is log scale.

    logx_crop_value : float, optional (default=1E-5)
        If an x-axis is using a logarithmic scale then crop all non-positive
        values with this value.

    logy_crop_value : float, optional (default=1E-5)
        If the y-axis is using a logarithmic scale then crop all non-positive
        values with this value.

    logx_base : float, optional (default=10)
        The base used for the logarithmic scale of the x-axis.

    logy_base : float, optional (default=10)
        The base used for the logarithmic scale of the y-axis.

    Returns
    -------

    xmin, xmax, ymin, ymax : tuple of plot boundaries
        The computed x and y-axis ranges.

    """
    # numpy is optional here; a pure-Python fallback is used without it
    try:
        import numpy as np
        use_numpy = True
    except ImportError:
        use_numpy = False

    if not isinstance(plottables, (list, tuple)):
        plottables = [plottables]

    # running extrema over all plottables
    xmin = float('+inf')
    xmax = float('-inf')
    ymin = float('+inf')
    ymax = float('-inf')

    for h in plottables:

        if isinstance(h, HistStack):
            # a stack's extrema are those of its summed histogram
            h = h.sum

        if not isinstance(h, (_Hist, _Graph1DBase)):
            raise TypeError(
                "unable to determine plot axes ranges "
                "from object of type `{0}`".format(
                    type(h)))

        # y extrema, optionally including the error bars
        if use_numpy:
            y_array_min = y_array_max = np.array(list(h.y()))
            if yerror_in_padding:
                y_array_min = y_array_min - np.array(list(h.yerrl()))
                y_array_max = y_array_max + np.array(list(h.yerrh()))
            _ymin = y_array_min.min()
            _ymax = y_array_max.max()
        else:
            y_array_min = y_array_max = list(h.y())
            if yerror_in_padding:
                y_array_min = multisub(y_array_min, list(h.yerrl()))
                y_array_max = multiadd(y_array_max, list(h.yerrh()))
            _ymin = min(y_array_min)
            _ymax = max(y_array_max)

        # x extrema: point positions (+/- errors) for graphs, the outermost
        # bin edges for histograms
        if isinstance(h, _Graph1DBase):
            if use_numpy:
                x_array_min = x_array_max = np.array(list(h.x()))
                if xerror_in_padding:
                    x_array_min = x_array_min - np.array(list(h.xerrl()))
                    x_array_max = x_array_max + np.array(list(h.xerrh()))
                _xmin = x_array_min.min()
                _xmax = x_array_max.max()
            else:
                x_array_min = x_array_max = list(h.x())
                if xerror_in_padding:
                    x_array_min = multisub(x_array_min, list(h.xerrl()))
                    x_array_max = multiadd(x_array_max, list(h.xerrh()))
                _xmin = min(x_array_min)
                _xmax = max(x_array_max)
        else:
            _xmin = h.xedgesl(1)
            _xmax = h.xedgesh(h.nbins(0))

        # on log axes, non-positive values are cropped to a small positive one
        if logy:
            _ymin = max(logy_crop_value, _ymin)
            _ymax = max(logy_crop_value, _ymax)
        if logx:
            _xmin = max(logx_crop_value, _xmin)
            _xmax = max(logx_crop_value, _xmax)

        if _xmin < xmin:
            xmin = _xmin
        if _xmax > xmax:
            xmax = _xmax
        if _ymin < ymin:
            ymin = _ymin
        if _ymax > ymax:
            ymax = _ymax

    # padding may be a single fraction or a (left, right) / (top, bottom) pair
    if isinstance(xpadding, (list, tuple)):
        if len(xpadding) != 2:
            raise ValueError("xpadding must be of length 2")
        xpadding_left = xpadding[0]
        xpadding_right = xpadding[1]
    else:
        xpadding_left = xpadding_right = xpadding

    if isinstance(ypadding, (list, tuple)):
        if len(ypadding) != 2:
            raise ValueError("ypadding must be of length 2")
        ypadding_top = ypadding[0]
        ypadding_bottom = ypadding[1]
    else:
        ypadding_top = ypadding_bottom = ypadding

    # apply the padding in log space when the axis is logarithmic
    if logx:
        x0, x3 = _limits_helper(
            log(xmin, logx_base), log(xmax, logx_base),
            xpadding_left, xpadding_right)
        xmin = logx_base ** x0
        xmax = logx_base ** x3
    else:
        xmin, xmax = _limits_helper(
            xmin, xmax, xpadding_left, xpadding_right)

    if logy:
        y0, y3 = _limits_helper(
            log(ymin, logy_base), log(ymax, logy_base),
            ypadding_bottom, ypadding_top, snap=False)
        ymin = logy_base ** y0
        ymax = logy_base ** y3
    else:
        ymin, ymax = _limits_helper(
            ymin, ymax, ypadding_bottom, ypadding_top, snap=snap)

    return xmin, xmax, ymin, ymax
def get_band(low_hist, high_hist, middle_hist=None):
    """
    Convert the low and high histograms into a TGraphAsymmErrors centered at
    the middle histogram if not None otherwise the middle between the low and
    high points, to be used to draw a (possibly asymmetric) error band.
    """
    nbins = low_hist.nbins(0)
    band = Graph(nbins)
    for ipoint in range(nbins):
        ibin = ipoint + 1
        x_center = low_hist.x(ibin)
        half_width = low_hist.xwidth(ibin) / 2.
        y_low = low_hist.y(ibin)
        y_high = high_hist.y(ibin)
        if middle_hist is not None:
            y_mid = middle_hist.y(ibin)
        else:
            y_mid = (y_low + y_high) / 2.
        # asymmetric y-errors reaching from the center out to the band edges
        err_up = max(y_high - y_mid, y_low - y_mid, 0)
        err_down = abs(min(y_high - y_mid, y_low - y_mid, 0))
        band.SetPoint(ipoint, x_center, y_mid)
        band.SetPointError(ipoint, half_width, half_width, err_down, err_up)
    return band
def canvases_with(drawable):
    """
    Return a list of all canvases where `drawable` has been painted.

    Note: This function is inefficient because it inspects all objects on all
    canvases, recursively. Avoid calling it if you have a large number of
    canvases and primitives.
    """
    matches = []
    for canvas in ROOT.gROOT.GetListOfCanvases():
        if drawable in find_all_primitives(canvas):
            matches.append(canvas)
    return matches
def find_all_primitives(pad):
    """
    Recursively collect every primitive drawn on a pad, including objects
    attached to another primitive's GetListOfFunctions() and the implicit
    histogram behind primitives that expose GetHistogram().
    """
    found = []
    for prim in pad.GetListOfPrimitives():
        found.append(prim)
        if hasattr(prim, "GetListOfFunctions"):
            found += list(prim.GetListOfFunctions())
        if hasattr(prim, "GetHistogram"):
            hist = prim.GetHistogram()
            if hist:
                found.append(hist)
        if isinstance(prim, ROOT.TPad):
            # sub-pads are searched recursively
            found += find_all_primitives(prim)
    return found
def tick_length_pixels(pad, xaxis, yaxis, xlength, ylength=None):
    """
    Set the tick lengths of both axes in absolute pixels.

    ``ylength`` defaults to ``xlength`` when not given.  ROOT expresses tick
    lengths as fractions of the pad size, so the pixel values are divided by
    the pad's pixel dimensions.
    """
    ylength = xlength if ylength is None else ylength
    xaxis.SetTickLength(xlength / float(pad.height_pixels))
    yaxis.SetTickLength(ylength / float(pad.width_pixels))
| bsd-3-clause | a7db7f629c313c8fd999fb1b8ed53821 | 30.950339 | 79 | 0.554896 | 3.646059 | false | false | false | false |
pybrain2/pybrain2 | pybrain/structure/modules/gaussianlayer.py | 26 | 1579 | __author__ = 'Thomas Rueckstiess, ruecksti@in.tum.de'
from scipy import random
from pybrain.structure.modules.neuronlayer import NeuronLayer
from pybrain.tools.functions import expln, explnPrime
from pybrain.structure.parametercontainer import ParameterContainer
class GaussianLayer(NeuronLayer, ParameterContainer):
    """ A layer implementing a gaussian interpretation of the input. The mean is
    the input, the sigmas are stored in the module parameters."""

    def __init__(self, dim, name=None):
        NeuronLayer.__init__(self, dim, name)
        # initialize sigmas to 0
        ParameterContainer.__init__(self, dim, stdParams = 0)
        # if autoalpha is set to True, alpha_sigma = alpha_mu = alpha*sigma^2
        self.autoalpha = False
        # when disabled, the layer is a pass-through (no noise is added)
        self.enabled = True

    def setSigma(self, sigma):
        """Wrapper method to set the sigmas (the parameters of the module) to a
        certain value. """
        assert len(sigma) == self.indim
        # overwrite the parameter vector in place: zero it, then add sigma
        self._params *= 0
        self._params += sigma

    def _forwardImplementation(self, inbuf, outbuf):
        # sample the output from N(input, expln(params)); expln keeps the
        # effective standard deviation positive
        if not self.enabled:
            outbuf[:] = inbuf
        else:
            outbuf[:] = random.normal(inbuf, expln(self.params))

    def _backwardImplementation(self, outerr, inerr, outbuf, inbuf):
        # accumulate the sigma derivatives and propagate the error back to
        # the mean (the layer's input)
        expln_params = expln(self.params)
        self._derivs += ((outbuf - inbuf)**2 - expln_params**2) / expln_params * explnPrime(self.params)
        inerr[:] = (outbuf - inbuf)
        if not self.autoalpha:
            # normalize by sigma^2 unless the caller couples the learning
            # rates to sigma^2 (autoalpha)
            inerr /= expln_params**2
            self._derivs /= expln_params**2
| bsd-3-clause | 4db5f901acdb186b4c4a909dff056452 | 37.512195 | 104 | 0.647878 | 3.750594 | false | false | false | false |
pybrain2/pybrain2 | pybrain/rl/environments/ode/tasks/acrobot.py | 31 | 1235 | __author__ = 'Thomas Rueckstiess, ruecksti@in.tum.de'
from pybrain.rl.environments import EpisodicTask
from scipy import pi
class GradualRewardTask(EpisodicTask):
    ''' task gives more reward, the higher the bar is.'''

    def __init__(self, environment):
        EpisodicTask.__init__(self, environment)
        # scale factor applied to the (normalized) action before execution
        self.maxPower = 0.5
        # total reward of each finished episode, for later inspection
        self.reward_history = []
        # step counter within the current episode
        self.count = 0
        # normalize to (-1, 1)
        self.sensor_limits = [(-pi, pi), (-20, 20)]
        #self.actor_limits = [(-1, 1)]
        self.actor_limits = None

    def isFinished(self):
        # fixed-length episodes; record the total reward before resetting
        # the step counter
        if self.count > 1000:
            self.count = 0
            self.reward_history.append(self.getTotalReward())
            return True
        else:
            self.count += 1
            return False

    def getReward(self):
        # calculate reward and return reward
        jointSense = self.env.getSensorByName('JointSensor')
        veloSense = self.env.getSensorByName('JointVelocitySensor')
        j = jointSense[0]
        v = veloSense[0]
        # reward grows with the squared joint angle magnitude and is
        # penalized for high joint velocity
        reward = (abs(j)) ** 2 - 0.2 * abs(v)
        # time.sleep(0.001)
        return reward

    def performAction(self, action):
        # scale the normalized action into the allowed power range
        EpisodicTask.performAction(self, action*self.maxPower)
| bsd-3-clause | 76edc76322217c90919d92729803e27e | 29.875 | 67 | 0.591093 | 3.548851 | false | false | false | false |
pybrain2/pybrain2 | examples/supervised/backprop/parityrnn.py | 26 | 2046 | from __future__ import print_function
#!/usr/bin/env python
""" A simple recurrent neural network that detects parity for arbitrary sequences. """
__author__ = 'Tom Schaul (tom@idsia.ch)'
from datasets import ParityDataSet #@UnresolvedImport
from pybrain.supervised.trainers.backprop import BackpropTrainer
from pybrain.structure import RecurrentNetwork, LinearLayer, TanhLayer, BiasUnit, FullConnection
def buildParityNet():
    """Construct a small recurrent network for the parity task, with
    hand-picked weights (scaled by 10 to saturate the tanh units)."""
    net = RecurrentNetwork()
    net.addInputModule(LinearLayer(1, name = 'i'))
    net.addModule(TanhLayer(2, name = 'h'))
    net.addModule(BiasUnit('bias'))
    net.addOutputModule(TanhLayer(1, name = 'o'))
    net.addConnection(FullConnection(net['i'], net['h']))
    net.addConnection(FullConnection(net['bias'], net['h']))
    net.addConnection(FullConnection(net['bias'], net['o']))
    net.addConnection(FullConnection(net['h'], net['o']))
    # the output is fed back into the hidden layer: the previous parity
    # decision is part of the next step's input
    net.addRecurrentConnection(FullConnection(net['o'], net['h']))
    net.sortModules()
    # preset weights that solve the task
    p = net.params
    p[:] = [-0.5, -1.5, 1, 1, -1, 1, 1, -1, 1]
    p *= 10.
    return net
def evalRnnOnSeqDataset(net, DS, verbose = False, silent = False):
    """ evaluate the network on all the sequences of a dataset. """
    # Returns the squared error averaged over all timesteps of all
    # sequences; the network state is reset before each sequence.
    r = 0.
    samples = 0.
    for seq in DS:
        net.reset()
        for i, t in seq:
            res = net.activate(i)
            if verbose:
                print(t, res)
            r += sum((t-res)**2)
            samples += 1
        if verbose:
            print('-'*20)
    r /= samples
    if not silent:
        print('MSE:', r)
    return r
if __name__ == "__main__":
N = buildParityNet()
DS = ParityDataSet()
evalRnnOnSeqDataset(N, DS, verbose = True)
print('(preset weights)')
N.randomize()
evalRnnOnSeqDataset(N, DS)
print('(random weights)')
# Backprop improves the network performance, and sometimes even finds the global optimum.
N.reset()
bp = BackpropTrainer(N, DS, verbose = True)
bp.trainEpochs(5000)
evalRnnOnSeqDataset(N, DS)
print('(backprop-trained weights)')
| bsd-3-clause | 9bdaa7f3ee8ab20ea71827163ae785a1 | 30 | 96 | 0.627566 | 3.376238 | false | false | false | false |
pybrain2/pybrain2 | pybrain/structure/modules/lstm.py | 26 | 5992 | __author__ = 'Daan Wierstra and Tom Schaul'
from scipy import tanh
from pybrain.structure.modules.neuronlayer import NeuronLayer
from pybrain.structure.modules.module import Module
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.tools.functions import sigmoid, sigmoidPrime, tanhPrime
class LSTMLayer(NeuronLayer, ParameterContainer):
"""Long short-term memory cell layer.
The input consists of 4 parts, in the following order:
- input gate
- forget gate
- cell input
- output gate
"""
sequential = True
peepholes = False
maxoffset = 0
# Transfer functions and their derivatives
f = lambda _, x: sigmoid(x)
fprime = lambda _, x: sigmoidPrime(x)
g = lambda _, x: tanh(x)
gprime = lambda _, x: tanhPrime(x)
h = lambda _, x: tanh(x)
hprime = lambda _, x: tanhPrime(x)
def __init__(self, dim, peepholes = False, name = None):
"""
:arg dim: number of cells
:key peepholes: enable peephole connections (from state to gates)? """
self.setArgs(dim = dim, peepholes = peepholes)
# Internal buffers, created dynamically:
self.bufferlist = [
('ingate', dim),
('outgate', dim),
('forgetgate', dim),
('ingatex', dim),
('outgatex', dim),
('forgetgatex', dim),
('state', dim),
('ingateError', dim),
('outgateError', dim),
('forgetgateError', dim),
('stateError', dim),
]
Module.__init__(self, 4*dim, dim, name)
if self.peepholes:
ParameterContainer.__init__(self, dim*3)
self._setParameters(self.params)
self._setDerivatives(self.derivs)
def _setParameters(self, p, owner = None):
ParameterContainer._setParameters(self, p, owner)
dim = self.outdim
self.ingatePeepWeights = self.params[:dim]
self.forgetgatePeepWeights = self.params[dim:dim*2]
self.outgatePeepWeights = self.params[dim*2:]
def _setDerivatives(self, d, owner = None):
ParameterContainer._setDerivatives(self, d, owner)
dim = self.outdim
self.ingatePeepDerivs = self.derivs[:dim]
self.forgetgatePeepDerivs = self.derivs[dim:dim*2]
self.outgatePeepDerivs = self.derivs[dim*2:]
def _isLastTimestep(self):
"""Tell wether the current offset is the maximum offset."""
return self.maxoffset == self.offset
def _forwardImplementation(self, inbuf, outbuf):
self.maxoffset = max(self.offset + 1, self.maxoffset)
dim = self.outdim
# slicing the input buffer into the 4 parts
try:
self.ingatex[self.offset] = inbuf[:dim]
except IndexError:
raise str((self.offset, self.ingatex.shape))
self.forgetgatex[self.offset] = inbuf[dim:dim*2]
cellx = inbuf[dim*2:dim*3]
self.outgatex[self.offset] = inbuf[dim*3:]
# peephole treatment
if self.peepholes and self.offset > 0:
self.ingatex[self.offset] += self.ingatePeepWeights * self.state[self.offset-1]
self.forgetgatex[self.offset] += self.forgetgatePeepWeights * self.state[self.offset-1]
self.ingate[self.offset] = self.f(self.ingatex[self.offset])
self.forgetgate[self.offset] = self.f(self.forgetgatex[self.offset])
self.state[self.offset] = self.ingate[self.offset] * self.g(cellx)
if self.offset > 0:
self.state[self.offset] += self.forgetgate[self.offset] * self.state[self.offset-1]
if self.peepholes:
self.outgatex[self.offset] += self.outgatePeepWeights * self.state[self.offset]
self.outgate[self.offset] = self.f(self.outgatex[self.offset])
outbuf[:] = self.outgate[self.offset] * self.h(self.state[self.offset])
    def _backwardImplementation(self, outerr, inerr, outbuf, inbuf):
        """One backward (BPTT) step: turn the output error into gate errors,
        the cell-input error, and accumulate peephole weight derivatives."""
        dim = self.outdim
        cellx = inbuf[dim*2:dim*3]
        # outgate error: squashing derivative times output error times cell output
        self.outgateError[self.offset] = self.fprime(self.outgatex[self.offset]) * outerr * self.h(self.state[self.offset])
        self.stateError[self.offset] = outerr * self.outgate[self.offset] * self.hprime(self.state[self.offset])
        if not self._isLastTimestep():
            # carry the state error back from the next timestep through its forget gate
            self.stateError[self.offset] += self.stateError[self.offset+1] * self.forgetgate[self.offset+1]
            if self.peepholes:
                self.stateError[self.offset] += self.ingateError[self.offset+1] * self.ingatePeepWeights
                self.stateError[self.offset] += self.forgetgateError[self.offset+1] * self.forgetgatePeepWeights
        if self.peepholes:
            # outgate peephole contributes to the current state error
            self.stateError[self.offset] += self.outgateError[self.offset] * self.outgatePeepWeights
        cellError = self.ingate[self.offset] * self.gprime(cellx) * self.stateError[self.offset]
        if self.offset > 0:
            # no previous state exists at the very first timestep
            self.forgetgateError[self.offset] = self.fprime(self.forgetgatex[self.offset]) * self.stateError[self.offset] * self.state[self.offset-1]
        self.ingateError[self.offset] = self.fprime(self.ingatex[self.offset]) * self.stateError[self.offset] * self.g(cellx)
        # compute derivatives
        if self.peepholes:
            self.outgatePeepDerivs += self.outgateError[self.offset] * self.state[self.offset]
            if self.offset > 0:
                self.ingatePeepDerivs += self.ingateError[self.offset] * self.state[self.offset-1]
                self.forgetgatePeepDerivs += self.forgetgateError[self.offset] * self.state[self.offset-1]
        # write the four error slices back into the input-error buffer
        inerr[:dim] = self.ingateError[self.offset]
        inerr[dim:dim*2] = self.forgetgateError[self.offset]
        inerr[dim*2:dim*3] = cellError
        inerr[dim*3:] = self.outgateError[self.offset]
def whichNeuron(self, inputIndex = None, outputIndex = None):
if inputIndex != None:
return inputIndex % self.dim
if outputIndex != None:
return outputIndex
| bsd-3-clause | 0c475615b54b4946f96111227ba02541 | 39.214765 | 149 | 0.633845 | 3.4417 | false | false | false | false |
pybrain2/pybrain2 | pybrain/rl/environments/functions/transformations.py | 26 | 9364 | __author__ = 'Tom Schaul, tom@idsia.ch'
from scipy import rand, dot, power, diag, eye, sqrt, sin, log, exp, ravel, clip, arange
from scipy.linalg import orth, norm, inv
from random import shuffle, random, gauss
from pybrain.rl.environments.functions.function import FunctionEnvironment
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.rl.environments.fitnessevaluator import FitnessEvaluator
from pybrain.utilities import sparse_orth, dense_orth
from pybrain.rl.environments.functions.multiobjective import MultiObjectiveFunction
def oppositeFunction(basef):
    """Return the negation of a fitness function or plain callable.

    For :class:`FitnessEvaluator` instances a wrapper evaluator of a matching
    type is built: its ``f`` negates the base function, the desired value is
    flipped and the minimization flag inverted.  For plain callables a
    negating lambda is returned.
    """
    if isinstance(basef, FitnessEvaluator):
        if isinstance(basef, FunctionEnvironment):
            ''' added by JPQ '''
            if isinstance(basef, MultiObjectiveFunction):
                res = MultiObjectiveFunction()
            else:
                # ---
                res = FunctionEnvironment(basef.xdim, basef.xopt)
        else:
            res = FitnessEvaluator()
        res.f = lambda x: -basef.f(x)
        # FIX: idiomatic identity check instead of "not ... is None"
        if basef.desiredValue is not None:
            res.desiredValue = -basef.desiredValue
        res.toBeMinimized = not basef.toBeMinimized
        return res
    else:
        return lambda x: -basef(x)
class TranslateFunction(FunctionEnvironment):
    """ Wrap a function and change the position of its optimum by a
    translation vector. """

    def __init__(self, basef, distance=0.1, offset=None):
        """ by default the offset is random, with a distance of 0.1 to the old one

        :param basef: the function to be translated
        :param distance: norm of the random offset (only used when offset is None)
        :param offset: explicit translation vector
        """
        FunctionEnvironment.__init__(self, basef.xdim, basef.xopt)
        # FIX: compare to None with identity, not equality (PEP 8)
        if offset is None:
            self._offset = rand(basef.xdim)
            self._offset *= distance / norm(self._offset)
        else:
            self._offset = offset
        self.xopt += self._offset
        self.desiredValue = basef.desiredValue
        self.toBeMinimized = basef.toBeMinimized
        def tf(x):
            if isinstance(x, ParameterContainer):
                x = x.params
            # evaluating the wrapper at x equals evaluating basef at x - offset
            return basef.f(x - self._offset)
        self.f = tf
class RotateFunction(FunctionEnvironment):
    """ make the dimensions non-separable, by applying a matrix transformation to
    x before it is given to the function """

    def __init__(self, basef, rotMat=None):
        """ by default the rotation matrix is random.

        :param basef: the function to be rotated
        :param rotMat: explicit (orthogonal) rotation matrix
        """
        FunctionEnvironment.__init__(self, basef.xdim, basef.xopt)
        # FIX: compare to None with identity, not equality (PEP 8)
        if rotMat is None:
            # make a random orthogonal rotation matrix
            self._M = orth(rand(basef.xdim, basef.xdim))
        else:
            self._M = rotMat
        self.desiredValue = basef.desiredValue
        self.toBeMinimized = basef.toBeMinimized
        # the optimum moves to the pre-image of the original optimum
        self.xopt = dot(inv(self._M), self.xopt)
        def rf(x):
            if isinstance(x, ParameterContainer):
                x = x.params
            return basef.f(dot(x, self._M))
        self.f = rf
def penalize(x, distance=5):
    """Quadratic boundary penalty: sum of squared amounts by which |x_i|
    exceeds ``distance`` (zero inside the box [-distance, distance]^n)."""
    magnitudes = abs(x)
    excess = clip(magnitudes - distance, 0, magnitudes.max())
    return dot(excess, excess)
    #return sum([max(0, abs(xi) - distance) ** 2 for xi in x])
class SoftConstrainedFunction(FunctionEnvironment):
    """ Soft constraint handling through a penalization term. """

    penalized = True

    def __init__(self, basef, distance=5, penalizationFactor=1.):
        """
        :param basef: the function to be constrained
        :param distance: half-width of the unpenalized box
        :param penalizationFactor: weight of the penalty term
        """
        FunctionEnvironment.__init__(self, basef.xdim, basef.xopt)
        self.desiredValue = basef.desiredValue
        self.toBeMinimized = basef.toBeMinimized
        if basef.penalized:
            # already OK
            self.f = basef.f
        else:
            if not self.toBeMinimized:
                # for maximization problems, penalties must be subtracted
                penalizationFactor *= -1
            def scf(x):
                if isinstance(x, ParameterContainer):
                    x = x.params
                return basef.f(x) + penalize(x, distance) * penalizationFactor
            self.f = scf
def generateDiags(alpha, dim, shuffled=False):
    """Return a ``dim x dim`` diagonal conditioning matrix with entries
    ``alpha ** (i / (2 * dim - 2))`` for i in 0..dim-1, spanning
    [1, sqrt(alpha)].

    :param alpha: conditioning number
    :param dim: matrix dimension
    :param shuffled: if True, randomly permute the diagonal entries
    """
    if dim == 1:
        # FIX: the general formula divides by zero for dim == 1;
        # the exponent degenerates to 0, so the single entry is 1.
        return diag([1.])
    diags = [power(alpha, i / (2 * dim - 2.)) for i in range(dim)]
    if shuffled:
        shuffle(diags)
    return diag(diags)
class BBOBTransformationFunction(FunctionEnvironment):
    """ Reimplementation of the relatively complex set of function and
    variable transformations, and their non-trivial combinations from BBOB 2010.
    But in clean, reusable code.
    """

    def __init__(self, basef,
                 translate=True,
                 rotate=False,
                 conditioning=None,
                 asymmetry=None,
                 oscillate=False,
                 penalized=0,
                 desiredValue=1e-8,
                 gnoise=None,
                 unoise=None,
                 cnoise=None,
                 sparse=True,
                 ):
        # :param basef: base function to be transformed
        # gnoise / unoise / cnoise select the Gaussian, uniform or Cauchy
        # noise model, respectively (mutually exclusive, first one wins).
        FunctionEnvironment.__init__(self, basef.xdim, basef.xopt)
        self._name = basef.__class__.__name__
        self.desiredValue = desiredValue
        self.toBeMinimized = basef.toBeMinimized
        # sparse matrices only pay off in high dimensions
        if self.xdim < 500:
            sparse = False
        if sparse:
            try:
                from scipy.sparse import csc_matrix
            except:
                # scipy.sparse unavailable: fall back to dense matrices
                sparse = False
        if translate:
            self.xopt = (rand(self.xdim) - 0.5) * 9.8
        # build the linear prefix transformation (conditioning and rotations)
        if conditioning:
            prefix = generateDiags(conditioning, self.xdim)
            if sparse:
                prefix = csc_matrix(prefix)
                if rotate:
                    prefix = prefix * sparse_orth(self.xdim)
                if oscillate or not asymmetry:
                    prefix = sparse_orth(self.xdim) * prefix
            else:
                if rotate:
                    prefix = dot(prefix, dense_orth(self.xdim))
                if oscillate or not asymmetry:
                    prefix = dot(dense_orth(self.xdim), prefix)
        elif rotate and asymmetry and not oscillate:
            if sparse:
                prefix = sparse_orth(self.xdim)
            else:
                prefix = dense_orth(self.xdim)
        elif sparse:
            prefix = None
        else:
            prefix = eye(self.xdim)
        if penalized != 0:
            if self.penalized:
                # base function already penalizes; avoid doing it twice
                penalized = 0
            else:
                self.penalized = True
        # combine transformations (applied innermost-first: tmp1 -> tmp2 -> tmp3)
        if rotate:
            if sparse:
                r = sparse_orth(self.xdim)
                tmp1 = lambda x: ravel(x * r)
            else:
                r = dense_orth(self.xdim)
                tmp1 = lambda x: dot(x, r)
        else:
            tmp1 = lambda x: x
        if oscillate:
            tmp2 = lambda x: BBOBTransformationFunction.oscillatify(tmp1(x))
        else:
            tmp2 = tmp1
        if asymmetry is not None:
            tmp3 = lambda x: BBOBTransformationFunction.asymmetrify(tmp2(x), asymmetry)
        else:
            tmp3 = tmp2
        # noise
        ntmp = None
        if gnoise:
            ntmp = lambda f: f * exp(gnoise * gauss(0, 1))
        elif unoise:
            alpha = 0.49 * (1. / self.xdim) * unoise
            ntmp = lambda f: f * power(random(), unoise) * max(1, power(1e9 / (f + 1e-99), alpha * random()))
        elif cnoise:
            alpha, beta = cnoise
            ntmp = lambda f: f + alpha * max(0, 1000 * (random() < beta) * gauss(0, 1) / (abs(gauss(0, 1)) + 1e-199))
        def noisetrans(f):
            # noise is only applied above the target precision
            if ntmp is None or f < 1e-8:
                return f
            else:
                return ntmp(f) + 1.01e-8
        if sparse:
            if prefix is None:
                tmp4 = lambda x: tmp3(x - self.xopt)
            else:
                tmp4 = lambda x: ravel(prefix * tmp3(x - self.xopt))
        else:
            tmp4 = lambda x: dot(prefix, tmp3(x - self.xopt))
        self.f = lambda x: (noisetrans(basef.f(tmp4(x)))
                            + penalized * penalize(x))

    @staticmethod
    def asymmetrify(x, beta=0.2):
        # vectorized asymmetry transformation (BBOB T_asy); the commented
        # loop below is the readable reference version
        dim = len(x)
        return x * (x<=0) + (x>0) * exp((1+beta*arange(dim)/(dim-1.)*sqrt(abs(x))) * log(abs(x)+1e-100))
        #res = x.copy()
        #for i, xi in enumerate(x):
        #    if xi > 0:
        #        res[i] = power(xi, 1 + beta * i / (dim - 1.) * sqrt(xi))
        #return res

    @staticmethod
    def _oscillatify(x):
        # loop-based reference implementation of the oscillation transform
        if isinstance(x, float):
            res = [x]
        else:
            res = x.copy()
        for i, xi in enumerate(res):
            if xi == 0:
                continue
            elif xi > 0:
                s = 1
                c1 = 10
                c2 = 7.9
            else:
                s = 1
                c1 = 5.5
                c2 = 3.1
            res[i] = s * exp(log(abs(xi)) + 0.049 * (sin(c1 * xi) + sin(c2 * xi)))
        if isinstance(x, float):
            return res[0]
        else:
            return res

    @staticmethod
    def oscillatify(x):
        # vectorized oscillation transformation (BBOB T_osz)
        return exp(log(abs(x)+1e-100)
                   + (x>0) * 0.049 * (sin(10 * x) + sin(7.9 * x))
                   + (x<0) * 0.049 * (sin(5.5 * x) + sin(3.1 * x)))
| bsd-3-clause | 9341b8f303e3b07d2c5705c60e104644 | 33.557196 | 117 | 0.504485 | 3.90818 | false | false | false | false |
pybrain2/pybrain2 | pybrain/rl/environments/mazes/polarmaze.py | 25 | 1644 | __author__ = 'Tom Schaul, tom@idsia.ch'
from scipy import zeros
from random import choice, random
from .maze import Maze
class PolarMaze(Maze):
    """ Mazes with the emphasis on Perseus: allow him to turn, go forward or backward.
    Thus there are 4 states per position.
    """

    actions = 5

    # action codes
    Stay = 0
    Forward = 1
    TurnAround = 2
    TurnLeft = 3
    TurnRight = 4

    allActions = [Stay, Forward, TurnAround, TurnLeft, TurnRight]

    def reset(self):
        """Reset position (via Maze) and pick a random initial orientation."""
        Maze.reset(self)
        self.perseusDir = choice(list(range(4)))

    def performAction(self, action):
        """Execute one action, possibly replaced by a random one with
        probability ``stochAction``."""
        if self.stochAction > 0:
            if random() < self.stochAction:
                action = choice(list(range(len(PolarMaze.allActions))))
        act = PolarMaze.allActions[action]
        self.bang = False
        if act == self.Forward:
            tmp = self._moveInDir(self.perseus, Maze.allActions[self.perseusDir])
            if self.mazeTable[tmp] == False:
                # free field: move there
                self.perseus = tmp
            else:
                # bumped into a wall
                self.bang = True
        elif act == self.TurnLeft:
            self.perseusDir = (self.perseusDir + 1) % 4
        elif act == self.TurnRight:
            self.perseusDir = (self.perseusDir - 1) % 4
        elif act == self.TurnAround:
            self.perseusDir = (self.perseusDir + 2) % 4

    def getSensors(self):
        """Return the 4 surrounding-field observations, rotated into the
        agent's own frame of reference."""
        obs = Maze.getSensors(self)
        res = zeros(4)
        res[:4 - self.perseusDir] = obs[self.perseusDir:]
        res[4 - self.perseusDir:] = obs[:self.perseusDir]
        return res

    def __str__(self):
        return Maze.__str__(self) + '(dir:' + str(self.perseusDir) + ')'
| bsd-3-clause | f99abf1168db084ed16d4ed9465c3ba0 | 28.890909 | 86 | 0.579684 | 3.307847 | false | false | false | false |
pybrain2/pybrain2 | pybrain/rl/environments/shipsteer/northwardtask.py | 25 | 1592 | __author__ = 'Martin Felder, felder@in.tum.de'
from pybrain.rl.environments import EpisodicTask
from .shipsteer import ShipSteeringEnvironment
class GoNorthwardTask(EpisodicTask):
    """ The task of steering a ship northward at maximal speed for a fixed
    number of timesteps.  (FIX: the previous docstring was a copy-paste
    leftover about pole balancing.) """

    def __init__(self, env=None, maxsteps=1000):
        """
        :key env: (optional) an instance of a ShipSteeringEnvironment (or a subclass thereof)
        :key maxsteps: maximal number of steps (default: 1000)
        """
        # FIX: compare to None with identity, not equality (PEP 8)
        if env is None:
            env = ShipSteeringEnvironment(render=False)
        EpisodicTask.__init__(self, env)
        self.N = maxsteps
        self.t = 0

        # scale sensors
        # [h, hdot, v]
        self.sensor_limits = [(-180.0, +180.0), (-180.0, +180.0), (-10.0, +40.0)]

        # actions: thrust, rudder
        self.actor_limits = [(-1.0, +2.0), (-90.0, +90.0)]

        # scale reward over episode, such that max. return = 100
        self.rewardscale = 100. / maxsteps / self.sensor_limits[2][1]

    def reset(self):
        EpisodicTask.reset(self)
        self.t = 0

    def performAction(self, action):
        self.t += 1
        EpisodicTask.performAction(self, action)

    def isFinished(self):
        # the episode ends after exactly N timesteps
        return self.t >= self.N

    def getReward(self):
        # only reward speed while heading (roughly) north
        if abs(self.env.getHeading()) < 5.:
            return self.env.getSpeed() * self.rewardscale
        else:
            return 0

    def setMaxLength(self, n):
        self.N = n
| bsd-3-clause | 0453edd4d6296140cb3df1f36f268efd | 29.615385 | 93 | 0.552764 | 3.537778 | false | false | false | false |
pybrain2/pybrain2 | pybrain/structure/modules/gate.py | 25 | 3451 | # -*- coding: utf-8 -*-
__author__ = 'Justin S Bayer, bayer.justin@googlemail.com'
__version__ = '$Id$'
from pybrain.structure.modules.module import Module
from pybrain.structure.modules.neuronlayer import NeuronLayer
from pybrain.tools.functions import sigmoid, sigmoidPrime
class MultiplicationLayer(NeuronLayer):
    """Layer that multiplies its two input halves elementwise: with size
    ``dim`` it takes 2 * dim inputs and output i is I_i * I_(i + dim)."""

    def __init__(self, dim, name=None):
        Module.__init__(self, 2 * dim, dim, name)
        self.setArgs(dim=dim, name=self.name)

    def _forwardImplementation(self, inbuf, outbuf):
        d = self.outdim
        outbuf += inbuf[:d] * inbuf[d:]

    def _backwardImplementation(self, outerr, inerr, outbuf, inbuf):
        # d(a*b)/da = b and d(a*b)/db = a
        d = self.outdim
        inerr[:d] += outerr * inbuf[d:]
        inerr[d:] += outerr * inbuf[:d]
class GateLayer(NeuronLayer):
    """Layer that implements pairwise input multiplication, with one element of
    the pair being squashed.

    If a GateLayer of size n is created, it will have 2 * n inputs and n
    outputs. The i'th output is calculated as sigmoid(I_i) * I_(i + n) where I
    is the vector of inputs."""

    def __init__(self, dim, name=None):
        Module.__init__(self, 2 * dim, dim, name)
        self.setArgs(dim=dim, name=self.name)

    def _forwardImplementation(self, inbuf, outbuf):
        outbuf += sigmoid(inbuf[:self.outdim]) * inbuf[self.outdim:]

    def _backwardImplementation(self, outerr, inerr, outbuf, inbuf):
        # error wrt the gate inputs: squashing derivative times gated value
        inerr[:self.outdim] += (sigmoidPrime(inbuf[:self.outdim])
                                * inbuf[self.outdim:]
                                * outerr)
        # error wrt the gated values: just the squashed gate activation
        inerr[self.outdim:] += (sigmoid(inbuf[:self.outdim])
                                * outerr)
class DoubleGateLayer(NeuronLayer):
    """Layer that implements a continuous if-then-else.

    If a DoubleGateLayer of size n is created, it will have 2 * n inputs and
    2 * n outputs. The i'th output is calculated as sigmoid(I_i) * I_(i + n) for
    i < n and as (1 - sigmoid(I_i)) * I_(i + n) for i >= n where I is the vector
    of inputs."""

    def __init__(self, dim, name=None):
        Module.__init__(self, 2 * dim, 2 * dim, name)
        self.setArgs(dim=dim, name=self.name)

    def _forwardImplementation(self, inbuf, outbuf):
        dim = self.indim // 2
        outbuf[:dim] += sigmoid(inbuf[:dim]) * inbuf[dim:]
        outbuf[dim:] += (1 - sigmoid(inbuf[:dim])) * inbuf[dim:]

    def _backwardImplementation(self, outerr, inerr, outbuf, inbuf):
        dim = self.indim // 2
        in0 = inbuf[:dim]    # gate inputs
        in1 = inbuf[dim:]    # gated values
        out0 = outerr[:dim]
        out1 = outerr[dim:]
        inerr[:dim] += sigmoidPrime(in0) * in1 * out0
        inerr[dim:] += sigmoid(in0) * out0
        inerr[:dim] -= sigmoidPrime(in0) * in1 * out1
        inerr[dim:] += (1 - sigmoid(in0)) * out1
class SwitchLayer(NeuronLayer):
    """Soft switch: for ``dim`` inputs it produces 2 * dim outputs, the first
    half being sigmoid(x) and the second half 1 - sigmoid(x)."""

    def __init__(self, dim, name=None):
        Module.__init__(self, dim, dim * 2, name)
        self.setArgs(dim=dim, name=self.name)

    def _forwardImplementation(self, inbuf, outbuf):
        outbuf[:self.indim] += sigmoid(inbuf)
        outbuf[self.indim:] += 1 - sigmoid(inbuf)

    def _backwardImplementation(self, outerr, inerr, outbuf, inbuf):
        # the two halves receive opposite-signed gradients
        inerr += sigmoidPrime(inbuf) * outerr[:self.indim]
        inerr -= sigmoidPrime(inbuf) * outerr[self.indim:]
| bsd-3-clause | 726faa132a61b1336806793bd8b44744 | 34.57732 | 80 | 0.616053 | 3.363548 | false | false | false | false |
pybrain2/pybrain2 | examples/rl/valuebased/td.py | 30 | 1972 | #!/usr/bin/env python
__author__ = 'Thomas Rueckstiess, ruecksti@in.tum.de'
""" This example demonstrates how to use the discrete Temporal Difference
Reinforcement Learning algorithms (SARSA, Q, Q(lambda)) in a classical
fully observable MDP maze task. The goal point is the top right free
field. """
from scipy import * #@UnusedWildImport
import pylab
from pybrain.rl.environments.mazes import Maze, MDPMazeTask
from pybrain.rl.learners.valuebased import ActionValueTable
from pybrain.rl.agents import LearningAgent
from pybrain.rl.learners import Q, QLambda, SARSA #@UnusedImport
from pybrain.rl.explorers import BoltzmannExplorer #@UnusedImport
from pybrain.rl.experiments import Experiment
# create the maze with walls (1)
envmatrix = array([[1, 1, 1, 1, 1, 1, 1, 1, 1],
                   [1, 0, 0, 1, 0, 0, 0, 0, 1],
                   [1, 0, 0, 1, 0, 0, 1, 0, 1],
                   [1, 0, 0, 1, 0, 0, 1, 0, 1],
                   [1, 0, 0, 1, 0, 1, 1, 0, 1],
                   [1, 0, 0, 0, 0, 0, 1, 0, 1],
                   [1, 1, 1, 1, 1, 1, 1, 0, 1],
                   [1, 0, 0, 0, 0, 0, 0, 0, 1],
                   [1, 1, 1, 1, 1, 1, 1, 1, 1]])
env = Maze(envmatrix, (7, 7))

# create task
task = MDPMazeTask(env)

# create value table (81 states on the 9x9 grid, 4 actions) and initialize with ones
table = ActionValueTable(81, 4)
table.initialize(1.)

# create agent with controller and learner - use SARSA(), Q() or QLambda() here
learner = SARSA()

# standard exploration is e-greedy, but a different type can be chosen as well
# learner.explorer = BoltzmannExplorer()

# create agent
agent = LearningAgent(table, learner)

# create experiment
experiment = Experiment(task, agent)

# prepare plotting
pylab.gray()
pylab.ion()

for i in range(1000):

    # interact with the environment (here in batch mode)
    experiment.doInteractions(100)
    agent.learn()
    agent.reset()

    # and draw the table (greedy value per state, reshaped onto the grid)
    pylab.pcolor(table.params.reshape(81,4).max(1).reshape(9,9))
    pylab.draw()
| bsd-3-clause | 8da42ab641707cc98db511c9f46127b5 | 29.338462 | 79 | 0.636917 | 3.029186 | false | false | false | false |
pybrain2/pybrain2 | pybrain/rl/environments/ode/instances/johnnie.py | 31 | 1429 | __author__ = 'Frank Sehnke, sehnke@in.tum.de'
from pybrain.rl.environments.ode import ODEEnvironment, sensors, actuators
import imp
from scipy import array
class JohnnieEnvironment(ODEEnvironment):
    """ODE simulation environment for the Johnnie humanoid robot (11 DoF)."""

    def __init__(self, renderer=True, realtime=False, ip="127.0.0.1", port="21590", buf='16384'):
        ODEEnvironment.__init__(self, renderer, realtime, ip, port, buf)
        # load model file
        self.loadXODE(imp.find_module('pybrain')[1] + "/rl/environments/ode/models/johnnie.xode")

        # standard sensors and actuators
        self.addSensor(sensors.JointSensor())
        self.addSensor(sensors.JointVelocitySensor())
        self.addActuator(actuators.JointActuator())

        #set act- and obsLength, the min/max angles and the relative max touques of the joints
        self.actLen = self.indim
        self.obsLen = len(self.getSensors())
        #ArmLeft, ArmRight, Hip, PevelLeft, PevelRight, TibiaLeft, TibiaRight, KneeLeft, KneeRight, FootLeft, FootRight
        self.tourqueList = array([0.2, 0.2, 0.2, 0.5, 0.5, 2.0, 2.0, 2.0, 2.0, 0.5, 0.5],)
        self.cHighList = array([1.0, 1.0, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5, 0.25, 0.25],)
        self.cLowList = array([-0.5, -0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.25, -0.25],)

        self.stepsPerAction = 1
if __name__ == '__main__' :
    # smoke test: run the simulation indefinitely, resetting every 1000 steps
    w = JohnnieEnvironment()
    while True:
        w.step()
        if w.stepCounter == 1000: w.reset()
| bsd-3-clause | 3445d223dbcfc69191fd7802e18d95a5 | 42.30303 | 119 | 0.627712 | 2.764023 | false | false | false | false |
pybrain2/pybrain2 | pybrain/optimization/distributionbased/snes.py | 25 | 4415 | from __future__ import print_function
__author__ = 'Tom Schaul, tom@idsia.ch'
from pybrain.optimization.distributionbased.distributionbased import DistributionBasedOptimizer
from scipy import dot, exp, log, sqrt, floor, ones, randn
from pybrain.tools.rankingfunctions import HansenRanking
class SNES(DistributionBasedOptimizer):
    """ Separable NES (diagonal).
    [As described in Schaul, Glasmachers and Schmidhuber (GECCO'11)]
    """

    # parameters, which can be set but have a good (adapted) default value
    centerLearningRate = 1.0
    covLearningRate = None
    batchSize = None
    uniformBaseline = True
    shapingFunction = HansenRanking()
    initVariance = 1.

    # fixed settings
    mustMaximize = True
    storeAllEvaluations = True
    storeAllEvaluated = True

    # for very long runs, we don't want to run out of memory
    clearStorage = False

    # minimal setting where to abort the search
    varianceCutoff = 1e-20

    def _stoppingCriterion(self):
        # stop when the base criterion triggers, or when all per-dimension
        # standard deviations have collapsed below the cutoff
        if DistributionBasedOptimizer._stoppingCriterion(self):
            return True
        elif max(abs(self._sigmas)) < self.varianceCutoff:
            return True
        else:
            return False

    def _initLearningRate(self):
        """ Careful, robust default value. """
        return 0.6 * (3 + log(self.numParameters)) / 3 / sqrt(self.numParameters)

    def _initBatchSize(self):
        """ as in CMA-ES """
        return 4 + int(floor(3 * log(self.numParameters)))

    def _additionalInit(self):
        if self.covLearningRate is None:
            self.covLearningRate = self._initLearningRate()
        if self.batchSize is None:
            self.batchSize = self._initBatchSize()
        self._center = self._initEvaluable.copy()
        # one standard deviation per dimension (separable / diagonal model)
        self._sigmas = ones(self.numParameters) * self.initVariance

    @property
    def _population(self):
        if self._wasUnwrapped:
            return [self._allEvaluated[i].params for i in self._pointers]
        else:
            return [self._allEvaluated[i] for i in self._pointers]

    @property
    def _currentEvaluations(self):
        fits = [self._allEvaluations[i] for i in self._pointers]
        if self._wasOpposed:
            fits = [-x for x in fits]
        return fits

    def _produceSample(self):
        # a standard-normal sample in the distribution's own coordinates
        return randn(self.numParameters)

    def _sample2base(self, sample):
        """ How does a sample look in the outside (base problem) coordinate system? """
        return self._sigmas * sample + self._center

    def _base2sample(self, e):
        """ How does the point look in the present one reference coordinates? """
        return (e - self._center) / self._sigmas

    def _produceSamples(self):
        """ Append batch size new samples and evaluate them. """
        if self.clearStorage:
            self._allEvaluated = []
            self._allEvaluations = []
        tmp = [self._sample2base(self._produceSample()) for _ in range(self.batchSize)]
        list(map(self._oneEvaluation, tmp))
        self._pointers = list(range(len(self._allEvaluated) - self.batchSize, len(self._allEvaluated)))

    def _learnStep(self):
        # produce samples
        self._produceSamples()
        samples = list(map(self._base2sample, self._population))

        #compute utilities
        utilities = self.shapingFunction(self._currentEvaluations)
        utilities /= sum(utilities) # make the utilities sum to 1
        if self.uniformBaseline:
            utilities -= 1. / self.batchSize

        # update center
        dCenter = dot(utilities, samples)
        self._center += self.centerLearningRate * self._sigmas * dCenter

        # update variances (natural gradient on log-sigmas)
        covGradient = dot(utilities, [s ** 2 - 1 for s in samples])
        dA = 0.5 * self.covLearningRate * covGradient
        self._sigmas = self._sigmas * exp(dA)
if __name__ == "__main__":
from pybrain.rl.environments.functions.unimodal import ElliFunction
print((SNES(ElliFunction(100), ones(100), verbose=True).learn()))
| bsd-3-clause | c9fb8bab04dc4c8a15e77477a08978da | 36.423729 | 123 | 0.579841 | 4.253372 | false | false | false | false |
pybrain2/pybrain2 | examples/rl/environments/ode/johnnie_reinforce.py | 30 | 2899 | #!/usr/bin/env python
#########################################################################
# Reinforcement Learning with PGPE on the Johnnie Environment
#
# The Johnnie robot is a body structure with 11 DoF .
# Complex balancing tasks can be learned with this environment.
#
# Control/Actions:
# The agent can control all 11 DOF of the robot.
#
# A wide variety of sensors are available for observation and reward:
# - 11 angles of joints
# - 11 angle velocitys of joints
# - Number of foot parts that have contact to floor
# - Height sensor in head for reward calculation
# - Rotation sensor in 3 dimesnions
#
# Task available are:
# - StandTask, agent has not to fall by himself
# - Robust standing Task, agent has not to fall even then hit by reasonable random forces
# - JumpTask, agent has to maximize the head-vertical position during the episode
#
# Requirements: pylab (for plotting only). If not available, comment the
# last 3 lines out
# Author: Frank Sehnke, sehnke@in.tum.de
#########################################################################
__author__ = "Frank Sehnke"
__version__ = '$Id$'
from pybrain.tools.example_tools import ExTools
from pybrain.rl.environments.ode import JohnnieEnvironment
from pybrain.rl.environments.ode.tasks import StandingTask
from pybrain.structure.modules.tanhlayer import TanhLayer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.rl.agents import LearningAgent
from pybrain.rl.learners import Reinforce
from pybrain.rl.experiments import EpisodicExperiment
hiddenUnits = 4
batch=2 #number of samples per learning step
prnts=1 #number of learning steps after results are printed
# FIX: use integer division; under Python 3, "/" yields a float and
# range(epis) would raise a TypeError.
epis=5000000//batch//prnts #number of roleouts
numbExp=10 #number of experiments
et = ExTools(batch, prnts, kind = "learner")#tool for printing and plotting

for runs in range(numbExp):
    # create environment
    #Options: Bool(OpenGL), Bool(Realtime simu. while client is connected), ServerIP(default:localhost), Port(default:21560)
    env = JohnnieEnvironment()
    # create task
    task = StandingTask(env)
    # create controller network
    net = buildNetwork(len(task.getObservation()), hiddenUnits, env.actLen, outclass=TanhLayer)
    # create agent with controller and learner (and its options)
    agent = LearningAgent(net, Reinforce())
    et.agent = agent
    # create the experiment
    experiment = EpisodicExperiment(task, agent)

    #Do the experiment
    for updates in range(epis):
        for i in range(prnts):
            experiment.doEpisodes(batch)
        # report the return of the most recent episode
        state, action, reward = agent.learner.dataset.getSequence(agent.learner.dataset.getNumSequences()-1)
        et.printResults(reward.sum(), runs, updates)
    et.addExps()
et.showExps()
#To view what the simulation is doing at the moment, go to pybrain/rl/environments/ode/ and start viewer.py (python-openGL musst be installed, see PyBrain documentation)
| bsd-3-clause | 7353ec26fb0ef560292f62974573c075 | 41.632353 | 169 | 0.714384 | 3.769831 | false | false | false | false |
pybrain2/pybrain2 | pybrain/rl/environments/flexcube/environment.py | 25 | 5905 | __author__ = 'Frank Sehnke, sehnke@in.tum.de'
from . import sensors
import threading
from pybrain.utilities import threaded
from pybrain.tools.networking.udpconnection import UDPServer
from pybrain.rl.environments.environment import Environment
from scipy import ones, zeros, array, clip, arange, sqrt
from time import sleep
class FlexCubeEnvironment(Environment):
    """Mass-spring "FlexCube" simulation: 8 point masses at the corners of a
    cube, connected by 12 actuated edge springs, with gravity, damping and a
    ground plane.  Optionally streams its state to viewers over UDP."""

    def __init__(self, render=True, realtime=False, ip="127.0.0.1", port="21560"):
        # initialize base class
        self.render = render
        if self.render:
            self.updateDone = True
            self.updateLock = threading.Lock()
            self.server = UDPServer(ip, port)
        self.actLen = 12
        self.mySensors = sensors.Sensors(["EdgesReal"])
        # edge, face-diagonal and space-diagonal rest lengths of the cube
        self.dists = array([20.0, sqrt(2.0) * 20, sqrt(3.0) * 20])
        self.gravVect = array([0.0, -100.0, 0.0])
        self.centerOfGrav = zeros((1, 3), float)
        self.pos = ones((8, 3), float)
        self.vel = zeros((8, 3), float)
        self.SpringM = ones((8, 8), float)
        self.d = 60.0                 # spring constant
        self.dt = 0.02                # integration timestep
        self.startHight = 10.0
        self.dumping = 0.4            # velocity damping factor
        self.fraktMin = 0.7
        self.fraktMax = 1.3
        # allowed range of actuated spring lengths
        self.minAkt = self.dists[0] * self.fraktMin
        self.maxAkt = self.dists[0] * self.fraktMax
        self.reset()
        self.count = 0
        self.setEdges()
        self.act(array([20.0] * 12))
        self.euler()
        self.realtime = realtime
        self.step = 0

    def closeSocket(self):
        """Close the UDP input socket and wait for pending traffic."""
        self.server.UDPInSock.close()
        sleep(10)

    def setEdges(self):
        """Enumerate the 12 cube edges as corner index pairs (corners whose
        binary coordinates differ in exactly one bit)."""
        self.edges = zeros((12, 2), int)
        count = 0
        c1 = 0
        for i in range(2):
            for j in range(2):
                for k in range(2):
                    c2 = 0
                    for i2 in range(2):
                        for j2 in range(2):
                            for k2 in range(2):
                                sum = abs(i - i2) + abs(j - j2) + abs(k - k2)
                                if sum == 1 and i <= i2 and j <= j2 and k <= k2:
                                    self.edges[count] = [c1, c2]
                                    count += 1
                                c2 += 1
                    c1 += 1

    def reset(self):
        """Put the cube back into its initial pose above the ground plane."""
        self.action = ones((1, 12), float) * self.dists[0]
        for i in range(2):
            for j in range(2):
                for k in range(2):
                    self.pos[i * 4 + j * 2 + k] = [i * self.dists[0] - self.dists[0] / 2.0, j * self.dists[0] - self.dists[0] / 2.0 + self.startHight, k * self.dists[0] - self.dists[0] / 2.0]
        self.vel = zeros((8, 3), float)
        idx0 = arange(8).repeat(8)
        idx1 = array(list(range(8)) * 8)
        self.difM = self.pos[idx0, :] - self.pos[idx1, :] #vectors from all points to all other points
        self.springM = sqrt((self.difM ** 2).sum(axis=1)).reshape(64, 1)
        self.distM = self.springM.copy() #distance matrix
        self.step = 0
        self.mySensors.updateSensor(self.pos, self.vel, self.distM, self.centerOfGrav, self.step, self.action)
        if self.render:
            if self.server.clients > 0:
                # If there are clients send them reset signal
                self.server.send(["r", "r"])

    def performAction(self, action):
        """Clip the action to valid spring lengths and advance one step."""
        action = self.normAct(action)
        self.action = action.copy()
        self.act(action)
        self.euler()
        self.step += 1
        if self.render:
            if self.updateDone:
                self.updateRenderer()
                if self.server.clients > 0 and self.realtime:
                    sleep(0.02)

    def getSensors(self):
        """Refresh and return a copy of the current sensor reading."""
        self.mySensors.updateSensor(self.pos, self.vel, self.distM, self.centerOfGrav, self.step, self.action)
        return self.mySensors.getSensor()[:]

    def normAct(self, s):
        """Clip desired spring lengths into [minAkt, maxAkt]."""
        return clip(s, self.minAkt, self.maxAkt)

    def act(self, a):
        """Write the 12 desired edge lengths symmetrically into the 8x8
        spring-length matrix."""
        count = 0
        for i in self.edges:
            self.springM[i[0] * 8 + i[1]] = a[count]
            self.springM[i[1] * 8 + i[0]] = a[count]
            count += 1

    def euler(self):
        """One explicit Euler integration step of the mass-spring system."""
        self.count += 1
        #Inner Forces
        distM = self.distM.copy()
        disM = self.springM - distM #difference between wanted spring lengths and current ones
        disM = disM.reshape(64, 1)
        distM = distM + 0.0000000001 #hack to prevent divs by 0
        #Forces to Velos
        #spring vectors normalized to 1 times the actual force from deformation
        vel = self.difM / distM
        vel *= disM * self.d * self.dt
        idx2 = arange(8)
        #TODO: arggggg!!!!!
        for i in range(8):
            self.vel[i] += vel[idx2 + i * 8, :].sum(axis=0)
        #Gravity
        self.vel += self.gravVect * self.dt
        #Dumping
        self.vel -= self.vel * self.dumping * self.dt
        #velos to positions
        self.pos += self.dt * self.vel
        #Collisions and friction: ground plane at y = 0 reflects vertical velocity
        for i in range(8):
            if self.pos[i][1] < 0.0:
                self.pos[i][1] = 0.0
                self.vel[i] = self.vel[i] * [0.0, -1.0, 0.0]
        self.centerOfGrav = self.pos.sum(axis=0) / 8.0
        #Distances of new state
        idx0 = arange(8).repeat(8)
        idx1 = array(list(range(8)) * 8)
        self.difM = self.pos[idx0, :] - self.pos[idx1, :] #vectors from all points to all other points
        self.distM = sqrt((self.difM ** 2).sum(axis=1)).reshape(64, 1) #distance matrix

    @threaded()
    def updateRenderer(self):
        """Send the current state to connected viewers (runs in its own thread)."""
        self.updateDone = False
        if not self.updateLock.acquire(False): return
        # Listen for clients
        self.server.listen()
        if self.server.clients > 0:
            # If there are clients send them the new data
            self.server.send(repr([self.pos, self.centerOfGrav]))
        sleep(0.02)
        self.updateLock.release()
        self.updateDone = True
| bsd-3-clause | 043ce0edd103fb05fef300a679180d85 | 34.359281 | 191 | 0.52989 | 3.387837 | false | false | false | false |
pybrain2/pybrain2 | pybrain/optimization/populationbased/coevolution/coevolution.py | 25 | 10998 | from __future__ import print_function
__author__ = 'Tom Schaul, tom@idsia.ch'
from scipy import argmax, array
from random import sample, choice, shuffle
from pybrain.utilities import fListToString, Named
class Coevolution(Named):
""" Population-based generational evolutionary algorithm
with fitness being based (paritally) on a relative measure. """
# algorithm parameters
populationSize = 50
selectionProportion = 0.5
elitism = False
parentChildAverage = 1. # proportion of the child
tournamentSize = None
hallOfFameEvaluation = 0. # proportion of HoF evaluations in relative fitness
useSharedSampling = False
# an external absolute evaluator
absEvaluator = None
absEvalProportion = 0
# execution settings
maxGenerations = None
maxEvaluations = None
verbose = False
    def __init__(self, relEvaluator, seeds, **args):
        """
        :arg relevaluator: an anti-symmetric function that can evaluate 2 elements
        :arg seeds: a list of initial guesses
        """
        # set parameters
        self.setArgs(**args)
        self.relEvaluator = relEvaluator
        if self.tournamentSize == None:
            self.tournamentSize = self.populationSize

        # initialize algorithm variables
        self.steps = 0
        self.generation = 0

        # the best host and the best parasite from each generation
        self.hallOfFame = []

        # the relative fitnesses from each generation (of the selected individuals)
        self.hallOfFitnesses = []

        # this dictionary stores all the results between 2 players (first one starting):
        # { (player1, player2): [games won, total games, cumulative score, list of scores] }
        self.allResults = {}

        # this dictionary stores the opponents a player has played against.
        self.allOpponents = {}

        # a list of all previous populations
        self.oldPops = []

        # build initial populations
        self._initPopulation(seeds)
def learn(self, maxSteps=None):
""" Toplevel function, can be called iteratively.
:return: best evaluable found in the last generation. """
if maxSteps != None:
maxSteps += self.steps
while True:
if maxSteps != None and self.steps + self._stepsPerGeneration() > maxSteps:
break
if self.maxEvaluations != None and self.steps + self._stepsPerGeneration() > self.maxEvaluations:
break
if self.maxGenerations != None and self.generation >= self.maxGenerations:
break
self._oneGeneration()
return self.hallOfFame[-1]
def _oneGeneration(self):
    """ Evaluate the current population, archive the best individual and the
    top fitnesses, then build the next generation by selection/reproduction. """
    self.oldPops.append(self.pop)
    self.generation += 1
    fitnesses = self._evaluatePopulation()
    # store best in hall of fame
    besti = argmax(array(fitnesses))
    best = self.pop[besti]
    # fitnesses of the top-ranked individuals, best first
    bestFits = sorted(fitnesses)[::-1][:self._numSelected()]
    self.hallOfFame.append(best)
    self.hallOfFitnesses.append(bestFits)
    if self.verbose:
        # the tuple-wrapping of the print arguments was a 2to3 conversion
        # artifact (it printed tuple reprs); print the values directly.
        print('Generation', self.generation)
        print(' relat. fits:', fListToString(sorted(fitnesses), 4))
        if len(best.params) < 20:
            print(' best params:', fListToString(best.params, 4))
    self.pop = self._selectAndReproduce(self.pop, fitnesses)
def _averageWithParents(self, pop, childportion):
    """ Replace (in place) every individual that has a parent by a weighted
    average of its own parameters and its parent's parameters.

    :arg childportion: weight of the child's parameters (in [0, 1]). """
    for i, p in enumerate(pop[:]):
        if p.parent is not None:
            tmp = p.copy()
            # keep the ancestry link so averaging can be applied repeatedly
            tmp.parent = p.parent
            tmp._setParameters(p.params * childportion
                               + p.parent.params * (1 - childportion))
            pop[i] = tmp
def _evaluatePopulation(self):
    """ Play tournaments to determine a relative fitness for every member
    of the current population, optionally mixing in hall-of-fame games and
    an external absolute evaluation.

    :return: a list of fitness values, aligned with self.pop. """
    # split the tournament budget between current opponents and hall-of-famers
    hoFtournSize = min(self.generation, int(self.tournamentSize * self.hallOfFameEvaluation))
    tournSize = self.tournamentSize - hoFtournSize
    if self.useSharedSampling:
        # pick a small, diverse opponent set relative to the previous population
        opponents = self._sharedSampling(tournSize, self.pop, self.oldPops[-1])
    else:
        opponents = self.pop
    if len(opponents) < tournSize:
        tournSize = len(opponents)
    self._doTournament(self.pop, opponents, tournSize)
    if hoFtournSize > 0:
        # set() removes duplicates: one individual may top several generations
        hoF = list(set(self.hallOfFame))
        self._doTournament(self.pop, hoF, hoFtournSize)
    fitnesses = []
    for p in self.pop:
        # fitness = sum of empirical scores against all opponents played
        fit = 0
        for opp in opponents:
            fit += self._beats(p, opp)
        if hoFtournSize > 0:
            for opp in hoF:
                fit += self._beats(p, opp)
        if self.absEvalProportion > 0 and self.absEvaluator != None:
            # blend in the external absolute evaluation
            fit = (1 - self.absEvalProportion) * fit + self.absEvalProportion * self.absEvaluator(p)
        fitnesses.append(fit)
    return fitnesses
def _initPopulation(self, seeds):
    """ Create the initial population by extending the seed pool with
    mutated copies until it reaches populationSize. """
    if self.parentChildAverage < 1:
        # ancestry tracking is required for parent/child averaging
        for seed in seeds:
            seed.parent = None
    self.pop = self._extendPopulation(seeds, self.populationSize)
def _extendPopulation(self, seeds, size):
    """ Build a population of the desired size: start from the provided
    seed pool and append mutated copies of randomly chosen seeds. """
    res = list(seeds)
    while len(res) < size:
        template = choice(seeds)
        offspring = template.copy()
        offspring.mutate()
        if self.parentChildAverage < 1:
            # remember the parent for later parent/child averaging
            offspring.parent = template
        res.append(offspring)
    return res
def _selectAndReproduce(self, pop, fits):
    """ Apply truncation selection to the population according to fitness,
    then refill it with mutated copies of the survivors. """
    numsel = self._numSelected()
    # pair fitnesses with individuals; shuffle first so that ties between
    # equal fitnesses are broken randomly by the stable sort
    ranked = list(zip(fits, pop))
    shuffle(ranked)
    ranked.sort(key=lambda pair: -pair[0])
    # select...
    selected = [indiv for _, indiv in ranked[:numsel]]
    # ... and reproduce
    if self.elitism:
        # survivors are kept verbatim at the front of the new population
        newpop = self._extendPopulation(selected, self.populationSize)
        if self.parentChildAverage < 1:
            self._averageWithParents(newpop, self.parentChildAverage)
    else:
        # generate extra children, then drop the parents themselves
        enlarged = self._extendPopulation(selected, self.populationSize + numsel)
        newpop = enlarged[numsel:]
        if self.parentChildAverage < 1:
            self._averageWithParents(newpop[numsel:], self.parentChildAverage)
    return newpop
def _beats(self, h, p):
    """ Empirically observed average score of h playing against p,
    counting games in both starting orders. If they never played,
    assume 0. """
    if (h, p) not in self.allResults:
        return 0
    games_hp, score_h = self.allResults[(h, p)][1:3]
    games_ph, score_p = self.allResults[(p, h)][1:3]
    return (score_h - score_p) / float(games_hp + games_ph)
def _doTournament(self, pop1, pop2, tournamentSize=None):
    """ Play a tournament: every member of pop1 plays a set of opponents
    drawn from pop2 (excluding itself), in both starting orders.

    :key tournamentSize: number of opponents per player; if unspecified,
        play all-against-all.
    """
    # TODO: Preferably select high-performing opponents?
    for p in pop1:
        # candidate opponents: pop2 without p itself
        pop3 = pop2[:]
        while p in pop3:
            pop3.remove(p)
        if tournamentSize is not None and tournamentSize < len(pop3):
            opps = sample(pop3, tournamentSize)
        else:
            opps = pop3
        for opp in opps:
            # play both orders, since the game may not be symmetric
            self._relEval(p, opp)
            self._relEval(opp, p)
def _globalScore(self, p):
    """ The average score of player p over all its recorded games
    (0. if it never played). """
    if p not in self.allOpponents:
        return 0.
    total_score, games = 0., 0
    for opp in self.allOpponents[p]:
        # games where p started...
        total_score += self.allResults[(p, opp)][2]
        games += self.allResults[(p, opp)][1]
        # ...and games where the opponent started
        total_score -= self.allResults[(opp, p)][2]
        games += self.allResults[(opp, p)][1]
    # slightly bias the global score in favor of players with more games (just for tie-breaking)
    games += 0.01
    return total_score / games
def _sharedSampling(self, numSelect, selectFrom, relativeTo):
    """ Build a shared sampling set of opponents: greedily pick, from
    selectFrom, the player that beats the most members of relativeTo,
    then recurse on the remaining candidates and the players it did
    not beat.

    NOTE(review): assumes len(selectFrom) >= numSelect; otherwise the
    recursion runs out of candidates -- confirm with callers. """
    if numSelect < 1:
        return []
    # determine the player of selectFrom with the most wins against players from relativeTo (and which ones)
    tmp = {}
    for p in selectFrom:
        beaten = []
        for opp in relativeTo:
            if self._beats(p, opp) > 0:
                beaten.append(opp)
        tmp[p] = beaten
    # rank candidates by (number beaten, global score); shuffle first so
    # that ties are broken randomly by the stable sort
    beatlist = [(len(p_beaten[1]), self._globalScore(p_beaten[0]), p_beaten[0]) for p_beaten in list(tmp.items())]
    shuffle(beatlist)
    beatlist.sort(key=lambda x: x[:2])
    best = beatlist[-1][2]
    # recurse on the opponents that 'best' did not beat
    unBeaten = list(set(relativeTo).difference(tmp[best]))
    otherSelect = selectFrom[:]
    otherSelect.remove(best)
    return [best] + self._sharedSampling(numSelect - 1, otherSelect, unBeaten)
def _relEval(self, p, opp):
    """ A single relative evaluation of p against opp (in one direction),
    with the involved bookkeeping of results and opponents. """
    self.allOpponents.setdefault(p, []).append(opp)
    key = (p, opp)
    if key not in self.allResults:
        # [games won, total games, cumulative score, list of scores]
        self.allResults[key] = [0, 0, 0., []]
    outcome = self.relEvaluator(p, opp)
    record = self.allResults[key]
    if outcome > 0:
        record[0] += 1
    record[1] += 1
    record[2] += outcome
    record[3].append(outcome)
    self.steps += 1
def __str__(self):
    """ Human-readable summary of the algorithm's configuration. """
    numsel = self._numSelected()
    if self.elitism:
        # (mu + lambda) style: parents survive alongside their children
        sizes = '%d+%d' % (numsel, self.populationSize - numsel)
    else:
        # (mu, lambda) style: parents are replaced by their children
        sizes = '%d,%d' % (numsel, self.populationSize)
    res = 'Coevolution (' + sizes + ')'
    if self.parentChildAverage < 1:
        res += ' p_c_avg=' + str(self.parentChildAverage)
    return res
def _numSelected(self):
    """ Number of individuals that survive selection each generation. """
    return int(self.selectionProportion * self.populationSize)
def _stepsPerGeneration(self):
    """ Number of single evaluations performed per generation: each of the
    populationSize players meets tournamentSize opponents, and every
    pairing is evaluated in both directions. """
    return 2 * self.populationSize * self.tournamentSize
if __name__ == '__main__':
    # TODO: convert to unittest
    # Smoke-test _sharedSampling on a hand-crafted result table.
    x = Coevolution(None, [None], populationSize=1)
    # allResults format: (p1, p2) -> [games won, total games, cumulative score, scores]
    x.allResults[(1, 2)] = [1, 1, 1, []]
    x.allResults[(2, 1)] = [-1, 1, -1, []]
    x.allResults[(2, 5)] = [1, 1, 2, []]
    x.allResults[(5, 2)] = [-1, 1, -1, []]
    x.allResults[(2, 3)] = [1, 1, 3, []]
    x.allResults[(3, 2)] = [-1, 1, -1, []]
    x.allResults[(4, 3)] = [1, 1, 4, []]
    x.allResults[(3, 4)] = [-1, 1, -1, []]
    x.allOpponents[1] = [2]
    x.allOpponents[2] = [1, 5]
    x.allOpponents[3] = [2, 4]
    x.allOpponents[4] = [3]
    x.allOpponents[5] = [2]
    # the extra parentheses around the print arguments were a 2to3 artifact
    # (the second print emitted a tuple repr); print the values directly.
    print(x._sharedSampling(4, [1, 2, 3, 4, 5], [1, 2, 3, 4, 6, 7, 8, 9]))
    print('should be', [4, 1, 2, 5])
| bsd-3-clause | e85a1b314884a30b5be883b80f46c875 | 36.535836 | 118 | 0.574104 | 3.670895 | false | false | false | false |
pybrain2/pybrain2 | pybrain/rl/explorers/discrete/boltzmann.py | 31 | 1506 | __author__ = "Thomas Rueckstiess, ruecksti@in.tum.de"
from scipy import array
from pybrain.rl.explorers.discrete.discrete import DiscreteExplorer
from pybrain.utilities import drawGibbs
class BoltzmannExplorer(DiscreteExplorer):
""" A discrete explorer, that executes the actions with probability
that depends on their action values. The boltzmann explorer has
a parameter tau (the temperature). for high tau, the actions are
nearly equiprobable. for tau close to 0, this action selection
becomes greedy.
"""
def __init__(self, tau = 2., decay = 0.9995):
DiscreteExplorer.__init__(self)
self.tau = tau
self.decay = decay
self._state = None
def activate(self, state, action):
""" The super class ignores the state and simply passes the
action through the module. implement _forwardImplementation()
in subclasses.
"""
self._state = state
return DiscreteExplorer.activate(self, state, action)
def _forwardImplementation(self, inbuf, outbuf):
""" Draws a random number between 0 and 1. If the number is less
than epsilon, a random action is chosen. If it is equal or
larger than epsilon, the greedy action is returned.
"""
assert self.module
values = self.module.getActionValues(self._state)
action = drawGibbs(values, self.tau)
self.tau *= self.decay
outbuf[:] = array([action])
| bsd-3-clause | a2ecfce27672db2d22e2d3a682816f38 | 34.023256 | 73 | 0.652722 | 4.126027 | false | false | false | false |
pybrain2/pybrain2 | pybrain/rl/environments/mazes/tasks/shuttle.py | 25 | 2692 | __author__ = 'Tom Schaul, tom@idsia.ch'
from scipy import array, zeros
from random import random
from .maze import MazeTask
from pybrain.rl.environments.mazes import PolarMaze
class ShuttleDocking(MazeTask):
"""
#######
#. *#
#######
The spaceship needs to dock backwards into the goal station.
"""
actions = 3
observations = 5
discount = 0.95
mazeclass = PolarMaze
finalReward = 10
bangPenalty = -3
initPos = [(1, 1)]
topology = array([[1] * 7,
[1, 0, 0, 0, 0, 0, 1],
[1] * 7, ])
goal = (1, 5)
Backup = 0
Forward = 1
TurnAround = 2
def reset(self):
MazeTask.reset(self)
self.env.perseusDir = 1
def getObservation(self):
""" inold, seeold, black, seenew, innew """
res = zeros(5)
if self.env.perseus == self.env.goal:
res[4] = 1
elif self.env.perseus == self.env.initPos[0]:
res[0] = 1
elif self.env.perseus[1] == 3:
if random() > 0.7:
res[self.env.perseusDir] = 1
else:
res[(self.env.perseusDir + 2) % 4] = 1
else:
res[(self.env.perseusDir + 2) % 4] = 1
return res
def performAction(self, action):
self.steps += 1
if action == self.TurnAround:
self._turn()
elif action == self.Forward:
self._forward()
else: # noisy backup
r = random()
if self.env.perseus[1] == 3:
# in space
if r < 0.1:
self._turn()
elif r < 0.9:
self._backup()
elif ((self.env.perseus[1] == 2 and self.env.perseusDir == 3) or
(self.env.perseus[1] == 4 and self.env.perseusDir == 1)):
# close to station, front to station
if r < 0.3:
self._turn()
elif r < 0.6:
self._backup()
else:
# close to station, back to station
if r < 0.7:
self._backup()
def _backup(self):
self.env.performAction(PolarMaze.TurnAround)
self.env.performAction(PolarMaze.Forward)
self.env.performAction(PolarMaze.TurnAround)
def _turn(self):
self.env.performAction(PolarMaze.TurnAround)
def _forward(self):
old = self.env.perseus
self.env.performAction(PolarMaze.TurnAround)
if self.env.perseus == self.env.goal or self.env.perseus == self.env.initPos[0]:
self.env.perseus = old
self.env.bang = True
| bsd-3-clause | 0befd08d824a682b4082ac983ba332cc | 26.752577 | 88 | 0.497771 | 3.464607 | false | false | false | false |
pybrain2/pybrain2 | pybrain/rl/experiments/tournament.py | 25 | 4173 | __author__ = 'Tom Schaul, tom@idsia.ch'
from pybrain.rl.environments.twoplayergames.twoplayergame import TwoPlayerGame
from pybrain.utilities import Named
class Tournament(Named):
""" the tournament class is a specific kind of experiment, that takes a pool of agents
and has them compete against each other in a TwoPlayerGame. """
# do all moves need to be checked for legality?
forcedLegality = False
def __init__(self, env, agents):
assert isinstance(env, TwoPlayerGame)
self.startcolor = env.startcolor
self.env = env
self.agents = agents
for a in agents:
a.game = self.env
self.reset()
def reset(self):
# a dictionnary attaching a list of outcomes to a player-couple-key
self.results = {}
self.rounds = 0
self.numGames = 0
def _produceAllPairs(self):
""" produce a list of all pairs of agents (assuming ab <> ba)"""
res = []
for a in self.agents:
for b in self.agents:
if a != b:
res.append((a, b))
return res
def _oneGame(self, p1, p2):
""" play one game between two agents p1 and p2."""
self.numGames += 1
self.env.reset()
players = (p1, p2)
p1.color = self.startcolor
p2.color = -p1.color
p1.newEpisode()
p2.newEpisode()
i = 0
while not self.env.gameOver():
p = players[i]
i = (i + 1) % 2 # alternate
act = p.getAction()
if self.forcedLegality:
tries = 0
while not self.env.isLegal(*act):
tries += 1
# CHECKME: maybe the legality check is too specific?
act = p.getAction()
if tries > 50:
raise Exception('No legal move produced!')
self.env.performAction(act)
if players not in self.results:
self.results[players] = []
wincolor = self.env.getWinner()
if wincolor == p1.color:
winner = p1
else:
winner = p2
self.results[players].append(winner)
def organize(self, repeat=1):
""" have all agents play all others in all orders, and repeat. """
for dummy in range(repeat):
self.rounds += 1
for p1, p2 in self._produceAllPairs():
self._oneGame(p1, p2)
return self.results
def eloScore(self, startingscore=1500, k=32):
""" compute the elo score of all the agents, given the games played in the tournament.
Also checking for potentially initial scores among the agents ('elo' variable). """
# initialize
elos = {}
for a in self.agents:
if 'elo' in a.__dict__:
elos[a] = a.elo
else:
elos[a] = startingscore
# adjust ratings
for i, a1 in enumerate(self.agents[:-1]):
for a2 in self.agents[i + 1:]:
# compute score (in favor of a1)
s = 0
outcomes = self.results[(a1, a2)] + self.results[(a2, a1)]
for r in outcomes:
if r == a1:
s += 1.
elif r == self.env.DRAW:
s += 0.5
# what score would have been estimated?
est = len(outcomes) / (1. + 10 ** ((elos[a2] - elos[a1]) / 400.))
delta = k * (s - est)
elos[a1] += delta
elos[a2] -= delta
for a, e in list(elos.items()):
a.elo = e
return elos
def __str__(self):
s = 'Tournament results (' + str(self.rounds) + ' rounds, ' + str(self.numGames) + ' games):\n'
for p1, p2 in self._produceAllPairs():
wins = len([x for x in self.results[(p1, p2)] if x == p1])
losses = len([x for x in self.results[(p1, p2)] if x == p2])
s += ' ' * 3 + p1.name + ' won ' + str(wins) + ' times and lost ' + str(losses) + ' times against ' + p2.name + '\n'
return s
| bsd-3-clause | a46177cef8fa428e1e238d3eee1e32c0 | 34.666667 | 128 | 0.507788 | 3.790191 | false | false | false | false |
pybrain2/pybrain2 | pybrain/tests/unittests/supervised/trainers/test_backprop.py | 28 | 2025 | """
>>> from pybrain.datasets.supervised import SupervisedDataSet
>>> from pybrain.supervised.trainers import BackpropTrainer
>>> from pybrain import FeedForwardNetwork
>>> from pybrain.structure import LinearLayer, SigmoidLayer, FullConnection
>>> from random import randrange
>>> dataset = SupervisedDataSet(6, 2)
>>> for i in range(1000):
... state = [randrange(0, 15),
... randrange(-70, 50),
... randrange(-70, 50),
... randrange(-70, 50),
... randrange(-70, 50),
... float(randrange(1, 5))/20.]
... action = [float(randrange(-1, 1))/10.0,
... randrange(0, 1)]
... dataset.addSample(state, action)
>>>
>>> net = FeedForwardNetwork()
>>>
>>> net.addInputModule(LinearLayer(6, name='in'))
>>> net.addModule(SigmoidLayer(40, name='hidden_0'))
>>> net.addModule(SigmoidLayer(16, name='hidden_1'))
>>> net.addOutputModule(LinearLayer(2, name='out'))
>>>
>>> net.addConnection(FullConnection(net['in'], net['hidden_0']))
>>> net.addConnection(FullConnection(net['hidden_0'], net['hidden_1']))
>>> net.addConnection(FullConnection(net['hidden_1'], net['out']))
>>>
>>> net.sortModules()
>>>
>>> trainer = BackpropTrainer(net,
... dataset=dataset,
... learningrate=0.01,
... lrdecay=1,
... momentum=0.5,
... verbose=False,
... weightdecay=0,
... batchlearning=False)
>>>
>>> trainingErrors, validationErrors = trainer.trainUntilConvergence(
... dataset=dataset,
... maxEpochs=10)
"""
__author__ = 'Steffen Kampmann, steffen.kampmann@gmail.com'
from pybrain.tests import runModuleTestSuite
if __name__ == "__main__":
runModuleTestSuite(__import__('__main__'))
| bsd-3-clause | f098086e1f8b2903964ea7dadb4b70b0 | 37.207547 | 79 | 0.521975 | 3.939689 | false | true | false | false |
pybrain2/pybrain2 | examples/rl/environments/linear_fa/bicycle.py | 26 | 14462 | from __future__ import print_function
"""An attempt to implement Randlov and Alstrom (1998). They successfully
use reinforcement learning to balance a bicycle, and to control it to drive
to a specified goal location. Their work has been used since then by a few
researchers as a benchmark problem.
We only implement the balance task. This implementation differs at least
slightly, since Randlov and Alstrom did not mention anything about how they
annealed/decayed their learning rate, etc. As a result of differences, the
results do not match those obtained by Randlov and Alstrom.
"""
__author__ = 'Chris Dembia, Bruce Cam, Johnny Israeli'
from scipy import asarray
from numpy import sin, cos, tan, sqrt, arcsin, arctan, sign, clip, argwhere
from matplotlib import pyplot as plt
import pybrain.rl.environments
from pybrain.rl.environments.environment import Environment
from pybrain.rl.learners.valuebased.linearfa import SARSALambda_LinFA
from pybrain.rl.agents.linearfa import LinearFA_Agent
from pybrain.rl.experiments import EpisodicExperiment
from pybrain.utilities import one_to_n
class BicycleEnvironment(Environment):
"""Randlov and Alstrom's bicycle model. This code matches nearly exactly
some c code we found online for simulating Randlov and Alstrom's
bicycle. The bicycle travels at a fixed speed.
"""
# For superclass.
indim = 2
outdim = 10
# Environment parameters.
time_step = 0.01
# Goal position and radius
# Lagouakis (2002) uses angle to goal, not heading, as a state
max_distance = 1000.
# Acceleration on Earth's surface due to gravity (m/s^2):
g = 9.82
# See the paper for a description of these quantities:
# Distances (in meters):
c = 0.66
dCM = 0.30
h = 0.94
L = 1.11
r = 0.34
# Masses (in kilograms):
Mc = 15.0
Md = 1.7
Mp = 60.0
# Velocity of a bicycle (in meters per second), equal to 10 km/h:
v = 10.0 * 1000.0 / 3600.0
# Derived constants.
M = Mc + Mp # See Randlov's code.
Idc = Md * r**2
Idv = 1.5 * Md * r**2
Idl = 0.5 * Md * r**2
Itot = 13.0 / 3.0 * Mc * h**2 + Mp * (h + dCM)**2
sigmad = v / r
def __init__(self):
Environment.__init__(self)
self.reset()
self.actions = [0.0, 0.0]
self._save_wheel_contact_trajectories = False
def performAction(self, actions):
self.actions = actions
self.step()
def saveWheelContactTrajectories(self, opt):
self._save_wheel_contact_trajectories = opt
def step(self):
# Unpack the state and actions.
# -----------------------------
# Want to ignore the previous value of omegadd; it could only cause a
# bug if we assign to it.
(theta, thetad, omega, omegad, _,
xf, yf, xb, yb, psi) = self.sensors
(T, d) = self.actions
# For recordkeeping.
# ------------------
if self._save_wheel_contact_trajectories:
self.xfhist.append(xf)
self.yfhist.append(yf)
self.xbhist.append(xb)
self.ybhist.append(yb)
# Intermediate time-dependent quantities.
# ---------------------------------------
# Avoid divide-by-zero, just as Randlov did.
if theta == 0:
rf = 1e8
rb = 1e8
rCM = 1e8
else:
rf = self.L / np.abs(sin(theta))
rb = self.L / np.abs(tan(theta))
rCM = sqrt((self.L - self.c)**2 + self.L**2 / tan(theta)**2)
phi = omega + np.arctan(d / self.h)
# Equations of motion.
# --------------------
# Second derivative of angular acceleration:
omegadd = 1 / self.Itot * (self.M * self.h * self.g * sin(phi)
- cos(phi) * (self.Idc * self.sigmad * thetad
+ sign(theta) * self.v**2 * (
self.Md * self.r * (1.0 / rf + 1.0 / rb)
+ self.M * self.h / rCM)))
thetadd = (T - self.Idv * self.sigmad * omegad) / self.Idl
# Integrate equations of motion using Euler's method.
# ---------------------------------------------------
# yt+1 = yt + yd * dt.
# Must update omega based on PREVIOUS value of omegad.
omegad += omegadd * self.time_step
omega += omegad * self.time_step
thetad += thetadd * self.time_step
theta += thetad * self.time_step
# Handlebars can't be turned more than 80 degrees.
theta = np.clip(theta, -1.3963, 1.3963)
# Wheel ('tyre') contact positions.
# ---------------------------------
# Front wheel contact position.
front_temp = self.v * self.time_step / (2 * rf)
# See Randlov's code.
if front_temp > 1:
front_temp = sign(psi + theta) * 0.5 * np.pi
else:
front_temp = sign(psi + theta) * arcsin(front_temp)
xf += self.v * self.time_step * -sin(psi + theta + front_temp)
yf += self.v * self.time_step * cos(psi + theta + front_temp)
# Rear wheel.
back_temp = self.v * self.time_step / (2 * rb)
# See Randlov's code.
if back_temp > 1:
back_temp = np.sign(psi) * 0.5 * np.pi
else:
back_temp = np.sign(psi) * np.arcsin(back_temp)
xb += self.v * self.time_step * -sin(psi + back_temp)
yb += self.v * self.time_step * cos(psi + back_temp)
# Preventing numerical drift.
# ---------------------------
# Copying what Randlov did.
current_wheelbase = sqrt((xf - xb)**2 + (yf - yb)**2)
if np.abs(current_wheelbase - self.L) > 0.01:
relative_error = self.L / current_wheelbase - 1.0
xb += (xb - xf) * relative_error
yb += (yb - yf) * relative_error
# Update heading, psi.
# --------------------
delta_y = yf - yb
if (xf == xb) and delta_y < 0.0:
psi = np.pi
else:
if delta_y > 0.0:
psi = arctan((xb - xf) / delta_y)
else:
psi = sign(xb - xf) * 0.5 * np.pi - arctan(delta_y / (xb - xf))
self.sensors = np.array([theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi])
def reset(self):
theta = 0
thetad = 0
omega = 0
omegad = 0
omegadd = 0
xf = 0
yf = self.L
xb = 0
yb = 0
psi = np.arctan((xb - xf) / (yf - yb))
self.sensors = np.array([theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi])
self.xfhist = []
self.yfhist = []
self.xbhist = []
self.ybhist = []
def getSteer(self):
return self.sensors[0]
def getTilt(self):
return self.sensors[2]
def get_xfhist(self):
return self.xfhist
def get_yfhist(self):
return self.yfhist
def get_xbhist(self):
return self.xbhist
def get_ybhist(self):
return self.ybhist
def getSensors(self):
return self.sensors
class BalanceTask(pybrain.rl.environments.EpisodicTask):
"""The rider is to simply balance the bicycle while moving with the
speed perscribed in the environment. This class uses a continuous 5
dimensional state space, and a discrete state space.
This class is heavily guided by
pybrain.rl.environments.cartpole.balancetask.BalanceTask.
"""
max_tilt = np.pi / 6.
nactions = 9
def __init__(self, max_time=1000.0):
super(BalanceTask, self).__init__(BicycleEnvironment())
self.max_time = max_time
# Keep track of time in case we want to end episodes based on number of
# time steps.
self.t = 0
@property
def indim(self):
return 1
@property
def outdim(self):
return 5
def reset(self):
super(BalanceTask, self).reset()
self.t = 0
def performAction(self, action):
"""Incoming action is an int between 0 and 8. The action we provide to
the environment consists of a torque T in {-2 N, 0, 2 N}, and a
displacement d in {-.02 m, 0, 0.02 m}.
"""
self.t += 1
assert round(action[0]) == action[0]
# -1 for action in {0, 1, 2}, 0 for action in {3, 4, 5}, 1 for
# action in {6, 7, 8}
torque_selector = np.floor(action[0] / 3.0) - 1.0
T = 2 * torque_selector
# Random number in [-1, 1]:
p = 2.0 * np.random.rand() - 1.0
# -1 for action in {0, 3, 6}, 0 for action in {1, 4, 7}, 1 for
# action in {2, 5, 8}
disp_selector = action[0] % 3 - 1.0
d = 0.02 * disp_selector + 0.02 * p
super(BalanceTask, self).performAction([T, d])
def getObservation(self):
(theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi) = self.env.getSensors()
return self.env.getSensors()[0:5]
def isFinished(self):
# Criterion for ending an episode. From Randlov's paper:
# "When the agent can balance for 1000 seconds, the task is considered
# learned."
if np.abs(self.env.getTilt()) > self.max_tilt:
return True
elapsed_time = self.env.time_step * self.t
if elapsed_time > self.max_time:
return True
return False
def getReward(self):
# -1 reward for falling over; no reward otherwise.
if np.abs(self.env.getTilt()) > self.max_tilt:
return -1.0
return 0.0
class LinearFATileCoding3456BalanceTask(BalanceTask):
"""An attempt to exactly implement Randlov's function approximation. He
discretized (tiled) the state space into 3456 bins. We use the same action
space as in the superclass.
"""
# From Randlov, 1998:
theta_bounds = np.array(
[-0.5 * np.pi, -1.0, -0.2, 0, 0.2, 1.0, 0.5 * np.pi])
thetad_bounds = np.array(
[-np.inf, -2.0, 0, 2.0, np.inf])
omega_bounds = np.array(
[-BalanceTask.max_tilt, -0.15, -0.06, 0, 0.06, 0.15,
BalanceTask.max_tilt])
omegad_bounds = np.array(
[-np.inf, -0.5, -0.25, 0, 0.25, 0.5, np.inf])
omegadd_bounds = np.array(
[-np.inf, -2.0, 0, 2.0, np.inf])
# http://stackoverflow.com/questions/3257619/numpy-interconversion-between-multidimensional-and-linear-indexing
nbins_across_dims = [
len(theta_bounds) - 1,
len(thetad_bounds) - 1,
len(omega_bounds) - 1,
len(omegad_bounds) - 1,
len(omegadd_bounds) - 1]
# This array, when dotted with the 5-dim state vector, gives a 'linear'
# index between 0 and 3455.
magic_array = np.cumprod([1] + nbins_across_dims)[:-1]
@property
def outdim(self):
# Used when constructing LinearFALearner's.
return 3456
def getBin(self, theta, thetad, omega, omegad, omegadd):
bin_indices = [
np.digitize([theta], self.theta_bounds)[0] - 1,
np.digitize([thetad], self.thetad_bounds)[0] - 1,
np.digitize([omega], self.omega_bounds)[0] - 1,
np.digitize([omegad], self.omegad_bounds)[0] - 1,
np.digitize([omegadd], self.omegadd_bounds)[0] - 1,
]
return np.dot(self.magic_array, bin_indices)
def getBinIndices(self, linear_index):
"""Given a linear index (integer between 0 and outdim), returns the bin
indices for each of the state dimensions.
"""
return linear_index / self.magic_array % self.nbins_across_dims
def getObservation(self):
(theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi) = self.env.getSensors()
state = one_to_n(self.getBin(theta, thetad, omega, omegad, omegadd),
self.outdim)
return state
class SARSALambda_LinFA_ReplacingTraces(SARSALambda_LinFA):
"""Randlov used replacing traces, but this doesn't exist in PyBrain's
SARSALambda.
"""
def _updateEtraces(self, state, action, responsibility=1.):
self._etraces *= self.rewardDiscount * self._lambda * responsibility
# This assumes that state is an identity vector (like, from one_to_n).
self._etraces[action] = clip(self._etraces[action] + state, -np.inf, 1.)
# Set the trace for all other actions in this state to 0:
action_bit = one_to_n(action, self.num_actions)
for argstate in argwhere(state == 1) :
self._etraces[argwhere(action_bit != 1), argstate] = 0.
task = LinearFATileCoding3456BalanceTask()
env = task.env
# The learning is very sensitive to the learning rate decay.
learner = SARSALambda_LinFA_ReplacingTraces(task.nactions, task.outdim,
learningRateDecay=2000)
learner._lambda = 0.95
task.discount = learner.rewardDiscount
agent = LinearFA_Agent(learner)
agent.logging = False
exp = EpisodicExperiment(task, agent)
performance_agent = LinearFA_Agent(learner)
performance_agent.logging = False
performance_agent.greedy = True
performance_agent.learning = False
env.saveWheelContactTrajectories(True)
plt.ion()
plt.figure(figsize=(8, 4))
ax1 = plt.subplot(1, 2, 1)
ax2 = plt.subplot(1, 2, 2)
def update_wheel_trajectories():
front_lines = ax2.plot(env.get_xfhist(), env.get_yfhist(), 'r')
back_lines = ax2.plot(env.get_xbhist(), env.get_ybhist(), 'b')
plt.axis('equal')
perform_cumrewards = []
for irehearsal in range(7000):
# Learn.
# ------
r = exp.doEpisodes(1)
# Discounted reward.
cumreward = exp.task.getTotalReward()
#print 'cumreward: %.4f; nsteps: %i; learningRate: %.4f' % (
# cumreward, len(r[0]), exp.agent.learner.learningRate)
if irehearsal % 50 == 0:
# Perform (no learning).
# ----------------------
# Swap out the agent.
exp.agent = performance_agent
# Perform.
r = exp.doEpisodes(1)
perform_cumreward = task.getTotalReward()
perform_cumrewards.append(perform_cumreward)
print('PERFORMANCE: cumreward:', perform_cumreward, 'nsteps:', len(r[0]))
# Swap back the learning agent.
performance_agent.reset()
exp.agent = agent
ax1.cla()
ax1.plot(perform_cumrewards, '.--')
# Wheel trajectories.
update_wheel_trajectories()
plt.pause(0.001)
| bsd-3-clause | 576e50a290f8002c8a6f4c25357ec50d | 32.554524 | 115 | 0.572673 | 3.325362 | false | false | false | false |
pybrain2/pybrain2 | pybrain/tools/rankingfunctions.py | 25 | 4779 | """ Ranking functions that are used in Black-box optimization, or for selection. """
__author__ = 'Daan Wierstra and Tom Schaul'
from pybrain.utilities import Named
from random import randint
from scipy import zeros, argmax, array, power, exp, sqrt, var, zeros_like, arange, mean, log
def rankedFitness(R):
""" produce a linear ranking of the fitnesses in R.
(The highest rank is the best fitness)"""
#l = sorted(list(enumerate(R)), cmp = lambda a,b: cmp(a[1],b[1]))
#l = sorted(list(enumerate(l)), cmp = lambda a,b: cmp(a[1],b[1]))
#return array(map(lambda (r, dummy): r, l))
res = zeros_like(R)
l = list(zip(R, list(range(len(R)))))
l.sort()
for i, (_, j) in enumerate(l):
res[j] = i
return res
def normalizedFitness(R):
return array((R - mean(R)) / sqrt(var(R))).flatten()
class RankingFunction(Named):
""" Default: ranked and scaled to [0,1]."""
def __init__(self, **args):
self.setArgs(**args)
n = self.__class__.__name__
for k, val in list(args.items()):
n += '-' + str(k) + '=' + str(val)
self.name = n
def __call__(self, R):
""" :key R: one-dimensional array containing fitnesses. """
res = rankedFitness(R)
return res / float(max(res))
class TournamentSelection(RankingFunction):
""" Standard evolution tournament selection, the returned array contains intergers for the samples that
are selected indicating how often they are. """
tournamentSize = 2
def __call__(self, R):
res = zeros(len(R))
for i in range(len(R)):
l = [i]
for dummy in range(self.tournamentSize - 1):
randindex = i
while randindex == i:
randindex = randint(0, len(R) - 1)
l.append(randindex)
fits = [R[x] for x in l]
res[argmax(fits)] += 1
return res
class SmoothGiniRanking(RankingFunction):
""" a smooth ranking function that gives more importance to examples with better fitness.
Rescaled to be between 0 and 1"""
gini = 0.1
linearComponent = 0.
def __call__(self, R):
def smoothup(x):
""" produces a mapping from [0,1] to [0,1], with a specific gini coefficient. """
return power(x, 2 / self.gini - 1)
ranks = rankedFitness(R)
res = zeros(len(R))
for i in range(len(ranks)):
res[i] = ranks[i] * self.linearComponent + smoothup(ranks[i] / float(len(R) - 1)) * (1 - self.linearComponent)
res /= max(res)
return res
class ExponentialRanking(RankingFunction):
""" Exponential transformation (with a temperature parameter) of the rank values. """
temperature = 10.
def __call__(self, R):
ranks = rankedFitness(R)
ranks = ranks / (len(R) - 1.0)
return exp(ranks * self.temperature)
class HansenRanking(RankingFunction):
""" Ranking, as used in CMA-ES """
def __call__(self, R):
ranks = rankedFitness(R)
return array([max(0., x) for x in log(len(R)/2.+1.0)-log(len(R)-array(ranks))])
class TopSelection(RankingFunction):
""" Select the fraction of the best ranked fitnesses. """
topFraction = 0.1
def __call__(self, R):
res = zeros(len(R))
ranks = rankedFitness(R)
cutoff = len(R) * (1. - self.topFraction)
for i in range(len(R)):
if ranks[i] >= cutoff:
res[i] = 1.0
else:
res[i] = 0.0
return res
class TopLinearRanking(TopSelection):
""" Select the fraction of the best ranked fitnesses
and scale them linearly between 0 and 1. """
topFraction = 0.2
def __call__(self, R):
res = zeros(len(R))
ranks = rankedFitness(R)
cutoff = len(R) * (1. - self.topFraction)
for i in range(len(R)):
if ranks[i] >= cutoff:
res[i] = ranks[i] - cutoff
else:
res[i] = 0.0
res /= max(res)
return res
def getPossibleParameters(self, numberOfSamples):
x = 1. / float(numberOfSamples)
return arange(x * 2, 1 + x, x)
def setParameter(self, p):
self.topFraction = p
class BilinearRanking(RankingFunction):
""" Bi-linear transformation, rescaled. """
bilinearFactor = 20
def __call__(self, R):
ranks = rankedFitness(R)
res = zeros(len(R))
transitionpoint = 4 * len(ranks) / 5
for i in range(len(ranks)):
if ranks[i] < transitionpoint:
res[i] = ranks[i]
else:
res[i] = ranks[i] + (ranks[i] - transitionpoint) * self.bilinearFactor
res /= max(res)
return res
| bsd-3-clause | 7d60969e0fbdb90aee4feb62bc60abb6 | 28.319018 | 122 | 0.56267 | 3.458032 | false | false | false | false |
pybrain2/pybrain2 | pybrain/auxiliary/pca.py | 31 | 2607 | # -*- coding: utf-8 -*-
"""Module that contains functionality for calculating the principal components
of a dataset."""
__author__ = 'Justin S Bayer, bayerj@in.tum.de'
from scipy import asmatrix, cov
from scipy.linalg import inv, eig
from numpy.random import standard_normal
def reduceDim(data, dim, func='pca'):
    """Reduce the dimension of datapoints to dim via principal component
    analysis.

    A matrix of shape (n, d) specifies n points of dimension d.
    """
    # resolve the requested pca implementation by name from this module
    pcaFunc = globals().get(func)
    if pcaFunc is None:
        raise ValueError('Unknown function to calc principal components')
    components = pcaFunc(data, dim)
    # project the centered data onto the principal components
    return (components * asmatrix(makeCentered(data)).T).T
def makeCentered(data):
    """Move the mean of the data matrix into the origin.

    Rows are perceived as datapoints.
    """
    column_means = data.mean(axis=0)
    return data - column_means
def pca(data, dim):
    """ Return the first dim principal components as colums of a matrix.

    Every row of the matrix resembles a point in the data space.
    """
    assert dim <= data.shape[1], \
        "dim must be less or equal than the original dimension"
    # Center the data (same operation as makeCentered) without mutating
    # the caller's array.
    data = data - data.mean(axis=0)
    cm = cov(data.T)
    # OPT only calculate the dim first eigenvectors here
    eigval, eigvec = eig(cm)
    # scipy.linalg.eig returns the eigenvalues as a complex array even for
    # a real symmetric covariance matrix; sorting (complex, int) tuples
    # raises TypeError on Python 3.  The imaginary parts are zero here, so
    # order by the real part instead.
    order = [(val.real, ind) for ind, val in enumerate(eigval)]
    order.sort()
    order = order[-dim:]  # keep only the dim largest eigenvalues
    # now we have to bring them back in the right order
    eig_indexes = [(ind, val) for val, ind in order]
    eig_indexes.sort(reverse=True)
    eig_indexes = [ind for ind, val in eig_indexes]
    return eigvec.take(eig_indexes, 1).T
def pPca(data, dim):
    """Return a matrix which contains the first `dim` dimensions principal
    components of data.

    data is a matrix which's rows correspond to datapoints. Implementation of
    the 'probabilistic PCA' algorithm.
    """
    num = data.shape[1]
    data = asmatrix(makeCentered(data))
    # Pick a random reduction
    W = asmatrix(standard_normal((num, dim)))
    # Save for convergence check
    W_ = W[:]
    # EM-style fixed-point iteration: alternate between estimating the
    # latent representation E and re-fitting the projection W until the
    # update falls below an absolute tolerance of 1e-3.
    # NOTE(review): no iteration cap -- presumably converges for typical
    # data, but a pathological input could loop indefinitely; confirm.
    while True:
        E = inv(W.T * W) * W.T * data.T
        W, W_ = data.T * E.T * inv(E * E.T), W
        if abs(W - W_).max() < 0.001:
            break
    # Return shape (dim, num): rows are the (unnormalized) components.
    return W.T
| bsd-3-clause | 9954e705f7318676c77a5c6d0c82a80f | 28.625 | 78 | 0.654776 | 3.729614 | false | false | false | false |
pybrain2/pybrain2 | pybrain/supervised/trainers/backprop.py | 2 | 11689 | from __future__ import print_function
from scipy import dot, argmax
from random import shuffle
from math import isnan
from pybrain.supervised.trainers.trainer import Trainer
from pybrain.utilities import fListToString
from pybrain.auxiliary import GradientDescent
from pybrain.tools.functions import abs_error
__author__ = 'Daan Wierstra and Tom Schaul'
class BackpropTrainer(Trainer):
    """Trainer that trains the parameters of a module according to a supervised
    dataset (potentially sequential) by backpropagating the errors (through
    time)."""

    def __init__(self, module, dataset=None, learningrate=0.01, lrdecay=1.0,
                 momentum=0., verbose=False, batchlearning=False,
                 weightdecay=0., errfun=None):
        """Create a BackpropTrainer to train the specified `module` on the
        specified `dataset`.

        The learning rate gives the ratio of which parameters are changed into
        the direction of the gradient. The learning rate decreases by
        `lrdecay`, which is used to to multiply the learning rate after each
        training step. The parameters are also adjusted with respect to
        `momentum`, which is the ratio by which the gradient of the last
        timestep is used.

        If `batchlearning` is set, the parameters are updated only at the end
        of each epoch. Default is False.

        `weightdecay` corresponds to the weightdecay rate, where 0 is no weight
        decay at all.

        Arguments:
            errfun (func): Function that takes 2 positional arguments,
                the target (true) and predicted (estimated) output vectors, and
                returns an estimate of the signed distance to the target (true)
                output. default = lambda targ, est: (targ - est))
        """
        Trainer.__init__(self, module)
        self.setData(dataset)
        self.verbose = verbose
        self.batchlearning = batchlearning
        self.weightdecay = weightdecay
        self.epoch = 0
        self.totalepochs = 0
        # set up gradient descender
        self.descent = GradientDescent()
        self.descent.alpha = learningrate
        self.descent.momentum = momentum
        self.descent.alphadecay = lrdecay
        self.descent.init(module.params)
        self.errfun = errfun or abs_error

    def train(self):
        """Train the associated module for one epoch.

        Returns the ponderated (importance-weighted) average error."""
        assert len(self.ds) > 0, "Dataset cannot be empty."
        self.module.resetDerivatives()
        errors = 0
        ponderation = 0.
        # present the sequences in a fresh random order each epoch
        shuffledSequences = []
        for seq in self.ds._provideSequences():
            shuffledSequences.append(seq)
        shuffle(shuffledSequences)
        for seq in shuffledSequences:
            e, p = self._calcDerivs(seq)
            errors += e
            ponderation += p
            if not self.batchlearning:
                # online learning: apply a gradient step after each sequence
                gradient = (self.module.derivs -
                            self.weightdecay * self.module.params)
                new = self.descent(gradient, errors)
                if new is not None:
                    self.module.params[:] = new
                self.module.resetDerivatives()
        if self.verbose:
            print("Total error: {z: .12g}".format(z=errors / ponderation))
        if self.batchlearning:
            # batch learning: a single step with the accumulated derivatives
            self.module._setParameters(self.descent(self.module.derivs))
        self.epoch += 1
        self.totalepochs += 1
        return errors / ponderation

    def _calcDerivs(self, seq):
        """Calculate error function and backpropagate output errors to yield
        the gradient."""
        self.module.reset()
        # forward pass over the whole sequence first
        for sample in seq:
            self.module.activate(sample[0])
        error = 0
        ponderation = 0.
        # backward pass in reverse time order (backprop through time)
        for offset, sample in reversed(list(enumerate(seq))):
            # need to make a distinction here between datasets containing
            # importance, and others
            target = sample[1]
            outerr = self.errfun(target, self.module.outputbuffer[offset])
            if self.verbose > 1:
                print('output error: {}'.format(outerr))
            if len(sample) > 2:
                importance = sample[2]
                error += 0.5 * dot(importance, outerr ** 2)
                ponderation += sum(importance)
                self.module.backActivate(outerr * importance)
            else:
                error += 0.5 * sum(outerr ** 2)
                ponderation += len(target)
                # FIXME: the next line keeps arac from producing NaNs. I don't
                # know why that is, but somehow the __str__ method of the
                # ndarray class fixes something,
                str(outerr)
                self.module.backActivate(outerr)
            if self.verbose > 1:
                print('total error so far: {}'.format(error))
        if self.verbose > 1:
            print('TOTAL error: {}'.format(error))
        return error, ponderation

    def _checkGradient(self, dataset=None, silent=False):
        """Numeric check of the computed gradient for debugging purposes.

        Compares analytical derivatives against central finite differences;
        returns, per sequence, a list of (analytical, numerical) pairs."""
        if dataset:
            self.setData(dataset)
        res = []
        for seq in self.ds._provideSequences():
            self.module.resetDerivatives()
            self._calcDerivs(seq)
            e = 1e-6
            analyticalDerivs = self.module.derivs.copy()
            numericalDerivs = []
            for p in range(self.module.paramdim):
                storedoldval = self.module.params[p]
                # central difference: f(p + e) - f(p - e) over 2e
                self.module.params[p] += e
                righterror, dummy = self._calcDerivs(seq)
                self.module.params[p] -= 2 * e
                lefterror, dummy = self._calcDerivs(seq)
                approxderiv = (righterror - lefterror) / (2 * e)
                self.module.params[p] = storedoldval
                numericalDerivs.append(approxderiv)
            r = list(zip(analyticalDerivs, numericalDerivs))
            res.append(r)
            if not silent:
                print(r)
        return res

    def testOnData(self, dataset=None, verbose=False):
        """Compute the MSE of the module performance on the given dataset.

        If no dataset is supplied, the one passed upon Trainer initialization
        is used."""
        if dataset is None:
            dataset = self.ds
        dataset.reset()
        if verbose:
            print('\nTesting on data:')
        errors = []
        importances = []
        ponderatedErrors = []
        for seq in dataset._provideSequences():
            self.module.reset()
            e, i = dataset._evaluateSequence(self.module.activate, seq,
                                             verbose)
            importances.append(i)
            errors.append(e)
            ponderatedErrors.append(e / i)
        if verbose:
            print(('All errors:', ponderatedErrors))
        assert sum(importances) > 0
        avgErr = sum(errors) / sum(importances)
        if verbose:
            print(('Average error:', avgErr))
            # Bug fix: use floor division for the median index -- a float
            # index (len(errors) / 2) raises TypeError on Python 3.
            print(('Max error:', max(ponderatedErrors), 'Median error:',
                   sorted(ponderatedErrors)[len(errors) // 2]))
        return avgErr

    def testOnClassData(self, dataset=None, verbose=False,
                        return_targets=False):
        """Return winner-takes-all classification output on a given dataset.

        If no dataset is given, the dataset passed during Trainer
        initialization is used. If return_targets is set, also return
        corresponding target classes.
        """
        if dataset is None:
            dataset = self.ds
        dataset.reset()
        out = []
        targ = []
        for seq in dataset._provideSequences():
            self.module.reset()
            for input, target in seq:
                res = self.module.activate(input)
                out.append(argmax(res))
                targ.append(argmax(target))
        if return_targets:
            return out, targ
        else:
            return out

    def trainUntilConvergence(self, dataset=None, maxEpochs=None, verbose=None,
                              continueEpochs=10, validationProportion=0.25,
                              trainingData=None, validationData=None,
                              convergence_threshold=10):
        """Train the module on the dataset until it converges.

        Return the module with the parameters that gave the minimal validation
        error.

        If no dataset is given, the dataset passed during Trainer
        initialization is used. validationProportion is the ratio of the
        dataset that is used for the validation dataset.

        If the training and validation data is already set, the splitPropotion
        is ignored

        If maxEpochs is given, at most that many epochs
        are trained. Each time validation error hits a minimum, try for
        continueEpochs epochs to find a better one."""
        epochs = 0
        if dataset is None:
            dataset = self.ds
        if verbose is None:
            verbose = self.verbose
        if trainingData is None or validationData is None:
            # Split the dataset randomly: validationProportion of the samples
            # for validation.
            trainingData, validationData = (
                dataset.splitWithProportion(1 - validationProportion))
        if not (len(trainingData) > 0 and len(validationData)):
            raise ValueError("Provided dataset too small to be split into "
                             "training and validation sets with proportion " +
                             str(validationProportion))
        self.ds = trainingData
        bestweights = self.module.params.copy()
        bestverr = self.testOnData(validationData)
        bestepoch = 0
        self.trainingErrors = []
        self.validationErrors = [bestverr]
        while True:
            trainingError = self.train()
            validationError = self.testOnData(validationData)
            if isnan(trainingError) or isnan(validationError):
                raise Exception("Training produced NaN results")
            self.trainingErrors.append(trainingError)
            self.validationErrors.append(validationError)
            if epochs == 0 or self.validationErrors[-1] < bestverr:
                # one update is always done
                bestverr = self.validationErrors[-1]
                bestweights = self.module.params.copy()
                bestepoch = epochs
            if maxEpochs is not None and epochs >= maxEpochs:
                self.module.params[:] = bestweights
                break
            epochs += 1
            if len(self.validationErrors) >= continueEpochs * 2:
                # have the validation errors started going up again?
                # compare the average of the last few to the previous few
                old = self.validationErrors[-continueEpochs * 2:- continueEpochs]
                new = self.validationErrors[-continueEpochs:]
                if min(new) > max(old):
                    self.module.params[:] = bestweights
                    break
                # stop as well when the recent errors are all identical up
                # to convergence_threshold decimal places
                lastnew = round(new[-1], convergence_threshold)
                if sum(round(y, convergence_threshold) -
                       lastnew for y in new) == 0:
                    self.module.params[:] = bestweights
                    break
        self.ds = dataset
        if verbose:
            print(('train-errors:', fListToString(self.trainingErrors, 6)))
            print(('valid-errors:', fListToString(self.validationErrors, 6)))
        # slice off the inital bestverr
        return self.trainingErrors[:bestepoch], self.validationErrors[1:1 + bestepoch]
| bsd-3-clause | bf82f5c85a687d8887518e79718cd24b | 40.746429 | 86 | 0.583882 | 4.544712 | false | false | false | false |
pybrain2/pybrain2 | pybrain/rl/environments/twoplayergames/tasks/capturetask.py | 31 | 3766 | __author__ = 'Tom Schaul, tom@idsia.ch'
from pybrain.rl.environments.episodic import EpisodicTask
from inspect import isclass
from pybrain.utilities import Named
from pybrain.rl.environments.twoplayergames import CaptureGame
from pybrain.rl.environments.twoplayergames.capturegameplayers import RandomCapturePlayer, ModuleDecidingPlayer
from pybrain.rl.environments.twoplayergames.capturegameplayers.captureplayer import CapturePlayer
from pybrain.structure.modules.module import Module
class CaptureGameTask(EpisodicTask, Named):
    """ The task of winning the maximal number of capture games against a fixed opponent. """

    # first game, opponent is black
    opponentStart = True
    # on subsequent games, starting players are alternating
    alternateStarting = False
    # numerical reward value attributed to winning
    winnerReward = 1.
    # coefficient determining the importance of long vs. short games w.r. to winning/losing
    numMovesCoeff = 0.
    # average over some games for evaluations
    averageOverGames = 10
    noisy = True

    def __init__(self, size, opponent = None, **args):
        """Set up a capture game of the given board size against `opponent`
        (an instance, a player class, or None for a random player)."""
        EpisodicTask.__init__(self, CaptureGame(size))
        self.setArgs(**args)
        # idiom fix: identity comparison with None (`is`), not `==`
        if opponent is None:
            opponent = RandomCapturePlayer(self.env)
        elif isclass(opponent):
            # assume the agent can be initialized without arguments then.
            opponent = opponent(self.env)
        else:
            opponent.game = self.env
        if not self.opponentStart:
            opponent.color = CaptureGame.WHITE
        self.opponent = opponent
        self.maxmoves = self.env.size * self.env.size
        self.minmoves = 3
        self.reset()

    def reset(self):
        self.switched = False
        EpisodicTask.reset(self)
        if self.opponent.color == CaptureGame.BLACK:
            # first move by opponent
            EpisodicTask.performAction(self, self.opponent.getAction())

    def isFinished(self):
        res = self.env.gameOver()
        if res and self.alternateStarting and not self.switched:
            # alternate starting player
            self.opponent.color *= -1
            self.switched = True
        return res

    def getReward(self):
        """ Final positive reward for winner, negative for loser. """
        if self.isFinished():
            win = (self.env.winner != self.opponent.color)
            moves = self.env.movesDone
            # shorter wins (and longer losses) are worth more when
            # numMovesCoeff > 0
            res = self.winnerReward - self.numMovesCoeff * (moves -self.minmoves)/(self.maxmoves-self.minmoves)
            if not win:
                res *= -1
            if self.alternateStarting and self.switched:
                # opponent color has been inverted after the game!
                res *= -1
            return res
        else:
            return 0

    def performAction(self, action):
        # agent moves, then (if the game continues) the opponent replies
        EpisodicTask.performAction(self, action)
        if not self.isFinished():
            EpisodicTask.performAction(self, self.opponent.getAction())

    def f(self, x):
        """ If a module is given, wrap it into a ModuleDecidingAgent before evaluating it.
        Also, if applicable, average the result over multiple games. """
        if isinstance(x, Module):
            agent = ModuleDecidingPlayer(x, self.env, greedySelection = True)
        elif isinstance(x, CapturePlayer):
            agent = x
        else:
            raise NotImplementedError('Missing implementation for '+x.__class__.__name__+' evaluation')
        res = 0
        agent.game = self.env
        self.opponent.game = self.env
        for _ in range(self.averageOverGames):
            # the evaluated agent always plays the color opposite the opponent
            agent.color = -self.opponent.color
            x = EpisodicTask.f(self, agent)
            res += x
        return res / float(self.averageOverGames)
| bsd-3-clause | 4d04451032ffdb586dff51afd9bee288 | 35.563107 | 111 | 0.641795 | 4.071351 | false | false | false | false |
cobrateam/splinter | splinter/driver/zopetestbrowser.py | 1 | 13544 | # -*- coding: utf-8 -*-
# Copyright 2012 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from __future__ import unicode_literals
import mimetypes
import re
import time
import warnings
import lxml.html
from lxml.cssselect import CSSSelector
from zope.testbrowser.browser import Browser, ListControl
from splinter.element_list import ElementList
from splinter.exceptions import ElementDoesNotExist
from splinter.driver import DriverAPI, ElementAPI
from splinter.driver.element_present import ElementPresentMixIn
from splinter.driver.find_links import FindLinks
from splinter.driver.xpath_utils import _concat_xpath_from_str
from splinter.cookie_manager import CookieManagerAPI
class CookieManager(CookieManagerAPI):
    """Cookie management on top of the zope.testbrowser cookie jar."""

    def add(self, cookie, **kwargs):
        # Create each cookie, or update it if it already exists.
        for name, value in cookie.items():
            kwargs['name'] = name
            kwargs['value'] = value
            if name in self.driver.cookies:
                self.driver.cookies.change(**kwargs)
            else:
                self.driver.cookies.create(**kwargs)

    def delete(self, *cookies):
        if not cookies:
            # Calling delete() with no arguments is deprecated.
            warnings.warn(
                'Deleting all cookies via CookieManager.delete() with no arguments '
                'has been deprecated. use CookieManager.delete_all().',
                FutureWarning,
            )
            self.delete_all()
            return
        for cookie in cookies:
            try:
                del self.driver.cookies[cookie]
            except KeyError:
                pass

    def delete_all(self):
        self.driver.cookies.clearAll()

    def all(self, verbose=False):  # NOQA: A003
        return {name: value for name, value in self.driver.cookies.items()}

    def __getitem__(self, item):
        return self.driver.cookies[item]

    def __contains__(self, key):
        return key in self.driver.cookies

    def __eq__(self, other_object):
        if not isinstance(other_object, dict):
            return False
        return dict(self.driver.cookies) == other_object
class ZopeTestBrowser(ElementPresentMixIn, DriverAPI):
    """Splinter driver backed by zope.testbrowser: an HTTP-level browser
    with no JavaScript support; parsing is done via lxml."""

    driver_name = "zope.testbrowser"

    def __init__(self, wait_time=2):
        # wait_time: default polling timeout (seconds) for text-presence checks
        self.wait_time = wait_time
        self._browser = Browser()
        self._cookie_manager = CookieManager(self._browser)
        # URL history recorded by back(), consumed by forward()
        self._last_urls = []
        self.links = FindLinks(self)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        pass

    def visit(self, url):
        self._browser.open(url)

    def back(self):
        self._last_urls.insert(0, self.url)
        self._browser.goBack()

    def forward(self):
        # NOTE(review): back() inserts at the front of _last_urls but
        # forward() pops from the end, so after multiple back() calls
        # forward() restores the oldest URL first -- verify intended.
        try:
            self.visit(self._last_urls.pop())
        except IndexError:
            pass

    def reload(self):
        self._browser.reload()

    def quit(self): # NOQA: A003
        # Nothing to tear down: no real browser process is running.
        pass

    @property
    def htmltree(self):
        """Current page parsed into an lxml element tree."""
        try:
            html = self.html.decode("utf-8")
        except AttributeError:
            # contents may already be str
            html = self.html
        return lxml.html.fromstring(html)

    @property
    def title(self):
        return self._browser.title

    @property
    def html(self):
        return self._browser.contents

    @property
    def url(self):
        return self._browser.url

    def find_option_by_value(self, value):
        """Find an <option> by its value attribute (first match only)."""
        html = self.htmltree
        element = html.xpath('//option[@value="%s"]' % value)[0]
        # zope.testbrowser controls are looked up by their label text
        control = self._browser.getControl(element.text)
        return ElementList(
            [ZopeTestBrowserOptionElement(control, self)], find_by="value", query=value
        )

    def find_option_by_text(self, text):
        """Find an <option> by its visible text (first match only)."""
        html = self.htmltree
        element = html.xpath('//option[normalize-space(text())="%s"]' % text)[0]
        control = self._browser.getControl(element.text)
        return ElementList(
            [ZopeTestBrowserOptionElement(control, self)], find_by="text", query=text
        )

    def find_by_css(self, selector):
        # CSS selectors are translated to XPath and delegated
        xpath = CSSSelector(selector).path
        return self.find_by_xpath(
            xpath, original_find="css", original_query=selector
        )

    def get_control(self, xpath_element):
        # Hook point for subclasses to map an lxml element to a control.
        return xpath_element

    def find_by_xpath(self, xpath, original_find=None, original_query=None):
        """Find elements by XPath; links and named form controls are
        re-dispatched to their specialized lookups."""
        html = self.htmltree
        elements = []
        for xpath_element in html.xpath(xpath):
            if self._element_is_link(xpath_element):
                return self._find_links_by_xpath(xpath)
            elif self._element_is_control(xpath_element) and xpath_element.name:
                return self.find_by_name(xpath_element.name)
            else:
                elements.append(self.get_control(xpath_element))
        find_by = original_find or "xpath"
        query = original_query or xpath
        return ElementList(
            [ZopeTestBrowserElement(element, self) for element in elements],
            find_by=find_by,
            query=query,
        )

    def find_by_tag(self, tag):
        return self.find_by_xpath(
            "//%s" % tag, original_find="tag", original_query=tag
        )

    def find_by_value(self, value):
        # Try the value attribute first, then fall back to the text content.
        elem = self.find_by_xpath(
            '//*[@value="%s"]' % value, original_find="value", original_query=value
        )
        if elem:
            return elem
        return self.find_by_xpath('//*[.="%s"]' % value)

    def find_by_text(self, text):
        xpath_str = _concat_xpath_from_str(text)
        return self.find_by_xpath(
            xpath_str,
            original_find="text",
            original_query=text,
        )

    def find_by_id(self, id_value):
        return self.find_by_xpath(
            '//*[@id="%s"][1]' % id_value,
            original_find="id",
            original_query=id_value,
        )

    def find_by_name(self, name):
        """Collect every form control with the given name."""
        elements = []
        index = 0
        # getControl raises when the index is exhausted; collect until then
        while True:
            try:
                control = self._browser.getControl(name=name, index=index)
                elements.append(control)
                index += 1
            except LookupError:
                break
            except NotImplementedError:
                break
        return ElementList(
            [ZopeTestBrowserControlElement(element, self) for element in elements],
            find_by="name",
            query=name,
        )

    def fill(self, name, value):
        self.find_by_name(name=name).first._control.value = value

    def fill_form(self, field_values, form_id=None, name=None, ignore_missing=False):
        """Fill multiple fields at once; values are interpreted per
        control type (checkbox/radio/select/plain)."""
        form = self._browser
        if name or form_id:
            form = self._browser.getForm(name=name, id=form_id)

        for name, value in field_values.items():
            try:
                control = form.getControl(name=name)

                if control.type == "checkbox":
                    if value:
                        control.value = control.options
                    else:
                        control.value = []
                elif control.type == "radio":
                    control.value = [
                        option for option in control.options if option == value
                    ]
                elif control.type == "select":
                    control.value = [value]
                else:
                    # text, textarea, password, etc.
                    control.value = value
            except NotImplementedError as e:
                if not ignore_missing:
                    raise NotImplementedError(e)

    def choose(self, name, value):
        control = self._browser.getControl(name=name)
        control.value = [option for option in control.options if option == value]

    def check(self, name):
        control = self._browser.getControl(name=name)
        control.value = control.options

    def uncheck(self, name):
        control = self._browser.getControl(name=name)
        control.value = []

    def attach_file(self, name, file_path):
        """Attach a local file to an upload control, guessing its MIME type."""
        filename = file_path.split("/")[-1]
        control = self._browser.getControl(name=name)
        content_type, _ = mimetypes.guess_type(file_path)
        with open(file_path, 'rb') as f:
            control.add_file(f, content_type, filename)

    def _find_links_by_xpath(self, xpath):
        html = self.htmltree
        links = html.xpath(xpath)
        return ElementList(
            [ZopeTestBrowserLinkElement(link, self) for link in links],
            find_by="xpath",
            query=xpath,
        )

    def select(self, name, value):
        self.find_by_name(name).first._control.value = [value]

    def is_text_present(self, text, wait_time=None):
        """Poll until `text` appears in the page body or the timeout expires."""
        wait_time = wait_time or self.wait_time
        end_time = time.time() + wait_time

        # NOTE(review): busy-wait without sleep; the page never changes
        # between iterations since there is no JS -- confirm polling is
        # actually needed here.
        while time.time() < end_time:
            if self._is_text_present(text):
                return True
        return False

    def _is_text_present(self, text):
        try:
            body = self.find_by_tag("body").first
            return text in body.text
        except ElementDoesNotExist:
            # This exception will be thrown if the body tag isn't present
            # This has occasionally been observed. Assume that the
            # page isn't fully loaded yet
            return False

    def is_text_not_present(self, text, wait_time=None):
        """Poll until `text` disappears from the page body or timeout."""
        wait_time = wait_time or self.wait_time
        end_time = time.time() + wait_time

        while time.time() < end_time:
            if not self._is_text_present(text):
                return True
        return False

    def _element_is_link(self, element):
        return element.tag == "a"

    def _element_is_control(self, element):
        # form controls expose a `type` attribute on their lxml element
        return hasattr(element, "type")

    @property
    def cookies(self):
        return self._cookie_manager
re_extract_inner_html = re.compile(r"^<[^<>]+>(.*)</[^<>]+>$")
class ZopeTestBrowserElement(ElementAPI):
    """Element wrapper around an lxml node, scoped to that node's subtree."""

    def __init__(self, element, parent):
        self._element = element
        self.parent = parent

    def __getitem__(self, attr):
        return self._element.attrib[attr]

    def _wrap_all(self, raw_elements):
        # helper: wrap raw lxml nodes in this element class
        return ElementList(
            [self.__class__(raw, self) for raw in raw_elements]
        )

    def find_by_css(self, selector):
        return self._wrap_all(self._element.cssselect(selector))

    def find_by_xpath(self, selector):
        return self._wrap_all(self._element.xpath(selector))

    def find_by_name(self, name):
        return self._wrap_all(self._element.cssselect('[name="%s"]' % name))

    def find_by_tag(self, name):
        return self._wrap_all(self._element.cssselect(name))

    def find_by_value(self, value):
        return self._wrap_all(self._element.cssselect('[value="%s"]' % value))

    def find_by_text(self, text):
        # Add a period to the xpath to search only inside the parent.
        return self.find_by_xpath('.{}'.format(_concat_xpath_from_str(text)))

    def find_by_id(self, id):  # NOQA: A002
        return self._wrap_all(self._element.cssselect("#%s" % id))

    @property
    def value(self):
        return self._element.text_content()

    @property
    def text(self):
        return self.value

    @property
    def outer_html(self):
        return lxml.html.tostring(self._element, encoding="unicode").strip()

    @property
    def html(self):
        # drop the element's own opening/closing tags
        return re_extract_inner_html.match(self.outer_html).group(1)

    def has_class(self, class_name):
        return len(self._element.find_class(class_name)) > 0
class ZopeTestBrowserLinkElement(ZopeTestBrowserElement):
    """Anchor element; clicking it navigates the shared browser."""

    def __init__(self, element, parent):
        super(ZopeTestBrowserLinkElement, self).__init__(element, parent)
        # keep a direct handle on the zope.testbrowser Browser for click()
        self._browser = parent._browser

    # NOTE: a __getitem__ override that only forwarded to super() was
    # removed here -- it was a no-op.

    def click(self):
        # follow the link by opening its href in the underlying browser
        return self._browser.open(self["href"])
class ZopeTestBrowserControlElement(ZopeTestBrowserElement):
    """Wraps a zope.testbrowser form control."""

    def __init__(self, control, parent):
        self._control = control
        self.parent = parent

    def __getitem__(self, attr):
        # prefer the mechanize-level attribute, fall back to HTML attrs
        raw = self._control._control
        try:
            return getattr(raw, attr)
        except AttributeError:
            return raw.attrs[attr]

    @property
    def value(self):
        current = self._control.value
        # single-selection lists unwrap to their only value
        is_single_option = (
            isinstance(self._control, ListControl) and len(current) == 1
        )
        return current[0] if is_single_option else current

    @property
    def checked(self):
        return bool(self._control.value)

    def click(self):
        return self._control.click()

    def fill(self, value):
        self._control.value = value

    def select(self, value):
        self._control.value = [value]
class ZopeTestBrowserOptionElement(ZopeTestBrowserElement):
    """Wraps a single <option> control from zope.testbrowser."""

    def __init__(self, control, parent):
        self._control = control
        self.parent = parent

    def __getitem__(self, attr):
        return getattr(self._control, attr)

    @property
    def value(self):
        """The option's value attribute."""
        return self._control.optionValue

    @property
    def text(self):
        """The option's first visible label."""
        return self._control.labels[0]

    @property
    def selected(self):
        """Whether the option is currently selected."""
        return self._control.selected
| bsd-3-clause | bb54aa0fa4806c25d6923f5ebf2f999d | 29.367713 | 87 | 0.588231 | 4.106731 | false | false | false | false |
ahmia/ahmia-site | ahmia/ahmia/management/commands/cleanup_db.py | 1 | 1039 | from datetime import timedelta
from django.conf import settings
from django.core.management import BaseCommand
from django.utils import timezone
from ... import utils
from ...models import TorStats, I2PStats, SearchQuery, SearchResultsClick
class Command(BaseCommand):
    """Management command: purge usage statistics and search records
    older than the configured retention window."""

    def __init__(self):
        super(Command, self).__init__()
        # retention window in days, taken from Django settings
        self.days_to_keep = settings.USAGE_STATS_DAYS

    def handle(self, *args, **options):
        self.cleanup_stats_etc()

    def cleanup_stats_etc(self):
        # *Stats tables (keyed by day)
        cutoff_day = utils.timezone_today() - timedelta(days=self.days_to_keep)
        for stats_model in (TorStats, I2PStats):
            stats_model.objects.filter(day__lt=cutoff_day).delete()

        # SearchQueries and Clicks (keyed by datetime)
        cutoff_dt = timezone.now() - timedelta(days=self.days_to_keep)
        for record_model in (SearchQuery, SearchResultsClick):
            record_model.objects.filter(updated__lt=cutoff_dt).delete()
| bsd-3-clause | 7d226a562d0786c9ab39efa0be075728 | 36.107143 | 87 | 0.698749 | 3.632867 | false | false | false | false |
django-helpdesk/django-helpdesk | helpdesk/views/public.py | 1 | 9647 | """
django-helpdesk - A Django powered ticket tracker for small enterprise.
(c) Copyright 2008 Jutda. All Rights Reserved. See LICENSE for details.
views/public.py - All public facing views, eg non-staff (no authentication
required) views.
"""
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist, PermissionDenied
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.utils.translation import gettext as _
from django.views.decorators.clickjacking import xframe_options_exempt
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from helpdesk import settings as helpdesk_settings
from helpdesk.decorators import is_helpdesk_staff, protect_view
from helpdesk.lib import text_is_spam
from helpdesk.models import Queue, Ticket, UserSettings
from helpdesk.user import huser_from_request
import helpdesk.views.abstract_views as abstract_views
import helpdesk.views.staff as staff
from importlib import import_module
import logging
from urllib.parse import quote
logger = logging.getLogger(__name__)
def create_ticket(request, *args, **kwargs):
    """Dispatch ticket creation to the staff view for helpdesk staff,
    otherwise to the public view."""
    view_cls = (
        staff.CreateTicketView
        if is_helpdesk_staff(request.user)
        else CreateTicketView
    )
    return view_cls.as_view()(request, *args, **kwargs)
class BaseCreateTicketView(abstract_views.AbstractCreateTicketMixin, FormView):
    """Shared machinery for the public (unauthenticated) ticket-submission
    form views."""

    def get_form_class(self):
        # The form class is configurable as a dotted path via
        # HELPDESK_PUBLIC_TICKET_FORM_CLASS; import it lazily so bad
        # settings fail with a clear configuration error.
        try:
            the_module, the_form_class = helpdesk_settings.HELPDESK_PUBLIC_TICKET_FORM_CLASS.rsplit(
                ".", 1)
            the_module = import_module(the_module)
            the_form_class = getattr(the_module, the_form_class)
        except Exception as e:
            raise ImproperlyConfigured(
                f"Invalid custom form class {helpdesk_settings.HELPDESK_PUBLIC_TICKET_FORM_CLASS}"
            ) from e
        return the_form_class

    def dispatch(self, *args, **kwargs):
        """Redirect anonymous users to login (if so configured) and staff
        users to their dashboard/ticket list before showing the form."""
        request = self.request
        if not request.user.is_authenticated and helpdesk_settings.HELPDESK_REDIRECT_TO_LOGIN_BY_DEFAULT:
            return HttpResponseRedirect(reverse('login'))

        if is_helpdesk_staff(request.user) or \
                (request.user.is_authenticated and
                 helpdesk_settings.HELPDESK_ALLOW_NON_STAFF_TICKET_UPDATE):
            try:
                if request.user.usersettings_helpdesk.login_view_ticketlist:
                    return HttpResponseRedirect(reverse('helpdesk:list'))
                else:
                    return HttpResponseRedirect(reverse('helpdesk:dashboard'))
            except UserSettings.DoesNotExist:
                # staff user without a settings row: fall back to dashboard
                return HttpResponseRedirect(reverse('helpdesk:dashboard'))
        return super().dispatch(*args, **kwargs)

    def get_initial(self):
        """Seed the form from the optional HELPDESK_PUBLIC_TICKET_* settings
        (queue, priority, due date)."""
        initial_data = super().get_initial()
        # add pre-defined data for public ticket
        if hasattr(settings, 'HELPDESK_PUBLIC_TICKET_QUEUE'):
            # get the requested queue; return an error if queue not found
            try:
                initial_data['queue'] = Queue.objects.get(
                    slug=settings.HELPDESK_PUBLIC_TICKET_QUEUE,
                    allow_public_submission=True
                ).id
            except Queue.DoesNotExist as e:
                logger.fatal(
                    "Public queue '%s' is configured as default but can't be found",
                    settings.HELPDESK_PUBLIC_TICKET_QUEUE
                )
                raise ImproperlyConfigured(
                    "Wrong public queue configuration") from e
        if hasattr(settings, 'HELPDESK_PUBLIC_TICKET_PRIORITY'):
            initial_data['priority'] = settings.HELPDESK_PUBLIC_TICKET_PRIORITY
        if hasattr(settings, 'HELPDESK_PUBLIC_TICKET_DUE_DATE'):
            initial_data['due_date'] = settings.HELPDESK_PUBLIC_TICKET_DUE_DATE
        return initial_data

    def get_form_kwargs(self, *args, **kwargs):
        # Query-string support for embedding: callers may hide or make
        # read-only specific fields via _hide_fields_ / _readonly_fields_.
        kwargs = super().get_form_kwargs(*args, **kwargs)
        if '_hide_fields_' in self.request.GET:
            kwargs['hidden_fields'] = self.request.GET.get(
                '_hide_fields_', '').split(',')
        kwargs['readonly_fields'] = self.request.GET.get(
            '_readonly_fields_', '').split(',')
        return kwargs

    def form_valid(self, form):
        """Save the ticket unless it looks like spam, then redirect to the
        public view URL including the ticket's secret key."""
        request = self.request
        if text_is_spam(form.cleaned_data['body'], request):
            # This submission is spam. Let's not save it.
            return render(request, template_name='helpdesk/public_spam.html')
        else:
            ticket = form.save(
                user=self.request.user if self.request.user.is_authenticated else None)
            try:
                return HttpResponseRedirect('%s?ticket=%s&email=%s&key=%s' % (
                    reverse('helpdesk:public_view'),
                    ticket.ticket_for_url,
                    quote(ticket.submitter_email),
                    ticket.secret_key)
                )
            except ValueError:
                # if someone enters a non-int string for the ticket
                return HttpResponseRedirect(reverse('helpdesk:home'))
class CreateTicketIframeView(BaseCreateTicketView):
    """Ticket creation form meant to be embedded in an <iframe>."""

    template_name = 'helpdesk/public_create_ticket_iframe.html'

    @csrf_exempt
    @xframe_options_exempt
    def dispatch(self, *args, **kwargs):
        return super().dispatch(*args, **kwargs)

    def form_valid(self, form):
        response = super().form_valid(form)
        if response.status_code == 302:
            # The parent view redirects on success; inside the iframe we
            # show a minimal success page instead.
            return HttpResponseRedirect(reverse('helpdesk:success_iframe'))
        # Bug fix: the original implicitly returned None here, which makes
        # Django raise on non-redirect outcomes (e.g. the rendered spam
        # page). Propagate the parent response instead.
        return response
class SuccessIframeView(TemplateView):
    # Confirmation page rendered inside the submission iframe.
    template_name = 'helpdesk/success_iframe.html'

    @xframe_options_exempt
    def dispatch(self, *args, **kwargs):
        # framing is explicitly allowed: this view only exists to be embedded
        return super().dispatch(*args, **kwargs)
class CreateTicketView(BaseCreateTicketView):
    """Public (non-iframe) ticket creation view."""

    template_name = 'helpdesk/public_create_ticket.html'

    def get_form(self, form_class=None):
        bound_form = super().get_form(form_class)
        # highlight validation errors with the Bootstrap danger class
        bound_form.error_css_class = 'text-danger'
        return bound_form
class Homepage(CreateTicketView):
    """Public landing page: ticket form plus knowledge-base categories."""

    template_name = 'helpdesk/public_homepage.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # only list KB categories the current (possibly anonymous) user may see
        helpdesk_user = huser_from_request(self.request)
        context['kb_categories'] = helpdesk_user.get_allowed_kb_categories()
        return context
def search_for_ticket(request, error_message=None):
    """Render the public "find my ticket" form.

    :param request: the current HttpRequest.
    :param error_message: optional message shown when a previous lookup
        failed.
    :raises PermissionDenied: when public ticket viewing is disabled.
    """
    # getattr with a default replaces the hasattr/and dance and treats a
    # missing setting the same as a falsy one; the guard clause keeps the
    # happy path unindented.
    if not getattr(settings, 'HELPDESK_VIEW_A_TICKET_PUBLIC', False):
        raise PermissionDenied(
            "Public viewing of tickets without a secret key is forbidden.")
    email = request.GET.get('email', None)
    return render(request, 'helpdesk/public_view_form.html', {
        'ticket': False,
        'email': email,
        'error_message': error_message,
        'helpdesk_settings': helpdesk_settings,
    })
@protect_view
def view_ticket(request):
    """Public, unauthenticated view of a single ticket.

    The ticket is looked up from the ``ticket`` / ``email`` (and usually
    ``key``) query parameters.  Staff users are bounced to the staff view;
    submitters may also close their own resolved ticket via ``?close``.
    """
    ticket_req = request.GET.get('ticket', None)
    email = request.GET.get('email', None)
    key = request.GET.get('key', '')
    if not (ticket_req and email):
        if ticket_req is None and email is None:
            # Nothing supplied at all: show the plain search form.
            return search_for_ticket(request)
        else:
            return search_for_ticket(request, _('Missing ticket ID or e-mail address. Please try again.'))
    queue, ticket_id = Ticket.queue_and_id_from_query(ticket_req)
    try:
        if hasattr(settings, 'HELPDESK_VIEW_A_TICKET_PUBLIC') and settings.HELPDESK_VIEW_A_TICKET_PUBLIC:
            # Public mode: the secret key is not required to view a ticket.
            ticket = Ticket.objects.get(
                id=ticket_id, submitter_email__iexact=email)
        else:
            ticket = Ticket.objects.get(
                id=ticket_id, submitter_email__iexact=email, secret_key__iexact=key)
    except (ObjectDoesNotExist, ValueError):
        # ValueError covers a non-integer ticket_id from the query string.
        return search_for_ticket(request, _('Invalid ticket ID or e-mail address. Please try again.'))
    if is_helpdesk_staff(request.user):
        # Staff get the full staff-side ticket view instead.
        redirect_url = reverse('helpdesk:view', args=[ticket_id])
        if 'close' in request.GET:
            redirect_url += '?close'
        return HttpResponseRedirect(redirect_url)
    if 'close' in request.GET and ticket.status == Ticket.RESOLVED_STATUS:
        from helpdesk.views.staff import update_ticket
        # Trick the update_ticket() view into thinking it's being called with
        # a valid POST.
        request.POST = {
            'new_status': Ticket.CLOSED_STATUS,
            'public': 1,
            'title': ticket.title,
            'comment': _('Submitter accepted resolution and closed ticket'),
        }
        if ticket.assigned_to:
            request.POST['owner'] = ticket.assigned_to.id
        request.GET = {}
        return update_ticket(request, ticket_id, public=True)
    # redirect user back to this ticket if possible.
    redirect_url = ''
    if helpdesk_settings.HELPDESK_NAVIGATION_ENABLED:
        redirect_url = reverse('helpdesk:view', args=[ticket_id])
    return render(request, 'helpdesk/public_view_ticket.html', {
        'key': key,
        'mail': email,
        'ticket': ticket,
        'helpdesk_settings': helpdesk_settings,
        'next': redirect_url,
    })
def change_language(request):
    """Render the language-selection page, preserving the return URL."""
    # GET.get with a default replaces the explicit membership check.
    return_to = request.GET.get('return_to', '')
    return render(request, 'helpdesk/public_change_language.html', {'next': return_to})
| bsd-3-clause | 849b25adf0750d28d1f976f435c4f1c6 | 37.899194 | 106 | 0.639577 | 4.058477 | false | false | false | false |
django-helpdesk/django-helpdesk | demo/demodesk/config/settings.py | 1 | 7279 | """
Django settings for django-helpdesk demodesk project.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): demo-only key checked into source; generate a fresh one
# for any real deployment.
SECRET_KEY = '_crkn1+fnzu5$vns_-d+^ayiq%z4k*s!!ag0!mfy36(y!vrazd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# SECURITY WARNING: you probably want to configure your server
# to use HTTPS with secure cookies, then you'd want to set
# the following settings:
#
#SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
#SESSION_COOKIE_SECURE = True
#CSRF_COOKIE_SECURE = True
#
# We leave them commented out here because most likely for
# an internal demo you don't need such security, but please
# remember when setting up your own development / production server!
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'django.contrib.humanize',
    'bootstrap4form',
    'account',  # Required by pinax-teams
    'pinax.invitations',  # required by pinax-teams
    'pinax.teams',  # team support
    'reversion',  # required by pinax-teams
    'helpdesk',  # This is us!
    'rest_framework',  # required for the API
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'demo.demodesk.config.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'debug': True,
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'demo.demodesk.config.wsgi.application'
# django-helpdesk configuration settings
# You can override django-helpdesk's defaults by redefining them here.
# To see what settings are available, see the docs/configuration.rst
# file for more information.
# Some common settings are below.
HELPDESK_DEFAULT_SETTINGS = {
    'use_email_as_submitter': True,
    'email_on_ticket_assign': True,
    'email_on_ticket_change': True,
    'login_view_ticketlist': True,
    'email_on_ticket_apichange': True,
    'preset_replies': True,
    'tickets_per_page': 25
}
# Should the public web portal be enabled?
HELPDESK_PUBLIC_ENABLED = True
HELPDESK_VIEW_A_TICKET_PUBLIC = True
HELPDESK_SUBMIT_A_TICKET_PUBLIC = True
# Should the Knowledgebase be enabled?
HELPDESK_KB_ENABLED = True
HELPDESK_TICKETS_TIMELINE_ENABLED = True
# Allow users to change their passwords
HELPDESK_SHOW_CHANGE_PASSWORD = True
# Activate the API
HELPDESK_ACTIVATE_API_ENDPOINT = True
# Instead of showing the public web portal first,
# we can instead redirect users straight to the login page.
HELPDESK_REDIRECT_TO_LOGIN_BY_DEFAULT = False
LOGIN_URL = 'helpdesk:login'
LOGIN_REDIRECT_URL = 'helpdesk:home'
# Database
# - by default, we use SQLite3 for the demo, but you can also
# configure MySQL or PostgreSQL, see the docs for more:
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Sites
# - this allows hosting of more than one site from a single server,
# in practice you can probably just leave this default if you only
# host a single site, but read more in the docs:
# https://docs.djangoproject.com/en/1.11/ref/contrib/sites/
SITE_ID = 1
# Sessions
# https://docs.djangoproject.com/en/1.11/topics/http/sessions
SESSION_COOKIE_AGE = 86400  # = 1 day
# For better default security, set these cookie flags, but
# these are likely to cause problems when testing locally
#CSRF_COOKIE_SECURE = True
#SESSION_COOKIE_SECURE = True
#CSRF_COOKIE_HTTPONLY = True
#SESSION_COOKIE_HTTPONLY = True
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Email
# https://docs.djangoproject.com/en/1.11/topics/email/
# This demo uses the console backend, which simply prints emails to the console
# rather than actually sending them out.
DEFAULT_FROM_EMAIL = 'helpdesk@example.com'
SERVER_EMAIL = 'helpdesk@example.com'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# If you want to test sending real emails, uncomment and modify the following:
#EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
#EMAIL_HOST = 'smtp.example.com'
#EMAIL_PORT = '25'
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
# By default, django-helpdesk uses en, but other languages are also available.
# The most complete translations are: es-MX, ru, zh-Hans
# Contribute to our translations via Transifex if you can!
# See CONTRIBUTING.rst for more info.
LANGUAGE_CODE = 'en-US'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# static root needs to be defined in order to use collectstatic
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# MEDIA_ROOT is where media uploads are stored.
# We set this to a directory to host file attachments created
# with tickets.
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Fixtures
# https://docs.djangoproject.com/en/1.11/ref/settings/#std:setting-FIXTURE_DIRS
# - This is only necessary to make the demo project work, not needed for
# your own projects unless you make your own fixtures
FIXTURE_DIRS = [os.path.join(BASE_DIR, 'fixtures')]
# for Django 3.2+, set default for autofields:
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
# Allow machine-local overrides without editing this file.
try:
    from .local_settings import *
except ImportError:
    pass
| bsd-3-clause | d65ee46c15fef37afe5e5f72cd924b16 | 28.831967 | 91 | 0.716994 | 3.441608 | false | false | false | false |
lepture/flask-oauthlib | example/douban.py | 16 | 1583 | from flask import Flask, redirect, url_for, session, request, jsonify
from flask_oauthlib.client import OAuth

# Demo Flask application showing an OAuth2 login against douban.com.
app = Flask(__name__)
app.debug = True
# NOTE(review): hard-coded secret key is acceptable for this example only.
app.secret_key = 'development'
oauth = OAuth(app)
# Remote application descriptor for the douban OAuth2 provider.
# request_token_url=None marks this as an OAuth2 (not OAuth1) app.
douban = oauth.remote_app(
    'douban',
    consumer_key='0cfc3c5d9f873b1826f4b518de95b148',
    consumer_secret='3e209e4f9ecf6a4a',
    base_url='https://api.douban.com/',
    request_token_url=None,
    request_token_params={'scope': 'douban_basic_common,shuo_basic_r'},
    access_token_url='https://www.douban.com/service/auth2/token',
    authorize_url='https://www.douban.com/service/auth2/auth',
    access_token_method='POST',
)
@app.route('/')
def index():
    """Show the user's home timeline, or send them off to log in."""
    if 'douban_token' not in session:
        return redirect(url_for('login'))
    resp = douban.get('shuo/v2/statuses/home_timeline')
    return jsonify(status=resp.status, data=resp.data)
@app.route('/login')
def login():
    """Kick off the OAuth2 dance by redirecting to douban."""
    callback_url = url_for('authorized', _external=True)
    return douban.authorize(callback=callback_url)
@app.route('/logout')
def logout():
    """Drop the stored OAuth token and return to the index page."""
    session.pop('douban_token', None)
    return redirect(url_for('index'))
@app.route('/login/authorized')
def authorized():
    """OAuth2 callback: store the access token or report the denial.

    Robustness fix: the provider may omit ``error_reason`` /
    ``error_description`` when the user denies access, which previously
    raised a KeyError; fall back to a generic value instead.
    """
    resp = douban.authorized_response()
    if resp is None:
        return 'Access denied: reason=%s error=%s' % (
            request.args.get('error_reason', 'unknown'),
            request.args.get('error_description', 'unknown')
        )
    # flask-oauthlib stores tokens as (token, secret) pairs; OAuth2 has no
    # secret, so the second element is empty.
    session['douban_token'] = (resp['access_token'], '')
    return redirect(url_for('index'))
@douban.tokengetter
def get_douban_oauth_token():
    """Return the cached OAuth token used to sign API requests."""
    return session.get('douban_token')
if __name__ == '__main__':
    # Run the Flask development server when executed directly.
    app.run()
| bsd-3-clause | ee74c82d8c27bee1c6aae0b4cc3f74fc | 25.383333 | 75 | 0.660139 | 3.122288 | false | false | false | false |
lepture/flask-oauthlib | flask_oauthlib/contrib/apps.py | 6 | 7551 | """
flask_oauthlib.contrib.apps
~~~~~~~~~~~~~~~~~~~~~~~~~~~
The bundle of remote app factories for famous third platforms.
Usage::
from flask import Flask
from flask_oauthlib.client import OAuth
from flask_oauthlib.contrib.apps import github
app = Flask(__name__)
oauth = OAuth(app)
github.register_to(oauth, scope=['user:email'])
github.register_to(oauth, name='github2')
Of course, it requires consumer keys in your config::
GITHUB_CONSUMER_KEY = ''
GITHUB_CONSUMER_SECRET = ''
GITHUB2_CONSUMER_KEY = ''
GITHUB2_CONSUMER_SECRET = ''
Some apps with OAuth 1.0a such as Twitter could not accept the ``scope``
argument.
Contributed by: tonyseek
"""
import copy
from oauthlib.common import unicode_type, bytes_type

# Public API: the pre-configured remote-app factories defined below.
__all__ = ['douban', 'dropbox', 'facebook', 'github', 'google', 'linkedin',
           'twitter', 'weibo']
class RemoteAppFactory(object):
    """The factory to create remote app and bind it to given extension.

    :param default_name: the default name which be used for registering.
    :param kwargs: the pre-defined kwargs.
    :param docstring: the docstring of factory.
    """

    def __init__(self, default_name, kwargs, docstring=''):
        # 'name' and 'register' are reserved for register_to()/create().
        assert 'name' not in kwargs
        assert 'register' not in kwargs
        self.default_name = default_name
        self.kwargs = kwargs
        self._kwargs_processor = None
        self.__doc__ = docstring.lstrip()

    def register_to(self, oauth, name=None, **kwargs):
        """Creates a remote app and registers it."""
        merged = self._process_kwargs(
            name=(name or self.default_name), **kwargs)
        return oauth.remote_app(**merged)

    def create(self, oauth, **kwargs):
        """Creates a remote app only."""
        merged = self._process_kwargs(
            name=self.default_name, register=False, **kwargs)
        return oauth.remote_app(**merged)

    def kwargs_processor(self, fn):
        """Sets a function to process kwargs before creating any app."""
        self._kwargs_processor = fn
        return fn

    def _process_kwargs(self, **kwargs):
        # Deep-copy both layers so callers can't mutate the presets.
        merged = copy.deepcopy(self.kwargs)
        merged.update(copy.deepcopy(kwargs))
        # The app name doubles as the Flask config key, upper-cased.
        merged.setdefault('app_key', merged['name'].upper())
        if self._kwargs_processor is not None:
            merged = self._kwargs_processor(**merged)
        return merged
def make_scope_processor(default_scope):
    """Build a kwargs processor that injects a default OAuth scope.

    The returned processor accepts either a string or a list-style scope,
    normalises it to a comma-separated string, and stores it under
    ``request_token_params['scope']`` without overriding an explicit value.
    """
    def processor(**kwargs):
        raw_scope = kwargs.pop('scope', [default_scope])
        if not isinstance(raw_scope, (unicode_type, bytes_type)):
            # allow list-style scope values
            raw_scope = ','.join(raw_scope)
        token_params = kwargs.setdefault('request_token_params', {})
        token_params.setdefault('scope', raw_scope)  # never override
        return kwargs
    return processor
# --- Pre-configured factories for well-known OAuth providers. ---
douban = RemoteAppFactory('douban', {
    'base_url': 'https://api.douban.com/v2/',
    'request_token_url': None,
    'access_token_url': 'https://www.douban.com/service/auth2/token',
    'authorize_url': 'https://www.douban.com/service/auth2/auth',
    'access_token_method': 'POST',
}, """
The OAuth app for douban.com API.
:param scope: optional. default: ``['douban_basic_common']``.
see also: http://developers.douban.com/wiki/?title=oauth2
""")
douban.kwargs_processor(make_scope_processor('douban_basic_common'))
dropbox = RemoteAppFactory('dropbox', {
    'base_url': 'https://www.dropbox.com/1/',
    'request_token_url': None,
    'access_token_url': 'https://api.dropbox.com/1/oauth2/token',
    'authorize_url': 'https://www.dropbox.com/1/oauth2/authorize',
    'access_token_method': 'POST',
    'request_token_params': {},
}, """The OAuth app for Dropbox API.""")
facebook = RemoteAppFactory('facebook', {
    'request_token_params': {'scope': 'email'},
    'base_url': 'https://graph.facebook.com',
    'request_token_url': None,
    'access_token_url': '/oauth/access_token',
    'authorize_url': 'https://www.facebook.com/dialog/oauth',
}, """
The OAuth app for Facebook API.
:param scope: optional. default: ``['email']``.
""")
facebook.kwargs_processor(make_scope_processor('email'))
github = RemoteAppFactory('github', {
    'base_url': 'https://api.github.com/',
    'request_token_url': None,
    'access_token_method': 'POST',
    'access_token_url': 'https://github.com/login/oauth/access_token',
    'authorize_url': 'https://github.com/login/oauth/authorize',
}, """
The OAuth app for GitHub API.
:param scope: optional. default: ``['user:email']``.
""")
github.kwargs_processor(make_scope_processor('user:email'))
google = RemoteAppFactory('google', {
    'base_url': 'https://www.googleapis.com/oauth2/v1/',
    'request_token_url': None,
    'access_token_method': 'POST',
    'access_token_url': 'https://accounts.google.com/o/oauth2/token',
    'authorize_url': 'https://accounts.google.com/o/oauth2/auth',
}, """
The OAuth app for Google API.
:param scope: optional.
default: ``['email']``.
""")
google.kwargs_processor(make_scope_processor(
    'email'))
twitter = RemoteAppFactory('twitter', {
    'base_url': 'https://api.twitter.com/1.1/',
    'request_token_url': 'https://api.twitter.com/oauth/request_token',
    'access_token_url': 'https://api.twitter.com/oauth/access_token',
    'authorize_url': 'https://api.twitter.com/oauth/authenticate',
}, """The OAuth app for Twitter API.""")
weibo = RemoteAppFactory('weibo', {
    'base_url': 'https://api.weibo.com/2/',
    'authorize_url': 'https://api.weibo.com/oauth2/authorize',
    'request_token_url': None,
    'access_token_method': 'POST',
    'access_token_url': 'https://api.weibo.com/oauth2/access_token',
    # weibo's token endpoint declares the wrong content type, so we force
    # JSON parsing of the response body.
    'content_type': 'application/json',
}, """
The OAuth app for weibo.com API.
:param scope: optional. default: ``['email']``
""")
weibo.kwargs_processor(make_scope_processor('email'))
def change_weibo_header(uri, headers, body):
    """Rewrite the Authorization scheme for weibo.

    Weibo does not follow the OAuth2 Bearer-token standard: it expects an
    ``OAuth2`` scheme in the Authorization header, so swap the word before
    the request goes out.  The (uri, headers, body) triple is returned
    unchanged otherwise.
    """
    auth_header = headers.get('Authorization')
    if auth_header:
        headers['Authorization'] = auth_header.replace('Bearer', 'OAuth2')
    return uri, headers, body
# Hook the header rewrite in before every weibo request is sent.
weibo.pre_request = change_weibo_header
# LinkedIn requires a ``state`` parameter; a placeholder default is given.
linkedin = RemoteAppFactory('linkedin', {
    'request_token_params': {'state': 'RandomString'},
    'base_url': 'https://api.linkedin.com/v1/',
    'request_token_url': None,
    'access_token_method': 'POST',
    'access_token_url': 'https://www.linkedin.com/uas/oauth2/accessToken',
    'authorize_url': 'https://www.linkedin.com/uas/oauth2/authorization',
}, """
The OAuth app for LinkedIn API.
:param scope: optional. default: ``['r_basicprofile']``
""")
linkedin.kwargs_processor(make_scope_processor('r_basicprofile'))
def change_linkedin_query(uri, headers, body):
    """Move the OAuth2 token from the Authorization header to the query.

    LinkedIn expects ``oauth2_access_token`` as a query parameter (plus
    the ``x-li-format`` header for JSON responses) instead of a Bearer
    header.

    Robustness fix: ``headers.pop('Authorization')`` previously raised
    ``KeyError`` for requests without an Authorization header; a default
    of ``None`` keeps those requests working (matching the ``.get`` used
    by ``change_weibo_header``).
    """
    auth = headers.pop('Authorization', None)
    headers['x-li-format'] = 'json'
    if auth:
        token = auth.replace('Bearer', '').strip()
        if '?' in uri:
            uri += '&oauth2_access_token=' + token
        else:
            uri += '?oauth2_access_token=' + token
    return uri, headers, body
# Hook the query rewrite in before every LinkedIn request is sent.
linkedin.pre_request = change_linkedin_query
| bsd-3-clause | 87efb75a528885aace6e72467a7008dc | 31.973799 | 76 | 0.639518 | 3.550071 | false | false | false | false |
lepture/flask-oauthlib | flask_oauthlib/client.py | 1 | 24833 | # coding: utf-8
"""
flask_oauthlib.client
~~~~~~~~~~~~~~~~~~~~~
Implemnts OAuth1 and OAuth2 support for Flask.
:copyright: (c) 2013 - 2014 by Hsiaoming Yang.
"""
import logging
import oauthlib.oauth1
import oauthlib.oauth2
from copy import copy
from functools import wraps
from oauthlib.common import to_unicode, PY3, add_params_to_uri
from flask import request, redirect, json, session, current_app
from werkzeug.urls import url_quote, url_decode, url_encode
from werkzeug.http import parse_options_header
from werkzeug.utils import cached_property
from .utils import to_bytes
try:
from urlparse import urljoin
import urllib2 as http
except ImportError:
from urllib import request as http
from urllib.parse import urljoin
# Module-level logger shared by all remote apps.
log = logging.getLogger('flask_oauthlib')
# Python 2/3 compatible tuple of "text" types for isinstance checks.
if PY3:
    string_types = (str,)
else:
    string_types = (str, unicode)
__all__ = ('OAuth', 'OAuthRemoteApp', 'OAuthResponse', 'OAuthException')
class OAuth(object):
    """Registry for remote applications.

    :param app: the app instance of Flask

    Create an instance with Flask::

        oauth = OAuth(app)
    """
    state_key = 'oauthlib.client'

    def __init__(self, app=None):
        self.remote_apps = {}
        self.app = app
        if app:
            self.init_app(app)

    def init_app(self, app):
        """Init app with Flask instance.

        You can also pass the instance of Flask later::

            oauth = OAuth()
            oauth.init_app(app)
        """
        self.app = app
        # Register ourselves on the Flask app so the extension can be
        # looked up later through app.extensions.
        extensions = getattr(app, 'extensions', {})
        extensions[self.state_key] = self
        app.extensions = extensions

    def remote_app(self, name, register=True, **kwargs):
        """Registers a new remote application.

        :param name: the name of the remote application
        :param register: whether the remote app will be registered

        Find more parameters from :class:`OAuthRemoteApp`.
        """
        client = OAuthRemoteApp(self, name, **kwargs)
        if register:
            assert name not in self.remote_apps
            self.remote_apps[name] = client
        return client

    def __getattr__(self, key):
        # Fall back to registered remote apps for attribute access,
        # e.g. ``oauth.github``.
        try:
            return object.__getattribute__(self, key)
        except AttributeError:
            remote = self.remote_apps.get(key)
            if remote:
                return remote
            raise AttributeError('No such app: %s' % key)
# Cached ElementTree-compatible module, populated on first use.
_etree = None


def get_etree():
    """Return an ElementTree-compatible XML module, caching the import.

    Prefers ``lxml`` when installed, then falls back to the standard
    library implementations.  Raises ``TypeError`` when no XML library
    can be imported.
    """
    global _etree
    if _etree is not None:
        return _etree
    try:
        from lxml import etree as candidate
    except ImportError:
        try:
            from xml.etree import cElementTree as candidate
        except ImportError:
            try:
                from xml.etree import ElementTree as candidate
            except ImportError:
                raise TypeError('lxml or etree not found')
    _etree = candidate
    return _etree
def parse_response(resp, content, strict=False, content_type=None):
    """Parse the response returned by :meth:`OAuthRemoteApp.http_request`.

    :param resp: response of http_request
    :param content: content of the response
    :param strict: strict mode for form urlencoded content
    :param content_type: assign a content type manually
    """
    if not content_type:
        # Fall back to the response header, defaulting to JSON.
        content_type = resp.headers.get('content-type', 'application/json')
    ct, options = parse_options_header(content_type)
    if ct in ('application/json', 'text/javascript'):
        if not content:
            return {}
        return json.loads(content)
    if ct in ('application/xml', 'text/xml'):
        # XML payloads are returned as an ElementTree element.
        return get_etree().fromstring(content)
    if ct != 'application/x-www-form-urlencoded' and strict:
        # Unknown content type in strict mode: hand back the raw content.
        return content
    # Fall back to form-urlencoded parsing with the declared charset.
    charset = options.get('charset', 'utf-8')
    return url_decode(content, charset=charset).to_dict()
def prepare_request(uri, headers=None, data=None, method=None):
    """Normalise request parameters.

    Defaults the header dict, infers the HTTP method (POST when a body is
    present, GET otherwise) and, for GET requests, folds the data into
    the query string.
    """
    headers = {} if headers is None else headers
    if not method:
        method = 'POST' if data else 'GET'
    if method == 'GET' and data:
        uri = add_params_to_uri(uri, data)
        data = None
    return uri, headers, data, method
def encode_request_data(data, format):
    """Serialise *data* for the given wire format.

    Returns a ``(body, content_type)`` pair; a ``None`` format passes the
    data through untouched with no content type.  Raises ``TypeError``
    for unknown formats.
    """
    if format is None:
        return data, None
    if format == 'json':
        body = json.dumps(data or {})
        return body, 'application/json'
    if format == 'urlencoded':
        body = url_encode(data or {})
        return body, 'application/x-www-form-urlencoded'
    raise TypeError('Unknown format %r' % format)
class OAuthResponse(object):
    """Wraps a raw HTTP response together with its parsed payload."""

    def __init__(self, resp, content, content_type=None):
        self._resp = resp
        self.raw_data = content
        # Parsed representation of the body (dict, XML element, or bytes).
        self.data = parse_response(
            resp, content, strict=True,
            content_type=content_type,
        )

    @property
    def status(self):
        """The status code of the response."""
        return self._resp.code
class OAuthException(RuntimeError):
    """Raised when an OAuth flow fails; carries the provider response."""

    def __init__(self, message, type=None, data=None):
        self.message = message
        self.type = type
        self.data = data

    def __str__(self):
        # Python 2 requires a byte string from __str__.
        if not PY3:
            return self.message.encode('utf-8')
        return self.message

    def __unicode__(self):
        return self.message
class OAuthRemoteApp(object):
"""Represents a remote application.
:param oauth: the associated :class:`OAuth` object
:param name: the name of the remote application
:param base_url: the base url for every request
:param request_token_url: the url for requesting new tokens
:param access_token_url: the url for token exchange
:param authorize_url: the url for authorization
:param consumer_key: the application specific consumer key
:param consumer_secret: the application specific consumer secret
:param request_token_params: an optional dictionary of parameters
to forward to the request token url
or authorize url depending on oauth
version
:param request_token_method: the HTTP method that should be used for
the access_token_url. Default is ``GET``
:param access_token_params: an optional dictionary of parameters to
forward to the access token url
:param access_token_method: the HTTP method that should be used for
the access_token_url. Default is ``GET``
:param access_token_headers: additional headers that should be used for
the access_token_url.
:param content_type: force to parse the content with this content_type,
usually used when the server didn't return the
right content type.
.. versionadded:: 0.3.0
:param app_key: lazy load configuration from Flask app config with
this app key
"""
def __init__(
    self, oauth, name,
    base_url=None,
    request_token_url=None,
    access_token_url=None,
    authorize_url=None,
    consumer_key=None,
    consumer_secret=None,
    rsa_key=None,
    signature_method=None,
    request_token_params=None,
    request_token_method=None,
    access_token_params=None,
    access_token_method=None,
    access_token_headers=None,
    content_type=None,
    app_key=None,
    encoding='utf-8',
):
    # See the class docstring for the meaning of each parameter.  Values
    # are stored on underscore-prefixed attributes so the cached
    # properties can fall back to Flask app config lookups when None.
    self.oauth = oauth
    self.name = name
    self._base_url = base_url
    self._request_token_url = request_token_url
    self._access_token_url = access_token_url
    self._authorize_url = authorize_url
    self._consumer_key = consumer_key
    self._consumer_secret = consumer_secret
    self._rsa_key = rsa_key
    self._signature_method = signature_method
    self._request_token_params = request_token_params
    self._request_token_method = request_token_method
    self._access_token_params = access_token_params
    self._access_token_method = access_token_method
    self._access_token_headers = access_token_headers or {}
    self._content_type = content_type
    self._tokengetter = None
    self.app_key = app_key
    self.encoding = encoding
    # Check for required authentication information.
    # Skip this check if app_key is specified, since the information is
    # specified in the Flask config, instead.
    if not app_key:
        if signature_method == oauthlib.oauth1.SIGNATURE_RSA:
            # check for consumer_key and rsa_key
            if not consumer_key or not rsa_key:
                raise TypeError(
                    "OAuthRemoteApp with RSA authentication requires "
                    "consumer key and rsa key"
                )
        else:
            # check for consumer_key and consumer_secret
            if not consumer_key or not consumer_secret:
                raise TypeError(
                    "OAuthRemoteApp requires consumer key and secret"
                )
# Each public configuration attribute is a cached property: it returns
# the constructor argument when one was given, otherwise falls back to
# the Flask app config via _get_property() (see below).
@cached_property
def base_url(self):
    return self._get_property('base_url')

@cached_property
def request_token_url(self):
    return self._get_property('request_token_url', None)

@cached_property
def access_token_url(self):
    return self._get_property('access_token_url')

@cached_property
def authorize_url(self):
    return self._get_property('authorize_url')

@cached_property
def consumer_key(self):
    return self._get_property('consumer_key')

@cached_property
def consumer_secret(self):
    return self._get_property('consumer_secret')

@cached_property
def rsa_key(self):
    return self._get_property('rsa_key')

@cached_property
def signature_method(self):
    return self._get_property('signature_method')

@cached_property
def request_token_params(self):
    return self._get_property('request_token_params', {})

@cached_property
def request_token_method(self):
    return self._get_property('request_token_method', 'GET')

@cached_property
def access_token_params(self):
    return self._get_property('access_token_params', {})

@cached_property
def access_token_method(self):
    return self._get_property('access_token_method', 'POST')

@cached_property
def content_type(self):
    return self._get_property('content_type', None)
def _get_property(self, key, default=False):
    """Resolve a config value: instance attribute first, then app config.

    ``default=False`` acts as a sentinel meaning "no default": when the
    key is missing and no real default was supplied, the dict/config
    lookup is allowed to raise KeyError.
    """
    attr = getattr(self, '_%s' % key)
    if attr is not None:
        return attr
    if not self.app_key:
        # No config fallback configured: return the default (or None).
        if default is not False:
            return default
        return attr
    app = self.oauth.app or current_app
    if self.app_key in app.config:
        # works with dict config
        config = app.config[self.app_key]
        if default is not False:
            return config.get(key, default)
        return config[key]
    # works with plain text config, e.g. GITHUB_CONSUMER_KEY
    config_key = "%s_%s" % (self.app_key, key.upper())
    if default is not False:
        return app.config.get(config_key, default)
    return app.config[config_key]
def get_oauth1_client_params(self, token):
    """Build the kwargs used to construct an oauthlib OAuth1 client."""
    client_params = copy(self.request_token_params) or {}
    if token and isinstance(token, (tuple, list)):
        client_params["resource_owner_key"] = token[0]
        client_params["resource_owner_secret"] = token[1]
    if self.signature_method == oauthlib.oauth1.SIGNATURE_RSA:
        # RSA signing needs the method and the private key passed through.
        client_params["signature_method"] = self.signature_method
        client_params["rsa_key"] = self.rsa_key
    return client_params
def make_client(self, token=None):
    """Create an oauthlib client for this remote app.

    A configured ``request_token_url`` marks the app as OAuth1;
    otherwise an OAuth2 web-application client is built.  ``token`` may
    be a (key, secret) pair, a plain access-token string, or a dict.
    """
    # request_token_url is for oauth1
    if self.request_token_url:
        # get params for client
        params = self.get_oauth1_client_params(token)
        client = oauthlib.oauth1.Client(
            client_key=self.consumer_key,
            client_secret=self.consumer_secret,
            **params
        )
    else:
        if token:
            # Normalise tuple/list and string tokens to the dict form
            # that oauthlib expects.
            if isinstance(token, (tuple, list)):
                token = {'access_token': token[0]}
            elif isinstance(token, string_types):
                token = {'access_token': token}
        client = oauthlib.oauth2.WebApplicationClient(
            self.consumer_key, token=token
        )
    return client
@staticmethod
def http_request(uri, headers=None, data=None, method=None):
    """Perform the actual HTTP request via urllib.

    Returns a ``(response, content)`` pair; HTTP error responses are
    returned the same way instead of raising, so callers can inspect
    the error body.
    """
    uri, headers, data, method = prepare_request(
        uri, headers, data, method
    )
    log.debug('Request %r with %r method' % (uri, method))
    req = http.Request(uri, headers=headers, data=data)
    # urllib decides the method from the presence of data; force ours.
    req.get_method = lambda: method.upper()
    try:
        resp = http.urlopen(req)
        content = resp.read()
        resp.close()
        return resp, content
    except http.HTTPError as resp:
        # 4xx/5xx: hand the error response back to the caller.
        content = resp.read()
        resp.close()
        return resp, content
# Convenience wrappers: each sets the HTTP method and delegates to
# request().
def get(self, *args, **kwargs):
    """Sends a ``GET`` request. Accepts the same parameters as
    :meth:`request`.
    """
    kwargs['method'] = 'GET'
    return self.request(*args, **kwargs)

def post(self, *args, **kwargs):
    """Sends a ``POST`` request. Accepts the same parameters as
    :meth:`request`.
    """
    kwargs['method'] = 'POST'
    return self.request(*args, **kwargs)

def put(self, *args, **kwargs):
    """Sends a ``PUT`` request. Accepts the same parameters as
    :meth:`request`.
    """
    kwargs['method'] = 'PUT'
    return self.request(*args, **kwargs)

def delete(self, *args, **kwargs):
    """Sends a ``DELETE`` request. Accepts the same parameters as
    :meth:`request`.
    """
    kwargs['method'] = 'DELETE'
    return self.request(*args, **kwargs)

def patch(self, *args, **kwargs):
    """Sends a ``PATCH`` request. Accepts the same parameters as
    :meth:`post`.
    """
    kwargs['method'] = 'PATCH'
    return self.request(*args, **kwargs)
def request(self, url, data=None, headers=None, format='urlencoded',
            method='GET', content_type=None, token=None):
    """
    Sends a request to the remote server with OAuth tokens attached.

    :param data: the data to be sent to the server.
    :param headers: an optional dictionary of headers.
    :param format: the format for the `data`. Can be `urlencoded` for
                   URL encoded data or `json` for JSON.
    :param method: the HTTP request method to use.
    :param content_type: an optional content type. If a content type
                         is provided, the data is passed as it, and
                         the `format` is ignored.
    :param token: an optional token to pass, if it is None, token will
                  be generated by tokengetter.
    """
    headers = dict(headers or {})
    if token is None:
        token = self.get_request_token()
    client = self.make_client(token)
    url = self.expand_url(url)
    if method == 'GET':
        assert format == 'urlencoded'
        if data:
            url = add_params_to_uri(url, data)
            data = None
    else:
        if content_type is None:
            data, content_type = encode_request_data(data, format)
        if content_type is not None:
            headers['Content-Type'] = content_type
    if self.request_token_url:
        # oauth1: sign the full request
        uri, headers, body = client.sign(
            url, http_method=method, body=data, headers=headers
        )
    else:
        # oauth2: attach the bearer token
        uri, headers, body = client.add_token(
            url, http_method=method, body=data, headers=headers
        )
    if hasattr(self, 'pre_request'):
        # This is designed for some non-standard services like weibo,
        # which need the uri, headers, or body changed before sending.
        uri, headers, body = self.pre_request(uri, headers, body)
    # Bug fix: the encoded body below was computed but never used -- the
    # call passed ``to_bytes(body, ...)`` directly, sending b'' instead
    # of no body for empty payloads.  Pass the computed value instead.
    if body:
        data = to_bytes(body, self.encoding)
    else:
        data = None
    resp, content = self.http_request(
        uri, headers, data=data, method=method
    )
    return OAuthResponse(resp, content, self.content_type)
    def authorize(self, callback=None, state=None, **kwargs):
        """
        Returns a redirect response to the remote authorization URL with
        the signed callback given.
        :param callback: a redirect url for the callback
        :param state: an optional value to embed in the OAuth request.
                      Use this if you want to pass around application
                      state (e.g. CSRF tokens).
        :param kwargs: add optional key/value pairs to the query string
        """
        params = dict(self.request_token_params) or {}
        params.update(**kwargs)
        if self.request_token_url:
            # OAuth1: fetch a request token first, then redirect the user
            # to the provider's authorize endpoint with that token attached.
            token = self.generate_request_token(callback)[0]
            url = '%s?oauth_token=%s' % (
                self.expand_url(self.authorize_url), url_quote(token)
            )
            if params:
                url += '&' + url_encode(params)
        else:
            # OAuth2: the authorization URL is built entirely client-side.
            assert callback is not None, 'Callback is required for OAuth2'
            client = self.make_client()
            if 'scope' in params:
                scope = params.pop('scope')
            else:
                scope = None
            if isinstance(scope, str):
                # oauthlib need unicode
                scope = _encode(scope, self.encoding)
            # An explicit `state` argument wins over one configured in
            # request_token_params; either way it is removed from params so
            # it is not sent twice.
            if 'state' in params:
                if not state:
                    state = params.pop('state')
                else:
                    # remove state in params
                    params.pop('state')
            if callable(state):
                # state can be function for generate a random string
                state = state()
            # Remember the callback so handle_oauth2_response can present
            # the same redirect_uri during the token exchange.
            session['%s_oauthredir' % self.name] = callback
            url = client.prepare_request_uri(
                self.expand_url(self.authorize_url),
                redirect_uri=callback,
                scope=scope,
                state=state,
                **params
            )
        return redirect(url)
def tokengetter(self, f):
"""
Register a function as token getter.
"""
self._tokengetter = f
return f
def expand_url(self, url):
return urljoin(self.base_url, url)
    def generate_request_token(self, callback=None):
        """Perform the first leg of the OAuth1 handshake.

        Signs and sends a request-token request, stores the resulting
        token pair in the session, and returns it.
        """
        # for oauth1 only
        if callback is not None:
            callback = urljoin(request.url, callback)
        client = self.make_client()
        client.callback_uri = _encode(callback, self.encoding)
        # A single `realm` wins over a `realms` list; the list is joined
        # with spaces as the OAuth1 spec expects.
        realm = self.request_token_params.get('realm')
        realms = self.request_token_params.get('realms')
        if not realm and realms:
            realm = ' '.join(realms)
        uri, headers, _ = client.sign(
            self.expand_url(self.request_token_url),
            http_method=self.request_token_method,
            realm=realm,
        )
        log.debug('Generate request token header %r', headers)
        resp, content = self.http_request(
            uri, headers, method=self.request_token_method,
        )
        data = parse_response(resp, content)
        if not data:
            raise OAuthException(
                'Invalid token response from %s' % self.name,
                type='token_generation_failed'
            )
        if resp.code not in (200, 201):
            message = 'Failed to generate request token'
            if 'oauth_problem' in data:
                message += ' (%s)' % data['oauth_problem']
            raise OAuthException(
                message,
                type='token_generation_failed',
                data=data,
            )
        # Keep the (token, secret) pair in the session; it is needed again
        # when exchanging the verifier for an access token.
        tup = (data['oauth_token'], data['oauth_token_secret'])
        session['%s_oauthtok' % self.name] = tup
        return tup
def get_request_token(self):
assert self._tokengetter is not None, 'missing tokengetter'
rv = self._tokengetter()
if rv is None:
raise OAuthException('No token available', type='token_missing')
return rv
    def handle_oauth1_response(self, args):
        """Handles an oauth1 authorization response.

        Exchanges the verifier from *args* plus the request token stored in
        the session for an access token and returns the parsed token data.
        """
        client = self.make_client()
        client.verifier = args.get('oauth_verifier')
        # The request token saved by generate_request_token(); without it
        # the handshake cannot be completed (e.g. cookies disabled).
        tup = session.get('%s_oauthtok' % self.name)
        if not tup:
            raise OAuthException(
                'Token not found, maybe you disabled cookie',
                type='token_not_found'
            )
        client.resource_owner_key = tup[0]
        client.resource_owner_secret = tup[1]
        uri, headers, data = client.sign(
            self.expand_url(self.access_token_url),
            _encode(self.access_token_method)
        )
        headers.update(self._access_token_headers)
        resp, content = self.http_request(
            uri, headers, to_bytes(data, self.encoding),
            method=self.access_token_method
        )
        data = parse_response(resp, content)
        if resp.code not in (200, 201):
            raise OAuthException(
                'Invalid response from %s' % self.name,
                type='invalid_response', data=data
            )
        return data
    def handle_oauth2_response(self, args):
        """Handles an oauth2 authorization response.

        Exchanges the authorization code from *args* for an access token
        and returns the parsed token data.
        """
        client = self.make_client()
        # redirect_uri must match the one used in authorize(); it was saved
        # in the session under '<name>_oauthredir'.
        remote_args = {
            'code': args.get('code'),
            'client_secret': self.consumer_secret,
            'redirect_uri': session.get('%s_oauthredir' % self.name)
        }
        log.debug('Prepare oauth2 remote args %r', remote_args)
        remote_args.update(self.access_token_params)
        headers = copy(self._access_token_headers)
        if self.access_token_method == 'POST':
            headers.update({'Content-Type': 'application/x-www-form-urlencoded'})
            body = client.prepare_request_body(**remote_args)
            resp, content = self.http_request(
                self.expand_url(self.access_token_url),
                headers=headers,
                data=to_bytes(body, self.encoding),
                method=self.access_token_method,
            )
        elif self.access_token_method == 'GET':
            # Some providers only accept the exchange as a GET; the token
            # request parameters are appended to the query string.
            qs = client.prepare_request_body(**remote_args)
            url = self.expand_url(self.access_token_url)
            url += ('?' in url and '&' or '?') + qs
            resp, content = self.http_request(
                url,
                headers=headers,
                method=self.access_token_method,
            )
        else:
            raise OAuthException(
                'Unsupported access_token_method: %s' %
                self.access_token_method
            )
        data = parse_response(resp, content, content_type=self.content_type)
        if resp.code not in (200, 201):
            raise OAuthException(
                'Invalid response from %s' % self.name,
                type='invalid_response', data=data
            )
        return data
def handle_unknown_response(self):
"""Handles a unknown authorization response."""
return None
def authorized_response(self, args=None):
"""Handles authorization response smartly."""
if args is None:
args = request.args
if 'oauth_verifier' in args:
data = self.handle_oauth1_response(args)
elif 'code' in args:
data = self.handle_oauth2_response(args)
else:
data = self.handle_unknown_response()
# free request token
session.pop('%s_oauthtok' % self.name, None)
session.pop('%s_oauthredir' % self.name, None)
return data
def authorized_handler(self, f):
"""Handles an OAuth callback.
.. versionchanged:: 0.7
@authorized_handler is deprecated in favor of authorized_response.
"""
@wraps(f)
def decorated(*args, **kwargs):
log.warn(
'@authorized_handler is deprecated in favor of '
'authorized_response'
)
data = self.authorized_response()
return f(*((data,) + args), **kwargs)
return decorated
def _encode(text, encoding='utf-8'):
if encoding:
return to_unicode(text, encoding)
return text
| bsd-3-clause | 8de9cec9d8f243135c251d91208bd3b6 | 32.694708 | 81 | 0.571337 | 4.33537 | false | false | false | false |
lepture/flask-oauthlib | example/google.py | 5 | 1864 | """
google example
~~~~~~~~~~~~~~
This example is contributed by Bruno Rocha
GitHub: https://github.com/rochacbruno
"""
from flask import Flask, redirect, url_for, session, request, jsonify
from flask_oauthlib.client import OAuth
app = Flask(__name__)
# NOTE(review): placeholder strings — replace with the real client ID and
# secret issued by the Google developer console before running the example.
app.config['GOOGLE_ID'] = "cloud.google.com/console and get your ID"
app.config['GOOGLE_SECRET'] = "cloud.google.com/console and get the secret"
app.debug = True
app.secret_key = 'development'
oauth = OAuth(app)
# Remote application descriptor for Google's OAuth2 endpoints; only the
# 'email' scope is requested.
google = oauth.remote_app(
    'google',
    consumer_key=app.config.get('GOOGLE_ID'),
    consumer_secret=app.config.get('GOOGLE_SECRET'),
    request_token_params={
        'scope': 'email'
    },
    base_url='https://www.googleapis.com/oauth2/v1/',
    # request_token_url=None marks this as an OAuth2 (not OAuth1) app.
    request_token_url=None,
    access_token_method='POST',
    access_token_url='https://accounts.google.com/o/oauth2/token',
    authorize_url='https://accounts.google.com/o/oauth2/auth',
)
@app.route('/')
def index():
    """Show the user's Google profile, or send them to the login flow."""
    if 'google_token' not in session:
        return redirect(url_for('login'))
    me = google.get('userinfo')
    return jsonify({"data": me.data})
@app.route('/login')
def login():
    """Start the OAuth dance against Google's authorize endpoint."""
    callback = url_for('authorized', _external=True)
    return google.authorize(callback=callback)
@app.route('/logout')
def logout():
    """Forget the stored token and return to the index page."""
    session.pop('google_token', None)
    return redirect(url_for('index'))
@app.route('/login/authorized')
def authorized():
    """OAuth callback: store the access token and show the profile."""
    resp = google.authorized_response()
    if resp is None:
        # Use .get() so a provider response that omits either key cannot
        # raise KeyError and mask the real denial reason.
        return 'Access denied: reason=%s error=%s' % (
            request.args.get('error_reason', 'unknown'),
            request.args.get('error_description', 'unknown')
        )
    session['google_token'] = (resp['access_token'], '')
    me = google.get('userinfo')
    return jsonify({"data": me.data})
@google.tokengetter
def get_google_oauth_token():
    # Token getter used by flask-oauthlib when signing API requests;
    # returns None while the user has not authorized yet.
    return session.get('google_token')
if __name__ == '__main__':
    # Run the example on Flask's built-in development server.
    app.run()
| bsd-3-clause | b5578256bcf2c7ac691fe8be0bcdc127 | 24.534247 | 75 | 0.642167 | 3.445471 | false | true | false | false |
lepture/flask-oauthlib | example/linkedin.py | 16 | 2007 | from flask import Flask, redirect, url_for, session, request, jsonify
from flask_oauthlib.client import OAuth
app = Flask(__name__)
app.debug = True
app.secret_key = 'development'
oauth = OAuth(app)
linkedin = oauth.remote_app(
'linkedin',
consumer_key='k8fhkgkkqzub',
consumer_secret='ZZtLETQOQYNDjMrz',
request_token_params={
'scope': 'r_basicprofile',
'state': 'RandomString',
},
base_url='https://api.linkedin.com/v1/',
request_token_url=None,
access_token_method='POST',
access_token_url='https://www.linkedin.com/uas/oauth2/accessToken',
authorize_url='https://www.linkedin.com/uas/oauth2/authorization',
)
@app.route('/')
def index():
    """Show the user's LinkedIn profile, or send them to the login flow."""
    if 'linkedin_token' not in session:
        return redirect(url_for('login'))
    me = linkedin.get('people/~')
    return jsonify(me.data)
@app.route('/login')
def login():
    """Start the OAuth dance against LinkedIn's authorize endpoint."""
    callback = url_for('authorized', _external=True)
    return linkedin.authorize(callback=callback)
@app.route('/logout')
def logout():
    """Forget the stored token and return to the index page."""
    session.pop('linkedin_token', None)
    return redirect(url_for('index'))
@app.route('/login/authorized')
def authorized():
    """OAuth callback: store the access token and show the profile."""
    resp = linkedin.authorized_response()
    if resp is None:
        # Use .get() so a provider response that omits either key cannot
        # raise KeyError and mask the real denial reason.
        return 'Access denied: reason=%s error=%s' % (
            request.args.get('error_reason', 'unknown'),
            request.args.get('error_description', 'unknown')
        )
    session['linkedin_token'] = (resp['access_token'], '')
    me = linkedin.get('people/~')
    return jsonify(me.data)
@linkedin.tokengetter
def get_linkedin_oauth_token():
    # Token getter used by flask-oauthlib when signing API requests;
    # returns None while the user has not authorized yet.
    return session.get('linkedin_token')
def change_linkedin_query(uri, headers, body):
    """Rewrite outgoing requests into the form LinkedIn expects.

    LinkedIn does not accept a standard ``Authorization: Bearer`` header:
    the access token must travel in the ``oauth2_access_token`` query
    parameter, and JSON responses are requested via the ``x-li-format``
    header.

    :param uri: the request URI produced by the oauth client.
    :param headers: the request headers; mutated in place.
    :param body: the request body; returned unchanged.
    """
    # Default to None so a request without an Authorization header cannot
    # raise KeyError.
    auth = headers.pop('Authorization', None)
    headers['x-li-format'] = 'json'
    if auth:
        auth = auth.replace('Bearer', '').strip()
        separator = '&' if '?' in uri else '?'
        uri += separator + 'oauth2_access_token=' + auth
    return uri, headers, body
# Hook the query rewriter into every outgoing LinkedIn request.
linkedin.pre_request = change_linkedin_query
if __name__ == '__main__':
    app.run()
| bsd-3-clause | 440e9e905ecca293298c2ec0b747074e | 24.730769 | 77 | 0.63129 | 3.378788 | false | false | false | false |
ufal/neuralmonkey | neuralmonkey/runners/ctc_debug_runner.py | 1 | 1826 | from typing import Dict, List
import numpy as np
import tensorflow as tf
from typeguard import check_argument_types
from neuralmonkey.runners.base_runner import BaseRunner
from neuralmonkey.decoders.ctc_decoder import CTCDecoder
from neuralmonkey.decorators import tensor
class CTCDebugRunner(BaseRunner[CTCDecoder]):
    """A runner that prints out raw CTC output including the blank symbols."""
    # pylint: disable=too-few-public-methods
    # Pylint issue here: https://github.com/PyCQA/pylint/issues/2607
    class Executable(BaseRunner.Executable["CTCDebugRunner"]):
        def collect_results(self, results: List[Dict]) -> None:
            vocabulary = self.executor.decoder.vocabulary
            if len(results) != 1:
                raise RuntimeError("CTCDebugRunners do not support ensembles.")
            logits = results[0]["logits"]
            # Greedy decoding: take the argmax over the vocabulary axis.
            # Logits appear to be time-major, hence the transpose to get
            # one row of indices per batch item — TODO confirm shape.
            argmaxes = np.argmax(logits, axis=2).T
            decoded_batch = []
            for indices in argmaxes:
                decoded_instance = []
                for index in indices:
                    # Index len(vocabulary) is the CTC blank label.
                    if index == len(vocabulary):
                        symbol = "<BLANK>"
                    else:
                        symbol = vocabulary.index_to_word[index]
                    decoded_instance.append(symbol)
                decoded_batch.append(decoded_instance)
            self.set_runner_result(outputs=decoded_batch, losses=[])
    # pylint: enable=too-few-public-methods
    def __init__(self,
                 output_series: str,
                 decoder: CTCDecoder) -> None:
        check_argument_types()
        super().__init__(output_series, decoder)
    @tensor
    def fetches(self) -> Dict[str, tf.Tensor]:
        return {"logits": self.decoder.logits}
    @property
    def loss_names(self) -> List[str]:
        return []
| bsd-3-clause | 2933ee68d837f15c4ee464ed58aee44b | 32.814815 | 79 | 0.608434 | 4.296471 | false | false | false | false |
ufal/neuralmonkey | neuralmonkey/processors/speech.py | 2 | 2025 | from typing import Callable
import numpy as np
from python_speech_features import mfcc, fbank, logfbank, ssc, delta
from neuralmonkey.readers.audio_reader import Audio
# pylint: disable=invalid-name
def SpeechFeaturesPreprocessor(feature_type: str = "mfcc",
                               delta_order: int = 0,
                               delta_window: int = 2,
                               **kwargs) -> Callable:
    """Build a preprocessor that computes speech features.

    The selected feature type (e.g. MFCC) is computed first; for the extra
    keyword arguments specific to each feature type, see
    http://python-speech-features.readthedocs.io/. Delta features up to
    `delta_order` are then appended.

    By default, 13 MFCCs per frame are computed. Setting `delta_order=2`
    adds delta and delta-delta features (39 coefficients per frame).

    Arguments:
        feature_type: mfcc, fbank, logfbank or ssc (default is mfcc)
        delta_order: maximum order of the delta features (default is 0)
        delta_window: window size for delta features (default is 2)
        **kwargs: keyword arguments for the appropriate function from
            python_speech_features

    Returns:
        A function mapping an audio input to a numpy array of shape
        [num_frames, num_features].
    """
    if feature_type not in FEATURE_TYPES:
        raise ValueError(
            "Unknown speech feature type '{}'".format(feature_type))

    def preprocess(audio: Audio) -> np.ndarray:
        # Base features first, then successively higher-order deltas,
        # concatenated along the feature axis.
        blocks = [FEATURE_TYPES[feature_type](
            audio.data, samplerate=audio.rate, **kwargs)]
        for _ in range(delta_order):
            blocks.append(delta(blocks[-1], delta_window))
        return np.concatenate(blocks, axis=1)

    return preprocess
def _fbank(*args, **kwargs) -> np.ndarray:
    """Wrapper around ``fbank`` that discards the returned energy vector."""
    features, _energies = fbank(*args, **kwargs)
    return features
# Mapping of feature-type names accepted by SpeechFeaturesPreprocessor to
# the functions computing them (fbank is wrapped to drop its energies).
FEATURE_TYPES = {"mfcc": mfcc,
                 "fbank": _fbank,
                 "logfbank": logfbank,
                 "ssc": ssc}
| bsd-3-clause | 61a824d082b9fdd1586ed54073eb50f7 | 32.75 | 77 | 0.627654 | 4.05 | false | false | false | false |
ufal/neuralmonkey | neuralmonkey/attention/base_attention.py | 1 | 8051 | """Decoding functions using multiple attentions for RNN decoders.
See http://arxiv.org/abs/1606.07481
The attention mechanisms used in Neural Monkey are inherited from the
``BaseAttention`` class defined in this module.
The attention function can be viewed as a soft lookup over an associative
memory. The *query* vector is used to compute a similarity score of the *keys*
of the associative memory and the resulting scores are used as weights in a
weighted sum of the *values* associated with the keys. We call the
(unnormalized) similarity scores *energies*, we call *attention distribution*
the energies after (softmax) normalization, and we call the resulting
weighted sum of states a *context vector*.
Note that it is possible (and true in most cases) that the attention keys
are equal to the values. In case of self-attention, even queries are from the
same set of vectors.
To abstract over different flavors of attention mechanism, we conceptualize the
procedure as follows: Each attention object has the ``attention`` function
which operates on the query tensor. The attention function receives the query
tensor (the decoder state) and optionally the previous state of the decoder,
and computes the context vector. The function also receives a *loop state*,
which is used to store data in an autoregressive loop that generates a
sequence.
The attention uses the loop state to store attention distributions
and context vectors in time. This structure is called ``AttentionLoopState``.
To be able to initialize the loop state, each attention object that uses this
feature defines the ``initial_loop_state`` function with empty tensors.
Since there can be many *modes* in which the decoder that uses the attention
operates, the attention objects have the ``finalize_loop`` method, which takes
the last attention loop state and the name of the mode (a string) and processes
this data to be available in the ``histories`` dictionary. The single and most
used example of two *modes* are the *train* and *runtime* modes of the
autoregressive decoder.
"""
from typing import Dict, Optional, Any, Tuple, Union
import tensorflow as tf
from neuralmonkey.attention.namedtuples import AttentionLoopState
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.model.parameterized import InitializerSpecs
from neuralmonkey.model.stateful import TemporalStateful, SpatialStateful
# pylint: disable=invalid-name
Attendable = Union[TemporalStateful, SpatialStateful]
# pylint: enable=invalid-name
def empty_attention_loop_state(
        batch_size: Union[int, tf.Tensor],
        length: Union[int, tf.Tensor],
        dimension: Union[int, tf.Tensor]) -> AttentionLoopState:
    """Create an empty attention loop state.

    The loop state is a technical structure holding attention
    distributions and context vectors across decoding time steps; it is
    used with the ``tf.while_loop`` dynamic implementation of decoders.

    Arguments:
        batch_size: The size of the batch.
        length: The number of encoder states (keys).
        dimension: The dimension of the context vector.

    Returns:
        An ``AttentionLoopState`` with two zero-length tensors: one for
        the attention distributions and one for the context vectors.
    """
    contexts = tf.zeros(shape=[0, batch_size, dimension], name="contexts")
    weights = tf.zeros(shape=[0, batch_size, length], name="distributions")
    return AttentionLoopState(contexts=contexts, weights=weights)
def get_attention_states(encoder: Attendable) -> tf.Tensor:
    """Return the temporal or spatial states of an encoder.

    Arguments:
        encoder: The encoder with the states to attend.

    Returns:
        A 3D tensor with dimensions (batch, time, state size). Spatial
        encoders (e.g. CNNs) have their two spatial dimensions flattened
        into a single time-like axis.
    """
    if isinstance(encoder, TemporalStateful):
        return encoder.temporal_states
    if isinstance(encoder, SpatialStateful):
        states = encoder.spatial_states
        height, width, channels = states.get_shape().as_list()[1:]
        return tf.reshape(states, [-1, height * width, channels])
    raise TypeError("Unknown encoder type")
def get_attention_mask(encoder: Attendable) -> Optional[tf.Tensor]:
    """Return the temporal or spatial mask of an encoder.

    Arguments:
        encoder: The encoder to get the mask from.

    Returns:
        A 2D tensor of shape (batch, time); for spatial encoders the two
        spatial dimensions are flattened. May be ``None`` for spatial
        encoders without a mask.
    """
    if isinstance(encoder, TemporalStateful):
        if encoder.temporal_mask is None:
            raise ValueError("The encoder temporal mask should not be none")
        return encoder.temporal_mask
    if isinstance(encoder, SpatialStateful):
        mask = encoder.spatial_mask
        if mask is None:
            return None
        shape = encoder.spatial_states.get_shape().as_list()
        return tf.reshape(mask, [-1, shape[1] * shape[2]])
    raise TypeError("Unknown encoder type")
class BaseAttention(ModelPart):
    """The abstract class for the attention mechanism flavors."""
    def __init__(self,
                 name: str,
                 reuse: ModelPart = None,
                 save_checkpoint: str = None,
                 load_checkpoint: str = None,
                 initializers: InitializerSpecs = None) -> None:
        """Create a new ``BaseAttention`` object."""
        ModelPart.__init__(
            self, name, reuse, save_checkpoint, load_checkpoint, initializers)
        # Dimensionality of the query vectors; set by concrete subclasses.
        self.query_state_size = None  # type: tf.Tensor
        # Mode name -> stacked attention data, populated by finalize_loop().
        self._histories = {}  # type: Dict[str, tf.Tensor]
    @property
    def histories(self) -> Dict[str, tf.Tensor]:
        """Return the attention histories dictionary.
        Use this property after it has been populated.
        Returns:
            The attention histories dictionary.
        """
        return self._histories
    def attention(self,
                  query: tf.Tensor,
                  decoder_prev_state: tf.Tensor,
                  decoder_input: tf.Tensor,
                  loop_state: Any) -> Tuple[tf.Tensor, Any]:
        """Get context vector for a given query."""
        raise NotImplementedError("Abstract method")
    def initial_loop_state(self) -> Any:
        """Get initial loop state for the attention object.
        Returns:
            The newly created initial loop state object.
        """
        raise NotImplementedError("Abstract method")
    def finalize_loop(self, key: str, last_loop_state: Any) -> None:
        """Store the attention histories from loop state under a given key.
        Arguments:
            key: The key to the histories dictionary to store the data in.
            last_loop_state: The loop state object from the last state of
                the decoding loop.
        """
        raise NotImplementedError("Abstract method")
    @property
    def context_vector_size(self) -> int:
        """Return the static size of the context vector.
        Returns:
            An integer specifying the context vector dimension.
        """
        raise NotImplementedError("Abstract property")
    def visualize_attention(self, key: str, max_outputs: int = 16) -> None:
        """Include the attention histories under a given key into a summary.
        Arguments:
            key: The key to the attention histories dictionary.
            max_outputs: Maximum number of images to save.
        """
        if key not in self.histories:
            raise KeyError(
                "Key {} not among attention histories".format(key))
        # Transpose the (presumably time-major) history to
        # (batch, keys, time) and add a channel axis so each example can
        # be rendered as a single-channel image.
        alignments = tf.expand_dims(
            tf.transpose(self.histories[key], perm=[1, 2, 0]), -1)
        summary_name = "{}.{}".format(self.name, key)
        tf.summary.image(
            summary_name, alignments, collections=["summary_att_plots"],
            max_outputs=max_outputs)
ufal/neuralmonkey | neuralmonkey/processors/helpers.py | 1 | 1266 | from typing import Any, Callable, Generator, List
from random import randint
def preprocess_char_based(sentence: List[str]) -> List[str]:
    """Split a tokenized sentence into characters, spaces between tokens."""
    joined = " ".join(sentence)
    return [char for char in joined]
def preprocess_add_noise(sentence: List[str]) -> List[str]:
    """Return a noisy copy of *sentence* with random adjacent swaps.

    Performs ``len(sentence) // 2`` swaps of randomly chosen neighbouring
    tokens. The input list itself is left untouched.
    """
    noisy = sentence[:]
    length = len(sentence)
    if length > 1:
        for _ in range(length // 2):
            swap = randint(0, length - 2)
            # Real swap via tuple assignment. The previous two-statement
            # version overwrote noisy[swap] first, so both positions ended
            # up holding noisy[swap + 1] and a token was lost.
            noisy[swap], noisy[swap + 1] = noisy[swap + 1], noisy[swap]
    return noisy
# TODO refactor post-processors to work on sentence level
def postprocess_char_based(sentences: List[List[str]]) -> List[List[str]]:
    """Merge character-level output back into space-separated tokens."""
    return ["".join(chars).split(" ") for chars in sentences]
def untruecase(
        sentences: List[List[str]]) -> Generator[List[str], None, None]:
    """Capitalize the first token of every non-empty sentence."""
    for tokens in sentences:
        if not tokens:
            yield []
        else:
            yield [tokens[0].capitalize()] + tokens[1:]
def pipeline(processors: List[Callable]) -> Callable:
    """Compose *processors* into a single callable, applied left to right."""
    def process(data: Any) -> Any:
        result = data
        for step in processors:
            result = step(result)
        return result
    return process
| bsd-3-clause | 7272700bbac13b5da5c6a041f172d878 | 24.836735 | 74 | 0.605845 | 4.044728 | false | false | false | false |
ufal/neuralmonkey | neuralmonkey/runners/regression_runner.py | 1 | 1912 | from typing import Dict, List, Callable
import numpy as np
import tensorflow as tf
from typeguard import check_argument_types
from neuralmonkey.decoders.sequence_regressor import SequenceRegressor
from neuralmonkey.decorators import tensor
from neuralmonkey.runners.base_runner import BaseRunner
# pylint: disable=invalid-name
Postprocessor = Callable[[List[float]], List[float]]
# pylint: enable=invalid-name
class RegressionRunner(BaseRunner[SequenceRegressor]):
    """A runner that takes the predictions of a sequence regressor."""
    # pylint: disable=too-few-public-methods
    # Pylint issue here: https://github.com/PyCQA/pylint/issues/2607
    class Executable(BaseRunner.Executable["RegressionRunner"]):
        def collect_results(self, results: List[Dict]) -> None:
            # Average the predictions over all sessions (ensembling) and
            # sum their MSE losses.
            predictions_sum = np.zeros_like(results[0]["prediction"])
            mse_loss = 0.
            for sess_result in results:
                if "mse" in sess_result:
                    mse_loss += sess_result["mse"]
                predictions_sum += sess_result["prediction"]
            predictions = (predictions_sum / len(results)).tolist()
            if self.executor.postprocess is not None:
                predictions = self.executor.postprocess(predictions)
            self.set_runner_result(outputs=predictions, losses=[mse_loss])
    # pylint: enable=too-few-public-methods
    def __init__(self,
                 output_series: str,
                 decoder: SequenceRegressor,
                 postprocess: Postprocessor = None) -> None:
        check_argument_types()
        super().__init__(output_series, decoder)
        # Optional function applied to the averaged predictions.
        self.postprocess = postprocess
    @tensor
    def fetches(self) -> Dict[str, tf.Tensor]:
        return {"prediction": self.decoder.predictions,
                "mse": self.decoder.cost}
    @property
    def loss_names(self) -> List[str]:
        return ["mse"]
| bsd-3-clause | 0d807aff5d86e00f44146fb93df8d5c7 | 33.142857 | 74 | 0.650105 | 4.248889 | false | false | false | false |
ufal/neuralmonkey | neuralmonkey/model/model_part.py | 1 | 4048 | """Basic functionality of all model parts."""
from abc import ABCMeta
from typing import MutableSet, Set, List, Tuple, Iterable
from neuralmonkey.model.parameterized import Parameterized, InitializerSpecs
from neuralmonkey.model.feedable import Feedable
class GenericModelPart(metaclass=ABCMeta):
    """Base class for Neural Monkey model parts.
    Neural Monkey dynamically decides which model parts are in use when using a
    specific trainer or a runner. Each trainer/runner holds a reference to a
    top-level model part, which is then responsible for collecting references
    to all `Parameterized` and `Feedable` objects that contribute to the
    computation of its Tensors. This behavior is implemented using the
    `get_dependencies` method, which is called recursively on all instances of
    `GenericModelPart` class that are references from within a model part.
    Apart from the `get_dependencies` method, this class also provides the
    `dependencies` property which store the names of the Python class
    attributes that are regarded as potential dependents of the
    `GenericModelPart` object. These dependents are automatically checked for
    type and when they are instances of the `GenericModelPart` class, results
    of their `get_dependencies` are united and returned as dependencies of the
    current object.
    """
    @property
    def dependencies(self) -> List[str]:
        """Return a list of attribute names regarded as dependents."""
        return ["encoder", "parent_decoder", "input_sequence", "attentions",
                "encoders"]
    def __get_deps(
            self,
            attr: str,
            feedables: MutableSet[Feedable],
            parameterizeds: MutableSet[Parameterized]) -> None:
        # Collect the dependencies reachable through a single attribute and
        # merge them into the accumulator sets in place.
        attr_val = getattr(self, attr, None)
        if attr_val is None:
            return
        deps = []  # type: List[GenericModelPart]
        if isinstance(attr_val, GenericModelPart):
            deps = [attr_val]
        elif isinstance(attr_val, Iterable):
            # Attributes holding collections (e.g. `encoders`) contribute
            # each of their GenericModelPart members.
            deps = [a for a in attr_val if isinstance(a, GenericModelPart)]
        for dep in deps:
            feeds, params = dep.get_dependencies()
            feedables |= feeds
            parameterizeds |= params
    def get_dependencies(self) -> Tuple[Set[Feedable], Set[Parameterized]]:
        """Collect all dependents of this object recursively.
        The dependents are collected using the `dependencies` property. Each
        stores a potential dependent object. If the object exsits and is an
        instance of `GenericModelPart`, dependents are collected recursively by
        calling its `get_dependencies` method.
        If the object itself is instance of `Feedable` or `Parameterized`
        class, it is added among the respective sets returned.
        Returns:
            A `Tuple` of `Set`s of `Feedable` and `Parameterized` objects.
        """
        feedables = set()  # type: Set[Feedable]
        parameterizeds = set()  # type: Set[Parameterized]
        if isinstance(self, Feedable):
            feedables |= {self}
        if isinstance(self, Parameterized):
            parameterizeds |= {self}
        for attr in self.dependencies:
            self.__get_deps(attr, feedables, parameterizeds)
        return feedables, parameterizeds
class ModelPart(Parameterized, GenericModelPart, Feedable):
    """Base class of all parametric feedable model parts.
    Serves as a syntactic sugar for labeling `Feedable`, `Parameterized`, and
    `GenericModelPart` objects.
    """
    def __init__(self,
                 name: str,
                 reuse: "ModelPart" = None,
                 save_checkpoint: str = None,
                 load_checkpoint: str = None,
                 initializers: InitializerSpecs = None) -> None:
        Parameterized.__init__(self, name, reuse, save_checkpoint,
                               load_checkpoint, initializers)
        GenericModelPart.__init__(self)
        # Create the feedable placeholders inside this part's variable
        # scope so their names carry the proper prefix.
        with self.use_scope():
            Feedable.__init__(self)
| bsd-3-clause | 6c2f15c766ae24fd8fe8dd6114a0abf3 | 38.300971 | 79 | 0.656374 | 4.482835 | false | false | false | false |
ufal/neuralmonkey | neuralmonkey/attention/transformer_cross_layer.py | 1 | 9898 | """Input combination strategies for multi-source Transformer decoder."""
# TODO add citation when URL becomes available
from typing import Callable, List
import tensorflow as tf
from neuralmonkey.attention.scaled_dot_product import attention
from neuralmonkey.tf_utils import layer_norm
# pylint: disable=too-many-arguments
def single(
        queries: tf.Tensor,
        states: tf.Tensor,
        mask: tf.Tensor,
        n_heads: int,
        attention_dropout_callback: Callable[[tf.Tensor], tf.Tensor],
        dropout_callback: Callable[[tf.Tensor], tf.Tensor],
        normalize: bool = True,
        use_dropout: bool = True,
        residual: bool = True,
        use_att_transform_bias: bool = False):
    """Run attention on a single encoder.

    Arguments:
        queries: The input for the attention.
        states: The encoder states (keys & values).
        mask: The temporal mask of the encoder.
        n_heads: Number of attention heads to use.
        attention_dropout_callback: Dropout function to apply in attention.
        dropout_callback: Dropout function to apply on the attention output.
        normalize: If True, run layer normalization on the queries.
        use_dropout: If True, perform dropout on the attention output.
        residual: If True, sum the context vector with the input queries.
        use_att_transform_bias: If True, enable bias in the attention head
            projections (for all queries, keys and values).

    Returns:
        A Tensor that contains the context vector.
    """
    # Optionally layer-normalize the queries before attending.
    if normalize:
        attention_input = layer_norm(queries)
    else:
        attention_input = queries

    # TODO handle attention histories
    context, _ = attention(
        queries=attention_input,
        keys=states,
        values=states,
        keys_mask=mask,
        num_heads=n_heads,
        dropout_callback=attention_dropout_callback,
        use_bias=use_att_transform_bias)

    if use_dropout:
        context = dropout_callback(context)

    # Residual connection back to the (un-normalized) queries.
    if residual:
        context += queries

    return context
# pylint: enable=too-many-arguments
def serial(queries: tf.Tensor,
           encoder_states: List[tf.Tensor],
           encoder_masks: List[tf.Tensor],
           heads: List[int],
           attention_dropout_callbacks: List[Callable[[tf.Tensor], tf.Tensor]],
           dropout_callback: Callable[[tf.Tensor], tf.Tensor]) -> tf.Tensor:
    """Run attention with serial input combination.

    For every encoder in turn: layer-normalize, attend, apply dropout and
    add the residual connection; the result becomes the query for the next
    encoder.

    Arguments:
        queries: The input for the attention.
        encoder_states: The states of each encoder.
        encoder_masks: The temporal mask of each encoder.
        heads: Number of attention heads to use for each encoder.
        attention_dropout_callbacks: Dropout functions to apply in attention
            over each encoder.
        dropout_callback: The dropout function to apply on the outputs of
            each sub-attention.

    Returns:
        A Tensor that contains the context vector.
    """
    context = queries
    encoders = zip(encoder_states, encoder_masks, heads,
                   attention_dropout_callbacks)
    for index, (states, mask, head_count, attn_dropout) in enumerate(encoders):
        with tf.variable_scope("enc_{}".format(index)):
            context = single(context, states, mask, head_count,
                             attention_dropout_callback=attn_dropout,
                             dropout_callback=dropout_callback)
    return context
def parallel(
        queries: tf.Tensor,
        encoder_states: List[tf.Tensor],
        encoder_masks: List[tf.Tensor],
        heads: List[int],
        attention_dropout_callbacks: List[Callable[[tf.Tensor], tf.Tensor]],
        dropout_callback: Callable[[tf.Tensor], tf.Tensor]) -> tf.Tensor:
    """Run attention with parallel input combination.

    The queries are normalized once, each encoder is attended
    independently (with dropout), and the per-encoder context vectors are
    summed together with the residual connection.

    Arguments:
        queries: The input for the attention.
        encoder_states: The states of each encoder.
        encoder_masks: The temporal mask of each encoder.
        heads: Number of attention heads to use for each encoder.
        attention_dropout_callbacks: Dropout functions to apply in attention
            over each encoder.
        dropout_callback: The dropout function to apply on the outputs of
            each sub-attention.

    Returns:
        A Tensor that contains the context vector.
    """
    norm_queries = layer_norm(queries)
    contexts = []
    encoders = zip(encoder_states, encoder_masks, heads,
                   attention_dropout_callbacks)
    for index, (states, mask, head_count, attn_dropout) in enumerate(encoders):
        with tf.variable_scope("enc_{}".format(index)):
            contexts.append(
                single(norm_queries, states, mask, head_count,
                       attention_dropout_callback=attn_dropout,
                       dropout_callback=dropout_callback,
                       normalize=False, residual=False))
    return sum(contexts) + queries
# pylint: disable=too-many-locals
def hierarchical(
        queries: tf.Tensor,
        encoder_states: List[tf.Tensor],
        encoder_masks: List[tf.Tensor],
        heads: List[int],
        heads_hier: int,
        attention_dropout_callbacks: List[Callable[[tf.Tensor], tf.Tensor]],
        dropout_callback: Callable[[tf.Tensor], tf.Tensor]) -> tf.Tensor:
    """Run attention with hierarchical input combination.

    The procedure is as follows:

    1. normalize queries
    2. attend to every encoder
    3. attend to the resulting context vectors (reuse normalized queries)
    4. apply dropout, add residual connection and return

    Arguments:
        queries: The input for the attention.
        encoder_states: The states of each encoder.
        encoder_masks: The temporal mask of each encoder.
        heads: Number of attention heads to use for each encoder.
        heads_hier: Number of attention heads to use in the second attention.
        attention_dropout_callbacks: Dropout functions to apply in attention
            over each encoder.
        dropout_callback: The dropout function to apply in the second attention
            and over the outputs of each sub-attention.

    Returns:
        A Tensor that contains the context vector.
    """
    normalized_queries = layer_norm(queries)
    contexts = []

    # Dynamic dimensions of the query tensor (batch, query time, channels).
    batch = tf.shape(queries)[0]
    time_q = tf.shape(queries)[1]
    dimension = tf.shape(queries)[2]

    # Step 2: one sub-attention per encoder, each in its own variable scope.
    for i, (states, mask, n_heads, attn_drop_cb) in enumerate(zip(
            encoder_states, encoder_masks, heads,
            attention_dropout_callbacks)):
        with tf.variable_scope("enc_{}".format(i)):
            contexts.append(
                single(normalized_queries, states, mask, n_heads,
                       attention_dropout_callback=attn_drop_cb,
                       dropout_callback=dropout_callback,
                       normalize=False, residual=False))

    # context is of shape [batch, time(q), channels(v)],
    # stack to [batch, time(q), n_encoders, channels(v)]
    # reshape to [batch x time(q), n_encoders, channels(v)]
    stacked_contexts = tf.reshape(
        tf.stack(contexts, axis=2),
        [batch * time_q, len(encoder_states), dimension])

    # hierarchical mask: ones of shape [batch x time(q), n_encoders]
    # (all ones, i.e. the second attention may attend to every encoder)
    hier_mask = tf.ones([batch * time_q, len(encoder_states)])

    # reshape queries to [batch x time(q), 1, channels(v)]
    reshaped_queries = tf.reshape(
        normalized_queries, [batch * time_q, 1, dimension])

    # returned shape [batch x time(q), 1, channels(v)]
    with tf.variable_scope("enc_hier"):
        # NOTE as attention dropout keep probability, we use the
        # dropout_keep_prob value instead of attention_dropout_keep_prob.
        encoder_context_stacked_batch = single(
            reshaped_queries, stacked_contexts, hier_mask, heads_hier,
            attention_dropout_callback=dropout_callback,
            dropout_callback=lambda x: x, normalize=False, use_dropout=False,
            residual=False)

        # reshape back to [batch, time(q), channels(v)]
        encoder_context = tf.reshape(
            encoder_context_stacked_batch, [batch, time_q, dimension])

        encoder_context = dropout_callback(encoder_context)

    # Step 4: residual connection to the original queries.
    return encoder_context + queries
# pylint: enable=too-many-locals
def flat(queries: tf.Tensor,
         encoder_states: List[tf.Tensor],
         encoder_masks: List[tf.Tensor],
         heads: int,
         attention_dropout_callback: Callable[[tf.Tensor], tf.Tensor],
         dropout_callback: Callable[[tf.Tensor], tf.Tensor]) -> tf.Tensor:
    """Run attention with flat input combination.

    The procedure is as follows:

    1. concatenate the states and mask along the time axis
    2. run attention over the concatenation

    Arguments:
        queries: The input for the attention.
        encoder_states: The states of each encoder.
        encoder_masks: The temporal mask of each encoder.
        heads: Number of attention heads to use in the single attention
            over the concatenated encoder states.
        attention_dropout_callback: The dropout function to apply inside
            the attention.
        dropout_callback: The dropout function to apply on the output of the
            attention.

    Returns:
        A Tensor that contains the context vector.
    """
    # Concatenate along axis 1 (time), so all encoders together form one
    # long sequence of attendable states with a matching mask.
    concat_states = tf.concat(encoder_states, 1)
    concat_mask = tf.concat(encoder_masks, 1)

    return single(queries, concat_states, concat_mask, heads,
                  attention_dropout_callback, dropout_callback)
| bsd-3-clause | bdbe0c40fbb7b2e738b3338beaf87f48 | 36.634981 | 79 | 0.646797 | 4.352682 | false | false | false | false |
ufal/neuralmonkey | lib/subword_nmt/bpe_toy.py | 3 | 1687 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Rico Sennrich
"""Use byte pair encoding (BPE) to learn a variable-length encoding of the vocabulary in a text.
Unlike the original BPE, it does not compress the plain text, but can be used to reduce the vocabulary
of a text to a configurable number of symbols, with only a small increase in the number of tokens.
This is an (inefficient) toy implementation that shows the algorithm. For processing large datasets,
indexing and incremental updates can be used to speed up the implementation (see learn_bpe.py).
Reference:
Rico Sennrich, Barry Haddow and Alexandra Birch (2016). Neural Machine Translation of Rare Words with Subword Units.
Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (ACL 2016). Berlin, Germany.
"""
import re
import sys
import collections
def get_stats(vocab):
    """Count frequencies of all adjacent symbol pairs in the vocabulary.

    ``vocab`` maps space-separated symbol strings to word frequencies;
    the result maps each (left, right) symbol pair to the summed
    frequency of the words in which it occurs.
    """
    pair_freqs = collections.defaultdict(int)
    for word, freq in vocab.items():
        symbols = word.split()
        for left, right in zip(symbols, symbols[1:]):
            pair_freqs[left, right] += freq
    return pair_freqs
def merge_vocab(pair, v_in):
    """Apply one BPE merge to the vocabulary.

    Every whitespace-delimited occurrence of the two symbols in ``pair``
    is fused into a single symbol; the word frequencies are carried over
    unchanged.
    """
    merged_symbol = "".join(pair)
    # Lookaround assertions keep the match anchored to whole symbols.
    pattern = re.compile(
        r"(?<!\S)" + re.escape(" ".join(pair)) + r"(?!\S)")
    return {pattern.sub(merged_symbol, word): freq
            for word, freq in v_in.items()}
# Toy corpus: each word is spelled as space-separated symbols with an
# explicit end-of-word marker </w>, mapped to its corpus frequency.
vocab = {'l o w </w>' : 5, 'l o w e r </w>' : 2,
         'n e w e s t </w>' : 6, 'w i d e s t </w>' : 3}
num_merges = 15

# Greedy BPE: repeatedly merge the currently most frequent symbol pair.
for i in range(num_merges):
    pairs = get_stats(vocab)
    best = max(pairs, key=pairs.get)
    # Stop once no pair occurs more than once; further merges would not
    # compress anything.
    if pairs[best] < 2:
        sys.stderr.write('no pair has frequency > 1. Stopping\n')
        break
    vocab = merge_vocab(best, vocab)
    print(best)
| bsd-3-clause | 18f9ea72a60b33c71b80fb0204f53c6a | 34.145833 | 116 | 0.681091 | 3.153271 | false | false | false | false |
ufal/neuralmonkey | neuralmonkey/decoders/autoregressive.py | 1 | 22666 | # pylint: disable=too-many-lines
"""Abstract class for autoregressive decoding.
Either for the recurrent decoder, or for the transformer decoder.
The autoregressive decoder uses the while loop to get the outputs.
Descendants should only specify the initial state and the while loop body.
"""
from typing import NamedTuple, Callable, Optional, Any, List, Dict, Tuple
import tensorflow as tf
from neuralmonkey.dataset import Dataset
from neuralmonkey.decorators import tensor
from neuralmonkey.model.feedable import FeedDict
from neuralmonkey.model.parameterized import InitializerSpecs
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.logging import warn
from neuralmonkey.model.sequence import EmbeddedSequence
from neuralmonkey.nn.utils import dropout
from neuralmonkey.tf_utils import (
append_tensor, get_variable, get_state_shape_invariants)
from neuralmonkey.vocabulary import (
Vocabulary, pad_batch, sentence_mask, UNK_TOKEN_INDEX, START_TOKEN_INDEX,
END_TOKEN_INDEX, PAD_TOKEN_INDEX)
class LoopState(NamedTuple(
        "LoopState",
        [("histories", Any),
         ("constants", Any),
         ("feedables", Any)])):
    """The loop state object.

    The LoopState is a structure that works with the tf.while_loop function.
    The decoder loop state stores all the information that is not invariant
    for the decoder run.

    Attributes:
        histories: A set of tensors that grow in time as the decoder proceeds.
        constants: A set of independent tensors that do not change during the
            entire decoder run.
        feedables: A set of tensors used as the input of a single decoder step.
    """
class DecoderHistories(NamedTuple(
        "DecoderHistories",
        [("logits", tf.Tensor),
         ("output_states", tf.Tensor),
         ("output_symbols", tf.Tensor),
         ("output_mask", tf.Tensor),
         ("other", Any)])):
    """The values collected during the run of an autoregressive decoder.

    This should only record decoding history and the decoding should not be
    dependent on these values.

    Attributes defined here (and in the `other` substructure) should always
    be time-major (e.g., shape(time, batch, ...)).

    Attributes:
        logits: A tensor of shape ``(time, batch, vocabulary)`` which contains
            the unnormalized output scores of words in a vocabulary.
        output_states: A tensor of shape ``(time, batch, state_size)``. The
            states of the decoder before the final output (logit) projection.
        output_symbols: An int tensor of shape ``(time, batch)``. Stores the
            generated symbols. (Either an argmax-ed value from the logits, or
            a target token, during training.)
        output_mask: A bool tensor of shape ``(time, batch)``. Keeps track of
            valid positions in the decoded data (True until the sentence has
            finished).
        other: A structure related to a specific AutoregressiveDecoder
            implementation.
    """
class DecoderConstants(NamedTuple(
        "DecoderConstants",
        [("train_inputs", Optional[tf.Tensor])])):
    """The constants used by an autoregressive decoder.

    Attributes:
        train_inputs: During training, this is populated by the target token
            ids. May be ``None`` (the field is declared Optional) when no
            reference data is available.
    """
class DecoderFeedables(NamedTuple(
        "DecoderFeedables",
        [("step", tf.Tensor),
         ("finished", tf.Tensor),
         ("embedded_input", tf.Tensor),
         ("other", Any)])):
    """The input of a single step of an autoregressive decoder.

    The decoder should be able to generate an output symbol only using the
    information contained in this structure.

    Attributes defined here (and in the `other` substructure) should always
    be batch-major (e.g., shape(batch, ...)).

    Attributes:
        step: A scalar int tensor, stores the number of the current time step.
        finished: A boolean tensor of shape ``(batch)``, which says whether
            the decoding of a sentence in the batch is finished or not. (E.g.
            whether the end token has already been generated.)
        embedded_input: A ``batch``-sized tensor with embedded inputs to the
            decoder. During inference, this contains the previously generated
            tokens. During training, this contains the reference tokens.
        other: A structure related to a specific AutoregressiveDecoder
            implementation.
    """
# pylint: disable=too-many-public-methods,too-many-instance-attributes
class AutoregressiveDecoder(ModelPart):
    """Base class for autoregressive decoders.

    Implements the decoding ``tf.while_loop`` and the training/runtime
    loss computations shared by concrete decoders; subclasses provide
    the step function by overriding ``next_state`` and the abstract
    ``output_dimension`` property.
    """

    # pylint: disable=too-many-arguments,too-many-locals
    def __init__(self,
                 name: str,
                 vocabulary: Vocabulary,
                 data_id: str,
                 max_output_len: int,
                 dropout_keep_prob: float = 1.0,
                 embedding_size: int = None,
                 embeddings_source: EmbeddedSequence = None,
                 tie_embeddings: bool = False,
                 label_smoothing: float = None,
                 supress_unk: bool = False,
                 reuse: ModelPart = None,
                 save_checkpoint: str = None,
                 load_checkpoint: str = None,
                 initializers: InitializerSpecs = None) -> None:
        """Initialize parameters common for all autoregressive decoders.

        Arguments:
            name: Name of the decoder. Should be unique accross all Neural
                Monkey objects.
            vocabulary: Target vocabulary.
            data_id: Target data series.
            max_output_len: Maximum length of an output sequence.
            reuse: Reuse the variables from the model part.
            dropout_keep_prob: Probability of keeping a value during dropout.
            embedding_size: Size of embedding vectors for target words.
            embeddings_source: Embedded sequence to take embeddings from.
            tie_embeddings: Use decoder.embedding_matrix also in place
                of the output decoding matrix.
            label_smoothing: Label smoothing parameter.
            supress_unk: If true, decoder will not produce symbols for unknown
                tokens.

        Raises:
            ValueError: If ``max_output_len``, ``embedding_size`` or
                ``dropout_keep_prob`` is outside its valid range.
        """
        ModelPart.__init__(self, name, reuse, save_checkpoint, load_checkpoint,
                           initializers)

        self.vocabulary = vocabulary
        self.data_id = data_id
        self.max_output_len = max_output_len
        self.dropout_keep_prob = dropout_keep_prob
        self._embedding_size = embedding_size
        self.embeddings_source = embeddings_source
        self.label_smoothing = label_smoothing
        self.tie_embeddings = tie_embeddings
        self.supress_unk = supress_unk

        # Lazily-evaluated lists of encoder outputs; subclasses/attentions
        # replace these callables.
        self.encoder_states = lambda: []  # type: Callable[[], List[tf.Tensor]]
        self.encoder_masks = lambda: []  # type: Callable[[], List[tf.Tensor]]

        # Check the values of the parameters (max_output_len, ...)
        if self.max_output_len <= 0:
            raise ValueError(
                "Maximum sequence length must be a positive integer.")

        if self._embedding_size is not None and self._embedding_size <= 0:
            raise ValueError("Embedding size must be a positive integer.")

        if self.dropout_keep_prob < 0.0 or self.dropout_keep_prob > 1.0:
            raise ValueError("Dropout keep probability must be a real number "
                             "in the interval [0,1].")
    # pylint: enable=too-many-arguments,too-many-locals

    @property
    def embedding_size(self) -> int:
        """Return the target embedding dimensionality.

        Either the explicitly configured size, or the size of the reused
        embedding matrix from ``embeddings_source`` (which takes
        precedence, with a warning when both are given).
        """
        if self.embeddings_source is None:
            if self._embedding_size is None:
                raise ValueError(
                    "You must specify either embedding size or the embedded "
                    "sequence from which to reuse the embeddings (e.g. set "
                    "'embedding_size' or 'embeddings_source' parameter)")
            return self._embedding_size

        if self.embeddings_source is not None:
            if self._embedding_size is not None:
                warn("Overriding the embedding_size parameter with the "
                     "size of the reused embeddings from the encoder.")

        return self.embeddings_source.embedding_matrix.get_shape()[1].value

    @tensor
    def go_symbols(self) -> tf.Tensor:
        # A batch of <s> (start) token indices, used as the first decoder
        # input.
        return tf.fill([self.batch_size],
                       tf.constant(START_TOKEN_INDEX, dtype=tf.int64))

    @property
    def input_types(self) -> Dict[str, tf.DType]:
        return {self.data_id: tf.string}

    @property
    def input_shapes(self) -> Dict[str, tf.TensorShape]:
        return {self.data_id: tf.TensorShape([None, None])}

    @tensor
    def train_tokens(self) -> tf.Tensor:
        return self.dataset[self.data_id]

    @tensor
    def train_inputs(self) -> tf.Tensor:
        # Time-major (after the transpose) matrix of target token indices;
        # indexed by time step in the training loop body.
        return tf.transpose(
            self.vocabulary.strings_to_indices(self.train_tokens))

    @tensor
    def train_mask(self) -> tf.Tensor:
        return sentence_mask(self.train_inputs)

    @tensor
    def decoding_w(self) -> tf.Variable:
        # Output projection matrix; when tying embeddings, the (transposed)
        # embedding matrix is used instead of a separate variable.
        if (self.tie_embeddings
                and self.embedding_size != self.output_dimension):
            raise ValueError(
                "`embedding_size must be equal to the output_projection "
                "size when using the `tie_embeddings` option")

        with tf.name_scope("output_projection"):
            if self.tie_embeddings:
                return tf.transpose(self.embedding_matrix)

            return get_variable(
                "state_to_word_W",
                [self.output_dimension, len(self.vocabulary)],
                initializer=tf.random_uniform_initializer(-0.5, 0.5))

    @tensor
    def decoding_b(self) -> Optional[tf.Variable]:
        # With tied embeddings there is no trainable output bias.
        if self.tie_embeddings:
            return tf.zeros(len(self.vocabulary))

        with tf.name_scope("output_projection"):
            return get_variable(
                "state_to_word_b",
                [len(self.vocabulary)],
                initializer=tf.zeros_initializer())

    @tensor
    def embedding_matrix(self) -> tf.Variable:
        """Variables and operations for embedding of input words.

        If we are reusing word embeddings, this function takes the embedding
        matrix from the first encoder
        """
        if self.embeddings_source is not None:
            return self.embeddings_source.embedding_matrix

        assert self.embedding_size is not None

        return get_variable(
            name="word_embeddings",
            shape=[len(self.vocabulary), self.embedding_size])

    def embed_input_symbols(self, input_symbols: tf.Tensor) -> tf.Tensor:
        """Look up embeddings of the given symbols and apply dropout."""
        embedded_input = tf.nn.embedding_lookup(
            self.embedding_matrix, input_symbols)
        return dropout(embedded_input, self.dropout_keep_prob, self.train_mode)

    @tensor
    def train_loop_result(self) -> LoopState:
        return self.decoding_loop(train_mode=True)

    @tensor
    def train_logits(self) -> tf.Tensor:
        train_result = LoopState(*self.train_loop_result)
        return train_result.histories.logits

    @tensor
    def train_output_states(self) -> tf.Tensor:
        train_result = LoopState(*self.train_loop_result)
        return train_result.histories.output_states

    @tensor
    def train_logprobs(self) -> tf.Tensor:
        return tf.nn.log_softmax(self.train_logits)

    @tensor
    def train_xents(self) -> tf.Tensor:
        train_targets = tf.transpose(self.train_inputs)

        # With label smoothing, replace the default sparse cross-entropy
        # with a smoothed dense one.
        softmax_function = None
        if self.label_smoothing:
            softmax_function = (
                lambda labels, logits: tf.losses.softmax_cross_entropy(
                    tf.one_hot(labels, len(self.vocabulary)),
                    logits, label_smoothing=self.label_smoothing))

        # Return losses of shape (batch, time). Losses on invalid positions
        # are zero.
        return tf.contrib.seq2seq.sequence_loss(
            tf.transpose(self.train_logits, perm=[1, 0, 2]),
            train_targets,
            tf.transpose(self.train_mask),
            average_across_batch=False,
            average_across_timesteps=False,
            softmax_loss_function=softmax_function)

    @tensor
    def train_loss(self) -> tf.Tensor:
        # Cross entropy mean over all words in the batch
        # (could also be done as a mean over sentences)
        return tf.reduce_sum(self.train_xents) / tf.reduce_sum(self.train_mask)

    @property
    def cost(self) -> tf.Tensor:
        return self.train_loss

    @tensor
    def runtime_loop_result(self) -> LoopState:
        return self.decoding_loop(train_mode=False)

    @tensor
    def runtime_logits(self) -> tf.Tensor:
        runtime_result = LoopState(*self.runtime_loop_result)
        return runtime_result.histories.logits

    @tensor
    def runtime_output_states(self) -> tf.Tensor:
        runtime_result = LoopState(*self.runtime_loop_result)
        return runtime_result.histories.output_states

    @tensor
    def runtime_mask(self) -> tf.Tensor:
        runtime_result = LoopState(*self.runtime_loop_result)
        return runtime_result.histories.output_mask

    @tensor
    def decoded(self) -> tf.Tensor:
        # We disable generating of <pad> tokens at index 0
        # (self.runtime_logits[:, :, 1:]). This shifts the indices
        # of the decoded tokens (therefore, we add +1 to the decoded
        # output indices).
        # self.runtime_logits is of size [batch, sentence_len, vocabulary_size]
        return tf.argmax(self.runtime_logits[:, :, 1:], -1) + 1

    @tensor
    def runtime_xents(self) -> tf.Tensor:
        train_targets = tf.transpose(self.train_inputs)
        batch_major_logits = tf.transpose(self.runtime_logits, [1, 0, 2])
        min_time = tf.minimum(tf.shape(train_targets)[1],
                              tf.shape(batch_major_logits)[1])

        # NOTE if done properly, there should be padding of the shorter
        # sequence instead of cropping to the length of the shorter one
        return tf.contrib.seq2seq.sequence_loss(
            logits=batch_major_logits[:, :min_time],
            targets=train_targets[:, :min_time],
            weights=tf.transpose(self.train_mask)[:, :min_time],
            average_across_batch=False,
            average_across_timesteps=False)

    @tensor
    def runtime_loss(self) -> tf.Tensor:
        return (tf.reduce_sum(self.runtime_xents)
                / tf.reduce_sum(tf.to_float(self.runtime_mask)))

    @tensor
    def runtime_logprobs(self) -> tf.Tensor:
        return tf.nn.log_softmax(self.runtime_logits)

    @property
    def output_dimension(self) -> int:
        raise NotImplementedError("Abstract property")

    def get_initial_feedables(self) -> DecoderFeedables:
        """Build the feedables for the first decoding step."""
        return DecoderFeedables(
            step=tf.constant(0, tf.int32),
            finished=tf.zeros([self.batch_size], dtype=tf.bool),
            embedded_input=self.embed_input_symbols(self.go_symbols),
            other=None)

    def get_initial_histories(self) -> DecoderHistories:
        """Build empty (zero-length time axis) history tensors.

        The time dimension grows inside the loop via ``append_tensor``.
        """
        output_states = tf.zeros(
            shape=[0, self.batch_size, self.embedding_size],
            dtype=tf.float32,
            name="hist_output_states")

        output_mask = tf.zeros(
            shape=[0, self.batch_size],
            dtype=tf.bool,
            name="hist_output_mask")

        output_symbols = tf.zeros(
            shape=[0, self.batch_size],
            dtype=tf.int64,
            name="hist_output_symbols")

        logits = tf.zeros(
            shape=[0, self.batch_size, len(self.vocabulary)],
            dtype=tf.float32,
            name="hist_logits")

        return DecoderHistories(
            logits=logits,
            output_states=output_states,
            output_mask=output_mask,
            output_symbols=output_symbols,
            other=None)

    def get_initial_constants(self) -> DecoderConstants:
        return DecoderConstants(train_inputs=self.train_inputs)

    def get_initial_loop_state(self) -> LoopState:
        return LoopState(
            feedables=self.get_initial_feedables(),
            histories=self.get_initial_histories(),
            constants=self.get_initial_constants())

    def loop_continue_criterion(self, *args) -> tf.Tensor:
        """Decide whether to break out of the while loop.

        The loop continues while at least one sentence is unfinished and
        the maximum output length has not been reached.

        Arguments:
            loop_state: ``LoopState`` instance (see the docs for this module).
                Represents current decoder loop state.
        """
        loop_state = LoopState(*args)
        finished = loop_state.feedables.finished
        not_all_done = tf.logical_not(tf.reduce_all(finished))
        before_max_len = tf.less(loop_state.feedables.step,
                                 self.max_output_len)
        return tf.logical_and(not_all_done, before_max_len)

    def next_state(self, loop_state: LoopState) -> Tuple[tf.Tensor, Any, Any]:
        """Compute the next output state (implemented by subclasses)."""
        raise NotImplementedError("Abstract method.")

    def get_body(self, train_mode: bool, sample: bool = False,
                 temperature: float = 1.) -> Callable:
        """Return the while loop body function."""

        def is_finished(finished: tf.Tensor, symbols: tf.Tensor) -> tf.Tensor:
            # A sentence is finished once it has emitted the end token.
            has_just_finished = tf.equal(symbols, END_TOKEN_INDEX)
            return tf.logical_or(finished, has_just_finished)

        def state_to_logits(state: tf.Tensor) -> tf.Tensor:
            # Project the decoder state to vocabulary-sized logits.
            logits = tf.matmul(state, self.decoding_w)
            logits += self.decoding_b

            if self.supress_unk:
                # Push the <unk> logit towards -inf so it is never selected.
                unk_mask = tf.one_hot(
                    UNK_TOKEN_INDEX, depth=len(self.vocabulary), on_value=-1e9)
                logits += unk_mask

            return logits

        def logits_to_symbols(logits: tf.Tensor,
                              loop_state: LoopState) -> tf.Tensor:
            step = loop_state.feedables.step
            if sample:
                next_symbols = tf.squeeze(
                    tf.multinomial(logits, num_samples=1), axis=1)
            elif train_mode:
                # Teacher forcing: feed the reference token.
                next_symbols = loop_state.constants.train_inputs[step]
            else:
                next_symbols = tf.argmax(logits, axis=1)

            int_unfinished_mask = tf.to_int64(
                tf.logical_not(loop_state.feedables.finished))

            # Note this works only when PAD_TOKEN_INDEX is 0. Otherwise
            # this have to be rewritten
            assert PAD_TOKEN_INDEX == 0
            next_symbols = next_symbols * int_unfinished_mask

            return next_symbols

        def body(*args) -> LoopState:
            loop_state = LoopState(*args)
            feedables = loop_state.feedables
            histories = loop_state.histories

            with tf.variable_scope(self._variable_scope, reuse=tf.AUTO_REUSE):
                output_state, dec_other, hist_other = self.next_state(
                    loop_state)

                logits = state_to_logits(output_state)
                logits /= temperature

                next_symbols = logits_to_symbols(logits, loop_state)
                finished = is_finished(feedables.finished, next_symbols)

            next_feedables = DecoderFeedables(
                step=feedables.step + 1,
                finished=finished,
                embedded_input=self.embed_input_symbols(next_symbols),
                other=dec_other)

            next_histories = DecoderHistories(
                logits=append_tensor(histories.logits, logits),
                output_states=append_tensor(
                    histories.output_states, output_state),
                output_symbols=append_tensor(
                    histories.output_symbols, next_symbols),
                output_mask=append_tensor(
                    histories.output_mask, tf.logical_not(finished)),
                other=hist_other)

            return LoopState(
                feedables=next_feedables,
                histories=next_histories,
                constants=loop_state.constants)

        return body

    def finalize_loop(self, final_loop_state: LoopState,
                      train_mode: bool) -> None:
        """Execute post-while loop operations.

        Arguments:
            final_loop_state: Decoder loop state at the end
                of the decoding loop.
            train_mode: Boolean flag, telling whether this is
                a training run.
        """

    def decoding_loop(self, train_mode: bool, sample: bool = False,
                      temperature: float = 1) -> LoopState:
        """Run the decoding while loop.

        Calls get_initial_loop_state and constructs tf.while_loop
        with the continuation criterion returned from loop_continue_criterion,
        and body function returned from get_body.

        After finishing the tf.while_loop, it calls finalize_loop
        to further postprocess the final decoder loop state (usually
        by stacking Tensors containing decoding histories).

        Arguments:
            train_mode: Boolean flag, telling whether this is
                a training run.
            sample: Boolean flag, telling whether we should sample
                the output symbols from the output distribution instead
                of using argmax or gold data.
            temperature: float value specifying the softmax temperature
        """
        initial_loop_state = self.get_initial_loop_state()

        # The control dependency forces the projection variables to exist
        # before the loop body (which uses them) is traced.
        with tf.control_dependencies([self.decoding_w, self.decoding_b]):
            final_loop_state = tf.while_loop(
                self.loop_continue_criterion,
                self.get_body(train_mode, sample, temperature),
                initial_loop_state,
                shape_invariants=tf.contrib.framework.nest.map_structure(
                    get_state_shape_invariants, initial_loop_state))

        self.finalize_loop(final_loop_state, train_mode)

        return final_loop_state

    def feed_dict(self, dataset: Dataset, train: bool = False) -> FeedDict:
        """Populate the feed dictionary for the decoder object.

        Arguments:
            dataset: The dataset to use for the decoder.
            train: Boolean flag, telling whether this is a training run.

        Raises:
            ValueError: If training was requested without reference data.
        """
        fd = ModelPart.feed_dict(self, dataset, train)

        sentences = dataset.maybe_get_series(self.data_id)

        if sentences is None and train:
            raise ValueError("When training, you must feed "
                             "reference sentences")

        if sentences is not None:
            fd[self.train_tokens] = pad_batch(
                list(sentences), self.max_output_len, add_start_symbol=False,
                add_end_symbol=True)

        return fd
| bsd-3-clause | 5c204f2ea76de930d1c8c54e8e276e30 | 37.811644 | 79 | 0.61215 | 4.269354 | false | false | false | false |
ufal/neuralmonkey | neuralmonkey/evaluators/beer.py | 1 | 2482 | import tempfile
import subprocess
from typing import List
from typeguard import check_argument_types
from neuralmonkey.logging import log
from neuralmonkey.evaluators.evaluator import Evaluator
class BeerWrapper(Evaluator[List[str]]):
    """Wrapper for BEER scorer.

    Paper: http://aclweb.org/anthology/D14-1025
    Code: https://github.com/stanojevic/beer
    """

    def __init__(self,
                 wrapper: str,
                 name: str = "BEER",
                 encoding: str = "utf-8") -> None:
        """Initialize the BEER wrapper.

        Args:
            name: Name of the evaluator.
            wrapper: Path to the BEER's executable.
            encoding: Data encoding.
        """
        check_argument_types()
        super().__init__(name)
        self.wrapper = wrapper
        self.encoding = encoding

    def serialize_to_bytes(self, sentences: List[List[str]]) -> bytes:
        """Encode tokenized sentences as bytes, one sentence per line."""
        joined = [" ".join(r) for r in sentences]
        string = "\n".join(joined) + "\n"
        return string.encode(self.encoding)

    def score_batch(self,
                    hypotheses: List[List[str]],
                    references: List[List[str]]) -> float:
        """Run the external BEER scorer and parse its score.

        Writes references and hypotheses into temporary files, invokes the
        wrapper executable on them, and reads the score as the last
        whitespace-separated token of the first output line. Returns 0.0
        when the output cannot be parsed.
        """
        ref_bytes = self.serialize_to_bytes(references)
        hyp_bytes = self.serialize_to_bytes(hypotheses)

        with tempfile.NamedTemporaryFile() as reffile, \
                tempfile.NamedTemporaryFile() as hypfile:
            # Flush so the subprocess sees the complete files on disk.
            reffile.write(ref_bytes)
            reffile.flush()

            hypfile.write(hyp_bytes)
            hypfile.flush()

            args = [self.wrapper, "-r", reffile.name, "-s", hypfile.name]
            output_proc = subprocess.run(args,
                                         stderr=subprocess.PIPE,
                                         stdout=subprocess.PIPE)

            proc_stdout = output_proc.stdout.decode("utf-8")  # type: ignore
            lines = proc_stdout.splitlines()

            if not lines:
                return 0.0
            try:
                # BEER prints the score as the last token of its first line.
                beer_score = float(lines[0].split()[-1])
                return beer_score
            except IndexError:
                log("Error: Malformed output from BEER wrapper:", color="red")
                log(proc_stdout, color="red")
                log("=======", color="red")
                return 0.0
            except ValueError:
                log("Value error - beer '{}' is not a number.".format(
                    lines[0]), color="red")
                return 0.0
| bsd-3-clause | 5984f38326c6b692c25fa9cd129e2c29 | 30.820513 | 78 | 0.532635 | 4.339161 | false | false | false | false |
ufal/neuralmonkey | neuralmonkey/trainers/self_critical_objective.py | 1 | 8044 | """Training objective for self-critical learning.
Self-critic learning is a modification of the REINFORCE algorithm that uses the
reward of the train-time decoder output as a baseline in the update step.
For more details see: https://arxiv.org/pdf/1612.00563.pdf
"""
from typing import Callable, Iterable, Tuple, Optional
from itertools import takewhile
from collections import Counter
import numpy as np
import tensorflow as tf
from typeguard import check_argument_types
from neuralmonkey.decoders.decoder import Decoder
from neuralmonkey.decorators import tensor
from neuralmonkey.trainers.generic_trainer import Objective
from neuralmonkey.vocabulary import END_TOKEN_INDEX
# pylint: disable=invalid-name
# Type of reward callbacks: (references, decoded) index arrays in,
# per-sentence reward array out.
RewardFunction = Callable[[np.ndarray, np.ndarray], np.ndarray]
# pylint: enable=invalid-name
class SelfCriticalObjective(Objective[Decoder]):
    """Self-critical sequence training objective.

    Uses the reward of the greedy train-time output as the REINFORCE
    baseline for the runtime (decoded) output.
    """

    def __init__(self, decoder: Decoder, reward_function: RewardFunction,
                 weight: float = None) -> None:
        """Self-critical objective.

        Args:
            decoder: A recurrent decoder.
            reward_function: A reward function computing score in Python.
            weight: Mixing weight for a trainer.

        Returns:
            Objective object to be used in generic trainer.
        """
        check_argument_types()
        name = "{}_self_critical".format(decoder.name)
        super().__init__(name, decoder)

        self.reward_function = reward_function
        self._weight = weight

    @tensor
    def weight(self) -> Optional[tf.Tensor]:
        # None means the trainer uses its default weighting.
        if self._weight is None:
            return None
        return tf.constant(self._weight)

    @tensor
    def loss(self) -> tf.Tensor:
        # decoded, shape (time, batch)
        train_decoded = tf.argmax(self.decoder.train_logits, axis=2)
        runtime_decoded = tf.argmax(self.decoder.runtime_logits, axis=2)
        reference = self.decoder.train_inputs

        # rewards, shape (batch); computed in Python via tf.py_func
        train_reward = tf.py_func(
            self.reward_function, [reference, train_decoded], tf.float32)
        runtime_reward = tf.py_func(
            self.reward_function, [reference, runtime_decoded], tf.float32)

        tf.summary.scalar(
            "train_{}/{}".format(self.decoder.data_id,
                                 self.reward_function.__name__),
            tf.reduce_mean(runtime_reward),
            collections=["summary_train"])

        # REINFORCE score: shape (time, batch, vocab); the train-time
        # reward serves as the baseline.
        score_by_word = reinforce_score(
            runtime_reward, train_reward, runtime_decoded,
            self.decoder.runtime_logits)

        float_mask = tf.to_float(self.decoder.runtime_mask)
        masked_score_by_word = score_by_word * float_mask

        # sum the matrix (dot product of rows, sum over time, and over batch)
        # pylint: disable=invalid-unary-operand-type
        loss = -tf.reduce_sum(masked_score_by_word) / tf.reduce_sum(float_mask)
        # pylint: enable=invalid-unary-operand-type

        tf.summary.scalar(
            "train_{}/self_critical_cost".format(self.decoder.data_id),
            loss, collections=["summary_train"])

        return loss
def reinforce_score(reward: tf.Tensor,
                    baseline: tf.Tensor,
                    decoded: tf.Tensor,
                    logits: tf.Tensor) -> tf.Tensor:
    """Cost function whose derivative is the REINFORCE equation.

    The gradient of this expression with respect to the decoder logits is
    the central REINFORCE update: the advantage (reward minus baseline)
    multiplied by the derivative of the negative log-likelihood of the
    decoded symbols. The advantage is wrapped in ``tf.stop_gradient``, so
    it only scales the gradient and is never differentiated through.
    """
    # Advantage of the decoded output over the baseline, with a leading
    # axis added so it broadcasts over time.
    advantage = tf.expand_dims(reward - baseline, 0)

    # Negative log-likelihood of each decoded symbol under the logits.
    word_neg_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=decoded, logits=logits)

    return tf.stop_gradient(advantage) * word_neg_log_prob
def sentence_bleu(references: np.ndarray,
                  hypotheses: np.ndarray) -> np.ndarray:
    """Compute index-based sentence-level BLEU score.

    Works directly on the index sequences emitted by the decoder: every
    decoder unit counts as one token in the BLEU computation, regardless
    of whether it represents a word or a sub-word unit.
    """
    scores = []
    for ref_sent, hyp_sent in zip(np.transpose(references),
                                  np.transpose(hypotheses)):
        clipped_matches = []
        hyp_totals = []

        for order in range(1, 5):
            n_matched, n_in_hyp, _ = _count_matching_n_grams(
                ref_sent, hyp_sent, order)

            # Add-one smoothing for the higher-order n-grams.
            if order > 1:
                n_matched += 1
                n_in_hyp += 1

            clipped_matches.append(n_matched)
            hyp_totals.append(n_in_hyp)

        if hyp_totals[0] == 0:
            scores.append(0.)
            continue

        # Geometric mean of the four modified n-gram precisions.
        precision = (
            np.prod(clipped_matches) / np.prod(hyp_totals)) ** .25

        # Reference length counted up to the end token.
        ref_len = sum(1 for _ in
                      takewhile(lambda i: i != END_TOKEN_INDEX, ref_sent))
        brevity_penalty = np.min([
            1., np.exp(1 - ref_len / hyp_totals[0])])

        scores.append(brevity_penalty * precision)

    assert all(0 <= s <= 1 for s in scores)
    return np.array(scores, dtype=np.float32)
def sentence_gleu(references: np.ndarray,
                  hypotheses: np.ndarray) -> np.ndarray:
    """Compute index-based GLEU score.

    GLEU is a sentence-level metric used in Google's Neural MT as a reward
    in reinforcement learning (https://arxiv.org/abs/1609.08144): the
    minimum of precision and recall over 1- to 4-grams. It operates on the
    indices emitted by the decoder, which are not necessarily tokens
    (could be characters or subword units).
    """
    scores = []
    for ref_sent, hyp_sent in zip(np.transpose(references),
                                  np.transpose(hypotheses)):
        matched = []
        in_hyp = []
        in_ref = []

        for order in range(1, 5):
            counts = _count_matching_n_grams(ref_sent, hyp_sent, order)
            matched.append(counts[0])
            in_hyp.append(counts[1])
            in_ref.append(counts[2])

        precision = np.sum(matched) / np.sum(in_hyp)
        recall = np.sum(matched) / np.sum(in_ref)

        assert 0. <= precision <= 1.0
        assert 0. <= recall <= 1.0

        scores.append(min(precision, recall))

    return np.array(scores, dtype=np.float32)
def _count_matching_n_grams(ref: np.ndarray,
                            hyp: np.ndarray,
                            n: int) -> Tuple[int, int, int]:
    """Count clipped n-gram matches between a reference and a hypothesis.

    Returns:
        A tuple ``(matched, total_hyp_n_grams, total_ref_n_grams)``.
        Matches are clipped: each reference n-gram can be matched at most
        as many times as it occurs in the reference.
    """
    ref_counts = Counter(str(gram) for gram in _get_n_grams(ref, n))
    hyp_counts = Counter(str(gram) for gram in _get_n_grams(hyp, n))

    total_ref_n_grams = sum(ref_counts.values())
    total_hyp_n_grams = sum(hyp_counts.values())

    # Multiset intersection implements the clipping: an n-gram occurring
    # k times in the hypothesis and m times in the reference contributes
    # min(k, m) matches.
    matched_n_grams = sum((hyp_counts & ref_counts).values())

    assert matched_n_grams <= total_hyp_n_grams
    assert matched_n_grams <= total_ref_n_grams
    return matched_n_grams, total_hyp_n_grams, total_ref_n_grams
def _get_n_grams(indices: np.ndarray, order: int) -> Iterable[np.ndarray]:
    """Lazily yield n-grams of the given order from an index sequence.

    Generation stops as soon as an n-gram ends with ``END_TOKEN_INDEX``,
    so n-grams containing or following the end token are never produced.
    """
    for start in range(len(indices) - order + 1):
        gram = indices[start:start + order]
        if gram[-1] == END_TOKEN_INDEX:
            return
        yield gram
| bsd-3-clause | 4c7221fa0ad05c93d6e1866875894899 | 33.822511 | 79 | 0.620338 | 3.783631 | false | false | false | false |
ufal/neuralmonkey | neuralmonkey/runners/runner.py | 1 | 3169 | from typing import Dict, List, Callable, Union
import numpy as np
import tensorflow as tf
from typeguard import check_argument_types
from neuralmonkey.runners.base_runner import BaseRunner, NextExecute
from neuralmonkey.decoders.autoregressive import AutoregressiveDecoder
from neuralmonkey.decoders.classifier import Classifier
from neuralmonkey.decorators import tensor
# pylint: disable=invalid-name
SupportedDecoder = Union[AutoregressiveDecoder, Classifier]
Postprocessor = Callable[[List[List[str]]], List[List[str]]]
# pylint: enable=invalid-name
class GreedyRunner(BaseRunner[SupportedDecoder]):
    """Runner that decodes greedily from the decoder's output logprobs.

    Log-probabilities from all executed sessions are combined with
    ``logaddexp`` (i.e. the probabilities are summed), the argmax over
    the vocabulary axis is taken at every time step, and the resulting
    index vectors are converted to sentences using the decoder's
    vocabulary.
    """

    class Executable(BaseRunner.Executable["GreedyRunner"]):

        def next_to_execute(self) -> NextExecute:
            """Get the tensors and additional feed dicts for execution."""
            fetches = self.executor.fetches
            if not self.summaries:
                fetches["image_summaries"] = None
            if not self.compute_losses:
                # Losses were not requested; fetch scalar zeros instead
                # so the result structure stays uniform.
                fetches["train_xent"] = tf.zeros([])
                fetches["runtime_xent"] = tf.zeros([])
            return fetches, []

        def collect_results(self, results: List[Dict]) -> None:
            """Aggregate per-session results into one runner result.

            Losses are summed over sessions; per-step log-probabilities
            are combined with ``np.logaddexp`` before the greedy argmax.
            """
            train_loss = 0.
            runtime_loss = 0.
            # One accumulator per decoding step, initialized to log(0).
            summed_logprobs = [-np.inf for _ in range(
                results[0]["decoded_logprobs"].shape[0])]

            for sess_result in results:
                train_loss += sess_result["train_xent"]
                runtime_loss += sess_result["runtime_xent"]
                for i, logprob in enumerate(sess_result["decoded_logprobs"]):
                    summed_logprobs[i] = np.logaddexp(
                        summed_logprobs[i], logprob)

            # Greedy choice: argmax over the vocabulary axis at each step.
            argmaxes = [np.argmax(l, axis=1) for l in summed_logprobs]

            decoded_tokens = self.executor.vocabulary.vectors_to_sentences(
                argmaxes)

            if self.executor.postprocess is not None:
                decoded_tokens = self.executor.postprocess(decoded_tokens)

            summaries = None
            if "image_summaries" in results[0]:
                summaries = [results[0]["image_summaries"]]

            self.set_runner_result(
                outputs=decoded_tokens, losses=[train_loss, runtime_loss],
                summaries=summaries)

    def __init__(self,
                 output_series: str,
                 decoder: SupportedDecoder,
                 postprocess: Postprocessor = None) -> None:
        """Initialize the greedy runner.

        Arguments:
            output_series: Name of the output series the runner produces.
            decoder: The autoregressive decoder or classifier to run.
            postprocess: Optional function applied to the decoded
                sentences before they are returned.
        """
        check_argument_types()
        super().__init__(output_series, decoder)
        self.postprocess = postprocess
        self.vocabulary = self.decoder.vocabulary

    @tensor
    def fetches(self) -> Dict[str, tf.Tensor]:
        """Tensors fetched each session: logprobs, both cross-entropies.

        Attention-plot image summaries are added when present in the
        graph's ``summary_att_plots`` collection.
        """
        fetches = {"decoded_logprobs": self.decoder.runtime_logprobs,
                   "train_xent": self.decoder.train_loss,
                   "runtime_xent": self.decoder.runtime_loss}

        att_plot_summaries = tf.get_collection("summary_att_plots")
        if att_plot_summaries:
            fetches["image_summaries"] = tf.summary.merge(att_plot_summaries)

        return fetches

    @property
    def loss_names(self) -> List[str]:
        """Names of the losses reported by this runner."""
        return ["train_xent", "runtime_xent"]
| bsd-3-clause | ffe5a67c40dd1556f7e821448283390c | 34.211111 | 77 | 0.611549 | 4.253691 | false | false | false | false |
ufal/neuralmonkey | lib/subword_nmt/segment-char-ngrams.py | 3 | 2377 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Rico Sennrich
from __future__ import unicode_literals, division
import sys
import codecs
import argparse
# hack for python2/3 compatibility
from io import open
argparse.open = open
# python 2/3 compatibility
if sys.version_info < (3, 0):
sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
sys.stdin = codecs.getreader('UTF-8')(sys.stdin)
def create_parser():
    """Build the command-line argument parser for the segmenter."""
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="segment rare words into character n-grams")

    arg_parser.add_argument(
        '--input', '-i', type=argparse.FileType('r'), default=sys.stdin,
        metavar='PATH', help="Input file (default: standard input).")
    arg_parser.add_argument(
        '--vocab', type=argparse.FileType('r'), metavar='PATH', required=True,
        help="Vocabulary file.")
    arg_parser.add_argument(
        '--shortlist', type=int, metavar='INT', default=0,
        help="do not segment INT most frequent words in vocabulary (default: '%(default)s')).")
    arg_parser.add_argument(
        '-n', type=int, metavar='INT', default=2,
        help="segment rare words into character n-grams of size INT (default: '%(default)s')).")
    arg_parser.add_argument(
        '--output', '-o', type=argparse.FileType('w'), default=sys.stdout,
        metavar='PATH', help="Output file (default: standard output)")
    arg_parser.add_argument(
        '--separator', '-s', type=str, default='@@', metavar='STR',
        help="Separator between non-final subword units (default: '%(default)s'))")

    return arg_parser
if __name__ == '__main__':

    parser = create_parser()
    args = parser.parse_args()

    # Map each vocabulary word to its rank (line number). Only
    # well-formed "word count" lines (exactly two fields) are kept.
    vocab = [line.split()[0] for line in args.vocab if len(line.split()) == 2]
    vocab = dict((y,x) for (x,y) in enumerate(vocab))

    for line in args.input:
        for word in line.split():
            # Words outside the vocabulary, or below the frequency
            # shortlist, are split into character n-grams of size
            # args.n, joined with the separator.
            if word not in vocab or vocab[word] > args.shortlist:
                i = 0
                while i*args.n < len(word):
                    args.output.write(word[i*args.n:i*args.n+args.n])
                    i += 1
                    if i*args.n < len(word):
                        args.output.write(args.separator)
                args.output.write(' ')
            else:
                # Shortlisted words are copied through unsegmented.
                args.output.write(word + ' ')
        args.output.write('\n')
| bsd-3-clause | d64c519fbba7bf78a48e718f32b45bab | 32.478873 | 96 | 0.611275 | 3.656923 | false | false | false | false |
ufal/neuralmonkey | neuralmonkey/trainers/delayed_update_trainer.py | 1 | 9072 | # pylint: disable=unused-import
from typing import Dict, List, Tuple, Optional
# pylint: enable=unused-import
import tensorflow as tf
from typeguard import check_argument_types
from neuralmonkey.decorators import tensor
from neuralmonkey.runners.base_runner import GraphExecutor, NextExecute
from neuralmonkey.trainers.generic_trainer import (GenericTrainer, Objective,
Gradients)
class DelayedUpdateTrainer(GenericTrainer):
    """Trainer that accumulates gradients over several batches.

    Instead of applying an optimizer update after every batch, gradients
    and objective values are summed into dedicated non-trainable buffer
    variables.  After ``batches_per_update`` batches have been
    accumulated, a single optimizer step is taken with the averaged
    gradients, and the buffers are reset.
    """

    class Executable(GraphExecutor.Executable["DelayedUpdateTrainer"]):
        """Executable cycling through accumulate / update / reset states."""

        def __init__(self, executor: "DelayedUpdateTrainer",
                     compute_losses: bool, summaries: bool,
                     num_sessions: int) -> None:
            assert compute_losses
            if num_sessions != 1:
                raise ValueError(
                    "Trainer only supports execution in a single session")

            super().__init__(executor, compute_losses, summaries, num_sessions)

            # State machine: 0 = accumulating, 1 = updating, 2 = resetting.
            self.state = 0
            self.res_sums = []  # type: List[tf.Summary]
            self.res_losses = None  # type: Optional[List[float]]
            self.res_batch = None  # type: Optional[int]

        def next_to_execute(self) -> NextExecute:
            """Return the fetches appropriate for the current state."""
            if self.state == 0:  # ACCUMULATING
                fetches = {"accumulators": self.executor.accumulate_ops,
                           "counter": self.executor.cumulator_counter,
                           "batch_size": self.executor.batch_size,
                           "losses": self.executor.objective_values}
            elif self.state == 1:  # UPDATING
                fetches = {
                    "train_op": self.executor.train_op,
                    "_update_ops": tf.get_collection(tf.GraphKeys.UPDATE_OPS)}
                if self.summaries:
                    fetches.update(self.executor.summaries)
            else:  # RESETTING
                fetches = {"resets": self.executor.reset_ops}

            return fetches, []

        def collect_results(self, results: List[Dict]) -> None:
            """Store results and advance the accumulation state machine."""
            assert len(results) == 1
            result = results[0]

            if self.state == 0:  # ACCUMULATING
                self.res_losses = result["losses"]
                self.res_batch = result["batch_size"]

                # Are we updating?
                counter = result["counter"]
                if counter == self.executor.batches_per_update:
                    self.state = 1
                    return
            elif self.state == 1:
                if self.summaries:
                    self.res_sums = [result["scalar_summaries"],
                                     result["histogram_summaries"]]
                self.state = 2
                return

            # Reached after accumulating (when no update is due) or after
            # the reset step: publish the last accumulated losses.
            assert self.res_losses is not None
            assert self.res_batch is not None

            objective_names = [obj.name for obj in self.executor.objectives]
            objective_names += ["L1", "L2"]

            losses = dict(zip(objective_names, self.res_losses))

            self.set_result({}, losses, self.res_batch, self.res_sums)

    # pylint: disable=too-many-arguments
    def __init__(self,
                 batches_per_update: int,
                 objectives: List[Objective],
                 l1_weight: float = 0.0,
                 l2_weight: float = 0.0,
                 clip_norm: float = None,
                 optimizer: tf.train.Optimizer = None,
                 var_scopes: List[str] = None,
                 var_collection: str = None) -> None:
        """Create the delayed-update trainer.

        Arguments:
            batches_per_update: Number of batches to accumulate before a
                single optimizer update is applied.
            objectives: The training objectives; the remaining arguments
                are forwarded to ``GenericTrainer``.
        """
        check_argument_types()
        GenericTrainer.__init__(self, objectives, l1_weight, l2_weight,
                                clip_norm, optimizer, var_scopes,
                                var_collection)

        self.batches_per_update = batches_per_update
    # pylint: enable=too-many-arguments

    @tensor
    def existing_grads_and_vars(self) -> Tuple[
            List[tf.Tensor], List[tf.Variable]]:
        """Gradients and variables with ``None`` gradients filtered out."""
        orig_grads = super().raw_gradients
        # pylint: disable=not-an-iterable
        # Pylint does not understand @tensor annotations
        transposed = tuple(zip(
            *[(grad, var) for grad, var in orig_grads if grad is not None]))
        # pylint: enable=not-an-iterable
        return list(transposed[0]), list(transposed[1])

    @tensor
    def gradient_buffers(self) -> List[tf.Variable]:
        """Non-trainable variables that accumulate gradients."""
        # pylint: disable=unpacking-non-sequence
        existing_gradients, _ = self.existing_grads_and_vars
        # pylint: enable=unpacking-non-sequence

        with tf.variable_scope("gradient_buffer"):
            return [tf.Variable(initial_value=tf.zeros_like(grad),
                                trainable=False)
                    for grad in existing_gradients]

    @tensor
    def objective_buffers(self) -> List[tf.Variable]:
        """Scalar accumulators, one per training objective."""
        with tf.variable_scope("loss_buffers"):
            return [tf.Variable(0.0, trainable=False) for _ in self.objectives]

    # pylint: disable=no-self-use
    @tensor
    def diff_buffer(self) -> tf.Variable:
        """Accumulator for the differentiable loss sum."""
        return tf.Variable(0.0, trainable=False)

    @tensor
    def cumulator_counter(self) -> tf.Variable:
        """Number of batches accumulated since the last reset."""
        return tf.Variable(0, trainable=False, name="cumulator_counter")
    # pylint: enable=no-self-use

    @tensor
    def accumulate_ops(self) -> List[tf.Operation]:
        """Ops that add the current batch's gradients/losses to buffers."""
        # pylint: disable=unpacking-non-sequence
        existing_gradients, _ = self.existing_grads_and_vars
        # pylint: enable=unpacking-non-sequence

        # pylint: disable=not-an-iterable
        # Pylint does not understand @tensor annotations
        accumulate_ops = [
            tf.assign_add(gradbuf, grad)
            for gradbuf, grad in zip(
                self.gradient_buffers, existing_gradients)]

        accumulate_ops.extend(
            tf.assign_add(objbuf, obj.loss)
            for objbuf, obj in zip(self.objective_buffers, self.objectives))
        # pylint: enable=not-an-iterable

        accumulate_ops.append(
            tf.assign_add(self.diff_buffer, self.differentiable_loss_sum))
        accumulate_ops.append(
            tf.assign_add(self.cumulator_counter, 1))
        return accumulate_ops

    @tensor
    def reset_ops(self) -> List[tf.Operation]:
        """Ops that zero out all accumulation buffers and the counter."""
        # pylint: disable=not-an-iterable
        # Pylint does not understand @tensor annotations
        reset_ops = [tf.assign(gradbuf, tf.zeros_like(gradbuf))
                     for gradbuf in self.gradient_buffers]
        reset_ops.extend(
            tf.assign(objbuf, 0.0) for objbuf in self.objective_buffers)
        # pylint: enable=not-an-iterable

        reset_ops.append(tf.assign(self.diff_buffer, 0.0))
        reset_ops.append(tf.assign(self.cumulator_counter, 0))
        return reset_ops

    @tensor
    def raw_gradients(self) -> Gradients:
        """Return averaged gradients over buffers."""
        # pylint: disable=not-an-iterable
        # Pylint does not understand @tensor annotations
        averaged_grads = [grad / tf.to_float(self.cumulator_counter)
                          for grad in self.gradient_buffers]
        # pylint: enable=not-an-iterable

        tf.summary.scalar(
            "train_opt_cost",
            self.diff_buffer / tf.to_float(self.cumulator_counter),
            collections=["summary_train"])

        # log all objectives
        for obj, objbuf in zip(self.objectives, self.objective_buffers):
            tf.summary.scalar(
                obj.name, objbuf / tf.to_float(self.cumulator_counter),
                collections=["summary_train"])

        # now, zip averaged grads with associated vars to a Gradients struct.
        # pylint: disable=unpacking-non-sequence
        _, existing_vars = self.existing_grads_and_vars
        # pylint: enable=unpacking-non-sequence
        return list(zip(averaged_grads, existing_vars))

    @tensor
    def summaries(self) -> Dict[str, tf.Tensor]:
        """Merged scalar and histogram summaries for training."""
        # pylint: disable=protected-access
        if isinstance(self.optimizer._lr, tf.Tensor):
            tf.summary.scalar("learning_rate", self.optimizer._lr,
                              collections=["summary_train"])
        # pylint: enable=protected-access

        # pylint: disable=unpacking-non-sequence
        l1_norm, l2_norm = self.regularization_losses
        # pylint: enable=unpacking-non-sequence
        tf.summary.scalar("train_l1", l1_norm, collections=["summary_train"])
        tf.summary.scalar("train_l2", l2_norm, collections=["summary_train"])

        # pylint: disable=not-an-iterable
        # Pylint does not understand @tensor annotations
        for grad, var in self.gradients:
            if grad is not None:
                summary_name = "gr_{}".format(var.name)
                tf.summary.histogram(
                    summary_name, grad, collections=["summary_gradients"])
        # pylint: enable=not-an-iterable

        return {
            "scalar_summaries": tf.summary.merge(
                tf.get_collection("summary_train")),
            "histogram_summaries": tf.summary.merge(
                tf.get_collection("summary_gradients"))}
| bsd-3-clause | 4df8a04e1c83750dfe09805d23f4167f | 37.769231 | 79 | 0.578924 | 4.182573 | false | false | false | false |
ufal/neuralmonkey | neuralmonkey/writers/plain_text_writer.py | 1 | 1974 | from typing import Iterator, List, Any, Callable
from neuralmonkey.logging import log
from neuralmonkey.readers.plain_text_reader import ALNUM_CHARSET
# pylint: disable=invalid-name
# Writer: function that gets file and the data
Writer = Callable[[str, Any], None]
# pylint: enable=invalid-name
def t2t_detokenize(data: Iterator[List[str]]) -> Iterator[str]:
    """Detokenize text tokenized by t2t_tokenized_text_reader.

    A space is inserted between two consecutive tokens only when both of
    them begin with an alphanumeric character, mirroring the
    tensor2tensor tokenizer.decode method:
    https://github.com/tensorflow/tensor2tensor/blob/v1.5.5/tensor2tensor/data_generators/tokenizer.py
    """
    for tokens in data:
        alnum_flags = [token[0] in ALNUM_CHARSET for token in tokens]
        pieces = []
        previous_alnum = False
        for token, token_alnum in zip(tokens, alnum_flags):
            if pieces and previous_alnum and token_alnum:
                pieces.append(" ")
            pieces.append(token)
            previous_alnum = token_alnum
        yield "".join(pieces)
def text_writer(encoding: str = "utf-8") -> Writer:
    """Get a writer that stores each item of a series on its own line."""
    def writer(path: str, data: Iterator) -> None:
        with open(path, "w", encoding=encoding) as f_out:
            f_out.writelines(str(item) + "\n" for item in data)
        log("Result saved as plain text in '{}'".format(path))
    return writer
def tokenized_text_writer(encoding: str = "utf-8") -> Writer:
    """Get a writer that is reversed to the tokenized_text_reader."""
    def writer(path: str, data: Iterator[List[str]]) -> None:
        # Join tokens with spaces and delegate to the plain-text writer.
        joined = (" ".join(tokens) for tokens in data)
        text_writer(encoding)(path, joined)
    return writer
def t2t_tokenized_text_writer(encoding: str = "utf-8") -> Writer:
    """Get a writer that is reversed to the t2t_tokenized_text_reader."""
    def writer(path: str, data: Iterator[List[str]]) -> None:
        # Detokenize tensor2tensor-style and delegate to the plain writer.
        text_writer(encoding)(path, t2t_detokenize(data))
    return writer
# pylint: disable=invalid-name
# Ready-made writer instances with the default UTF-8 encoding.
UtfPlainTextWriter = tokenized_text_writer()
T2TWriter = t2t_tokenized_text_writer()
# pylint: enable=invalid-name
| bsd-3-clause | 79323426254959c8ca5ff25fa1d2af6e | 31.9 | 102 | 0.653495 | 3.433043 | false | false | false | false |
ufal/neuralmonkey | neuralmonkey/encoders/transformer.py | 1 | 12857 | """Implementation of the encoder of the Transformer model.
Described in Vaswani et al. (2017), arxiv.org/abs/1706.03762
"""
from typing import List
import math
import tensorflow as tf
from typeguard import check_argument_types
from neuralmonkey.attention.base_attention import (
Attendable, get_attention_states, get_attention_mask)
from neuralmonkey.decorators import tensor
from neuralmonkey.attention.scaled_dot_product import attention
from neuralmonkey.model.parameterized import InitializerSpecs
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.model.stateful import (TemporalStateful,
TemporalStatefulWithOutput)
from neuralmonkey.nn.utils import dropout
from neuralmonkey.tf_utils import get_variable, layer_norm
def position_signal(dimension: int, length: tf.Tensor) -> tf.Tensor:
    """Return the sinusoidal position encoding, shape (1, length, dimension).

    Each position is encoded as sines and cosines of the position scaled
    by a geometric sequence of inverse timescales (spanning a factor of
    1e4), as in Vaswani et al. (2017).  The leading singleton dimension
    allows broadcasting the signal over the batch.
    """
    # Code simplified and copied from github.com/tensorflow/tensor2tensor
    positions = tf.to_float(tf.range(length))

    # Half of the dimensions carry sines, the other half cosines.
    num_timescales = dimension // 2

    # Geometrically spaced inverse timescales from 1 down to 1e-4;
    # see: github.com/tensorflow/tensor2tensor/blob/v1.5.5/tensor2tensor/
    # layers/common_attention.py#L425
    log_timescale_increment = math.log(1.0e4) / (num_timescales - 1)
    inv_timescales = tf.exp(tf.range(num_timescales, dtype=tf.float32)
                            * -log_timescale_increment)

    # Outer product: one row per position, one column per timescale.
    scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(
        inv_timescales, 0)

    signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
    # Pad with one zero column when the dimension is odd.
    signal = tf.pad(signal, [[0, 0], [0, tf.mod(dimension, 2)]])
    signal = tf.reshape(signal, [1, length, dimension])

    return signal
class TransformerLayer(TemporalStateful):
    """Thin wrapper exposing one layer's states and mask as TemporalStateful."""

    def __init__(self, states: tf.Tensor, mask: tf.Tensor) -> None:
        self._layer_states = states
        self._layer_mask = mask

    @property
    def temporal_states(self) -> tf.Tensor:
        """States of the wrapped layer."""
        return self._layer_states

    @property
    def temporal_mask(self) -> tf.Tensor:
        """Temporal mask associated with the wrapped layer."""
        return self._layer_mask
# pylint: disable=too-many-instance-attributes
class TransformerEncoder(ModelPart, TemporalStatefulWithOutput):
    """Encoder of the Transformer model (Vaswani et al., 2017).

    A stack of ``depth`` identical groups of sublayers: multi-head
    self-attention, optional cross-attention to another model part, and
    a position-wise feed-forward network, each with residual connections
    and layer normalization.
    """

    # pylint: disable=too-many-arguments,too-many-locals
    def __init__(self,
                 name: str,
                 input_sequence: TemporalStateful,
                 ff_hidden_size: int,
                 depth: int,
                 n_heads: int,
                 dropout_keep_prob: float = 1.0,
                 attention_dropout_keep_prob: float = 1.0,
                 target_space_id: int = None,
                 use_att_transform_bias: bool = False,
                 use_positional_encoding: bool = True,
                 input_for_cross_attention: Attendable = None,
                 n_cross_att_heads: int = None,
                 reuse: ModelPart = None,
                 save_checkpoint: str = None,
                 load_checkpoint: str = None,
                 initializers: InitializerSpecs = None) -> None:
        """Create an encoder of the Transformer model.

        Described in Vaswani et al. (2017), arxiv.org/abs/1706.03762

        Arguments:
            input_sequence: Embedded input sequence.
            name: Name of the decoder. Should be unique accross all Neural
                Monkey objects.
            reuse: Reuse the model variables.
            dropout_keep_prob: Probability of keeping a value during dropout.
            target_space_id: Specifies the modality of the target space.
            use_att_transform_bias: Add bias when transforming qkv vectors
                for attention.
            use_positional_encoding: If True, position encoding signal is added
                to the input.

        Keyword arguments:
            ff_hidden_size: Size of the feedforward sublayers.
            n_heads: Number of the self-attention heads.
            depth: Number of sublayers.
            attention_dropout_keep_prob: Probability of keeping a value
                during dropout on the attention output.
            input_for_cross_attention: An attendable model part that is
                attended using cross-attention on every layer of the decoder,
                analogically to how encoder is attended in the decoder.
            n_cross_att_heads: Number of heads used in the cross-attention.
        """
        check_argument_types()
        ModelPart.__init__(self, name, reuse, save_checkpoint, load_checkpoint,
                           initializers)

        self.input_sequence = input_sequence
        self.ff_hidden_size = ff_hidden_size
        self.depth = depth
        self.n_heads = n_heads
        self.dropout_keep_prob = dropout_keep_prob
        self.attention_dropout_keep_prob = attention_dropout_keep_prob
        self.target_space_id = target_space_id
        self.use_att_transform_bias = use_att_transform_bias
        self.use_positional_encoding = use_positional_encoding
        self.input_for_cross_attention = input_for_cross_attention
        self.n_cross_att_heads = n_cross_att_heads

        # Validate hyperparameters early, before any graph construction.
        if self.depth <= 0:
            raise ValueError("Depth must be a positive integer.")

        if self.ff_hidden_size <= 0:
            raise ValueError("Feed forward hidden size must be a "
                             "positive integer.")

        if self.dropout_keep_prob <= 0.0 or self.dropout_keep_prob > 1.0:
            raise ValueError("Dropout keep prob must be inside (0,1].")

        if (self.attention_dropout_keep_prob <= 0.0
                or self.attention_dropout_keep_prob > 1.0):
            raise ValueError("Dropout keep prob for attn must be in (0,1].")

        # The modality embedding matrix has 32 rows (see modality_matrix).
        if self.target_space_id is not None and (self.target_space_id >= 32
                                                 or self.target_space_id < 0):
            raise ValueError(
                "If provided, the target space ID should be between 0 and 31. "
                "Was: {}".format(self.target_space_id))

        if (input_for_cross_attention is None) != (n_cross_att_heads is None):
            raise ValueError(
                "Either both input_for_cross_attention and n_cross_att_heads "
                "must be provided or none of them.")

        self._variable_scope.set_initializer(tf.variance_scaling_initializer(
            mode="fan_avg", distribution="uniform"))
    # pylint: enable=too-many-arguments,too-many-locals

    @property
    def model_dimension(self) -> int:
        """Model dimension, taken from the input sequence.

        Raises:
            ValueError: If the cross-attention input has a different
                state dimension than the model.
        """
        dim = self.input_sequence.dimension

        if self.input_for_cross_attention is not None:
            cross_att_dim = get_attention_states(
                self.input_for_cross_attention).get_shape()[-1].value
            if cross_att_dim != dim:
                raise ValueError(
                    "The input for cross-attention must be of the same "
                    "dimension as the model, was {}.".format(cross_att_dim))
        return dim

    @tensor
    def output(self) -> tf.Tensor:
        """Fixed-size output: sum of the final encoder states over time."""
        return tf.reduce_sum(self.temporal_states, axis=1)

    @tensor
    def modality_matrix(self) -> tf.Tensor:
        """Create an embedding matrix for varyining target modalities.

        Used to embed different target space modalities in the tensor2tensor
        models (e.g. during the zero-shot translation).
        """
        emb_size = self.input_sequence.temporal_states.shape.as_list()[-1]
        return get_variable(
            name="target_modality_embedding_matrix",
            shape=[32, emb_size],
            dtype=tf.float32,
            initializer=tf.variance_scaling_initializer(
                mode="fan_avg", distribution="uniform"))

    @tensor
    def target_modality_embedding(self) -> tf.Tensor:
        """Gather correct embedding of the target space modality.

        See TransformerEncoder.modality_matrix for more information.
        """
        return tf.gather(self.modality_matrix,
                         tf.constant(self.target_space_id))

    @tensor
    def encoder_inputs(self) -> tf.Tensor:
        """Input states with modality and positional signals, after dropout."""
        inputs = self.input_sequence.temporal_states

        # Optionally add the target-space modality embedding to every step.
        if self.target_space_id is not None:
            inputs += tf.reshape(self.target_modality_embedding, [1, 1, -1])

        length = tf.shape(inputs)[1]

        if self.use_positional_encoding:
            inputs += position_signal(self.model_dimension, length)

        return dropout(inputs, self.dropout_keep_prob, self.train_mode)

    def self_attention_sublayer(
            self, prev_layer: TransformerLayer) -> tf.Tensor:
        """Create the encoder self-attention sublayer."""
        # Layer normalization
        normalized_states = layer_norm(prev_layer.temporal_states)

        # Run self-attention
        self_context, _ = attention(
            queries=normalized_states,
            keys=normalized_states,
            values=normalized_states,
            keys_mask=prev_layer.temporal_mask,
            num_heads=self.n_heads,
            dropout_callback=lambda x: dropout(
                x, self.attention_dropout_keep_prob, self.train_mode),
            use_bias=self.use_att_transform_bias)

        # Apply dropout
        self_context = dropout(
            self_context, self.dropout_keep_prob, self.train_mode)

        # Add residual connections
        return self_context + prev_layer.temporal_states

    def cross_attention_sublayer(self, queries: tf.Tensor) -> tf.Tensor:
        """Create the sublayer attending the cross-attention input."""
        assert self.cross_attention_sublayer is not None
        assert self.n_cross_att_heads is not None
        assert self.input_for_cross_attention is not None

        encoder_att_states = get_attention_states(
            self.input_for_cross_attention)
        encoder_att_mask = get_attention_mask(self.input_for_cross_attention)

        # Layer normalization
        normalized_queries = layer_norm(queries)

        encoder_context, _ = attention(
            queries=normalized_queries,
            keys=encoder_att_states,
            values=encoder_att_states,
            keys_mask=encoder_att_mask,
            num_heads=self.n_cross_att_heads,
            dropout_callback=lambda x: dropout(
                x, self.attention_dropout_keep_prob, self.train_mode),
            use_bias=self.use_att_transform_bias)

        # Apply dropout
        encoder_context = dropout(
            encoder_context, self.dropout_keep_prob, self.train_mode)

        # Add residual connections
        return encoder_context + queries

    def feedforward_sublayer(self, layer_input: tf.Tensor) -> tf.Tensor:
        """Create the feed-forward network sublayer."""
        # Layer normalization
        normalized_input = layer_norm(layer_input)

        # Feed-forward network hidden layer + ReLU
        ff_hidden = tf.layers.dense(
            normalized_input, self.ff_hidden_size, activation=tf.nn.relu,
            name="hidden_state")

        # Apply dropout on hidden layer activations
        ff_hidden = dropout(ff_hidden, self.dropout_keep_prob, self.train_mode)

        # Feed-forward output projection
        ff_output = tf.layers.dense(
            ff_hidden, self.model_dimension, name="output")

        # Apply dropout on feed-forward output projection
        ff_output = dropout(ff_output, self.dropout_keep_prob, self.train_mode)

        # Add residual connections
        return ff_output + layer_input

    def layer(self, level: int) -> TransformerLayer:
        """Build (recursively) the encoder stack up to the given level.

        The zeroth layer wraps the (dropped-out) encoder inputs; the
        top-most layer additionally applies layer normalization to its
        output states.
        """
        # Recursive implementation. Outputs of the zeroth layer
        # are normalized inputs.
        if level == 0:
            return TransformerLayer(self.encoder_inputs, self.temporal_mask)

        # Compute the outputs of the previous layer
        prev_layer = self.layer(level - 1)

        with tf.variable_scope("layer_{}".format(level - 1)):
            with tf.variable_scope("self_attention"):
                self_context = self.self_attention_sublayer(prev_layer)

            if self.input_for_cross_attention is not None:
                with tf.variable_scope("cross_attention"):
                    self_context = self.cross_attention_sublayer(self_context)

            with tf.variable_scope("feedforward"):
                output_states = self.feedforward_sublayer(self_context)

        # Layer normalization on the encoder outputs
        if self.depth == level:
            output_states = layer_norm(output_states)

        return TransformerLayer(states=output_states, mask=self.temporal_mask)

    @tensor
    def temporal_states(self) -> tf.Tensor:
        """States of the top-most encoder layer."""
        return self.layer(self.depth).temporal_states

    @tensor
    def temporal_mask(self) -> tf.Tensor:
        """Temporal mask, taken directly from the input sequence."""
        return self.input_sequence.temporal_mask

    @property
    def dependencies(self) -> List[str]:
        """Include the cross-attention input among model dependencies."""
        deps = super().dependencies

        if self.input_for_cross_attention is not None:
            return deps + ["input_for_cross_attention"]

        return deps
| bsd-3-clause | 10628950ed3fdbc2e29225c2c6805a6a | 37.960606 | 79 | 0.623085 | 4.123477 | false | false | false | false |
ufal/neuralmonkey | scripts/caffe_image_features.py | 1 | 4420 | import argparse
import sys
import os
os.environ['GLOG_minloglevel'] = '4'
sys.path.append("caffe/python")
import caffe
import numpy as np
import skimage
def crop_image(x, target_height=227, target_width=227):
    """Load an image, resize and center-crop it to the target size.

    Arguments:
        x: Path to the image (anything skimage.io.imread accepts).
        target_height: Height of the returned image in pixels.
        target_width: Width of the returned image in pixels.

    Returns:
        A float array of shape (target_height, target_width, 3).
    """
    image = skimage.img_as_float(skimage.io.imread(x)).astype(np.float32)

    # Grayscale image: replicate the single channel three times.
    if len(image.shape) == 2:
        image = np.tile(image[:,:,None], 3)
    # 4-D input (e.g. multi-frame image): keep only the first frame.
    elif len(image.shape) == 4:
        image = image[:,:,:,0]

    height, width, rgb = image.shape
    if width == height:
        resized_image = skimage.transform.resize(image, (target_height,target_width))

    elif height < width:
        # NOTE(review): skimage.transform.resize expects (rows, cols);
        # the dimensions computed in this branch look transposed
        # (rows scaled by width/height) — confirm against the original
        # caffe preprocessing before changing.
        resized_image = skimage.transform.resize(image, (int(width * float(target_height)/height), target_width))
        cropping_length = int((resized_image.shape[1] - target_height) / 2)
        resized_image = resized_image[:,cropping_length:resized_image.shape[1] - cropping_length]

    else:
        resized_image = skimage.transform.resize(image, (target_height, int(height * float(target_width) / width)))
        cropping_length = int((resized_image.shape[0] - target_width) / 2)
        resized_image = resized_image[cropping_length:resized_image.shape[0] - cropping_length,:]

    # Final resize guarantees the exact target shape after cropping.
    return skimage.transform.resize(resized_image, (target_height, target_width))
class CNN:
    """Wrapper around a caffe network for batched image feature extraction."""

    def __init__(self, deploy, model, mean, batch_size=10, width=227, height=227):
        """Initialize the network.

        Arguments:
            deploy: Path to the .prototxt network definition.
            model: Path to the .caffemodel file with trained weights.
            mean: Path to the .npy file with the mean image.
            batch_size: Number of images per forward pass.
            width: Network input width in pixels.
            height: Network input height in pixels.
        """
        self.deploy = deploy
        self.model = model
        self.mean = mean

        self.batch_size = batch_size
        self.net, self.transformer = self.get_net()
        # Reshape the input blob to the configured batch and image size.
        self.net.blobs['data'].reshape(self.batch_size, 3, height, width)

        self.width = width
        self.height = height

    def get_net(self):
        """Build the caffe net and the input preprocessing transformer."""
        #caffe.set_mode_cpu()
        net = caffe.Net(self.deploy, self.model, caffe.TEST)

        transformer = caffe.io.Transformer({'data':net.blobs['data'].data.shape})
        # Caffe expects channel-first layout, mean-subtracted values in
        # the 0-255 range and BGR channel order.
        transformer.set_transpose('data', (2,0,1))
        transformer.set_mean('data', np.load(self.mean).mean(1).mean(1))
        transformer.set_raw_scale('data', 255)
        transformer.set_channel_swap('data', (2,1,0))

        return net, transformer

    def get_features(self, image_list, layers='fc7', layer_sizes=[4096]):
        """Run the network over all images, collecting one layer's output.

        Arguments:
            image_list: List of image file paths.
            layers: Name of the blob whose activations are returned.
            layer_sizes: Per-image shape of that blob.

        Returns:
            A float32 array of shape [len(image_list)] + layer_sizes.
        """
        iter_until = len(image_list) + self.batch_size
        all_feats = np.zeros([len(image_list)] + layer_sizes, dtype=np.float32)

        # Process the images in batches of self.batch_size; the final
        # (possibly partial) batch is covered by the overshooting ranges.
        for start, end in zip(list(range(0, iter_until, self.batch_size)), \
                    list(range(self.batch_size, iter_until, self.batch_size))):

            image_batch_file = image_list[start:end]
            image_batch = np.array([crop_image(x, target_width=self.width, target_height=self.height) for x in image_batch_file])

            # Allocate the channel-first input blob (N, C, H, W).
            caffe_in = np.zeros(np.array(image_batch.shape)[[0,3,1,2]], dtype=np.float32)

            for idx, in_ in enumerate(image_batch):
                caffe_in[idx] = self.transformer.preprocess('data', in_)

            out = self.net.forward_all(blobs=[layers], **{'data':caffe_in})
            feats = out[layers]

            all_feats[start:end] = feats

        return all_feats
def shape(string):
    """Parse a shape specification such as '227x227' into a list of ints."""
    return list(map(int, string.split("x")))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Image feature extraction")
    parser.add_argument("--model-prototxt", type=str, required=True)
    parser.add_argument("--model-parameters", type=str, required=True)
    parser.add_argument("--img-mean", type=str, required=True)
    parser.add_argument("--feature-layer", type=str, required=True)
    parser.add_argument("--image-directory", type=str, required=True)
    parser.add_argument("--image-list", type=argparse.FileType('r'), required=True)
    parser.add_argument("--output-file", type=argparse.FileType('wb'), required=True)
    parser.add_argument("--img-shape", type=shape, required=True)
    parser.add_argument("--output-shape", type=shape, required=True)
    args = parser.parse_args()

    cnn = CNN(deploy=args.model_prototxt, model=args.model_parameters, mean=args.img_mean,
              batch_size=10, width=args.img_shape[0], height=args.img_shape[1])

    # Image paths are listed one per line, relative to the image directory.
    path_list = [os.path.join(args.image_directory, f.rstrip()) for f in args.image_list]
    # Features are extracted channel-first ...
    features_shape = [args.output_shape[2]] + args.output_shape[:2]
    features = cnn.get_features(path_list, layers=args.feature_layer, layer_sizes=features_shape)
    # ... and stored channel-last (N, H, W, C).
    np.save(args.output_file, features.transpose((0, 2, 3, 1)))
| bsd-3-clause | 53d1a8cec8541cbc6a43296e708b369c | 41.095238 | 129 | 0.647285 | 3.335849 | false | false | false | false |
ufal/neuralmonkey | scripts/postedit_reconstruct_data.py | 2 | 2259 | #!/usr/bin/env python3
"""
This a script that takes the result of automatic postediting encoded as a
sequence of <keep>, <delete> and insert operations and applies them on the
original text being post-edited.
The inverse script to this one is 'postedit_prepare_data.py'.
"""
import argparse
from neuralmonkey.processors.german import GermanPreprocessor
from neuralmonkey.processors.german import GermanPostprocessor
from postedit_prepare_data import load_tokenized
# TODO make reconstruct a postprocessor
def reconstruct(source, edits):
    """Apply a sequence of edit operations to a source sentence.

    Every '<keep>' copies the current source token, '<delete>' skips it,
    and any other edit token is inserted verbatim.  Source tokens not
    consumed by the edit sequence are appended unchanged (the decoder
    may emit fewer edit operations than there are source tokens).
    """
    pos = 0
    result = []
    for operation in edits:
        if operation == '<keep>':
            if pos < len(source):
                result.append(source[pos])
            pos += 1
        elif operation == '<delete>':
            pos += 1
        else:
            result.append(operation)

    # Copy the remainder of the source (empty slice when pos >= len).
    result.extend(source[pos:])
    return result
def main():
    """Read edit sequences and translations, print reconstructed targets."""
    parser = argparse.ArgumentParser(
        description="Convert postediting target data to sequence of edits")
    parser.add_argument("--edits", type=argparse.FileType('r'), required=True)
    parser.add_argument("--translated-sentences",
                        type=argparse.FileType('r'), required=True)
    # NOTE(review): argparse's type=bool treats any non-empty string as
    # True (including "False") — confirm the intended CLI usage.
    parser.add_argument("--target-german", type=bool, default=False)
    args = parser.parse_args()

    postprocess = lambda x: x
    preprocess = None  # type: GermanPreprocessor
    if args.target_german:
        # pylint: disable=redefined-variable-type
        postprocess = GermanPostprocessor()
        preprocess = GermanPreprocessor()

    trans_sentences = load_tokenized(
        args.translated_sentences, preprocess=preprocess)
    edit_sequences = load_tokenized(args.edits, preprocess=None)

    for trans, edits in zip(trans_sentences, edit_sequences):
        target = reconstruct(trans, edits)
        # TODO refactor this (change postprocessor api)
        print(" ".join(postprocess([target])[0]))
# edits = ['<keep>', 'ahoj', '<delete>', 'proc?']
# source = ['Karle', 'co', 'kdy']
# print reconstruct(source, edits)
main()
| bsd-3-clause | 7c005d1b00019bc6044ef2ea54cc6ee4 | 29.945205 | 78 | 0.656042 | 4.06295 | false | false | false | false |
ledatelescope/bifrost | python/bifrost/map.py | 1 | 7421 |
# Copyright (c) 2016-2022, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Python2 compatibility
from __future__ import absolute_import
import sys
if sys.version_info > (3,):
long = int
from bifrost.libbifrost import _bf, _check, _array
from bifrost.ndarray import asarray
import numpy as np
import ctypes
import glob
import os
from bifrost.libbifrost_generated import BF_MAP_KERNEL_DISK_CACHE
from bifrost import telemetry
telemetry.track_module()
def _is_literal(x):
    """Return True if ``x`` is a plain Python numeric scalar (not an array)."""
    scalar_types = (int, long, float, complex)
    return isinstance(x, scalar_types)
def _convert_to_array(arg):
    """Coerce ``arg`` to a bifrost ndarray.

    Python scalars are promoted to read-only 0-d arrays, narrowed to 32-bit
    types where they fit so that generated kernels default to single
    precision; everything else is passed straight to ``asarray``.
    """
    if not _is_literal(arg):
        return asarray(arg)
    arr = np.array(arg)
    if isinstance(arg, (int, long)) and -(1 << 31) <= arg < (1 << 31):
        arr = arr.astype(np.int32)
    elif isinstance(arg, float):
        # TODO: Any way to decide when these should be double-precision?
        arr = arr.astype(np.float32)
    elif isinstance(arg, complex):
        arr = arr.astype(np.complex64)
    # Literals must never be written to by a kernel
    arr.flags['WRITEABLE'] = False
    return asarray(arr)
def map(func_string, data, axis_names=None, shape=None,
        func_name=None, extra_code=None,
        block_shape=None, block_axes=None):
    """Apply a JIT-compiled function to a set of ndarrays.

    Args:
        func_string (str): Source of the function to apply, e.g.
            ``"c = a + b"`` (see examples below).
        data (dict): Maps the names used in ``func_string`` to ndarrays or
            scalars.
        axis_names (list): Names by which each axis is referenced in
            ``func_string``.
        shape: Shape of the computation. Defaults to the broadcast shape of
            all arrays in ``data`` when None.
        func_name (str): Kernel name, for debugging purposes.
        extra_code (str): Additional code injected at global scope.
        block_shape: 2D thread-block shape (y, x) used at kernel launch.
            Performance tuning parameter; a heuristic is used when NULL.
            Changing it does _not_ require kernel re-compilation.
        block_axes: The 2 computation axes (indices or names; negative values
            index from the end) mapped onto the thread block (y, x).
            Performance tuning parameter; a heuristic is used when NULL.
            Changing it _does_ require kernel re-compilation.

    Note:
        Only GPU computation is currently supported.

    Examples::

        # Add two arrays together
        bf.map("c = a + b", {'c': c, 'a': a, 'b': b})

        # Compute outer product of two arrays
        bf.map("c(i,j) = a(i) * b(j)",
               {'c': c, 'a': a, 'b': b},
               axis_names=('i','j'))

        # Split the components of a complex array
        bf.map("a = c.real; b = c.imag", {'c': c, 'a': a, 'b': b})

        # Raise an array to a scalar power
        bf.map("c = pow(a, p)", {'c': c, 'a': a, 'p': 2.0})

        # Slice an array with a scalar index
        bf.map("c(i) = a(i,k)", {'c': c, 'a': a, 'k': 7}, ['i'], shape=c.shape)
    """
    # The C API expects bytes; already-encoded input (Python 2) is left alone.
    try:
        func_string = func_string.encode()
        if func_name is not None:
            func_name = func_name.encode()
        if extra_code is not None:
            extra_code = extra_code.encode()
    except AttributeError:
        pass
    if block_axes is not None:
        # Axes may be referenced by name; translate names to indices
        block_axes = [axis_names.index(ax) if isinstance(ax, str) else ax
                      for ax in block_axes]
        if len(block_axes) != 2:
            raise ValueError("block_axes must contain exactly 2 entries")
    if block_shape is not None and len(block_shape) != 2:
        raise ValueError("block_shape must contain exactly 2 entries")
    # Note: References to each array must be held until bfMap returns, lest
    # they be garbage collected before their BFarray views are used.
    arg_arrays = []
    args = []
    arg_names = []
    for name, value in data.items():
        arr = _convert_to_array(value)
        arg_arrays.append(arr)
        args.append(arr.as_BFarray())
        arg_names.append(name)
    ndim = 0 if shape is None else len(shape)
    _check(_bf.bfMap(ndim, _array(shape, dtype=ctypes.c_long),
                     _array(axis_names),
                     len(data), _array(args), _array(arg_names),
                     func_name, func_string, extra_code,
                     _array(block_shape), _array(block_axes)))
def list_map_cache():
    """Print whether the map-kernel disk cache is enabled and, if so, its
    version information and number of cached entries."""
    output = "Cache enabled: %s" % ('yes' if BF_MAP_KERNEL_DISK_CACHE else 'no')
    if BF_MAP_KERNEL_DISK_CACHE:
        cache_path = os.path.join(os.path.expanduser('~'), '.bifrost',
                                  _bf.BF_MAP_KERNEL_DISK_CACHE_SUBDIR)
        version_file = os.path.join(cache_path,
                                    _bf.BF_MAP_KERNEL_DISK_CACHE_VERSION_FILE)
        try:
            with open(version_file, 'r') as fh:
                raw_version = fh.read()
            # The version file holds three whitespace-separated integers:
            # map-cache, CUDA runtime and CUDA driver versions, each encoded
            # as major*1000 + minor*10.
            pretty = []
            for field in raw_version.split(None, 2):
                value = int(field, 10)
                pretty.append("%i.%i" % (value // 1000, (value // 10) % 1000))
            mapcache, runtime, driver = pretty
            entries = glob.glob(os.path.join(cache_path, '*.inf'))
            output += "\nCache version: %s (map cache) %s (runtime), %s (driver)" % (mapcache, runtime, driver)
            output += "\nCache entries: %i" % len(entries)
        except OSError:
            # No cache written yet; just report the enabled state
            pass
    print(output)
def clear_map_cache():
    """Clear the map-kernel disk cache via the C library (bfMapClearCache)."""
    _check(_bf.bfMapClearCache())
| bsd-3-clause | 0023894c9677eae8af4baa997e0903db | 41.164773 | 111 | 0.612047 | 3.883307 | false | false | false | false |
ledatelescope/bifrost | python/bifrost/addon/leda/make_header.py | 1 | 13543 |
# Copyright (c) 2016-2021, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
make_header.py
=======
Modified by Hugh Garsden from Danny Price's dada.py and pipeline.py
Makes header.txt files that is used by corr2uvfit and DuCT.
"""
from __future__ import print_function, division
import numpy as np
import os, sys, ephem, datetime
from dateutil import tz
from bifrost import telemetry
telemetry.track_module()
class DadaReader(object):
    """
    Dada file reader for raw LEDA correlator data.

    Reads and parses the ASCII header at the start of a dada file; the data
    payload itself is never read.

    Parameters
    ----------
    filename: str
        name of dada file to open (may end in '.dadazip' for zipped files)
    warnings: bool
        if True, print warnings about missing/inconsistent header values
    file_size: int or None
        externally-supplied total file size (header + data) in bytes;
        needed for zipped files whose true size cannot be read from disk
    """
    DEFAULT_HEADER_SIZE = 4096
    def __init__(self, filename, warnings, file_size):
        self.filename = filename
        self.warnings = warnings
        self.file_size = file_size # Externally supplied
        self.generate_info()
    def generate_info(self):
        """ Parse dada header and form useful quantities. Calculate everything that can be calculated
        based on what's in the header. For the rest, call them UNKNOWN. """
        # The header is ASCII text embedded at the front of a binary file.
        # Decode so the bytes read in 'rb' mode can be split on str
        # separators (required on Python 3); undecodable padding bytes are
        # simply dropped.
        with open(self.filename, 'rb') as f:
            headerstr = f.read(self.DEFAULT_HEADER_SIZE).decode('ascii', 'ignore')
        header = {}
        for line in headerstr.split('\n'):
            try:
                key, value = line.split()
            except ValueError:
                # First line that isn't a "KEY VALUE" pair marks the end of
                # the header section (e.g. zero padding)
                break
            header[key.strip()] = value.strip()
        # SOURCE and MODE are required; a KeyError here means a corrupt header
        self.source = header["SOURCE"]
        self.mode = header['MODE']
        self.datestamp = header.get("UTC_START", "UNKNOWN")
        self.c_freq_mhz = float(header["CFREQ"]) if "CFREQ" in header else "UNKNOWN"
        self.bandwidth_mhz = float(header["BW"]) if "BW" in header else "UNKNOWN"
        self.n_chans = int(header["NCHAN"]) if "NCHAN" in header else "UNKNOWN"
        self.data_order = header.get("DATA_ORDER", "UNKNOWN")
        have_size = True   # If we can settle on a size for the (possibly zipped) data
        # Calculate the data size. The file may be incomplete, and zipped
        # files cannot be measured on disk, so try several sources. - HG
        if self.filename[-8:] == ".dadazip": # Will not unzip to get actual size. Must be specified somehow.
            if self.file_size: # We are given the complete file size which overrides everything else.
                data_size_dsk = int(self.file_size) - self.DEFAULT_HEADER_SIZE
            elif "FILE_SIZE" in header: # Hope that this is right
                data_size_dsk = int(header["FILE_SIZE"]) # data sizes don't include header
            else: # Failure
                if self.warnings:
                    print("WARNING: File is zipped and FILE_SIZE is not in header and file_size not supplied. ")
                have_size = False
                data_size_dsk = 0
        else: # File not zipped. Can get true complete file size
            data_size_dsk = os.path.getsize(self.filename) - self.DEFAULT_HEADER_SIZE
            if "FILE_SIZE" in header and int(header["FILE_SIZE"]) != data_size_dsk:
                if self.warnings:
                    print("WARNING: Data size in file doesn't match actual size. Using actual size.")
        data_size = data_size_dsk # Settle on this as the size of the data
        self.file_size = data_size + self.DEFAULT_HEADER_SIZE
        # Derive the number of integrations (scans) when possible,
        # leaving undefined values as UNKNOWN.
        if "BYTES_PER_AVG" in header:
            bpa = int(header["BYTES_PER_AVG"])
        if "BYTES_PER_AVG" in header and have_size:
            if data_size % bpa != 0:
                if self.warnings:
                    print("WARNING: BYTES_PER_AVG does not result in an integral number of scans")
                if "DATA_ORDER" in header and self.data_order == 'TIME_SUBSET_CHAN_TRIANGULAR_POL_POL_COMPLEX':
                    # Known-bad headers for this data order: use the fixed value
                    if self.warnings:
                        print('DATA_ORDER is TIME_SUBSET_CHAN_TRIANGULAR_POL_POL_COMPLEX, resetting BYTES_PER_AVG to',(109*32896*2*2+9*109*1270*2*2)*8,"(fixed)")
                    bpa = (109*32896*2*2+9*109*1270*2*2)*8
                    if data_size % bpa != 0 and self.warnings:
                        print("WARNING: BYTES_PER_AVG still doesn't give integral number of scans")
            self.n_int = float(data_size) / bpa
        else:
            self.n_int = "UNKNOWN"
        if "TSAMP" in header and "NAVG" in header:
            # Calculate integration time per accumulation
            tsamp = float(header["TSAMP"]) * 1e-6 # Sampling time per channel, in microseconds
            navg = int(header["NAVG"]) # Number of averages per integration
            int_tim = tsamp * navg # Integration time is tsamp * navg
            self.t_int = int_tim
            if "OBS_OFFSET" in header and "BYTES_PER_AVG" in header:
                # Calculate the time offset since the observation started
                byte_offset = int(header["OBS_OFFSET"])
                num_int_since_obs_start = byte_offset // bpa
                self.t_offset = num_int_since_obs_start * int_tim
            else:
                self.t_offset = "UNKNOWN"
        else:
            self.t_int = "UNKNOWN"
            self.t_offset = "UNKNOWN"
class DadaTimes(object):
    """
    Handle the generation of true times and RA/DEC for the observation in the DADA file.
    Use pyephem for the tricky stuff. Includes the new calculation of RA/DEC in terms
    of long/lat rather than just using long/lat.

    All derived attributes (lst, date_str, time_str, localtime_str, lst_str,
    dec_str) are set to the string "UNKNOWN" when the DadaReader header lacks
    a start time or time offset.
    """
    def time_at_timezone(self, dt, zone):
        """Convert a naive UTC datetime to the named timezone."""
        from_zone = tz.gettz('UTC')
        to_zone = tz.gettz(zone)
        # Tell the datetime object that it's in UTC time zone since
        # datetime objects are 'naive' by default
        dt = dt.replace(tzinfo=from_zone)
        # Convert time zone
        return dt.astimezone(to_zone)
    def __init__(self, header):
        # Fixed OVRO (Owens Valley) site coordinates for the ephem observer
        ovro = ephem.Observer()
        (ovro.lat, ovro.lon, ovro.elev) = ('37.23978', '-118.281667', 1184.120)
        if header.datestamp == "UNKNOWN" or header.t_offset == "UNKNOWN":
            # Cannot compute any times without a start time + offset
            self.lst = "UNKNOWN"
            self.date_str = "UNKNOWN"
            self.time_str = "UNKNOWN"
            self.localtime_str = "UNKNOWN"
            self.lst_str = "UNKNOWN"
            self.dec_str = "UNKNOWN"
            return
        # Calculate times including LST
        dt = datetime.datetime.strptime(header.datestamp, "%Y-%m-%d-%H:%M:%S")+datetime.timedelta(seconds=header.t_offset)
        ovro.date = dt
        self.lst = ovro.sidereal_time()
        localt = self.time_at_timezone(dt, "America/Los_Angeles")
        self.date_str = "%04d%02d%02d"%(dt.year,dt.month,dt.day)
        self.time_str = "%02d%02d%02d"%(dt.hour,dt.minute,dt.second)
        self.localtime_str = "%02d%02d%02d"%(localt.hour,localt.minute,localt.second)
        # RA/DEC of the zenith (azimuth 0, altitude pi/2) at the observation time
        ra, dec = ovro.radec_of(0, np.pi/2)
        # Radians -> hours for RA. NOTE(review): repr() of an ephem Angle is
        # assumed to yield its radian value as a parseable float — confirm
        # against the installed pyephem version.
        self.lst_str = str(float(ra) / 2 / np.pi * 24)
        self.dec_str = str(float(repr(dec))*180/np.pi)
        #print("UTC START: %s"%dada_file.datestamp)
        #print("TIME OFFSET: %s"%datetime.timedelta(seconds=dada_file.t_offset))
        #print("NEW START: (%s, %s)"%(date_str, time_str))
def make_header(filename, write=True, warn=True, size=None):
    """
    Create useful/necessary information about an observation. Used by other programs
    like corr2uvfits and DuCT.

    Parameters
    ----------
    filename: str
        DADA file, can be zipped
    write: bool
        write a header.txt file (in the current directory)
    warn: bool
        print warnings while parsing the DADA header
    size: int or None
        specify a true file size in case of zipped file

    Returns
    -------
    dict
        The header parameters, useful when called from other scripts
        (e.g. plot scripts).
    """
    # Get information from the DADA file
    dada_file = DadaReader(filename, warn, size)
    dada_times = DadaTimes(dada_file)
    # Fill and either dump or return header. Slight differences depending on which.
    header_params = {'N_CHANS'    : dada_file.n_chans,
                     'N_SCANS'    : dada_file.n_int,
                     'INT_TIME'   : dada_file.t_int,
                     'FREQCENT'   : dada_file.c_freq_mhz,
                     'BANDWIDTH'  : dada_file.bandwidth_mhz,
                     'RA_HRS'     : dada_times.lst_str,
                     'DEC_DEGS'   : dada_times.dec_str,
                     'DATE'       : dada_times.date_str,
                     'TIME'       : dada_times.time_str,
                     'LOCALTIME'  : dada_times.localtime_str,
                     'LST'        : dada_times.lst_str,
                     'DATA_ORDER' : dada_file.data_order,
                     'FILE_SIZE'  : dada_file.file_size,
                     'MODE'       : dada_file.mode,
                     'TIME_OFFSET': dada_file.t_offset,
                     'SOURCE'     : dada_file.source
                     }
    if header_params["N_SCANS"] == "UNKNOWN":
        n_scans = "UNKNOWN"
    else:
        n_scans = str(int(header_params['N_SCANS']))
    if write: # This format is used by corr2uvfits and DuCT for transforming a DADA file.
        # Context manager ensures header.txt is closed even if a write fails
        with open("header.txt", "w") as output:
            output.write("# Generated by make_header.py\n\n")
            output.write("FIELDNAME Zenith\n")
            output.write("N_SCANS "+n_scans+"\n")
            output.write("N_INPUTS 512\n")
            output.write("N_CHANS "+str(header_params['N_CHANS'])+" # number of channels in spectrum\n")
            output.write("CORRTYPE B # correlation type to use. 'C'(cross), 'B'(both), or 'A'(auto)\n")
            output.write("INT_TIME "+str(header_params['INT_TIME'])+" # integration time of scan in seconds\n")
            output.write("FREQCENT "+str(header_params['FREQCENT'])+" # observing center freq in MHz\n")
            output.write("BANDWIDTH "+str(header_params['BANDWIDTH'])+" # total bandwidth in MHz\n")
            output.write("# To phase to the zenith, these must be the HA, RA and Dec of the zenith.\n")
            output.write("HA_HRS 0.000000 # the RA of the desired phase centre (hours)\n")
            output.write("RA_HRS "+header_params['RA_HRS']+" # the RA of the desired phase centre (hours)\n")
            output.write("DEC_DEGS "+str(header_params['DEC_DEGS'])+" # the DEC of the desired phase centre (degs)\n")
            output.write("DATE "+header_params['DATE']+" # YYYYMMDD\n")
            output.write("TIME "+header_params['TIME']+" # HHMMSS\n")
            output.write("LOCALTIME "+str(dada_times.localtime_str)+"\n")
            output.write("LST "+str(dada_times.lst)+"\n")
            output.write("INVERT_FREQ 0 # 1 if the freq decreases with channel number\n")
            output.write("CONJUGATE 1 # conjugate the raw data to fix sign convention problem if necessary\n")
            output.write("GEOM_CORRECT 0\n")
    return header_params # If this function is called from other scripts (e.g. plot scripts) it can supply useful information
if __name__ == "__main__":
    # Usage: make_header.py <dada_file> [<true_file_size>]
    argc = len(sys.argv)
    if argc == 2:
        make_header(sys.argv[1])
    elif argc == 3:
        make_header(sys.argv[1], size=sys.argv[2])
    else:
        print("Expecting file name and optionally file size")
| bsd-3-clause | 3a070df3706248d3d106a0e8a7edac6c | 43.549342 | 153 | 0.594994 | 3.697243 | false | false | false | false |
ledatelescope/bifrost | python/bifrost/blocks/convert_visibilities.py | 1 | 9672 |
# Copyright (c) 2016-2021, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from bifrost.map import map as bf_map
from bifrost.pipeline import TransformBlock
from bifrost.DataType import DataType
from copy import deepcopy
from math import sqrt
from bifrost import telemetry
telemetry.track_module()
class ConvertVisibilitiesBlock(TransformBlock):
    """Transform block that converts visibility data between the fully-filled
    'matrix' layout and the packed lower-triangular 'storage' layout.

    All conversions run as bf.map CUDA kernels, so input must be in CUDA
    space. See :func:`convert_visibilities` for the tensor semantics.
    """
    def __init__(self, iring, fmt,
                 *args, **kwargs):
        super(ConvertVisibilitiesBlock, self).__init__(iring, *args, **kwargs)
        # Desired output format: 'matrix' or 'storage'
        self.ofmt = fmt
    def define_valid_input_spaces(self):
        # Kernels are bf.map calls, which only run on the GPU
        return ('cuda',)
    def on_sequence(self, iseq):
        """Inspect the input tensor labels to determine the input format and
        construct the output header for the requested conversion."""
        ihdr = iseq.header
        itensor = ihdr['_tensor']
        ilabels = itensor['labels']
        assert(ilabels[0] == 'time')
        ohdr = deepcopy(ihdr)
        otensor = ohdr['_tensor']
        if ilabels[1:] == ['freq', 'station_i', 'pol_i', 'station_j', 'pol_j']:
            # Input is a station x station correlation matrix
            nchan, nstand, npol, nstand_j, npol_j = itensor['shape'][1:]
            assert(nstand_j == nstand)
            assert( npol_j == npol)
            self.ifmt = 'matrix'
            if self.ofmt == 'matrix':
                # Output matrix will have both triangles populated
                ohdr['matrix_fill_mode'] = 'hermitian'
            elif self.ofmt == 'storage':
                nbaseline = nstand*(nstand+1)//2
                # NOTE(review): assumes the input header carries
                # 'matrix_fill_mode' — a KeyError here means it didn't
                del ohdr['matrix_fill_mode']
                otensor['labels'] = ['time', 'baseline', 'freq', 'stokes']
                otensor['shape'] = [-1, nbaseline, nchan, npol*npol]
                time_units, freq_units, stand_units, pol_units, _, _ = itensor['units']
                otensor['units'] = [time_units, None, freq_units, ('I', 'Q', 'U', 'V')]
            else:
                raise NotImplementedError("Unsupported conversion from " +
                                          self.ifmt + " to " + self.ofmt)
        elif ilabels[1:] == ['baseline', 'freq', 'stokes']:
            # Input is a packed lower-triangular baseline list
            nbaseline, nchan, nstokes = itensor['shape'][1:]
            assert(nstokes == 1 or nstokes == 4)
            npol = 1 if nstokes == 1 else 2
            # Invert nbaseline = nstand*(nstand+1)/2
            nstand = int(sqrt(8 * nbaseline + 1) - 1) // 2
            time_units, baseline_units, freq_units, stokes_units, = itensor['units']
            pol_units = ('X', 'Y') # TODO: Support L/R (using additional metadata?)
            self.ifmt = 'storage'
            if self.ofmt == 'matrix':
                otensor['labels'] = ['time', 'freq', 'station_i', 'pol_i', 'station_j', 'pol_j']
                otensor['shape'] = [-1, nchan, nstand, npol, nstand, npol]
                otensor['units'] = [time_units, freq_units, None, pol_units, None, pol_units]
            else:
                raise NotImplementedError("Cannot convert input from %s to %s"
                                          % (ilabels, self.ofmt))
        return ohdr
    def on_data(self, ispan, ospan):
        """Dispatch to the CUDA kernel for the (ifmt, ofmt) pair."""
        idata = ispan.data
        odata = ospan.data
        itype = DataType(idata.dtype)
        otype = DataType(odata.dtype)
        if self.ifmt == 'matrix' and self.ofmt == 'matrix':
            # Make a full-matrix copy of the lower-only input matrix
            # odata[t,c,i,p,j,q] = idata[t,c,i,p,j,q] (lower filled only)
            shape_nopols = list(idata.shape)
            del shape_nopols[5]
            del shape_nopols[3]
            # View pol pairs as 2-vectors so each kernel thread moves a row
            idata = idata.view(itype.as_vector(2))
            odata = odata.view(otype.as_vector(2))
            bf_map(
                '''
                bool in_lower_triangle = (i > j);
                if( in_lower_triangle ) {
                    odata(t,c,i,0,j,0) = idata(t,c,i,0,j,0);
                    odata(t,c,i,1,j,0) = idata(t,c,i,1,j,0);
                } else {
                    auto x = idata(t,c,j,0,i,0);
                    auto y = idata(t,c,j,1,i,0);
                    auto x1 = x[1];
                    x[0] = x[0].conj();
                    x[1] = y[0].conj();
                    if( i != j ) {
                        y[0] = x1.conj();
                    }
                    y[1] = y[1].conj();
                    odata(t,c,i,0,j,0) = x;
                    odata(t,c,i,1,j,0) = y;
                }
                ''',
                shape=shape_nopols, axis_names=['t', 'c', 'i', 'j'],
                data={'idata': idata, 'odata': odata})
        elif self.ifmt == 'matrix' and self.ofmt == 'storage':
            # Kernel's float sqrt baseline->(i,j) inversion limits nstand
            assert(idata.shape[2] <= 2048)
            idata = idata.view(itype.as_vector(2))
            odata = odata.view(otype.as_vector(4))
            # TODO: Support L/R as well as X/Y pols
            bf_map('''
            // TODO: This only works up to 2048 in single-precision
            #define project_triangular(i, j) ((i)*((i)+1)/2 + (j))
            int i = int((sqrt(8.f*(b)+1)-1)/2);
            int j = b - project_triangular(i, 0);
            auto x = idata(t,c,i,0,j,0);
            auto y = idata(t,c,i,1,j,0);
            if( i == j ) {
                x[1] = y[0].conj();
            }
            idata_type::value_type eye(0, 1);
            auto I = (x[0] + y[1]);
            auto Q = (x[0] - y[1]);
            auto U = (x[1] + y[0]);
            auto V = (x[1] - y[0]) * eye;
            odata(t,b,c,0) = odata_type(I,Q,U,V);
            ''',
                   shape=odata.shape[:-1], axis_names=['t', 'b', 'c'],
                   data={'idata': idata, 'odata': odata},
                   block_shape=[64,8]) # TODO: Tune this
        #elif self.ifmt == 'matrix' and self.ofmt == 'triangular':
        elif self.ifmt == 'storage' and self.ofmt == 'matrix':
            oshape_nopols = list(odata.shape)
            del oshape_nopols[5]
            del oshape_nopols[3]
            idata = idata.view(itype.as_vector(4))
            odata = odata.view(otype.as_vector(2))
            bf_map('''
            bool in_upper_triangle = (i < j);
            auto b = in_upper_triangle ? j*(j+1)/2 + i : i*(i+1)/2 + j;
            auto IQUV = idata(t,b,c,0);
            auto I = IQUV[0], Q = IQUV[1], U = IQUV[2], V = IQUV[3];
            idata_type::value_type eye(0, 1);
            auto xx = 0.5f*(I + Q);
            auto xy = 0.5f*(U - V*eye);
            auto yx = 0.5f*(U + V*eye);
            auto yy = 0.5f*(I - Q);
            if( i == j ) {
                xy = yx.conj();
            }
            if( in_upper_triangle ) {
                auto tmp_xy = xy;
                xx = xx.conj();
                xy = yx.conj();
                yx = tmp_xy.conj();
                yy = yy.conj();
            }
            odata(t,c,i,0,j,0) = odata_type(xx, xy);
            odata(t,c,i,1,j,0) = odata_type(yx, yy);
            ''',
                   shape=oshape_nopols, axis_names=['t', 'c', 'i', 'j'],
                   data={'idata': idata, 'odata': odata},
                   block_shape=[64,8]) # TODO: Tune this
        else:
            raise NotImplementedError
def convert_visibilities(iring, fmt, *args, **kwargs):
    """Convert a stream of visibility data into the requested layout.

    Two output formats ('fmt') are supported:

    * ``'matrix'``: a fully-filled station x station correlation matrix,
      suitable for linear algebra operations. A lower-filled matrix input
      has its upper triangle populated by Hermitian conjugation.
    * ``'storage'``: a packed lower-triangular baseline list with Stokes
      products, suitable for common on-disk data formats such as UVFITS,
      FITS-IDI, MS etc.

    Args:
        iring (Ring or Block): Input data source.
        fmt (str): The desired output format: matrix, storage.
        *args: Arguments to ``bifrost.pipeline.TransformBlock``.
        **kwargs: Keyword Arguments to ``bifrost.pipeline.TransformBlock``.

    **Tensor semantics**::

        Input:  ['time', 'freq', 'station_i', 'pol_i', 'station_j', 'pol_j'], dtype = any complex, space = CUDA
        fmt = 'matrix'
        Output: ['time', 'freq', 'station_i', 'pol_i', 'station_j', 'pol_j'], dtype = any complex, space = CUDA

        Input:  ['time', 'freq', 'station_i', 'pol_i', 'station_j', 'pol_j'], dtype = any complex, space = CUDA
        fmt = 'storage'
        Output: ['time', 'baseline', 'freq', 'stokes'], dtype = any complex, space = CUDA

        Input:  ['time', 'baseline', 'freq', 'stokes'], dtype = any complex, space = CUDA
        fmt = 'matrix'
        Output: ['time', 'freq', 'station_i', 'pol_i', 'station_j', 'pol_j'], dtype = any complex, space = CUDA

    Returns:
        ConvertVisibilitiesBlock: A new block instance.
    """
    return ConvertVisibilitiesBlock(iring, fmt, *args, **kwargs)
| bsd-3-clause | ede8708991dbfdc068d82e8fe33153d7 | 44.838863 | 111 | 0.531638 | 3.475386 | false | false | false | false |
ledatelescope/bifrost | python/bifrost/psrdada.py | 1 | 9835 |
# Copyright (c) 2016-2021, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This provides an interface for reading/writing PSRDADA ring buffers.
bifrost.libpsrdada_generated is generated at build time using ctypesgen.py
PSRDADA must be built as a shared library to use this. This can be accomplished
by adding the following lines to psrdada/configure.in:
#AC_DISABLE_SHARED
LT_INIT
lib_LTLIBRARIES = libpsrdada.la
libtest_la_LDFLAGS = -version-info 0:0:0
"""
from __future__ import absolute_import, print_function
import bifrost.libpsrdada_generated as _dada
import numpy as np
from bifrost.ndarray import _address_as_buffer
from bifrost.libbifrost import EndOfDataStop
import ctypes
from bifrost import telemetry
telemetry.track_module()
def get_pointer_value(ptr):
    """Return the raw address held in a ctypes pointer (None for NULL)."""
    return ctypes.cast(ptr, ctypes.c_void_p).value
class MultiLog(object):
    """Thin RAII wrapper around a PSRDADA multilog handle.

    The raw handle is exposed as ``self.obj`` (used by Hdu). The handle is
    closed when the wrapper is garbage collected.
    """
    # Class-wide counter used to generate unique default log names
    count = 0
    def __init__(self, name=None):
        if name is None:
            name = "MultiLog%i" % MultiLog.count
            MultiLog.count += 1
        self.obj = _dada.multilog_open(name, '\0')
    def __del__(self):
        _dada.multilog_close(self.obj)
class IpcBufBlock(object):
    """A single open block of a PSRDADA ring buffer.

    Wraps the raw block memory in a numpy uint8 array (``self.data``) without
    copying. The block is released back to the ring on close()/__exit__/
    garbage collection, committing ``nbyte_commit`` bytes.
    """
    def __init__(self, buf, mutable=False):
        self.buf = buf
        # buf.open() returns (ctypes ptr, byte count, block id)
        self.ptr, self.nbyte, self.block_id = self.buf.open()
        # Default to committing the full block unless commit() says otherwise
        self.nbyte_commit = self.nbyte
        self.ptr = get_pointer_value(self.ptr)
        if self.ptr is not None:
            # Zero-copy view of the block's memory; writability mirrors the
            # read/write mode of the owning buffer
            self.data = np.ndarray(
                shape=(self.nbyte,),
                buffer=_address_as_buffer(self.ptr, self.nbyte),
                dtype=np.uint8)
            self.data.flags['WRITEABLE'] = mutable
    def __del__(self):
        self.close()
    def commit(self, nbyte=None):
        """Set how many bytes will be committed when the block is closed."""
        if nbyte is None:
            nbyte = self.nbyte
        self.nbyte_commit = nbyte
    def close(self):
        """Release the block (idempotent; NULL blocks are never released)."""
        if self.ptr is not None:
            self.buf.close(self.nbyte_commit)
            self.ptr = None
    def enable_eod(self):
        """Raise the end-of-data flag on the underlying buffer."""
        #print('>ipcbuf_enable_eod')
        if _dada.ipcbuf_enable_eod(self.buf.buf) < 0:
            raise IOError("Failed to enable EOD flag")
    def size_bytes(self):
        """Size of this block in bytes."""
        return self.nbyte
    def __enter__(self):
        return self
    def __exit__(self, type, value, tb):
        self.close()
class IpcBaseBuf(object):
    """Base wrapper around a PSRDADA ipcbuf_t; iterating yields IpcBufBlocks
    until an empty block (end of data) is reached."""
    def __init__(self, ipcbuf, mutable=False):
        self.buf = ipcbuf
        self.mutable = mutable
    def size_bytes(self):
        """Size of one ring-buffer block, in bytes."""
        return _dada.ipcbuf_get_bufsz(self.buf)
    def eod(self):
        """True if the buffer's end-of-data flag is set."""
        #print('>ipcbuf_eod')
        return bool(_dada.ipcbuf_eod(self.buf))
    def reset(self):
        """Reset the buffer state ready for re-use."""
        #print('>ipcbuf_reset')
        if _dada.ipcbuf_reset(self.buf) < 0:
            raise IOError("Failed to reset buffer")
    def __iter__(self):
        return self
    def __next__(self):
        block = IpcBufBlock(self, self.mutable)
        if not block.nbyte:
            # Empty block signals end of data: clean up and stop iteration
            del block
            self.reset()
            raise EndOfDataStop('IpcBufBlock empty')
        return block
    def next(self):
        # Python 2 iterator protocol
        return self.__next__()
    def open(self):
        # Subclasses define how a block is acquired
        raise NotImplementedError()
    def close(self, nbyte):
        # Subclasses define how a block is released
        raise NotImplementedError()
class IpcBaseIO(IpcBaseBuf):
    """Base wrapper around a PSRDADA ipcio_t data stream.

    An ipcio_t begins with an ipcbuf_t, so the handle is reinterpreted as one
    for the base class while the full I/O handle is kept as ``self.io``.
    """
    def __init__(self, ipcio, mutable=False):
        as_ipcbuf = ctypes.cast(ipcio, ctypes.POINTER(_dada.ipcbuf_t))
        super(IpcBaseIO, self).__init__(as_ipcbuf, mutable)
        self.io = ipcio
    def stop(self):
        """Write an end-of-data marker to the stream."""
        #print('>ipcio_stop')
        if _dada.ipcio_stop(self.io) < 0:
            raise IOError("Failed to write EOD marker to block")
class IpcReadHeaderBuf(IpcBaseBuf):
    """Read-side access to a PSRDADA header buffer."""
    def __init__(self, ipcbuf):
        super(IpcReadHeaderBuf, self).__init__(ipcbuf)
    def open(self):
        """Acquire the next header block for reading.

        Returns (ctypes ptr, byte count, block id); header buffers have no
        meaningful block id, so 0 is returned.
        """
        nbyte = ctypes.c_uint64()
        #print('>ipcbuf_get_next_read')
        ptr = _dada.ipcbuf_get_next_read(self.buf, nbyte)
        return ptr, nbyte.value, 0
    def close(self, nbyte):
        """Release the current header block (byte count is ignored)."""
        #print('>ipcbuf_mark_cleared')
        if _dada.ipcbuf_mark_cleared(self.buf) < 0:
            raise IOError("Failed to mark block as cleared")
class IpcWriteHeaderBuf(IpcBaseBuf):
    """Write-side access to a PSRDADA header buffer (blocks are mutable)."""
    def __init__(self, ipcbuf):
        super(IpcWriteHeaderBuf, self).__init__(ipcbuf, mutable=True)
    def open(self):
        """Acquire the next header block for writing.

        Returns (ctypes ptr, byte count, block id); header buffers have no
        meaningful block id, so 0 is returned.
        """
        #print('>ipcbuf_get_next_write')
        ptr = _dada.ipcbuf_get_next_write(self.buf)
        return ptr, self.size_bytes(), 0
    def close(self, nbyte):
        """Release the current header block, marking nbyte bytes as filled."""
        #print('>ipcbuf_mark_filled')
        if _dada.ipcbuf_mark_filled(self.buf, nbyte) < 0:
            raise IOError("Failed to mark block as filled")
class IpcReadDataBuf(IpcBaseIO):
    """Read-side access to a PSRDADA data stream."""
    def __init__(self, ipcio):
        super(IpcReadDataBuf, self).__init__(ipcio)
    def open(self):
        """Acquire the next data block for reading.

        Returns (ctypes ptr, byte count, block id).
        """
        nbyte = ctypes.c_uint64()
        block_id = ctypes.c_uint64()
        #print('>ipcio_open_block_read')
        ptr = _dada.ipcio_open_block_read(self.io, nbyte, block_id)
        return ptr, nbyte.value, block_id.value
    def close(self, nbyte):
        """Release the current data block after reading nbyte bytes."""
        #print('>ipcio_close_block_read(nbyte=%i)' % nbyte)
        if _dada.ipcio_close_block_read(self.io, nbyte) < 0:
            raise IOError("Failed to close block for reading")
class IpcWriteDataBuf(IpcBaseIO):
    """Write-side access to a PSRDADA data stream (blocks are mutable)."""
    def __init__(self, ipcio):
        super(IpcWriteDataBuf, self).__init__(ipcio, mutable=True)
        self.nbyte_commit = 0 # Default to committing nothing
    def open(self):
        """Acquire the next data block for writing.

        Returns (ctypes ptr, byte count, block id).
        """
        nbyte = self.size_bytes()
        block_id = ctypes.c_uint64()
        #print('>ipcio_open_block_write')
        ptr = _dada.ipcio_open_block_write(self.io, block_id)
        return ptr, nbyte, block_id.value
    def close(self, nbyte):
        """Release the current data block, committing nbyte written bytes."""
        #print('>ipcio_close_block_write(nbyte=%i)' % nbyte)
        if _dada.ipcio_close_block_write(self.io, nbyte) < 0:
            raise IOError("Failed to close block for writing")
class Hdu(object):
    """A PSRDADA header-data unit: a connection to a shared-memory ring
    (header buffer + data buffer) identified by a hex buffer key.

    Use connect_read()/connect_write() to attach, which also populates
    ``header_block`` and ``data_block`` with the matching read/write
    wrappers. disconnect() (also run on garbage collection) unlocks and
    detaches.
    """
    def __init__(self):
        # Keep a reference to the module so __del__ can still reach it
        # during interpreter teardown
        self._dada = _dada
        self.log = MultiLog()
        self.hdu = _dada.dada_hdu_create(self.log.obj)
        self.connected = False
    def __del__(self):
        self.disconnect()
        _dada.dada_hdu_destroy(self.hdu)
    def _connect(self, buffer_key=0xDADA):
        """Attach to the shared-memory buffers identified by buffer_key."""
        self.buffer_key = buffer_key
        _dada.dada_hdu_set_key(self.hdu, self.buffer_key)
        if _dada.dada_hdu_connect(self.hdu) < 0:
            raise IOError("Could not connect to buffer '%x'" % self.buffer_key)
    def _disconnect(self):
        if _dada.dada_hdu_disconnect(self.hdu) < 0:
            raise IOError("Could not disconnect from buffer '%x'" % self.buffer_key)
    def _lock(self, mode):
        """Acquire the buffer in 'read' mode or (any other value) write mode."""
        self.mode = mode
        if mode == 'read':
            if _dada.dada_hdu_lock_read(self.hdu) < 0:
                raise IOError("Could not lock buffer '%x' for reading" % self.buffer_key)
        else:
            if _dada.dada_hdu_lock_write(self.hdu) < 0:
                raise IOError("Could not lock buffer '%x' for writing" % self.buffer_key)
    def _unlock(self):
        if self.mode == 'read':
            if _dada.dada_hdu_unlock_read(self.hdu) < 0:
                raise IOError("Could not unlock buffer '%x' for reading" % self.buffer_key)
        else:
            if _dada.dada_hdu_unlock_write(self.hdu) < 0:
                raise IOError("Could not unlock buffer '%x' for writing" % self.buffer_key)
    def relock(self):
        """Release and re-acquire the buffer in the current mode."""
        self._unlock()
        self._lock(self.mode)
    def open_HACK(self):
        # Workaround: force-open the underlying ipcio stream for writing.
        # NOTE(review): intent of this hack is not evident from here —
        # confirm against callers before removing.
        if _dada.ipcio_open(self.data_block.io, 'w') < 0:
            raise IOError("ipcio_open failed")
    def connect_read(self, buffer_key=0xDADA):
        """Attach to buffer_key and lock it for reading."""
        self._connect(buffer_key)
        self._lock('read')
        self.header_block = IpcReadHeaderBuf(self.hdu.contents.header_block)
        self.data_block = IpcReadDataBuf(self.hdu.contents.data_block)
        self.connected = True
    def connect_write(self, buffer_key=0xDADA):
        """Attach to buffer_key and lock it for writing."""
        self._connect(buffer_key)
        self._lock('write')
        self.header_block = IpcWriteHeaderBuf(self.hdu.contents.header_block)
        self.data_block = IpcWriteDataBuf(self.hdu.contents.data_block)
        self.connected = True
    def disconnect(self):
        """Unlock and detach if currently connected (idempotent)."""
        if self.connected:
            self._unlock()
            self._disconnect()
            self.connected = False
| bsd-3-clause | 68d36c1c8878d30d112c3c53f4827197 | 37.268482 | 91 | 0.629181 | 3.449667 | false | false | false | false |
ledatelescope/bifrost | python/bifrost/blocks/serialize.py | 1 | 11508 |
# Copyright (c) 2016-2020, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Python2 compatibility
from __future__ import absolute_import, print_function
import sys
if sys.version_info < (3,):
range = xrange
from bifrost.pipeline import SinkBlock, SourceBlock
import os
import warnings
try:
import simplejson as json
except ImportError:
warnings.warn("Install simplejson for better performance", RuntimeWarning)
import json
import glob
from functools import reduce
from bifrost import telemetry
telemetry.track_module()
def _parse_bifrost_filename(fname):
inds = fname[fname.find('.bf.') + 4:].split('.')[:-1]
inds = [int(i) for i in inds]
frame0, ringlet_inds = inds[0], inds[1:]
return frame0, ringlet_inds
class BifrostReader(object):
    """Reader for the '.bf' dump format written by SerializeBlock.

    Loads the JSON sequence header from '<basename>.bf.json' and streams
    frame data out of the accompanying '<basename>.bf.*.dat' files, which
    may be split both by frame offset and by ringlet index.
    """
    def __init__(self, basename):
        assert(basename.endswith('.bf'))
        hdr_filename = basename + '.json'
        with open(hdr_filename, 'r') as hdr_file:
            self.header = json.load(hdr_file)
        data_filenames = glob.glob(basename + '.*.dat')
        # Each filename encodes (frame_offset, [ringlet indices])
        inds = [_parse_bifrost_filename(fname) for fname in data_filenames]
        # NOTE(review): frame0s is never used after this unpacking
        frame0s, ringlet_inds = zip(*inds)
        nringlets = [max(r) + 1 for r in zip(*ringlet_inds)]
        # TODO: Support multiple ringlet axes (needed in SerializeBlock too)
        assert(len(nringlets) <= 1)
        self.nringlet = nringlets[0] if len(nringlets) else 0
        if self.nringlet > 0:
            # Reduce each index tuple to its (single) ringlet index
            ringlet_inds = [inds[0] for inds in ringlet_inds]
            self.ringlet_files = []
            for ringlet in range(self.nringlet):
                # Files belonging to this ringlet, in frame-offset order
                # (zero-padded offsets make lexical sort == numeric sort)
                ringlet_filenames = [f for f, r in zip(data_filenames, ringlet_inds)
                                     if r == ringlet]
                ringlet_filenames.sort()
                ringlet_files = [open(f, 'rb') for f in ringlet_filenames]
                self.ringlet_files.append(ringlet_files)
            self.nfile = len(self.ringlet_files[0])
            if not all([len(files) == self.nfile for files in self.ringlet_files]):
                raise IOError("Number of files in each ringlet does not match")
        else:
            data_filenames.sort()
            self.files = [open(f, 'rb') for f in data_filenames]
            self.nfile = len(self.files)
        # Index of the file currently being read (per ringlet)
        self.cur_file = 0
    def __enter__(self):
        return self
    def __exit__(self, type, value, tb):
        # Close every file handle opened in __init__
        if self.nringlet > 0:
            for ringlet in self.ringlet_files:
                for f in ringlet:
                    f.close()
        else:
            for f in self.files:
                f.close()
    def readinto(self, buf, frame_nbyte):
        """Fill `buf` with frame data, advancing across files as needed.

        Returns the number of complete frames read (0 at end of stream).
        Raises IOError if a file ends partway through a frame.
        """
        if self.cur_file == self.nfile:
            return 0
        nframe_read = 0
        if self.nringlet > 0:
            # First dimension of buf is ringlets
            bufs = buf
            nbyte_reads = [ringlet_file[self.cur_file].readinto(buf)
                           for ringlet_file, buf in zip(self.ringlet_files, bufs)]
            nbyte_read = min(nbyte_reads)
        else:
            nbyte_read = self.files[self.cur_file].readinto(buf)
        if nbyte_read % frame_nbyte != 0:
            raise IOError("Unexpected end of file")
        nframe_read += nbyte_read // frame_nbyte
        # NOTE(review): in the non-ringlet path `buf[0].nbytes` is the size of
        # one slice of buf, not the whole buffer -- confirm this is intended.
        while nbyte_read < buf[0].nbytes:
            self.cur_file += 1
            if self.cur_file == self.nfile:
                break
            # NOTE(review): these continuation reads write from offset 0 of the
            # same buffer, overwriting data read from the previous file rather
            # than appending after it (no memoryview offset) -- verify.
            if self.nringlet > 0:
                nbyte_reads = [ringlet_file[self.cur_file].readinto(buf)
                               for ringlet_file, buf in zip(self.ringlet_files, bufs)]
                nbyte_read = min(nbyte_reads)
            else:
                nbyte_read = self.files[self.cur_file].readinto(buf)
            if nbyte_read % frame_nbyte != 0:
                raise IOError("Unexpected end of file")
            nframe_read += nbyte_read // frame_nbyte
        return nframe_read
class DeserializeBlock(SourceBlock):
    """Source block that replays '.bf' dumps (see ``serialize``) into a ring."""
    def __init__(self, filenames, gulp_nframe, *args, **kwargs):
        super(DeserializeBlock, self).__init__(filenames, gulp_nframe, *args, **kwargs)
    def create_reader(self, sourcename):
        # One BifrostReader per input '.bf' basename
        return BifrostReader(sourcename)
    def on_sequence(self, ireader, sourcename):
        # The JSON header loaded by BifrostReader becomes the sequence header
        return [ireader.header]
    def on_data(self, reader, ospans):
        out = ospans[0]
        return [reader.readinto(out.data, out.frame_nbyte)]
def deserialize(filenames, gulp_nframe, *args, **kwargs):
    """Deserializes a data stream from a set of files using a simple data format

    Reads sequence headers from JSON files and sequence data as raw binary
    from the accompanying data files; this is the inverse of ``serialize``.

    The actual header and data files must have the following general form::

      # Header
      <filename>.json
      # Single-ringlet data
      <filename>.<frame_offset>.dat
      # Multi-ringlet data
      <filename>.<frame_offset>.<ringlet>.dat

    Args:
        filenames (list): List of input filenames (each ending with '.bf')
        gulp_nframe (int): No. frames to read at a time.
        *args: Arguments to ``bifrost.pipeline.SourceBlock``.
        **kwargs: Keyword Arguments to ``bifrost.pipeline.SourceBlock``.

    **Tensor semantics**::

        Input:  One data file per sequence
        Output: [frame, ...], dtype = any, space = SYSTEM

        Input:  One data file per ringlet
        Output: [ringlet, frame, ...], dtype = any, space = SYSTEM

    Returns:
        DeserializeBlock: A new block instance.
    """
    return DeserializeBlock(filenames, gulp_nframe, *args, **kwargs)
# NOTE: The corresponding DeserializeBlock is implemented above
class SerializeBlock(SinkBlock):
    """Sink block that dumps a stream to '<basename>.bf.json' + '.dat' files.

    See the ``serialize`` function below for the file-naming scheme and
    tensor semantics.
    """
    def __init__(self, iring, path, max_file_size=None, *args, **kwargs):
        super(SerializeBlock, self).__init__(iring, *args, **kwargs)
        if path is None:
            path = ''
        self.path = path
        if max_file_size is None:
            max_file_size = 1024**3  # Default per-file limit: 1 GiB
        self.max_file_size = max_file_size
    def _close_data_files(self):
        # No-op if no files have been opened yet
        if hasattr(self, 'ofiles'):
            for ofile in self.ofiles:
                ofile.close()
    def _open_new_data_files(self, frame_offset):
        """Close any open data files and open a fresh set starting at `frame_offset`."""
        self._close_data_files()
        self.bytes_written = 0
        if self.frame_axis == 0:
            # No ringlets, we can write all data to one file
            filenames = [self.basename + '.bf.%012i.dat' % frame_offset]
        elif self.frame_axis == 1:
            # Ringlets, we must write each to a separate file
            # (ringlet index zero-padded to the width of the largest index)
            ndigit = len(str(self.nringlet-1))
            filenames = [self.basename + ('.bf.%012i.%0'+str(ndigit)+'i.dat') %
                         (frame_offset, i)
                         for i in range(self.nringlet)]
        else:
            # TODO: Need to deal with separating multiple ringlet axes
            #       E.g., separate each ringlet dim with a dot
            #       Will have to lift/project the indices
            raise NotImplementedError("Multiple ringlet axes not supported")
        # Open data files
        self.ofiles = [open(fname, 'wb') for fname in filenames]
    def on_sequence(self, iseq):
        hdr = iseq.header
        tensor = hdr['_tensor']
        # Base output name: sequence name if set, else zero-padded time tag
        if hdr['name'] != '':
            self.basename = hdr['name']
        else:
            self.basename = '%020i' % hdr['time_tag']
        if self.path != '':
            # TODO: May need more flexibility in path handling
            #       E.g., may want to keep subdirs from original name
            self.basename = os.path.basename(self.basename)
            self.basename = os.path.join(self.path, self.basename)
        # Write sequence header file
        with open(self.basename + '.bf.json', 'w') as hdr_file:
            hdr_file.write(json.dumps(hdr, indent=4, sort_keys=True))
        shape = tensor['shape']
        self.frame_axis = shape.index(-1)  # -1 marks the frame (time) axis
        # Total ringlet count = product of all dims before the frame axis
        self.nringlet = reduce(lambda a, b: a * b, shape[:self.frame_axis], 1)
        self._open_new_data_files(frame_offset=0)
    def on_sequence_end(self, iseq):
        self._close_data_files()
    def on_data(self, ispan):
        if self.nringlet == 1:
            bytes_to_write = ispan.data.nbytes
        else:
            bytes_to_write = ispan.data[0].nbytes  # per-ringlet byte count
        # Check if file size limit has been reached
        if self.bytes_written + bytes_to_write > self.max_file_size:
            self._open_new_data_files(ispan.frame_offset)
        self.bytes_written += bytes_to_write
        # Write data to file(s)
        if self.nringlet == 1:
            ispan.data.tofile(self.ofiles[0])
        else:
            for r in range(self.nringlet):
                ispan.data[r].tofile(self.ofiles[r])
def serialize(iring, path=None, max_file_size=None, *args, **kwargs):
    """Serializes a data stream to a set of files using a simple data format

    Sequence headers are written as JSON files, and sequence data are written
    directly as binary to separate files.

    Filenames begin with the sequence name if present, or the time tag if not.
    The general form is::

      # Header
      <name_or_time_tag>.bf.json
      # Single-ringlet data
      <name_or_time_tag>.bf.<frame_offset>.dat
      # Multi-ringlet data
      <name_or_time_tag>.bf.<frame_offset>.<ringlet>.dat

    Args:
        iring (Ring or Block): Input data source.
        path (str): Path specifying where to write output files.
        max_file_size (int): Max no. bytes to write to a single file. If set to
            -1, no limit is applied.
        *args: Arguments to ``bifrost.pipeline.SinkBlock``.
        **kwargs: Keyword Arguments to ``bifrost.pipeline.SinkBlock``.

    **Tensor semantics**::

        Input:  [frame, ...], dtype = any, space = SYSTEM
        Output: One data file per sequence

        Input:  [ringlet, frame, ...], dtype = any, space = SYSTEM
        Output: One data file per ringlet

    Returns:
        SerializeBlock: A new block instance.
    """
    # NOTE(review): max_file_size=-1 does not actually disable the limit:
    # SerializeBlock.on_data's size check then triggers on every span,
    # producing one file per gulp -- confirm intended semantics.
    return SerializeBlock(iring, path, max_file_size, *args, **kwargs)
| bsd-3-clause | cbd626e3315ab52ba518f65e2196d0af | 40.1 | 87 | 0.614095 | 3.774352 | false | false | false | false |
ledatelescope/bifrost | test/test_ndarray.py | 1 | 7630 |
# Copyright (c) 2016-2022, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import numpy as np
import bifrost as bf
import ctypes
from bifrost.libbifrost_generated import BF_CUDA_ENABLED
class NDArrayTest(unittest.TestCase):
    """Unit tests for bf.ndarray construction, space copies, views and indexing.

    GPU-dependent tests are skipped unless bifrost was built with CUDA
    support (BF_CUDA_ENABLED).
    """
    def setUp(self):
        # Reference values used by most tests, in list and np.ndarray form
        self.known_vals = [[0,1],[2,3],[4,5]]
        self.known_array = np.array(self.known_vals, dtype=np.float32)
    def test_construct(self):
        a = bf.ndarray(self.known_vals, dtype='f32')
        np.testing.assert_equal(a, self.known_array)
    def test_assign(self):
        b = bf.ndarray(shape=(3,2), dtype='f32')
        b[...] = self.known_array
        np.testing.assert_equal(b, self.known_array)
    @unittest.skipUnless(BF_CUDA_ENABLED, "requires GPU support")
    def test_space_copy(self):
        # Round-trip: system -> cuda -> cuda_host -> system
        c = bf.ndarray(self.known_vals, dtype='f32')
        c = c.copy(space='cuda').copy(space='cuda_host').copy(space='system')
        np.testing.assert_equal(c, self.known_array)
    def run_contiguous_copy(self, space='system'):
        # A copy of a transposed (non-contiguous) array must yield data that
        # is physically contiguous in the new layout
        a = np.random.rand(2,3,4,5)
        a = a.astype(np.float64)
        b = a.transpose(0,3,2,1).copy()
        c = bf.zeros(a.shape, dtype=a.dtype, space='system')
        c[...] = a
        c = c.copy(space=space)
        d = c.transpose(0,3,2,1).copy(space='system')
        # Use ctypes to directly access the memory
        b_data = ctypes.cast(b.ctypes.data, ctypes.POINTER(ctypes.c_double))
        b_data = np.array([b_data[i] for i in range(b.size)])
        d_data = ctypes.cast(d.ctypes.data, ctypes.POINTER(ctypes.c_double))
        d_data = np.array([d_data[i] for i in range(d.size)])
        np.testing.assert_equal(d_data, b_data)
    def test_contiguous_copy(self):
        self.run_contiguous_copy()
    @unittest.skipUnless(BF_CUDA_ENABLED, "requires GPU support")
    def test_space_contiguous_copy(self):
        self.run_contiguous_copy(space='cuda')
    def run_slice_copy(self, space='system'):
        # Copying a sliced array must pack the selected elements contiguously
        a = np.random.rand(2,3,4,5)
        a = a.astype(np.float64)
        b = a[:,1:,:,:].copy()
        c = bf.zeros(a.shape, dtype=a.dtype, space='system')
        c[...] = a
        c = c.copy(space=space)
        d = c[:,1:,:,:].copy(space='system')
        # Use ctypes to directly access the memory
        b_data = ctypes.cast(b.ctypes.data, ctypes.POINTER(ctypes.c_double))
        b_data = np.array([b_data[i] for i in range(b.size)])
        d_data = ctypes.cast(d.ctypes.data, ctypes.POINTER(ctypes.c_double))
        d_data = np.array([d_data[i] for i in range(d.size)])
        np.testing.assert_equal(d_data, b_data)
    def test_slice_copy(self):
        self.run_slice_copy()
    @unittest.skipUnless(BF_CUDA_ENABLED, "requires GPU support")
    def test_space_slice_copy(self):
        self.run_slice_copy(space='cuda')
    def run_contiguous_slice_copy(self, space='system'):
        # Combination of the two cases above: transpose then slice
        a = np.random.rand(2,3,4,5)
        a = a.astype(np.float64)
        b = a.transpose(0,3,2,1)[:,1:,:,:].copy()
        c = bf.zeros(a.shape, dtype=a.dtype, space='system')
        c[...] = a
        c = c.copy(space=space)
        d = c.transpose(0,3,2,1)[:,1:,:,:].copy(space='system')
        # Use ctypes to directly access the memory
        b_data = ctypes.cast(b.ctypes.data, ctypes.POINTER(ctypes.c_double))
        b_data = np.array([b_data[i] for i in range(b.size)])
        d_data = ctypes.cast(d.ctypes.data, ctypes.POINTER(ctypes.c_double))
        d_data = np.array([d_data[i] for i in range(d.size)])
        np.testing.assert_equal(d_data, b_data)
    def test_contiguous_slice_copy(self):
        self.run_contiguous_slice_copy()
    @unittest.skipUnless(BF_CUDA_ENABLED, "requires GPU support")
    def test_space_contiguous_slice_copy(self):
        self.run_contiguous_slice_copy(space='cuda')
    def test_view(self):
        # Reinterpret pairs of f32 as complex64 values
        d = bf.ndarray(self.known_vals, dtype='f32')
        d = d.view(dtype='cf32')
        np.testing.assert_equal(d, np.array([[0 + 1j], [2 + 3j], [4 + 5j]]))
    @unittest.skipUnless(BF_CUDA_ENABLED, "requires GPU support")
    def test_str(self):
        e = bf.ndarray(self.known_vals, dtype='f32', space='cuda')
        self.assertEqual(str(e), str(self.known_array))
    @unittest.skipUnless(BF_CUDA_ENABLED, "requires GPU support")
    def test_repr(self):
        f = bf.ndarray(self.known_vals, dtype='f32', space='cuda')
        repr_f = repr(f)
        # Note: This chops off the class name
        repr_f = repr_f[repr_f.find('('):]
        repr_k = repr(self.known_array)
        repr_k = repr_k[repr_k.find('('):]
        # Remove whitespace (for some reason the indentation differs)
        repr_f = repr_f.replace(' ', '')
        repr_k = repr_k.replace(' ', '')
        self.assertEqual(repr_f, repr_k)
    @unittest.skipUnless(BF_CUDA_ENABLED, "requires GPU support")
    def test_zeros_like(self):
        g = bf.ndarray(self.known_vals, dtype='f32', space='cuda')
        g = bf.zeros_like(g)
        g = g.copy('system')
        known = np.zeros_like(self.known_array)
        np.testing.assert_equal(g, known)
    @unittest.skipUnless(BF_CUDA_ENABLED, "requires GPU support")
    def test_getitem(self):
        # Indexing a device array: rows, tuples, scalars and slices
        g = bf.ndarray(self.known_vals, space='cuda')
        np.testing.assert_equal(g[0].copy('system'), self.known_array[0])
        np.testing.assert_equal(g[(0,)].copy('system'), self.known_array[(0,)])
        np.testing.assert_equal(int(g[0,0]), self.known_array[0,0])
        np.testing.assert_equal(g[:1,1:].copy('system'), self.known_array[:1,1:])
    @unittest.skipUnless(BF_CUDA_ENABLED, "requires GPU support")
    def test_setitem(self):
        # Assigning into a device array: full, sliced, scalar, row, column
        g = bf.zeros_like(self.known_vals, space='cuda')
        g[...] = self.known_vals
        np.testing.assert_equal(g.copy('system'), self.known_vals)
        g[:1,1:] = [[999]]
        np.testing.assert_equal(g.copy('system'), np.array([[0,999],[2,3],[4,5]]))
        g[0,0] = 888
        np.testing.assert_equal(g.copy('system'), np.array([[888,999],[2,3],[4,5]]))
        g[0] = [99,88]
        np.testing.assert_equal(g.copy('system'), np.array([[99,88],[2,3],[4,5]]))
        g[:,1] = [77,66,55]
        np.testing.assert_equal(g.copy('system'), np.array([[99,77],[2,66],[4,55]]))
| bsd-3-clause | 852389bf49de40e4d6bd17f5e2a56e48 | 48.545455 | 84 | 0.634731 | 3.264869 | false | true | false | false |
ledatelescope/bifrost | testbench/your_first_block.py | 1 | 3507 | #!/usr/bin/env python
# Copyright (c) 2017-2020, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# your_first_block.py
This testbench initializes a simple bifrost pipeline that reads from a binary file,
and then writes the data to an output file.
"""
# Python2 compatibility
from __future__ import print_function
import os
import numpy as np
import bifrost.pipeline as bfp
from bifrost.blocks import BinaryFileReadBlock, BinaryFileWriteBlock
import glob
from datetime import datetime
from copy import deepcopy
from pprint import pprint
class UselessAddBlock(bfp.TransformBlock):
    """Transform block that adds a constant offset to every sample."""
    def __init__(self, iring, n_to_add, *args, **kwargs):
        super(UselessAddBlock, self).__init__(iring, *args, **kwargs)
        self.n_to_add = n_to_add
    def on_sequence(self, iseq):
        # Copy the header and tag the sequence name so downstream blocks
        # can tell this stream apart from the raw one
        header = deepcopy(iseq.header)
        header["name"] += "_with_added_value"
        return header
    def on_data(self, ispan, ospan):
        nframe = ispan.nframe
        ospan.data[...] = ispan.data + self.n_to_add
        return nframe
class PrintStuffBlock(bfp.SinkBlock):
    """Sink block that prints each sequence header and every 100th data span."""
    def __init__(self, iring, *args, **kwargs):
        super(PrintStuffBlock, self).__init__(iring, *args, **kwargs)
        self.n_iter = 0
    def on_sequence(self, iseq):
        # Announce the new sequence and reset the span counter
        print("[%s]" % datetime.now())
        print(iseq.name)
        pprint(iseq.header)
        self.n_iter = 0
    def on_data(self, ispan):
        stamp = datetime.now()
        if self.n_iter % 100 == 0:  # throttle: print 1 span in 100
            print("[%s] %s" % (stamp, ispan.data))
        self.n_iter += 1
if __name__ == "__main__":
    # Setup pipeline: read float32 gulps of 32768 samples from the test files
    filenames = sorted(glob.glob('testdata/sin_data*.bin'))
    b_read = BinaryFileReadBlock(filenames, 32768, 1, 'f32')
    b_add = UselessAddBlock(b_read, n_to_add=100)
    # Print both the raw stream and the offset stream for comparison
    b_print = PrintStuffBlock(b_read)
    b_print2 = PrintStuffBlock(b_add)
    # Run pipeline
    pipeline = bfp.get_default_pipeline()
    print(pipeline.dot_graph())  # Graphviz description of the block graph
    pipeline.run()
| bsd-3-clause | da8cfbf465817a1e4c6ce7e6c371587a | 34.424242 | 83 | 0.692615 | 3.742796 | false | false | false | false |
ledatelescope/bifrost | python/bifrost/views/basic_views.py | 1 | 8870 |
# Copyright (c) 2016-2021, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division
from bifrost.pipeline import block_view
from bifrost.DataType import DataType
from bifrost.units import convert_units
from numpy import isclose
from bifrost import telemetry
telemetry.track_module()
def custom(block, hdr_transform):
    """Alias for `bifrost.pipeline.block_view`: view `block` with an
    arbitrary user-supplied header transform."""
    return block_view(block, hdr_transform)
def rename_axis(block, old, new):
    """Return a view of `block` with the axis labelled `old` relabelled `new`."""
    def header_transform(hdr, old=old, new=new):
        labels = hdr['_tensor']['labels']
        labels[labels.index(old)] = new
        return hdr
    return block_view(block, header_transform)
def reinterpret_axis(block, axis, label, scale=None, units=None):
    """ Manually reinterpret the label, scale and/or units of an axis """
    def header_transform(hdr, axis=axis, label=label, scale=scale, units=units):
        tensor = hdr['_tensor']
        # Resolve a label reference to a numeric axis index
        idx = tensor['labels'].index(axis) if isinstance(axis, str) else axis
        if label is not None:
            tensor['labels'][idx] = label
        if scale is not None:
            tensor['scales'][idx] = scale
        if units is not None:
            tensor['units'][idx] = units
        return hdr
    return block_view(block, header_transform)
def reverse_scale(block, axis):
    """ Manually negate the scale step of the given axis """
    def header_transform(hdr, axis=axis):
        tensor = hdr['_tensor']
        idx = tensor['labels'].index(axis) if isinstance(axis, str) else axis
        tensor['scales'][idx][1] = -tensor['scales'][idx][1]
        return hdr
    return block_view(block, header_transform)
def add_axis(block, axis, label=None, scale=None, units=None):
    """Add an extra dimension to the frame at position 'axis'

    E.g., if the shape is [-1, 3, 2], then
    selecting axis=1 would change the shape to be
    [-1, 1, 3, 2].

    Axis may be negative, or a string corresponding to an existing axis label,
    in which case the new axis is inserted after the referenced axis.
    """
    def header_transform(hdr, axis=axis, label=label, scale=scale, units=units):
        tensor = hdr['_tensor']
        pos = axis
        if isinstance(pos, str):
            # Insert immediately after the named axis
            pos = tensor['labels'].index(pos) + 1
        if pos < 0:
            # Insertion positions span [0, ndim], hence the +1
            pos += len(tensor['shape']) + 1
        tensor['shape'].insert(pos, 1)
        for key, value in (('labels', label), ('scales', scale), ('units', units)):
            if key in tensor:
                tensor[key].insert(pos, value)
        return hdr
    return block_view(block, header_transform)
def delete_axis(block, axis):
    """Remove a unitary dimension from the frame

    E.g., if the shape is [-1, 1, 3, 2], then
    selecting axis=1 would change the shape to be
    [-1, 3, 2].

    Axis may be negative, or a string corresponding to an existing axis label.

    Raises:
        ValueError: If the selected axis does not have length 1.
    """
    def header_transform(hdr, axis=axis):
        tensor = hdr['_tensor']
        specified_axis = axis
        if isinstance(axis, str):
            # Quote the label for the error message below
            specified_axis = "'%s'" % specified_axis
            axis = tensor['labels'].index(axis)
        if axis < 0:
            # Valid deletion indices span [0, ndim), so negative axes are
            # offset by ndim. (The previous `+ 1`, copied from add_axis's
            # insertion logic, mapped axis=-1 past the end of the list.)
            axis += len(tensor['shape'])
        if tensor['shape'][axis] != 1:
            raise ValueError("Cannot delete non-unitary axis %s with shape %i"
                             % (specified_axis, tensor['shape'][axis]))
        del tensor['shape'][axis]
        if 'labels' in tensor:
            del tensor['labels'][axis]
        if 'scales' in tensor:
            del tensor['scales'][axis]
        if 'units' in tensor:
            del tensor['units'][axis]
        return hdr
    return block_view(block, header_transform)
def astype(block, dtype):
    """Return a view of `block` with its data reinterpreted as `dtype`.

    The length of the last (fastest-varying) axis is rescaled so that the
    total number of bytes along that axis is preserved.

    Raises:
        ValueError: If the last-axis byte count is not a multiple of the
            new type's item size.
    """
    def header_transform(hdr, new_dtype=dtype):
        tensor = hdr['_tensor']
        old_itemsize = DataType(tensor['dtype']).itemsize
        new_itemsize = DataType(new_dtype).itemsize
        # Total no. bytes along the last axis must be preserved
        old_axissize = old_itemsize * tensor['shape'][-1]
        if old_axissize % new_itemsize:
            raise ValueError("New type not compatible with data shape")
        tensor['shape'][-1] = old_axissize // new_itemsize
        # Use the early-bound new_dtype consistently (the original mixed the
        # enclosing-scope `dtype` with the default-bound `new_dtype`)
        tensor['dtype'] = new_dtype
        return hdr
    return block_view(block, header_transform)
def split_axis(block, axis, n, label=None):
    """Return a view of `block` with `axis` split into (axis, new_axis_of_n).

    `axis` may be an index or a label. The new axis of length `n` is
    inserted immediately after `axis`, whose length is divided by `n`.
    """
    # Set function attributes to enable capture in nested function (closure)
    def header_transform(hdr, axis=axis, n=n, label=label):
        tensor = hdr['_tensor']
        if isinstance(axis, str):
            axis = tensor['labels'].index(axis)
        shape = tensor['shape']
        if shape[axis] == -1:
            # Axis is frame axis: splitting changes the effective gulp size
            # (ceil-divide so partial gulps still fit)
            # TODO: Should assert even division here instead?
            # ***TODO: Why does pipeline deadlock when this doesn't divide?
            hdr['gulp_nframe'] = (hdr['gulp_nframe'] - 1) // n + 1
        else:
            # Axis is not frame axis
            if shape[axis] % n:
                raise ValueError("Split does not evenly divide axis (%i // %i)" %
                                 (tensor['shape'][axis], n))
            shape[axis] //= n
        shape.insert(axis + 1, n)
        if 'units' in tensor:
            # New inner axis inherits the units of the axis being split
            tensor['units'].insert(axis + 1, tensor['units'][axis])
        if 'labels' in tensor:
            if label is None:
                label = tensor['labels'][axis] + "_split"
            tensor['labels'].insert(axis + 1, label)
        if 'scales' in tensor:
            # Inner axis keeps the fine step; outer axis step grows by n
            tensor['scales'].insert(axis + 1, [0, tensor['scales'][axis][1]])
            tensor['scales'][axis][1] *= n
        return hdr
    return block_view(block, header_transform)
def merge_axes(block, axis1, axis2, label=None):
    """Return a view of `block` with two adjacent axes merged into one.

    The second (inner) axis must not be the frame axis, and the two axes'
    scales/units must be consistent (outer step == n * inner step).
    """
    def header_transform(hdr, axis1=axis1, axis2=axis2, label=label):
        tensor = hdr['_tensor']
        if isinstance(axis1, str):
            axis1 = tensor['labels'].index(axis1)
        if isinstance(axis2, str):
            axis2 = tensor['labels'].index(axis2)
        # Normalize so axis1 is the outer axis
        axis1, axis2 = sorted([axis1, axis2])
        if axis2 != axis1 + 1:
            raise ValueError("Merge axes must be adjacent")
        n = tensor['shape'][axis2]
        if n == -1:
            # Axis2 is frame axis
            raise ValueError("Second merge axis cannot be frame axis")
        elif tensor['shape'][axis1] == -1:
            # Axis1 is frame axis: merging scales the gulp size instead
            hdr['gulp_nframe'] *= n
        else:
            # Neither axis is frame axis
            tensor['shape'][axis1] *= n
        del tensor['shape'][axis2]
        if 'scales' in tensor and 'units' in tensor:
            scale1 = tensor['scales'][axis1][1]
            scale2 = tensor['scales'][axis2][1]
            units1 = tensor['units'][axis1]
            units2 = tensor['units'][axis2]
            scale2 = convert_units(scale2, units2, units1)
            # The outer step must equal n inner steps for the merge to be valid
            if not isclose(scale1, n * scale2):
                raise ValueError("Scales of merge axes do not line up: "
                                 "%f != %f" % (scale1, n * scale2))
            # Merged axis keeps the fine (inner) step
            tensor['scales'][axis1][1] = scale2
            del tensor['scales'][axis2]
            del tensor['units'][axis2]
        if 'labels' in tensor:
            if label is not None:
                tensor['labels'][axis1] = label
            del tensor['labels'][axis2]
        return hdr
    return block_view(block, header_transform)
| bsd-3-clause | dcbd0ba286d51edb6225615217ce2b6b | 40.448598 | 81 | 0.609808 | 4.059497 | false | false | false | false |
ledatelescope/bifrost | docs/source/conf.py | 1 | 5080 | # -*- coding: utf-8 -*-
#
# bifrost documentation build configuration file, created by
# sphinx-quickstart on Thu Apr 27 09:41:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.doctest',
              'sphinx.ext.todo',
              'sphinx.ext.coverage',
              'sphinx.ext.mathjax',
              'sphinx.ext.ifconfig',
              'sphinx.ext.viewcode',
              'sphinx.ext.napoleon',
              'breathe']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
# NOTE: This assignment had been accidentally fused onto the comment line
# above, leaving source_suffix undefined; restored as live code.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bifrost'
copyright = u'2017, ledatelescope'
author = u'ledatelescope'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.6'
# The full version, including alpha/beta/rc tags.
release = u'0.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'bifrostdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'bifrost.tex', u'bifrost Documentation',
u'ledatelescope', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'bifrost', u'bifrost Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'bifrost', u'bifrost Documentation',
author, 'bifrost', 'A stream processing framework for high-throughput applications.',
'Miscellaneous'),
]
# -- Set up breathe -------------------------------------------------------
breathe_projects = {"bifrost": "../doxygen/xml"}
breathe_default_project = "bifrost"
| bsd-3-clause | c0a1cf71c182e9bc2246ba5ed68f1afd | 29.787879 | 90 | 0.666339 | 3.816679 | false | true | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.