after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def get_prediction(
self, start=None, end=None, dynamic=False, index=None, exog=None, **kwargs
):
r"""
In-sample prediction and out-of-sample forecasting
Parameters
----------
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting, ie.,
the first forecast is start. Can also be a date string to
parse or a datetime type. Default is the the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, ie.,
the first forecast is start. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
exog : array_like, optional
If the model includes exogenous regressors, you must provide
exactly enough out-of-sample values for the exogenous variables if
end is beyond the last observation in the sample.
dynamic : bool, int, str, or datetime, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Can also be an absolute date string to parse or a
datetime type (these are not interpreted as offsets).
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
full_results : bool, optional
If True, returns a FilterResults instance; if False returns a
tuple with forecasts, the forecast errors, and the forecast error
covariance matrices. Default is False.
**kwargs
Additional arguments may required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : array
Array of out of sample forecasts.
"""
if start is None:
if (
self.model.simple_differencing
and not self.model._index_generated
and not self.model._index_dates
):
start = 0
else:
start = self.model._index[0]
# Handle start, end, dynamic
_start, _end, _out_of_sample, prediction_index = self.model._get_prediction_index(
start, end, index, silent=True
)
# Handle exogenous parameters
if _out_of_sample and (self.model.k_exog + self.model.k_trend > 0):
# Create a new faux SARIMAX model for the extended dataset
nobs = self.model.data.orig_endog.shape[0] + _out_of_sample
endog = np.zeros((nobs, self.model.k_endog))
if self.model.k_exog > 0:
if exog is None:
raise ValueError(
"Out-of-sample forecasting in a model"
" with a regression component requires"
" additional exogenous values via the"
" `exog` argument."
)
exog = np.array(exog)
required_exog_shape = (_out_of_sample, self.model.k_exog)
try:
exog = exog.reshape(required_exog_shape)
except ValueError:
raise ValueError(
"Provided exogenous values are not of the"
" appropriate shape. Required %s, got %s."
% (str(required_exog_shape), str(exog.shape))
)
exog = np.c_[self.model.data.orig_exog.T, exog.T].T
model_kwargs = self._init_kwds.copy()
model_kwargs["exog"] = exog
model = SARIMAX(endog, **model_kwargs)
model.update(self.params, transformed=True, includes_fixed=True)
# Set the kwargs with the update time-varying state space
# representation matrices
for name in self.filter_results.shapes.keys():
if name == "obs":
continue
mat = getattr(model.ssm, name)
if mat.shape[-1] > 1:
kwargs[name] = mat[..., -_out_of_sample:]
elif self.model.k_exog == 0 and exog is not None:
warn(
"Exogenous array provided to predict, but additional data not"
" required. `exog` argument ignored.",
ValueWarning,
)
return super(SARIMAXResults, self).get_prediction(
start=start, end=end, dynamic=dynamic, index=index, exog=exog, **kwargs
)
|
def get_prediction(
self, start=None, end=None, dynamic=False, index=None, exog=None, **kwargs
):
"""
In-sample prediction and out-of-sample forecasting
Parameters
----------
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting, ie.,
the first forecast is start. Can also be a date string to
parse or a datetime type. Default is the the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, ie.,
the first forecast is start. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
exog : array_like, optional
If the model includes exogenous regressors, you must provide
exactly enough out-of-sample values for the exogenous variables if
end is beyond the last observation in the sample.
dynamic : bool, int, str, or datetime, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Can also be an absolute date string to parse or a
datetime type (these are not interpreted as offsets).
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
full_results : bool, optional
If True, returns a FilterResults instance; if False returns a
tuple with forecasts, the forecast errors, and the forecast error
covariance matrices. Default is False.
**kwargs
Additional arguments may required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : array
Array of out of sample forecasts.
"""
if start is None:
if (
self.model.simple_differencing
and not self.model._index_generated
and not self.model._index_dates
):
start = 0
else:
start = self.model._index[0]
# Handle start, end, dynamic
_start, _end, _out_of_sample, prediction_index = self.model._get_prediction_index(
start, end, index, silent=True
)
# Handle exogenous parameters
if _out_of_sample and (self.model.k_exog + self.model.k_trend > 0):
# Create a new faux SARIMAX model for the extended dataset
nobs = self.model.data.orig_endog.shape[0] + _out_of_sample
endog = np.zeros((nobs, self.model.k_endog))
if self.model.k_exog > 0:
if exog is None:
raise ValueError(
"Out-of-sample forecasting in a model"
" with a regression component requires"
" additional exogenous values via the"
" `exog` argument."
)
exog = np.array(exog)
required_exog_shape = (_out_of_sample, self.model.k_exog)
try:
exog = exog.reshape(required_exog_shape)
except ValueError:
raise ValueError(
"Provided exogenous values are not of the"
" appropriate shape. Required %s, got %s."
% (str(required_exog_shape), str(exog.shape))
)
exog = np.c_[self.model.data.orig_exog.T, exog.T].T
model_kwargs = self._init_kwds.copy()
model_kwargs["exog"] = exog
model = SARIMAX(endog, **model_kwargs)
model.update(self.params, transformed=True, includes_fixed=True)
# Set the kwargs with the update time-varying state space
# representation matrices
for name in self.filter_results.shapes.keys():
if name == "obs":
continue
mat = getattr(model.ssm, name)
if mat.shape[-1] > 1:
kwargs[name] = mat[..., -_out_of_sample:]
elif self.model.k_exog == 0 and exog is not None:
warn(
"Exogenous array provided to predict, but additional data not"
" required. `exog` argument ignored.",
ValueWarning,
)
return super(SARIMAXResults, self).get_prediction(
start=start, end=end, dynamic=dynamic, index=index, exog=exog, **kwargs
)
|
https://github.com/statsmodels/statsmodels/issues/6244
|
c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py:914: RuntimeWarning: overflow encountered in square
params_variance = (residuals[k_params_ma:]**2).mean()
c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py:976: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
warn('Non-stationary starting autoregressive parameters'
c:\git\statsmodels\statsmodels\tsa\statespace\tools.py:487: RuntimeWarning: invalid value encountered in double_scalars
y[k-1, i] = (y[k, i] - y[k, k]*y[k, k-i-1]) / (1 - y[k, k]**2)
c:\git\statsmodels\statsmodels\tsa\statespace\tools.py:489: RuntimeWarning: invalid value encountered in sqrt
x = r / ((1 - r**2)**0.5)
Traceback (most recent call last):
File "C:\Anaconda\lib\site-packages\IPython\core\interactiveshell.py", line 3326, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-8-a7c5e4baeb4f>", line 10, in <module>
res = mod.fit()
File "c:\git\statsmodels\statsmodels\tsa\statespace\mlemodel.py", line 605, in fit
start_params = self.untransform_params(start_params)
File "c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py", line 1495, in untransform_params
unconstrained[start] = constrained[start]**0.5
IndexError: index 6 is out of bounds for axis 0 with size 4
|
IndexError
|
def summary(self, alpha=0.05, start=None):
# Create the model name
# See if we have an ARIMA component
order = ""
if self.model.k_ar + self.model.k_diff + self.model.k_ma > 0:
if self.model.k_ar == self.model.k_ar_params:
order_ar = self.model.k_ar
else:
order_ar = list(self.model._spec.ar_lags)
if self.model.k_ma == self.model.k_ma_params:
order_ma = self.model.k_ma
else:
order_ma = list(self.model._spec.ma_lags)
# If there is simple differencing, then that is reflected in the
# dependent variable name
k_diff = 0 if self.model.simple_differencing else self.model.k_diff
order = "(%s, %d, %s)" % (order_ar, k_diff, order_ma)
# See if we have an SARIMA component
seasonal_order = ""
has_seasonal = (
self.model.k_seasonal_ar + self.model.k_seasonal_diff + self.model.k_seasonal_ma
) > 0
if has_seasonal:
tmp = int(self.model.k_seasonal_ar / self.model.seasonal_periods)
if tmp == self.model.k_seasonal_ar_params:
order_seasonal_ar = int(
self.model.k_seasonal_ar / self.model.seasonal_periods
)
else:
order_seasonal_ar = list(self.model._spec.seasonal_ar_lags)
tmp = int(self.model.k_seasonal_ma / self.model.seasonal_periods)
if tmp == self.model.k_ma_params:
order_seasonal_ma = tmp
else:
order_seasonal_ma = list(self.model._spec.seasonal_ma_lags)
# If there is simple differencing, then that is reflected in the
# dependent variable name
k_seasonal_diff = self.model.k_seasonal_diff
if self.model.simple_differencing:
k_seasonal_diff = 0
seasonal_order = "(%s, %d, %s, %d)" % (
str(order_seasonal_ar),
k_seasonal_diff,
str(order_seasonal_ma),
self.model.seasonal_periods,
)
if not order == "":
order += "x"
model_name = "%s%s%s" % (self.model.__class__.__name__, order, seasonal_order)
return super(SARIMAXResults, self).summary(
alpha=alpha, start=start, title="SARIMAX Results", model_name=model_name
)
|
def summary(self, alpha=0.05, start=None):
# Create the model name
# See if we have an ARIMA component
order = ""
if self.model.k_ar + self.model.k_diff + self.model.k_ma > 0:
if self.model.k_ar == self.model.k_ar_params:
order_ar = self.model.k_ar
else:
order_ar = tuple(self.polynomial_ar.nonzero()[0][1:])
if self.model.k_ma == self.model.k_ma_params:
order_ma = self.model.k_ma
else:
order_ma = tuple(self.polynomial_ma.nonzero()[0][1:])
# If there is simple differencing, then that is reflected in the
# dependent variable name
k_diff = 0 if self.model.simple_differencing else self.model.k_diff
order = "(%s, %d, %s)" % (order_ar, k_diff, order_ma)
# See if we have an SARIMA component
seasonal_order = ""
has_seasonal = (
self.model.k_seasonal_ar + self.model.k_seasonal_diff + self.model.k_seasonal_ma
) > 0
if has_seasonal:
if self.model.k_ar == self.model.k_ar_params:
order_seasonal_ar = int(
self.model.k_seasonal_ar / self.model.seasonal_periods
)
else:
order_seasonal_ar = tuple(self.polynomial_seasonal_ar.nonzero()[0][1:])
if self.model.k_ma == self.model.k_ma_params:
order_seasonal_ma = int(
self.model.k_seasonal_ma / self.model.seasonal_periods
)
else:
order_seasonal_ma = tuple(self.polynomial_seasonal_ma.nonzero()[0][1:])
# If there is simple differencing, then that is reflected in the
# dependent variable name
k_seasonal_diff = self.model.k_seasonal_diff
if self.model.simple_differencing:
k_seasonal_diff = 0
seasonal_order = "(%s, %d, %s, %d)" % (
str(order_seasonal_ar),
k_seasonal_diff,
str(order_seasonal_ma),
self.model.seasonal_periods,
)
if not order == "":
order += "x"
model_name = "%s%s%s" % (self.model.__class__.__name__, order, seasonal_order)
return super(SARIMAXResults, self).summary(
alpha=alpha, start=start, model_name=model_name
)
|
https://github.com/statsmodels/statsmodels/issues/6244
|
c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py:914: RuntimeWarning: overflow encountered in square
params_variance = (residuals[k_params_ma:]**2).mean()
c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py:976: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
warn('Non-stationary starting autoregressive parameters'
c:\git\statsmodels\statsmodels\tsa\statespace\tools.py:487: RuntimeWarning: invalid value encountered in double_scalars
y[k-1, i] = (y[k, i] - y[k, k]*y[k, k-i-1]) / (1 - y[k, k]**2)
c:\git\statsmodels\statsmodels\tsa\statespace\tools.py:489: RuntimeWarning: invalid value encountered in sqrt
x = r / ((1 - r**2)**0.5)
Traceback (most recent call last):
File "C:\Anaconda\lib\site-packages\IPython\core\interactiveshell.py", line 3326, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-8-a7c5e4baeb4f>", line 10, in <module>
res = mod.fit()
File "c:\git\statsmodels\statsmodels\tsa\statespace\mlemodel.py", line 605, in fit
start_params = self.untransform_params(start_params)
File "c:\git\statsmodels\statsmodels\tsa\statespace\sarimax.py", line 1495, in untransform_params
unconstrained[start] = constrained[start]**0.5
IndexError: index 6 is out of bounds for axis 0 with size 4
|
IndexError
|
def start_params(self):
params = np.zeros(self.k_params, dtype=np.float64)
endog = self.endog.copy()
mask = ~np.any(np.isnan(endog), axis=1)
endog = endog[mask]
# 1. Factor loadings (estimated via PCA)
if self.k_factors > 0:
# Use principal components + OLS as starting values
res_pca = PCA(endog, ncomp=self.k_factors)
mod_ols = OLS(endog, res_pca.factors)
res_ols = mod_ols.fit()
# Using OLS params for the loadings tends to gives higher starting
# log-likelihood.
params[self._params_loadings] = res_ols.params.T.ravel()
# params[self._params_loadings] = res_pca.loadings.ravel()
# However, using res_ols.resid tends to causes non-invertible
# starting VAR coefficients for error VARs
# endog = res_ols.resid
endog = endog - np.dot(res_pca.factors, res_pca.loadings.T)
# 2. Exog (OLS on residuals)
if self.k_exog > 0:
mod_ols = OLS(endog, exog=self.exog)
res_ols = mod_ols.fit()
# In the form: beta.x1.y1, beta.x2.y1, beta.x1.y2, ...
params[self._params_exog] = res_ols.params.T.ravel()
endog = res_ols.resid
# 3. Factors (VAR on res_pca.factors)
stationary = True
if self.k_factors > 1 and self.factor_order > 0:
# 3a. VAR transition (OLS on factors estimated via PCA)
mod_factors = VAR(res_pca.factors)
res_factors = mod_factors.fit(maxlags=self.factor_order, ic=None, trend="nc")
# Save the parameters
params[self._params_factor_transition] = res_factors.params.T.ravel()
# Test for stationarity
coefficient_matrices = (
(
params[self._params_factor_transition]
.reshape(self.k_factors * self.factor_order, self.k_factors)
.T
)
.reshape(self.k_factors, self.k_factors, self.factor_order)
.T
)
stationary = is_invertible([1] + list(-coefficient_matrices))
elif self.k_factors > 0 and self.factor_order > 0:
# 3b. AR transition
Y = res_pca.factors[self.factor_order :]
X = lagmat(res_pca.factors, self.factor_order, trim="both")
params_ar = np.linalg.pinv(X).dot(Y)
stationary = is_invertible(np.r_[1, -params_ar.squeeze()])
params[self._params_factor_transition] = params_ar[:, 0]
# Check for stationarity
if not stationary and self.enforce_stationarity:
raise ValueError(
"Non-stationary starting autoregressive"
" parameters found with `enforce_stationarity`"
" set to True."
)
# 4. Errors
if self.error_order == 0:
if self.error_cov_type == "scalar":
params[self._params_error_cov] = endog.var(axis=0).mean()
elif self.error_cov_type == "diagonal":
params[self._params_error_cov] = endog.var(axis=0)
elif self.error_cov_type == "unstructured":
cov_factor = np.diag(endog.std(axis=0))
params[self._params_error_cov] = cov_factor[
self._idx_lower_error_cov
].ravel()
else:
mod_errors = VAR(endog)
res_errors = mod_errors.fit(maxlags=self.error_order, ic=None, trend="nc")
# Test for stationarity
coefficient_matrices = (
(
np.array(res_errors.params.T)
.ravel()
.reshape(self.k_endog * self.error_order, self.k_endog)
.T
)
.reshape(self.k_endog, self.k_endog, self.error_order)
.T
)
stationary = is_invertible([1] + list(-coefficient_matrices))
if not stationary and self.enforce_stationarity:
raise ValueError(
"Non-stationary starting error autoregressive"
" parameters found with"
" `enforce_stationarity` set to True."
)
# Get the error autoregressive parameters
if self.error_var:
params[self._params_error_transition] = np.array(
res_errors.params.T
).ravel()
else:
# In the case of individual autoregressions, extract just the
# diagonal elements
# TODO: can lead to explosive parameterizations
params[self._params_error_transition] = res_errors.params.T[
self._idx_error_diag
]
# Get the error covariance parameters
if self.error_cov_type == "scalar":
params[self._params_error_cov] = res_errors.sigma_u.diagonal().mean()
elif self.error_cov_type == "diagonal":
params[self._params_error_cov] = res_errors.sigma_u.diagonal()
elif self.error_cov_type == "unstructured":
try:
cov_factor = np.linalg.cholesky(res_errors.sigma_u)
except np.linalg.LinAlgError:
cov_factor = np.eye(res_errors.sigma_u.shape[0]) * (
res_errors.sigma_u.diagonal().mean() ** 0.5
)
cov_factor = np.eye(res_errors.sigma_u.shape[0]) * (
res_errors.sigma_u.diagonal().mean() ** 0.5
)
params[self._params_error_cov] = cov_factor[
self._idx_lower_error_cov
].ravel()
return params
|
def start_params(self):
params = np.zeros(self.k_params, dtype=np.float64)
endog = self.endog.copy()
# 1. Factor loadings (estimated via PCA)
if self.k_factors > 0:
# Use principal components + OLS as starting values
res_pca = PCA(endog, ncomp=self.k_factors)
mod_ols = OLS(endog, res_pca.factors)
res_ols = mod_ols.fit()
# Using OLS params for the loadings tends to gives higher starting
# log-likelihood.
params[self._params_loadings] = res_ols.params.T.ravel()
# params[self._params_loadings] = res_pca.loadings.ravel()
# However, using res_ols.resid tends to causes non-invertible
# starting VAR coefficients for error VARs
# endog = res_ols.resid
endog = endog - np.dot(res_pca.factors, res_pca.loadings.T)
# 2. Exog (OLS on residuals)
if self.k_exog > 0:
mod_ols = OLS(endog, exog=self.exog)
res_ols = mod_ols.fit()
# In the form: beta.x1.y1, beta.x2.y1, beta.x1.y2, ...
params[self._params_exog] = res_ols.params.T.ravel()
endog = res_ols.resid
# 3. Factors (VAR on res_pca.factors)
stationary = True
if self.k_factors > 1 and self.factor_order > 0:
# 3a. VAR transition (OLS on factors estimated via PCA)
mod_factors = VAR(res_pca.factors)
res_factors = mod_factors.fit(maxlags=self.factor_order, ic=None, trend="nc")
# Save the parameters
params[self._params_factor_transition] = res_factors.params.T.ravel()
# Test for stationarity
coefficient_matrices = (
(
params[self._params_factor_transition]
.reshape(self.k_factors * self.factor_order, self.k_factors)
.T
)
.reshape(self.k_factors, self.k_factors, self.factor_order)
.T
)
stationary = is_invertible([1] + list(-coefficient_matrices))
elif self.k_factors > 0 and self.factor_order > 0:
# 3b. AR transition
Y = res_pca.factors[self.factor_order :]
X = lagmat(res_pca.factors, self.factor_order, trim="both")
params_ar = np.linalg.pinv(X).dot(Y)
stationary = is_invertible(np.r_[1, -params_ar.squeeze()])
params[self._params_factor_transition] = params_ar[:, 0]
# Check for stationarity
if not stationary and self.enforce_stationarity:
raise ValueError(
"Non-stationary starting autoregressive"
" parameters found with `enforce_stationarity`"
" set to True."
)
# 4. Errors
if self.error_order == 0:
if self.error_cov_type == "scalar":
params[self._params_error_cov] = endog.var(axis=0).mean()
elif self.error_cov_type == "diagonal":
params[self._params_error_cov] = endog.var(axis=0)
elif self.error_cov_type == "unstructured":
cov_factor = np.diag(endog.std(axis=0))
params[self._params_error_cov] = cov_factor[
self._idx_lower_error_cov
].ravel()
else:
mod_errors = VAR(endog)
res_errors = mod_errors.fit(maxlags=self.error_order, ic=None, trend="nc")
# Test for stationarity
coefficient_matrices = (
(
np.array(res_errors.params.T)
.ravel()
.reshape(self.k_endog * self.error_order, self.k_endog)
.T
)
.reshape(self.k_endog, self.k_endog, self.error_order)
.T
)
stationary = is_invertible([1] + list(-coefficient_matrices))
if not stationary and self.enforce_stationarity:
raise ValueError(
"Non-stationary starting error autoregressive"
" parameters found with"
" `enforce_stationarity` set to True."
)
# Get the error autoregressive parameters
if self.error_var:
params[self._params_error_transition] = np.array(
res_errors.params.T
).ravel()
else:
# In the case of individual autoregressions, extract just the
# diagonal elements
# TODO: can lead to explosive parameterizations
params[self._params_error_transition] = res_errors.params.T[
self._idx_error_diag
]
# Get the error covariance parameters
if self.error_cov_type == "scalar":
params[self._params_error_cov] = res_errors.sigma_u.diagonal().mean()
elif self.error_cov_type == "diagonal":
params[self._params_error_cov] = res_errors.sigma_u.diagonal()
elif self.error_cov_type == "unstructured":
try:
cov_factor = np.linalg.cholesky(res_errors.sigma_u)
except np.linalg.LinAlgError:
cov_factor = np.eye(res_errors.sigma_u.shape[0]) * (
res_errors.sigma_u.diagonal().mean() ** 0.5
)
cov_factor = np.eye(res_errors.sigma_u.shape[0]) * (
res_errors.sigma_u.diagonal().mean() ** 0.5
)
params[self._params_error_cov] = cov_factor[
self._idx_lower_error_cov
].ravel()
return params
|
https://github.com/statsmodels/statsmodels/issues/6230
|
[ 624.6127141 27.2029389 3732.25387731 6.90035198 0.77157448]
---------------------------------------------------------------------------
MissingDataError Traceback (most recent call last)
<ipython-input-15-f70718facc54> in <module>
9 endog2.iloc[4, :] = np.nan
10 mod = sm.tsa.DynamicFactor(endog2, k_factors=1, factor_order=1)
---> 11 print(mod.start_params)
~/projects/statsmodels/statsmodels/tsa/statespace/dynamic_factor.py in start_params(self)
451 # Use principal components + OLS as starting values
452 res_pca = PCA(endog, ncomp=self.k_factors)
--> 453 mod_ols = OLS(endog, res_pca.factors)
454 res_ols = mod_ols.fit()
...
MissingDataError: exog contains inf or nans
|
MissingDataError
|
def start_params(self):
params = np.zeros(self.k_params, dtype=np.float64)
# A. Run a multivariate regression to get beta estimates
endog = pd.DataFrame(self.endog.copy())
endog = endog.interpolate()
endog = endog.fillna(method="backfill").values
exog = None
if self.k_trend > 0 and self.k_exog > 0:
exog = np.c_[self._trend_data, self.exog]
elif self.k_trend > 0:
exog = self._trend_data
elif self.k_exog > 0:
exog = self.exog
# Although the Kalman filter can deal with missing values in endog,
# conditional sum of squares cannot
if np.any(np.isnan(endog)):
mask = ~np.any(np.isnan(endog), axis=1)
endog = endog[mask]
if exog is not None:
exog = exog[mask]
# Regression and trend effects via OLS
trend_params = np.zeros(0)
exog_params = np.zeros(0)
if self.k_trend > 0 or self.k_exog > 0:
trendexog_params = np.linalg.pinv(exog).dot(endog)
endog -= np.dot(exog, trendexog_params)
if self.k_trend > 0:
trend_params = trendexog_params[: self.k_trend].T
if self.k_endog > 0:
exog_params = trendexog_params[self.k_trend :].T
# B. Run a VAR model on endog to get trend, AR parameters
ar_params = []
k_ar = self.k_ar if self.k_ar > 0 else 1
mod_ar = var_model.VAR(endog)
res_ar = mod_ar.fit(maxlags=k_ar, ic=None, trend="nc")
if self.k_ar > 0:
ar_params = np.array(res_ar.params).T.ravel()
endog = res_ar.resid
# Test for stationarity
if self.k_ar > 0 and self.enforce_stationarity:
coefficient_matrices = (
(ar_params.reshape(self.k_endog * self.k_ar, self.k_endog).T)
.reshape(self.k_endog, self.k_endog, self.k_ar)
.T
)
stationary = is_invertible([1] + list(-coefficient_matrices))
if not stationary:
warn(
"Non-stationary starting autoregressive parameters"
" found. Using zeros as starting parameters."
)
ar_params *= 0
# C. Run a VAR model on the residuals to get MA parameters
ma_params = []
if self.k_ma > 0:
mod_ma = var_model.VAR(endog)
res_ma = mod_ma.fit(maxlags=self.k_ma, ic=None, trend="nc")
ma_params = np.array(res_ma.params.T).ravel()
# Test for invertibility
if self.enforce_invertibility:
coefficient_matrices = (
(ma_params.reshape(self.k_endog * self.k_ma, self.k_endog).T)
.reshape(self.k_endog, self.k_endog, self.k_ma)
.T
)
invertible = is_invertible([1] + list(-coefficient_matrices))
if not invertible:
warn(
"Non-stationary starting moving-average parameters"
" found. Using zeros as starting parameters."
)
ma_params *= 0
# Transform trend / exog params from mean form to intercept form
if self.k_ar > 0 and (self.k_trend > 0 or self.mle_regression):
coefficient_matrices = (
(ar_params.reshape(self.k_endog * self.k_ar, self.k_endog).T)
.reshape(self.k_endog, self.k_endog, self.k_ar)
.T
)
tmp = np.eye(self.k_endog) - np.sum(coefficient_matrices, axis=0)
if self.k_trend > 0:
trend_params = np.dot(tmp, trend_params)
if self.mle_regression > 0:
exog_params = np.dot(tmp, exog_params)
# 1. Intercept terms
if self.k_trend > 0:
params[self._params_trend] = trend_params.ravel()
# 2. AR terms
if self.k_ar > 0:
params[self._params_ar] = ar_params
# 3. MA terms
if self.k_ma > 0:
params[self._params_ma] = ma_params
# 4. Regression terms
if self.mle_regression:
params[self._params_regression] = exog_params.ravel()
# 5. State covariance terms
if self.error_cov_type == "diagonal":
params[self._params_state_cov] = res_ar.sigma_u.diagonal()
elif self.error_cov_type == "unstructured":
cov_factor = np.linalg.cholesky(res_ar.sigma_u)
params[self._params_state_cov] = cov_factor[self._idx_lower_state_cov].ravel()
# 5. Measurement error variance terms
if self.measurement_error:
if self.k_ma > 0:
params[self._params_obs_cov] = res_ma.sigma_u.diagonal()
else:
params[self._params_obs_cov] = res_ar.sigma_u.diagonal()
return params
|
def start_params(self):
params = np.zeros(self.k_params, dtype=np.float64)
# A. Run a multivariate regression to get beta estimates
endog = pd.DataFrame(self.endog.copy())
endog = endog.interpolate()
endog = endog.fillna(method="backfill").values
exog = None
if self.k_trend > 0 and self.k_exog > 0:
exog = np.c_[self._trend_data, self.exog]
elif self.k_trend > 0:
exog = self._trend_data
elif self.k_exog > 0:
exog = self.exog
# Although the Kalman filter can deal with missing values in endog,
# conditional sum of squares cannot
if np.any(np.isnan(endog)):
mask = ~np.any(np.isnan(endog), axis=1)
endog = endog[mask]
if exog is not None:
exog = exog[mask]
# Regression and trend effects via OLS
trend_params = np.zeros(0)
exog_params = np.zeros(0)
if self.k_trend > 0 or self.k_exog > 0:
trendexog_params = np.linalg.pinv(exog).dot(endog)
endog -= np.dot(exog, trendexog_params)
if self.k_trend > 0:
trend_params = trendexog_params[: self.k_trend].T
if self.k_endog > 0:
exog_params = trendexog_params[self.k_trend :].T
# B. Run a VAR model on endog to get trend, AR parameters
ar_params = []
k_ar = self.k_ar if self.k_ar > 0 else 1
mod_ar = var_model.VAR(endog)
res_ar = mod_ar.fit(maxlags=k_ar, ic=None, trend="nc")
if self.k_ar > 0:
ar_params = np.array(res_ar.params).T.ravel()
endog = res_ar.resid
# Test for stationarity
if self.k_ar > 0 and self.enforce_stationarity:
coefficient_matrices = (
(ar_params.reshape(self.k_endog * self.k_ar, self.k_endog).T)
.reshape(self.k_endog, self.k_endog, self.k_ar)
.T
)
stationary = is_invertible([1] + list(-coefficient_matrices))
if not stationary:
warn(
"Non-stationary starting autoregressive parameters"
" found. Using zeros as starting parameters."
)
ar_params *= 0
# C. Run a VAR model on the residuals to get MA parameters
ma_params = []
if self.k_ma > 0:
mod_ma = var_model.VAR(endog)
res_ma = mod_ma.fit(maxlags=self.k_ma, ic=None, trend="nc")
ma_params = np.array(res_ma.params.T).ravel()
# Test for invertibility
if self.enforce_invertibility:
coefficient_matrices = (
(ma_params.reshape(self.k_endog * self.k_ma, self.k_endog).T)
.reshape(self.k_endog, self.k_endog, self.k_ma)
.T
)
invertible = is_invertible([1] + list(-coefficient_matrices))
if not invertible:
warn(
"Non-stationary starting moving-average parameters"
" found. Using zeros as starting parameters."
)
ma_params *= 0
# Transform trend / exog params from mean form to intercept form
if self.k_ar > 0 and self.k_trend > 0 or self.mle_regression:
coefficient_matrices = (
(ar_params.reshape(self.k_endog * self.k_ar, self.k_endog).T)
.reshape(self.k_endog, self.k_endog, self.k_ar)
.T
)
tmp = np.eye(self.k_endog) - np.sum(coefficient_matrices, axis=0)
if self.k_trend > 0:
trend_params = np.dot(tmp, trend_params)
if self.mle_regression > 0:
exog_params = np.dot(tmp, exog_params)
# 1. Intercept terms
if self.k_trend > 0:
params[self._params_trend] = trend_params.ravel()
# 2. AR terms
if self.k_ar > 0:
params[self._params_ar] = ar_params
# 3. MA terms
if self.k_ma > 0:
params[self._params_ma] = ma_params
# 4. Regression terms
if self.mle_regression:
params[self._params_regression] = exog_params.ravel()
# 5. State covariance terms
if self.error_cov_type == "diagonal":
params[self._params_state_cov] = res_ar.sigma_u.diagonal()
elif self.error_cov_type == "unstructured":
cov_factor = np.linalg.cholesky(res_ar.sigma_u)
params[self._params_state_cov] = cov_factor[self._idx_lower_state_cov].ravel()
# 5. Measurement error variance terms
if self.measurement_error:
if self.k_ma > 0:
params[self._params_obs_cov] = res_ma.sigma_u.diagonal()
else:
params[self._params_obs_cov] = res_ar.sigma_u.diagonal()
return params
|
https://github.com/statsmodels/statsmodels/issues/6127
|
AttributeError Traceback (most recent call last)
<ipython-input-150-40d241e2864d> in <module>
2 model = VARMAX(train,exog=exog_train, order=order, trend=trend, enforce_stationarity=False, enforce_invertibility=False)
3 # fit model
----> 4 model.start_params
/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/statespace/varmax.py in start_params(self)
384 # Transform trend / exog params from mean form to intercept form
385 if self.k_ar > 0 and self.k_trend > 0 or self.mle_regression:
--> 386 print(ar_params)
387 tmp1= np.array(ar_params).reshape(
388 self.k_endog * self.k_ar, self.k_endog
AttributeError: 'list' object has no attribute 'reshape'
|
AttributeError
|
def fit(
self,
maxlag=None,
method="cmle",
ic=None,
trend="c",
transparams=True,
start_params=None,
solver="lbfgs",
maxiter=35,
full_output=1,
disp=1,
callback=None,
**kwargs,
):
"""
Fit the unconditional maximum likelihood of an AR(p) process.
Parameters
----------
maxlag : int
If `ic` is None, then maxlag is the lag length used in fit. If
`ic` is specified then maxlag is the highest lag order used to
select the correct lag order. If maxlag is None, the default is
round(12*(nobs/100.)**(1/4.))
method : str {'cmle', 'mle'}, optional
cmle - Conditional maximum likelihood using OLS
mle - Unconditional (exact) maximum likelihood. See `solver`
and the Notes.
ic : str {'aic','bic','hic','t-stat'}
Criterion used for selecting the optimal lag length.
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
hqic - Hannan-Quinn Information Criterion
If any of the information criteria are selected, the lag length
which results in the lowest value is selected. If t-stat, the
model starts with maxlag and drops a lag until the highest lag
has a t-stat that is significant at the 95 % level.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' - include constant.
'nc' - no constant.
The below can be specified if method is 'mle'
transparams : bool, optional
Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980).
start_params : array_like, optional
A first guess on the parameters. Default is cmle estimates.
solver : str or None, optional
Solver to be used if method is 'mle'. The default is 'lbfgs'
(limited memory Broyden-Fletcher-Goldfarb-Shanno). Other choices
are 'bfgs', 'newton' (Newton-Raphson), 'nm' (Nelder-Mead),
'cg' - (conjugate gradient), 'ncg' (non-conjugate gradient),
and 'powell'.
maxiter : int, optional
The maximum number of function evaluations. Default is 35.
tol : float
The convergence tolerance. Default is 1e-08.
full_output : bool, optional
If True, all output from solver will be available in
the Results object's mle_retvals attribute. Output is dependent
on the solver. See Notes for more information.
disp : bool, optional
If True, convergence information is output.
callback : function, optional
Called after each iteration as callback(xk) where xk is the current
parameter vector.
kwargs
See Notes for keyword arguments that can be passed to fit.
References
----------
Jones, R.H. 1980 "Maximum likelihood fitting of ARMA models to time
series with missing observations." `Technometrics`. 22.3.
389-95.
See Also
--------
statsmodels.base.model.LikelihoodModel.fit
"""
start_params = array_like(start_params, "start_params", ndim=1, optional=True)
method = method.lower()
if method not in ["cmle", "mle"]:
raise ValueError("Method %s not recognized" % method)
self.method = method
self.trend = trend
self.transparams = transparams
nobs = len(self.endog) # overwritten if method is 'cmle'
endog = self.endog
# The parameters are no longer allowed to change in an instance
fit_params = (maxlag, method, ic, trend)
if self._fit_params is not None and self._fit_params != fit_params:
raise RuntimeError(REPEATED_FIT_ERROR.format(*self._fit_params))
if maxlag is None:
maxlag = int(round(12 * (nobs / 100.0) ** (1 / 4.0)))
k_ar = maxlag # stays this if ic is None
# select lag length
if ic is not None:
ic = ic.lower()
if ic not in ["aic", "bic", "hqic", "t-stat"]:
raise ValueError("ic option %s not understood" % ic)
k_ar = self.select_order(k_ar, ic, trend, method)
self.k_ar = k_ar # change to what was chosen by ic
# redo estimation for best lag
# make LHS
Y = endog[k_ar:, :]
# make lagged RHS
X = self._stackX(k_ar, trend) # sets self.k_trend
k_trend = self.k_trend
self.exog_names = util.make_lag_names(self.endog_names, k_ar, k_trend)
self.Y = Y
self.X = X
if method == "cmle": # do OLS
arfit = OLS(Y, X).fit()
params = arfit.params
self.nobs = nobs - k_ar
self.sigma2 = arfit.ssr / arfit.nobs # needed for predict fcasterr
else: # method == "mle"
solver = solver.lower()
self.nobs = nobs
if start_params is None:
start_params = OLS(Y, X).fit().params
else:
if len(start_params) != k_trend + k_ar:
raise ValueError(
"Length of start params is %d. There"
" are %d parameters." % (len(start_params), k_trend + k_ar)
)
start_params = self._invtransparams(start_params)
if solver == "lbfgs":
kwargs.setdefault("pgtol", 1e-8)
kwargs.setdefault("factr", 1e2)
kwargs.setdefault("m", 12)
kwargs.setdefault("approx_grad", True)
mlefit = super(AR, self).fit(
start_params=start_params,
method=solver,
maxiter=maxiter,
full_output=full_output,
disp=disp,
callback=callback,
**kwargs,
)
params = mlefit.params
if self.transparams:
params = self._transparams(params)
self.transparams = False # turn off now for other results
pinv_exog = np.linalg.pinv(X)
normalized_cov_params = np.dot(pinv_exog, pinv_exog.T)
arfit = ARResults(copy.copy(self), params, normalized_cov_params)
if method == "mle" and full_output:
arfit.mle_retvals = mlefit.mle_retvals
arfit.mle_settings = mlefit.mle_settings
# Set fit params since completed the fit
if self._fit_params is None:
self._fit_params = fit_params
return ARResultsWrapper(arfit)
|
def fit(
self,
maxlag=None,
method="cmle",
ic=None,
trend="c",
transparams=True,
start_params=None,
solver="lbfgs",
maxiter=35,
full_output=1,
disp=1,
callback=None,
**kwargs,
):
"""
Fit the unconditional maximum likelihood of an AR(p) process.
Parameters
----------
maxlag : int
If `ic` is None, then maxlag is the lag length used in fit. If
`ic` is specified then maxlag is the highest lag order used to
select the correct lag order. If maxlag is None, the default is
round(12*(nobs/100.)**(1/4.))
method : str {'cmle', 'mle'}, optional
cmle - Conditional maximum likelihood using OLS
mle - Unconditional (exact) maximum likelihood. See `solver`
and the Notes.
ic : str {'aic','bic','hic','t-stat'}
Criterion used for selecting the optimal lag length.
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
hqic - Hannan-Quinn Information Criterion
If any of the information criteria are selected, the lag length
which results in the lowest value is selected. If t-stat, the
model starts with maxlag and drops a lag until the highest lag
has a t-stat that is significant at the 95 % level.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' - include constant.
'nc' - no constant.
The below can be specified if method is 'mle'
transparams : bool, optional
Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980).
start_params : array_like, optional
A first guess on the parameters. Default is cmle estimates.
solver : str or None, optional
Solver to be used if method is 'mle'. The default is 'lbfgs'
(limited memory Broyden-Fletcher-Goldfarb-Shanno). Other choices
are 'bfgs', 'newton' (Newton-Raphson), 'nm' (Nelder-Mead),
'cg' - (conjugate gradient), 'ncg' (non-conjugate gradient),
and 'powell'.
maxiter : int, optional
The maximum number of function evaluations. Default is 35.
tol : float
The convergence tolerance. Default is 1e-08.
full_output : bool, optional
If True, all output from solver will be available in
the Results object's mle_retvals attribute. Output is dependent
on the solver. See Notes for more information.
disp : bool, optional
If True, convergence information is output.
callback : function, optional
Called after each iteration as callback(xk) where xk is the current
parameter vector.
kwargs
See Notes for keyword arguments that can be passed to fit.
References
----------
Jones, R.H. 1980 "Maximum likelihood fitting of ARMA models to time
series with missing observations." `Technometrics`. 22.3.
389-95.
See Also
--------
statsmodels.base.model.LikelihoodModel.fit
"""
start_params = array_like(start_params, "start_params", ndim=1, optional=True)
method = method.lower()
if method not in ["cmle", "mle"]:
raise ValueError("Method %s not recognized" % method)
self.method = method
self.trend = trend
self.transparams = transparams
nobs = len(self.endog) # overwritten if method is 'cmle'
endog = self.endog
# The parameters are no longer allowed to change in an instance
fit_params = (maxlag, method, ic, trend)
if self._fit_params is not None and self._fit_params != fit_params:
raise RuntimeError(REPEATED_FIT_ERROR.format(*self._fit_params))
if maxlag is None:
maxlag = int(round(12 * (nobs / 100.0) ** (1 / 4.0)))
k_ar = maxlag # stays this if ic is None
# select lag length
if ic is not None:
ic = ic.lower()
if ic not in ["aic", "bic", "hqic", "t-stat"]:
raise ValueError("ic option %s not understood" % ic)
k_ar = self.select_order(k_ar, ic, trend, method)
self.k_ar = k_ar # change to what was chosen by ic
# redo estimation for best lag
# make LHS
Y = endog[k_ar:, :]
# make lagged RHS
X = self._stackX(k_ar, trend) # sets self.k_trend
k_trend = self.k_trend
self.exog_names = util.make_lag_names(self.endog_names, k_ar, k_trend)
self.Y = Y
self.X = X
if method == "cmle": # do OLS
arfit = OLS(Y, X).fit()
params = arfit.params
self.nobs = nobs - k_ar
self.sigma2 = arfit.ssr / arfit.nobs # needed for predict fcasterr
else: # method == "mle"
solver = solver.lower()
self.nobs = nobs
if start_params is None:
start_params = OLS(Y, X).fit().params
else:
if len(start_params) != k_trend + k_ar:
raise ValueError(
"Length of start params is %d. There"
" are %d parameters." % (len(start_params), k_trend + k_ar)
)
start_params = self._invtransparams(start_params)
if solver == "lbfgs":
kwargs.setdefault("pgtol", 1e-8)
kwargs.setdefault("factr", 1e2)
kwargs.setdefault("m", 12)
kwargs.setdefault("approx_grad", True)
mlefit = super(AR, self).fit(
start_params=start_params,
method=solver,
maxiter=maxiter,
full_output=full_output,
disp=disp,
callback=callback,
**kwargs,
)
params = mlefit.params
if self.transparams:
params = self._transparams(params)
self.transparams = False # turn off now for other results
pinv_exog = np.linalg.pinv(X)
normalized_cov_params = np.dot(pinv_exog, pinv_exog.T)
arfit = ARResults(self, params, normalized_cov_params)
if method == "mle" and full_output:
arfit.mle_retvals = mlefit.mle_retvals
arfit.mle_settings = mlefit.mle_settings
# Set fit params since completed the fit
if self._fit_params is None:
self._fit_params = fit_params
return ARResultsWrapper(arfit)
|
https://github.com/statsmodels/statsmodels/issues/947
|
res.t_test(np.eye(len(res.params)))
Traceback (most recent call last):
File "<pyshell#0>", line 1, in <module>
res.t_test(np.eye(len(res.params)))
File "e:\josef\eclipsegworkspace\statsmodels-git\statsmodels-all-new2\statsmodels\statsmodels\base\model.py", line 1137, in t_test
raise ValueError('Need covariance of parameters for computing '
ValueError: Need covariance of parameters for computing T statistics
|
ValueError
|
def fit(
self,
start_params=None,
trend="c",
method="css-mle",
transparams=True,
solver="lbfgs",
maxiter=500,
full_output=1,
disp=5,
callback=None,
start_ar_lags=None,
**kwargs,
):
"""
Fits ARMA(p,q) model using exact maximum likelihood via Kalman filter.
Parameters
----------
start_params : array_like, optional
Starting parameters for ARMA(p,q). If None, the default is given
by ARMA._fit_start_params. See there for more information.
transparams : bool, optional
Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980). If False,
no checking for stationarity or invertibility is done.
method : str {'css-mle','mle','css'}
This is the loglikelihood to maximize. If "css-mle", the
conditional sum of squares likelihood is maximized and its values
are used as starting values for the computation of the exact
likelihood via the Kalman filter. If "mle", the exact likelihood
is maximized via the Kalman Filter. If "css" the conditional sum
of squares likelihood is maximized. All three methods use
`start_params` as starting parameters. See above for more
information.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' includes constant,
'nc' no constant.
solver : str or None, optional
Solver to be used. The default is 'lbfgs' (limited memory
Broyden-Fletcher-Goldfarb-Shanno). Other choices are 'bfgs',
'newton' (Newton-Raphson), 'nm' (Nelder-Mead), 'cg' -
(conjugate gradient), 'ncg' (non-conjugate gradient), and
'powell'. By default, the limited memory BFGS uses m=12 to
approximate the Hessian, projected gradient tolerance of 1e-8 and
factr = 1e2. You can change these by using kwargs.
maxiter : int, optional
The maximum number of function evaluations. Default is 500.
tol : float
The convergence tolerance. Default is 1e-08.
full_output : bool, optional
If True, all output from solver will be available in
the Results object's mle_retvals attribute. Output is dependent
on the solver. See Notes for more information.
disp : int, optional
If True, convergence information is printed. For the default
l_bfgs_b solver, disp controls the frequency of the output during
the iterations. disp < 0 means no output in this case.
callback : function, optional
Called after each iteration as callback(xk) where xk is the current
parameter vector.
start_ar_lags : int, optional
Parameter for fitting start_params. When fitting start_params,
residuals are obtained from an AR fit, then an ARMA(p,q) model is
fit via OLS using these residuals. If start_ar_lags is None, fit
an AR process according to best BIC. If start_ar_lags is not None,
fits an AR process with a lag length equal to start_ar_lags.
See ARMA._fit_start_params_hr for more information.
kwargs
See Notes for keyword arguments that can be passed to fit.
Returns
-------
statsmodels.tsa.arima_model.ARMAResults class
See Also
--------
statsmodels.base.model.LikelihoodModel.fit : for more information
on using the solvers.
ARMAResults : results class returned by fit
Notes
-----
If fit by 'mle', it is assumed for the Kalman Filter that the initial
unknown state is zero, and that the initial variance is
P = dot(inv(identity(m**2)-kron(T,T)),dot(R,R.T).ravel('F')).reshape(r,
r, order = 'F')
"""
if self._fit_params is not None:
fp = (trend, method)
if self._fit_params != fp:
raise RuntimeError(REPEATED_FIT_ERROR.format(*fp, mod="ARMA"))
k_ar = self.k_ar
k_ma = self.k_ma
# enforce invertibility
self.transparams = transparams
endog, exog = self.endog, self.exog
k_exog = self.k_exog
self.nobs = len(endog) # this is overwritten if method is 'css'
# (re)set trend and handle exogenous variables
# always pass original exog
if hasattr(self, "k_trend"):
k_trend = self.k_trend
exog = self.exog
else:
# Ensures only call once per ARMA instance
k_trend, exog = _make_arma_exog(endog, self.exog, trend)
# Check has something to estimate
if k_ar == 0 and k_ma == 0 and k_trend == 0 and k_exog == 0:
raise ValueError(
"Estimation requires the inclusion of least one "
"AR term, MA term, a constant or an exogenous "
"variable."
)
# check again now that we know the trend
_check_estimable(len(endog), k_ar + k_ma + k_exog + k_trend)
self.k_trend = k_trend
self.exog = exog # overwrites original exog from __init__
# (re)set names for this model
self.exog_names = _make_arma_names(
self.data, k_trend, (k_ar, k_ma), self._orig_exog_names
)
k = k_trend + k_exog
# choose objective function
if k_ma == 0 and k_ar == 0:
method = "css" # Always CSS when no AR or MA terms
self.method = method = method.lower()
# adjust nobs for css
if method == "css":
self.nobs = len(self.endog) - k_ar
if start_params is not None:
start_params = array_like(start_params, "start_params")
else: # estimate starting parameters
start_params = self._fit_start_params((k_ar, k_ma, k), method, start_ar_lags)
if transparams: # transform initial parameters to ensure invertibility
start_params = self._invtransparams(start_params)
if solver == "lbfgs":
kwargs.setdefault("pgtol", 1e-8)
kwargs.setdefault("factr", 1e2)
kwargs.setdefault("m", 12)
kwargs.setdefault("approx_grad", True)
mlefit = super(ARMA, self).fit(
start_params,
method=solver,
maxiter=maxiter,
full_output=full_output,
disp=disp,
callback=callback,
**kwargs,
)
params = mlefit.params
if transparams: # transform parameters back
params = self._transparams(params)
self.transparams = False # so methods do not expect transf.
normalized_cov_params = None # TODO: fix this
armafit = ARMAResults(copy.copy(self), params, normalized_cov_params)
armafit.mle_retvals = mlefit.mle_retvals
armafit.mle_settings = mlefit.mle_settings
# Save core fit parameters for future checks
self._fit_params = (trend, method)
return ARMAResultsWrapper(armafit)
|
def fit(
self,
start_params=None,
trend="c",
method="css-mle",
transparams=True,
solver="lbfgs",
maxiter=500,
full_output=1,
disp=5,
callback=None,
start_ar_lags=None,
**kwargs,
):
"""
Fits ARMA(p,q) model using exact maximum likelihood via Kalman filter.
Parameters
----------
start_params : array_like, optional
Starting parameters for ARMA(p,q). If None, the default is given
by ARMA._fit_start_params. See there for more information.
transparams : bool, optional
Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980). If False,
no checking for stationarity or invertibility is done.
method : str {'css-mle','mle','css'}
This is the loglikelihood to maximize. If "css-mle", the
conditional sum of squares likelihood is maximized and its values
are used as starting values for the computation of the exact
likelihood via the Kalman filter. If "mle", the exact likelihood
is maximized via the Kalman Filter. If "css" the conditional sum
of squares likelihood is maximized. All three methods use
`start_params` as starting parameters. See above for more
information.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' includes constant,
'nc' no constant.
solver : str or None, optional
Solver to be used. The default is 'lbfgs' (limited memory
Broyden-Fletcher-Goldfarb-Shanno). Other choices are 'bfgs',
'newton' (Newton-Raphson), 'nm' (Nelder-Mead), 'cg' -
(conjugate gradient), 'ncg' (non-conjugate gradient), and
'powell'. By default, the limited memory BFGS uses m=12 to
approximate the Hessian, projected gradient tolerance of 1e-8 and
factr = 1e2. You can change these by using kwargs.
maxiter : int, optional
The maximum number of function evaluations. Default is 500.
tol : float
The convergence tolerance. Default is 1e-08.
full_output : bool, optional
If True, all output from solver will be available in
the Results object's mle_retvals attribute. Output is dependent
on the solver. See Notes for more information.
disp : int, optional
If True, convergence information is printed. For the default
l_bfgs_b solver, disp controls the frequency of the output during
the iterations. disp < 0 means no output in this case.
callback : function, optional
Called after each iteration as callback(xk) where xk is the current
parameter vector.
start_ar_lags : int, optional
Parameter for fitting start_params. When fitting start_params,
residuals are obtained from an AR fit, then an ARMA(p,q) model is
fit via OLS using these residuals. If start_ar_lags is None, fit
an AR process according to best BIC. If start_ar_lags is not None,
fits an AR process with a lag length equal to start_ar_lags.
See ARMA._fit_start_params_hr for more information.
kwargs
See Notes for keyword arguments that can be passed to fit.
Returns
-------
statsmodels.tsa.arima_model.ARMAResults class
See Also
--------
statsmodels.base.model.LikelihoodModel.fit : for more information
on using the solvers.
ARMAResults : results class returned by fit
Notes
-----
If fit by 'mle', it is assumed for the Kalman Filter that the initial
unknown state is zero, and that the initial variance is
P = dot(inv(identity(m**2)-kron(T,T)),dot(R,R.T).ravel('F')).reshape(r,
r, order = 'F')
"""
if self._fit_params is not None:
fp = (trend, method)
if self._fit_params != fp:
raise RuntimeError(REPEATED_FIT_ERROR.format(*fp, mod="ARMA"))
k_ar = self.k_ar
k_ma = self.k_ma
# enforce invertibility
self.transparams = transparams
endog, exog = self.endog, self.exog
k_exog = self.k_exog
self.nobs = len(endog) # this is overwritten if method is 'css'
# (re)set trend and handle exogenous variables
# always pass original exog
if hasattr(self, "k_trend"):
k_trend = self.k_trend
exog = self.exog
else:
# Ensures only call once per ARMA instance
k_trend, exog = _make_arma_exog(endog, self.exog, trend)
# Check has something to estimate
if k_ar == 0 and k_ma == 0 and k_trend == 0 and k_exog == 0:
raise ValueError(
"Estimation requires the inclusion of least one "
"AR term, MA term, a constant or an exogenous "
"variable."
)
# check again now that we know the trend
_check_estimable(len(endog), k_ar + k_ma + k_exog + k_trend)
self.k_trend = k_trend
self.exog = exog # overwrites original exog from __init__
# (re)set names for this model
self.exog_names = _make_arma_names(
self.data, k_trend, (k_ar, k_ma), self._orig_exog_names
)
k = k_trend + k_exog
# choose objective function
if k_ma == 0 and k_ar == 0:
method = "css" # Always CSS when no AR or MA terms
self.method = method = method.lower()
# adjust nobs for css
if method == "css":
self.nobs = len(self.endog) - k_ar
if start_params is not None:
start_params = array_like(start_params, "start_params")
else: # estimate starting parameters
start_params = self._fit_start_params((k_ar, k_ma, k), method, start_ar_lags)
if transparams: # transform initial parameters to ensure invertibility
start_params = self._invtransparams(start_params)
if solver == "lbfgs":
kwargs.setdefault("pgtol", 1e-8)
kwargs.setdefault("factr", 1e2)
kwargs.setdefault("m", 12)
kwargs.setdefault("approx_grad", True)
mlefit = super(ARMA, self).fit(
start_params,
method=solver,
maxiter=maxiter,
full_output=full_output,
disp=disp,
callback=callback,
**kwargs,
)
params = mlefit.params
if transparams: # transform parameters back
params = self._transparams(params)
self.transparams = False # so methods do not expect transf.
normalized_cov_params = None # TODO: fix this
armafit = ARMAResults(self, params, normalized_cov_params)
armafit.mle_retvals = mlefit.mle_retvals
armafit.mle_settings = mlefit.mle_settings
# Save core fit parameters for future checks
self._fit_params = (trend, method)
return ARMAResultsWrapper(armafit)
|
https://github.com/statsmodels/statsmodels/issues/947
|
res.t_test(np.eye(len(res.params)))
Traceback (most recent call last):
File "<pyshell#0>", line 1, in <module>
res.t_test(np.eye(len(res.params)))
File "e:\josef\eclipsegworkspace\statsmodels-git\statsmodels-all-new2\statsmodels\statsmodels\base\model.py", line 1137, in t_test
raise ValueError('Need covariance of parameters for computing '
ValueError: Need covariance of parameters for computing T statistics
|
ValueError
|
def attach_cov(self, result):
return DataFrame(result, index=self.cov_names, columns=self.cov_names)
|
def attach_cov(self, result):
return DataFrame(result, index=self.param_names, columns=self.param_names)
|
https://github.com/statsmodels/statsmodels/issues/2270
|
In [34]: results._results.cov_params.shape
Out[34]: (36, 36)
In [37]: results.cov_params
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-37-14357748fa96> in <module>()
<snip>
ValueError: Shape of passed values is (36, 36), indices imply (12, 12)
|
ValueError
|
def fit(self, maxlags=None, method="ols", ic=None, trend="c", verbose=False):
# todo: this code is only supporting deterministic terms as exog.
# This means that all exog-variables have lag 0. If dealing with
# different exogs is necessary, a `lags_exog`-parameter might make
# sense (e.g. a sequence of ints specifying lags).
# Alternatively, leading zeros for exog-variables with smaller number
# of lags than the maximum number of exog-lags might work.
"""
Fit the VAR model
Parameters
----------
maxlags : int
Maximum number of lags to check for order selection, defaults to
12 * (nobs/100.)**(1./4), see select_order function
method : {'ols'}
Estimation method to use
ic : {'aic', 'fpe', 'hqic', 'bic', None}
Information criterion to use for VAR order selection.
aic : Akaike
fpe : Final prediction error
hqic : Hannan-Quinn
bic : Bayesian a.k.a. Schwarz
verbose : bool, default False
Print order selection output to the screen
trend : str {"c", "ct", "ctt", "nc"}
"c" - add constant
"ct" - constant and trend
"ctt" - constant, linear and quadratic trend
"nc" - co constant, no trend
Note that these are prepended to the columns of the dataset.
Notes
-----
Lütkepohl pp. 146-153
Returns
-------
est : VARResultsWrapper
"""
lags = maxlags
if trend not in ["c", "ct", "ctt", "nc"]:
raise ValueError("trend '{}' not supported for VAR".format(trend))
if ic is not None:
selections = self.select_order(maxlags=maxlags)
if not hasattr(selections, ic):
raise ValueError(
"%s not recognized, must be among %s" % (ic, sorted(selections))
)
lags = getattr(selections, ic)
if verbose:
print(selections)
print("Using %d based on %s criterion" % (lags, ic))
else:
if lags is None:
lags = 1
k_trend = util.get_trendorder(trend)
self.exog_names = util.make_lag_names(self.endog_names, lags, k_trend)
self.nobs = self.n_totobs - lags
# add exog to data.xnames (necessary because the length of xnames also
# determines the allowed size of VARResults.params)
if self.exog is not None:
x_names_to_add = [("exog%d" % i) for i in range(self.exog.shape[1])]
self.data.xnames = (
self.data.xnames[:k_trend] + x_names_to_add + self.data.xnames[k_trend:]
)
self.data.cov_names = [
".".join((str(yn), str(xn)))
for xn in self.data.xnames
for yn in self.data.ynames
]
return self._estimate_var(lags, trend=trend)
|
def fit(self, maxlags=None, method="ols", ic=None, trend="c", verbose=False):
# todo: this code is only supporting deterministic terms as exog.
# This means that all exog-variables have lag 0. If dealing with
# different exogs is necessary, a `lags_exog`-parameter might make
# sense (e.g. a sequence of ints specifying lags).
# Alternatively, leading zeros for exog-variables with smaller number
# of lags than the maximum number of exog-lags might work.
"""
Fit the VAR model
Parameters
----------
maxlags : int
Maximum number of lags to check for order selection, defaults to
12 * (nobs/100.)**(1./4), see select_order function
method : {'ols'}
Estimation method to use
ic : {'aic', 'fpe', 'hqic', 'bic', None}
Information criterion to use for VAR order selection.
aic : Akaike
fpe : Final prediction error
hqic : Hannan-Quinn
bic : Bayesian a.k.a. Schwarz
verbose : bool, default False
Print order selection output to the screen
trend : str {"c", "ct", "ctt", "nc"}
"c" - add constant
"ct" - constant and trend
"ctt" - constant, linear and quadratic trend
"nc" - co constant, no trend
Note that these are prepended to the columns of the dataset.
Notes
-----
Lütkepohl pp. 146-153
Returns
-------
est : VARResultsWrapper
"""
lags = maxlags
if trend not in ["c", "ct", "ctt", "nc"]:
raise ValueError("trend '{}' not supported for VAR".format(trend))
if ic is not None:
selections = self.select_order(maxlags=maxlags)
if not hasattr(selections, ic):
raise ValueError(
"%s not recognized, must be among %s" % (ic, sorted(selections))
)
lags = getattr(selections, ic)
if verbose:
print(selections)
print("Using %d based on %s criterion" % (lags, ic))
else:
if lags is None:
lags = 1
k_trend = util.get_trendorder(trend)
self.exog_names = util.make_lag_names(self.endog_names, lags, k_trend)
self.nobs = self.n_totobs - lags
# add exog to data.xnames (necessary because the length of xnames also
# determines the allowed size of VARResults.params)
if self.exog is not None:
x_names_to_add = [("exog%d" % i) for i in range(self.exog.shape[1])]
self.data.xnames = (
self.data.xnames[:k_trend] + x_names_to_add + self.data.xnames[k_trend:]
)
return self._estimate_var(lags, trend=trend)
|
https://github.com/statsmodels/statsmodels/issues/2270
|
In [34]: results._results.cov_params.shape
Out[34]: (36, 36)
In [37]: results.cov_params
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-37-14357748fa96> in <module>()
<snip>
ValueError: Shape of passed values is (36, 36), indices imply (12, 12)
|
ValueError
|
def cov_params(self):
"""Estimated variance-covariance of model coefficients
Notes
-----
Covariance of vec(B), where B is the matrix
[params_for_deterministic_terms, A_1, ..., A_p] with the shape
(K x (Kp + number_of_deterministic_terms))
Adjusted to be an unbiased estimator
Ref: Lütkepohl p.74-75
"""
z = self.endog_lagged
return np.kron(scipy.linalg.inv(np.dot(z.T, z)), self.sigma_u)
|
def cov_params(self):
"""Estimated variance-covariance of model coefficients
Notes
-----
Covariance of vec(B), where B is the matrix
[params_for_deterministic_terms, A_1, ..., A_p] with the shape
(K x (Kp + number_of_deterministic_terms))
Adjusted to be an unbiased estimator
Ref: Lütkepohl p.74-75
"""
import warnings
warnings.warn(
"For consistency with other statmsodels models, "
"starting in version 0.11.0 `VARResults.cov_params` "
"will be a method instead of a property.",
category=FutureWarning,
)
z = self.endog_lagged
return np.kron(scipy.linalg.inv(np.dot(z.T, z)), self.sigma_u)
|
https://github.com/statsmodels/statsmodels/issues/2270
|
In [34]: results._results.cov_params.shape
Out[34]: (36, 36)
In [37]: results.cov_params
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-37-14357748fa96> in <module>()
<snip>
ValueError: Shape of passed values is (36, 36), indices imply (12, 12)
|
ValueError
|
def _cov_alpha(self):
"""
Estimated covariance matrix of model coefficients w/o exog
"""
# drop exog
kn = self.k_exog * self.neqs
return self.cov_params()[kn:, kn:]
|
def _cov_alpha(self):
"""
Estimated covariance matrix of model coefficients w/o exog
"""
# drop exog
return self._cov_params()[self.k_exog * self.neqs :, self.k_exog * self.neqs :]
|
https://github.com/statsmodels/statsmodels/issues/2270
|
In [34]: results._results.cov_params.shape
Out[34]: (36, 36)
In [37]: results.cov_params
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-37-14357748fa96> in <module>()
<snip>
ValueError: Shape of passed values is (36, 36), indices imply (12, 12)
|
ValueError
|
def stderr(self):
"""Standard errors of coefficients, reshaped to match in size"""
stderr = np.sqrt(np.diag(self.cov_params()))
return stderr.reshape((self.df_model, self.neqs), order="C")
|
def stderr(self):
"""Standard errors of coefficients, reshaped to match in size"""
stderr = np.sqrt(np.diag(self._cov_params()))
return stderr.reshape((self.df_model, self.neqs), order="C")
|
https://github.com/statsmodels/statsmodels/issues/2270
|
In [34]: results._results.cov_params.shape
Out[34]: (36, 36)
In [37]: results.cov_params
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-37-14357748fa96> in <module>()
<snip>
ValueError: Shape of passed values is (36, 36), indices imply (12, 12)
|
ValueError
|
def ksstat(x, cdf, alternative="two_sided", args=()):
"""
Calculate statistic for the Kolmogorov-Smirnov test for goodness of fit
This calculates the test statistic for a test of the distribution G(x) of
an observed variable against a given distribution F(x). Under the null
hypothesis the two distributions are identical, G(x)=F(x). The
alternative hypothesis can be either 'two_sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
x : array_like, 1d
array of observations
cdf : string or callable
string: name of a distribution in scipy.stats
callable: function to evaluate cdf
alternative : 'two_sided' (default), 'less' or 'greater'
defines the alternative hypothesis (see explanation)
args : tuple, sequence
distribution parameters for call to cdf
Returns
-------
D : float
KS test statistic, either D, D+ or D-
See Also
--------
scipy.stats.kstest
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function F(x) of the
hypothesis, G(x)<=F(x), resp. G(x)>=F(x).
In contrast to scipy.stats.kstest, this function only calculates the
statistic which can be used either as distance measure or to implement
case specific p-values.
"""
nobs = float(len(x))
if isinstance(cdf, str):
cdf = getattr(stats.distributions, cdf).cdf
elif hasattr(cdf, "cdf"):
cdf = getattr(cdf, "cdf")
x = np.sort(x)
cdfvals = cdf(x, *args)
d_plus = (np.arange(1.0, nobs + 1) / nobs - cdfvals).max()
d_min = (cdfvals - np.arange(0.0, nobs) / nobs).max()
if alternative == "greater":
return d_plus
elif alternative == "less":
return d_min
return np.max([d_plus, d_min])
|
def ksstat(x, cdf, alternative="two_sided", args=()):
"""
Calculate statistic for the Kolmogorov-Smirnov test for goodness of fit
This calculates the test statistic for a test of the distribution G(x) of an observed
variable against a given distribution F(x). Under the null
hypothesis the two distributions are identical, G(x)=F(x). The
alternative hypothesis can be either 'two_sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
x : array_like, 1d
array of observations
cdf : string or callable
string: name of a distribution in scipy.stats
callable: function to evaluate cdf
alternative : 'two_sided' (default), 'less' or 'greater'
defines the alternative hypothesis (see explanation)
args : tuple, sequence
distribution parameters for call to cdf
Returns
-------
D : float
KS test statistic, either D, D+ or D-
See Also
--------
scipy.stats.kstest
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function F(x) of the
hypothesis, G(x)<=F(x), resp. G(x)>=F(x).
In contrast to scipy.stats.kstest, this function only calculates the
statistic which can be used either as distance measure or to implement
case specific p-values.
"""
nobs = float(len(x))
if isinstance(cdf, string_types):
cdf = getattr(stats.distributions, cdf).cdf
elif hasattr(cdf, "cdf"):
cdf = getattr(cdf, "cdf")
x = np.sort(x)
cdfvals = cdf(x, *args)
if alternative in ["two_sided", "greater"]:
Dplus = (np.arange(1.0, nobs + 1) / nobs - cdfvals).max()
if alternative == "greater":
return Dplus
if alternative in ["two_sided", "less"]:
Dmin = (cdfvals - np.arange(0.0, nobs) / nobs).max()
if alternative == "less":
return Dmin
D = np.max([Dplus, Dmin])
return D
|
https://github.com/statsmodels/statsmodels/issues/5333
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/_lilliefors.py", line 344, in kstest_fit
pval = lilliefors_table.prob(d_ks, nobs)
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 120, in prob
critv = self._critvals(n)
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 99, in _critvals
return np.array([p(n) for p in self.polyn])
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 99, in <listcomp>
return np.array([p(n) for p in self.polyn])
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/polyint.py", line 79, in __call__
y = self._evaluate(x)
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/interpolate.py", line 634, in _evaluate
below_bounds, above_bounds = self._check_bounds(x_new)
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/interpolate.py", line 663, in _check_bounds
raise ValueError("A value in x_new is below the interpolation "
ValueError: A value in x_new is below the interpolation range.
|
ValueError
|
def get_lilliefors_table(dist="norm"):
"""
Generates tables for significance levels of Lilliefors test statistics
Tables for available normal and exponential distribution testing,
as specified in Lilliefors references above
Parameters
----------
dist : string.
distribution being tested in set {'norm', 'exp'}.
Returns
-------
lf : TableDist object.
table of critical values
"""
# function just to keep things together
# for this test alpha is sf probability, i.e. right tail probability
alpha = 1 - np.array(PERCENTILES) / 100.0
alpha = alpha[::-1]
dist = "normal" if dist == "norm" else dist
if dist not in critical_values:
raise ValueError("Invalid dist parameter. Must be 'norm' or 'exp'")
cv_data = critical_values[dist]
acv_data = asymp_critical_values[dist]
size = np.array(sorted(cv_data), dtype=np.float)
crit_lf = np.array([cv_data[key] for key in sorted(cv_data)])
crit_lf = crit_lf[:, ::-1]
asym_params = np.array([acv_data[key] for key in sorted(acv_data)])
asymp_fn = _make_asymptotic_function((asym_params[::-1]))
lf = TableDist(alpha, size, crit_lf, asymptotic=asymp_fn)
return lf
|
def get_lilliefors_table(dist="norm"):
"""
Generates tables for significance levels of Lilliefors test statistics
Tables for available normal and exponential distribution testing,
as specified in Lilliefors references above
Parameters
----------
dist : string.
distribution being tested in set {'norm', 'exp'}.
Returns
-------
lf : TableDist object.
table of critical values
"""
# function just to keep things together
# for this test alpha is sf probability, i.e. right tail probability
if dist == "norm":
alpha = np.array([0.2, 0.15, 0.1, 0.05, 0.01, 0.001])[::-1]
size = np.array(
[
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
25,
30,
40,
100,
400,
900,
],
float,
)
# critical values, rows are by sample size, columns are by alpha
crit_lf = (
np.array(
[
[303, 321, 346, 376, 413, 433],
[289, 303, 319, 343, 397, 439],
[269, 281, 297, 323, 371, 424],
[252, 264, 280, 304, 351, 402],
[239, 250, 265, 288, 333, 384],
[227, 238, 252, 274, 317, 365],
[217, 228, 241, 262, 304, 352],
[208, 218, 231, 251, 291, 338],
[200, 210, 222, 242, 281, 325],
[193, 202, 215, 234, 271, 314],
[187, 196, 208, 226, 262, 305],
[181, 190, 201, 219, 254, 296],
[176, 184, 195, 213, 247, 287],
[171, 179, 190, 207, 240, 279],
[167, 175, 185, 202, 234, 273],
[163, 170, 181, 197, 228, 266],
[159, 166, 176, 192, 223, 260],
[143, 150, 159, 173, 201, 236],
[131, 138, 146, 159, 185, 217],
[115, 120, 128, 139, 162, 189],
[74, 77, 82, 89, 104, 122],
[37, 39, 41, 45, 52, 61],
[25, 26, 28, 30, 35, 42],
]
)[:, ::-1]
/ 1000.0
)
# also build a table for larger sample sizes
def f(n):
return np.array([0.736, 0.768, 0.805, 0.886, 1.031]) / np.sqrt(n)
higher_sizes = np.array(
[
35,
40,
45,
50,
60,
70,
80,
100,
200,
500,
1000,
2000,
3000,
5000,
10000,
100000,
],
float,
)
higher_crit_lf = np.zeros([higher_sizes.shape[0], crit_lf.shape[1] - 1])
for i in range(len(higher_sizes)):
higher_crit_lf[i, :] = f(higher_sizes[i])
alpha_large = alpha[:-1]
size_large = np.concatenate([size, higher_sizes])
crit_lf_large = np.vstack([crit_lf[:-4, :-1], higher_crit_lf])
lf = TableDist(alpha, size, crit_lf)
elif dist == "exp":
alpha = np.array([0.2, 0.15, 0.1, 0.05, 0.01])[::-1]
size = np.array(
[3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 25, 30],
float,
)
crit_lf = (
np.array(
[
[451, 479, 511, 551, 600],
[396, 422, 499, 487, 548],
[359, 382, 406, 442, 504],
[331, 351, 375, 408, 470],
[309, 327, 350, 382, 442],
[291, 308, 329, 360, 419],
[277, 291, 311, 341, 399],
[263, 277, 295, 325, 380],
[251, 264, 283, 311, 365],
[241, 254, 271, 298, 351],
[232, 245, 261, 287, 338],
[224, 237, 252, 277, 326],
[217, 229, 244, 269, 315],
[211, 222, 236, 261, 306],
[204, 215, 229, 253, 297],
[199, 210, 223, 246, 289],
[193, 204, 218, 239, 283],
[188, 199, 212, 234, 278],
[170, 180, 191, 210, 247],
[155, 164, 174, 192, 226],
]
)[:, ::-1]
/ 1000.0
)
def f(n):
return np.array([0.86, 0.91, 0.96, 1.06, 1.25]) / np.sqrt(n)
higher_sizes = np.array(
[
35,
40,
45,
50,
60,
70,
80,
100,
200,
500,
1000,
2000,
3000,
5000,
10000,
100000,
],
float,
)
higher_crit_lf = np.zeros([higher_sizes.shape[0], crit_lf.shape[1]])
for i in range(len(higher_sizes)):
higher_crit_lf[i, :] = f(higher_sizes[i])
size = np.concatenate([size, higher_sizes])
crit_lf = np.vstack([crit_lf, higher_crit_lf])
lf = TableDist(alpha, size, crit_lf)
else:
raise ValueError("Invalid dist parameter. dist must be 'norm' or 'exp'")
return lf
|
https://github.com/statsmodels/statsmodels/issues/5333
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/_lilliefors.py", line 344, in kstest_fit
pval = lilliefors_table.prob(d_ks, nobs)
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 120, in prob
critv = self._critvals(n)
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 99, in _critvals
return np.array([p(n) for p in self.polyn])
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 99, in <listcomp>
return np.array([p(n) for p in self.polyn])
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/polyint.py", line 79, in __call__
y = self._evaluate(x)
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/interpolate.py", line 634, in _evaluate
below_bounds, above_bounds = self._check_bounds(x_new)
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/interpolate.py", line 663, in _check_bounds
raise ValueError("A value in x_new is below the interpolation "
ValueError: A value in x_new is below the interpolation range.
|
ValueError
|
def pval_lf(d_max, n):
"""
Approximate pvalues for Lilliefors test
This is only valid for pvalues smaller than 0.1 which is not checked in
this function.
Parameters
----------
d_max : array_like
two-sided Kolmogorov-Smirnov test statistic
n : int or float
sample size
Returns
-------
p-value : float or ndarray
pvalue according to approximation formula of Dallal and Wilkinson.
Notes
-----
This is mainly a helper function where the calling code should dispatch
on bound violations. Therefore it doesn't check whether the pvalue is in
the valid range.
Precision for the pvalues is around 2 to 3 decimals. This approximation is
also used by other statistical packages (e.g. R:fBasics) but might not be
the most precise available.
References
----------
DallalWilkinson1986
"""
# todo: check boundaries, valid range for n and Dmax
if n > 100:
d_max *= (n / 100.0) ** 0.49
n = 100
pval = np.exp(
-7.01256 * d_max**2 * (n + 2.78019)
+ 2.99587 * d_max * np.sqrt(n + 2.78019)
- 0.122119
+ 0.974598 / np.sqrt(n)
+ 1.67997 / n
)
return pval
|
def pval_lf(Dmax, n):
"""approximate pvalues for Lilliefors test
This is only valid for pvalues smaller than 0.1 which is not checked in
this function.
Parameters
----------
Dmax : array_like
two-sided Kolmogorov-Smirnov test statistic
n : int or float
sample size
Returns
-------
p-value : float or ndarray
pvalue according to approximation formula of Dallal and Wilkinson.
Notes
-----
This is mainly a helper function where the calling code should dispatch
on bound violations. Therefore it doesn't check whether the pvalue is in
the valid range.
Precision for the pvalues is around 2 to 3 decimals. This approximation is
also used by other statistical packages (e.g. R:fBasics) but might not be
the most precise available.
References
----------
DallalWilkinson1986
"""
# todo: check boundaries, valid range for n and Dmax
if n > 100:
Dmax *= (n / 100.0) ** 0.49
n = 100
pval = np.exp(
-7.01256 * Dmax**2 * (n + 2.78019)
+ 2.99587 * Dmax * np.sqrt(n + 2.78019)
- 0.122119
+ 0.974598 / np.sqrt(n)
+ 1.67997 / n
)
return pval
|
https://github.com/statsmodels/statsmodels/issues/5333
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/_lilliefors.py", line 344, in kstest_fit
pval = lilliefors_table.prob(d_ks, nobs)
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 120, in prob
critv = self._critvals(n)
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 99, in _critvals
return np.array([p(n) for p in self.polyn])
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 99, in <listcomp>
return np.array([p(n) for p in self.polyn])
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/polyint.py", line 79, in __call__
y = self._evaluate(x)
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/interpolate.py", line 634, in _evaluate
below_bounds, above_bounds = self._check_bounds(x_new)
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/interpolate.py", line 663, in _check_bounds
raise ValueError("A value in x_new is below the interpolation "
ValueError: A value in x_new is below the interpolation range.
|
ValueError
|
def f(n):
poly = np.array([1, np.log(n), np.log(n) ** 2])
return np.exp(poly.dot(params.T))
|
def f(n):
return np.array([0.86, 0.91, 0.96, 1.06, 1.25]) / np.sqrt(n)
|
https://github.com/statsmodels/statsmodels/issues/5333
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/_lilliefors.py", line 344, in kstest_fit
pval = lilliefors_table.prob(d_ks, nobs)
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 120, in prob
critv = self._critvals(n)
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 99, in _critvals
return np.array([p(n) for p in self.polyn])
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 99, in <listcomp>
return np.array([p(n) for p in self.polyn])
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/polyint.py", line 79, in __call__
y = self._evaluate(x)
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/interpolate.py", line 634, in _evaluate
below_bounds, above_bounds = self._check_bounds(x_new)
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/interpolate.py", line 663, in _check_bounds
raise ValueError("A value in x_new is below the interpolation "
ValueError: A value in x_new is below the interpolation range.
|
ValueError
|
def __init__(
self, alpha, size, crit_table, asymptotic=None, min_nobs=None, max_nobs=None
):
self.alpha = np.asarray(alpha)
if self.alpha.ndim != 1:
raise ValueError("alpha is not 1d")
elif (np.diff(self.alpha) <= 0).any():
raise ValueError("alpha is not sorted")
self.size = np.asarray(size)
if self.size.ndim != 1:
raise ValueError("size is not 1d")
elif (np.diff(self.size) <= 0).any():
raise ValueError("size is not sorted")
if self.size.ndim == 1:
if (np.diff(alpha) <= 0).any():
raise ValueError("alpha is not sorted")
self.crit_table = np.asarray(crit_table)
if self.crit_table.shape != (self.size.shape[0], self.alpha.shape[0]):
raise ValueError("crit_table must have shape(len(size), len(alpha))")
self.n_alpha = len(alpha)
self.signcrit = np.sign(np.diff(self.crit_table, 1).mean())
if self.signcrit > 0: # increasing
self.critv_bounds = self.crit_table[:, [0, 1]]
else:
self.critv_bounds = self.crit_table[:, [1, 0]]
self.asymptotic = None
max_size = self.max_size = max(size)
if asymptotic is not None:
try:
cv = asymptotic(self.max_size + 1)
except Exception as exc:
raise type(exc)(
"Calling asymptotic(self.size+1) failed. The "
"error message was:"
"\n\n{err_msg}".format(err_msg=exc.args[0])
)
if len(cv) != len(alpha):
raise ValueError("asymptotic does not return len(alpha) values")
self.asymptotic = asymptotic
self.min_nobs = max_size if min_nobs is None else min_nobs
self.max_nobs = max_size if max_nobs is None else max_nobs
if self.min_nobs > max_size:
raise ValueError("min_nobs > max(size)")
if self.max_nobs > max_size:
raise ValueError("max_nobs > max(size)")
|
def __init__(self, alpha, size, crit_table):
self.alpha = np.asarray(alpha)
self.size = np.asarray(size)
self.crit_table = np.asarray(crit_table)
self.n_alpha = len(alpha)
self.signcrit = np.sign(np.diff(self.crit_table, 1).mean())
if self.signcrit > 0: # increasing
self.critv_bounds = self.crit_table[:, [0, 1]]
else:
self.critv_bounds = self.crit_table[:, [1, 0]]
|
https://github.com/statsmodels/statsmodels/issues/5333
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/_lilliefors.py", line 344, in kstest_fit
pval = lilliefors_table.prob(d_ks, nobs)
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 120, in prob
critv = self._critvals(n)
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 99, in _critvals
return np.array([p(n) for p in self.polyn])
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 99, in <listcomp>
return np.array([p(n) for p in self.polyn])
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/polyint.py", line 79, in __call__
y = self._evaluate(x)
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/interpolate.py", line 634, in _evaluate
below_bounds, above_bounds = self._check_bounds(x_new)
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/interpolate.py", line 663, in _check_bounds
raise ValueError("A value in x_new is below the interpolation "
ValueError: A value in x_new is below the interpolation range.
|
ValueError
|
def _critvals(self, n):
"""
Rows of the table, linearly interpolated for given sample size
Parameters
----------
n : float
sample size, second parameter of the table
Returns
-------
critv : ndarray, 1d
critical values (ppf) corresponding to a row of the table
Notes
-----
This is used in two step interpolation, or if we want to know the
critical values for all alphas for any sample size that we can obtain
through interpolation
"""
if n > self.max_size:
if self.asymptotic is not None:
cv = self.asymptotic(n)
else:
raise ValueError(
"n is above max(size) and no asymptotic distribtuion is provided"
)
else:
cv = [p(n) for p in self.polyn]
if n > self.min_nobs:
w = (n - self.min_nobs) / (self.max_nobs - self.min_nobs)
w = min(1.0, w)
a_cv = self.asymptotic(n)
cv = w * a_cv + (1 - w) * cv
return cv
|
def _critvals(self, n):
"""rows of the table, linearly interpolated for given sample size
Parameters
----------
n : float
sample size, second parameter of the table
Returns
-------
critv : ndarray, 1d
critical values (ppf) corresponding to a row of the table
Notes
-----
This is used in two step interpolation, or if we want to know the
critical values for all alphas for any sample size that we can obtain
through interpolation
"""
return np.array([p(n) for p in self.polyn])
|
https://github.com/statsmodels/statsmodels/issues/5333
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/_lilliefors.py", line 344, in kstest_fit
pval = lilliefors_table.prob(d_ks, nobs)
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 120, in prob
critv = self._critvals(n)
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 99, in _critvals
return np.array([p(n) for p in self.polyn])
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 99, in <listcomp>
return np.array([p(n) for p in self.polyn])
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/polyint.py", line 79, in __call__
y = self._evaluate(x)
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/interpolate.py", line 634, in _evaluate
below_bounds, above_bounds = self._check_bounds(x_new)
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/interpolate.py", line 663, in _check_bounds
raise ValueError("A value in x_new is below the interpolation "
ValueError: A value in x_new is below the interpolation range.
|
ValueError
|
def prob(self, x, n):
"""
Find pvalues by interpolation, either cdf(x)
Returns extreme probabilities, 0.001 and 0.2, for out of range
Parameters
----------
x : array_like
observed value, assumed to follow the distribution in the table
n : float
sample size, second parameter of the table
Returns
-------
prob : array_like
This is the probability for each value of x, the p-value in
underlying distribution is for a statistical test.
"""
critv = self._critvals(n)
alpha = self.alpha
if self.signcrit < 1:
# reverse if critv is decreasing
critv, alpha = critv[::-1], alpha[::-1]
# now critv is increasing
if np.size(x) == 1:
if x < critv[0]:
return alpha[0]
elif x > critv[-1]:
return alpha[-1]
return interp1d(critv, alpha)(x)[()]
else:
# vectorized
cond_low = x < critv[0]
cond_high = x > critv[-1]
cond_interior = ~np.logical_or(cond_low, cond_high)
probs = np.nan * np.ones(x.shape) # mistake if nan left
probs[cond_low] = alpha[0]
probs[cond_low] = alpha[-1]
probs[cond_interior] = interp1d(critv, alpha)(x[cond_interior])
return probs
|
def prob(self, x, n):
"""find pvalues by interpolation, eiter cdf(x) or sf(x)
returns extrem probabilities, 0.001 and 0.2, for out of range
Parameters
----------
x : array_like
observed value, assumed to follow the distribution in the table
n : float
sample size, second parameter of the table
Returns
-------
prob : arraylike
This is the probability for each value of x, the p-value in
underlying distribution is for a statistical test.
"""
critv = self._critvals(n)
alpha = self.alpha
# if self.signcrit == 1:
# if x < critv[0]: #generalize: ? np.sign(x - critvals[0]) == self.signcrit:
# return alpha[0]
# elif x > critv[-1]:
# return alpha[-1]
# elif self.signcrit == -1:
# if x > critv[0]:
# return alpha[0]
# elif x < critv[-1]:
# return alpha[-1]
if self.signcrit < 1:
# reverse if critv is decreasing
critv, alpha = critv[::-1], alpha[::-1]
# now critv is increasing
if np.size(x) == 1:
if x < critv[0]:
return alpha[0]
elif x > critv[-1]:
return alpha[-1]
return interp1d(critv, alpha)(x)[()]
else:
# vectorized
cond_low = x < critv[0]
cond_high = x > critv[-1]
cond_interior = ~np.logical_or(cond_low, cond_high)
probs = np.nan * np.ones(x.shape) # mistake if nan left
probs[cond_low] = alpha[0]
probs[cond_low] = alpha[-1]
probs[cond_interior] = interp1d(critv, alpha)(x[cond_interior])
return probs
|
https://github.com/statsmodels/statsmodels/issues/5333
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/_lilliefors.py", line 344, in kstest_fit
pval = lilliefors_table.prob(d_ks, nobs)
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 120, in prob
critv = self._critvals(n)
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 99, in _critvals
return np.array([p(n) for p in self.polyn])
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 99, in <listcomp>
return np.array([p(n) for p in self.polyn])
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/polyint.py", line 79, in __call__
y = self._evaluate(x)
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/interpolate.py", line 634, in _evaluate
below_bounds, above_bounds = self._check_bounds(x_new)
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/interpolate.py", line 663, in _check_bounds
raise ValueError("A value in x_new is below the interpolation "
ValueError: A value in x_new is below the interpolation range.
|
ValueError
|
def crit(self, prob, n):
"""
Returns interpolated quantiles, similar to ppf or isf
use two sequential 1d interpolation, first by n then by prob
Parameters
----------
prob : array_like
probabilities corresponding to the definition of table columns
n : int or float
sample size, second parameter of the table
Returns
-------
ppf : array_like
critical values with same shape as prob
"""
prob = np.asarray(prob)
alpha = self.alpha
critv = self._critvals(n)
# vectorized
cond_ilow = prob > alpha[0]
cond_ihigh = prob < alpha[-1]
cond_interior = np.logical_or(cond_ilow, cond_ihigh)
# scalar
if prob.size == 1:
if cond_interior:
return interp1d(alpha, critv)(prob)
else:
return np.nan
# vectorized
quantile = np.nan * np.ones(prob.shape) # nans for outside
quantile[cond_interior] = interp1d(alpha, critv)(prob[cond_interior])
return quantile
|
def crit(self, prob, n):
"""returns interpolated quantiles, similar to ppf or isf
use two sequential 1d interpolation, first by n then by prob
Parameters
----------
prob : array_like
probabilities corresponding to the definition of table columns
n : int or float
sample size, second parameter of the table
Returns
-------
ppf : array_like
critical values with same shape as prob
"""
prob = np.asarray(prob)
alpha = self.alpha
critv = self._critvals(n)
# vectorized
cond_ilow = prob > alpha[0]
cond_ihigh = prob < alpha[-1]
cond_interior = np.logical_or(cond_ilow, cond_ihigh)
# scalar
if prob.size == 1:
if cond_interior:
return interp1d(alpha, critv)(prob)
else:
return np.nan
# vectorized
quantile = np.nan * np.ones(prob.shape) # nans for outside
quantile[cond_interior] = interp1d(alpha, critv)(prob[cond_interior])
return quantile
|
https://github.com/statsmodels/statsmodels/issues/5333
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/_lilliefors.py", line 344, in kstest_fit
pval = lilliefors_table.prob(d_ks, nobs)
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 120, in prob
critv = self._critvals(n)
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 99, in _critvals
return np.array([p(n) for p in self.polyn])
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 99, in <listcomp>
return np.array([p(n) for p in self.polyn])
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/polyint.py", line 79, in __call__
y = self._evaluate(x)
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/interpolate.py", line 634, in _evaluate
below_bounds, above_bounds = self._check_bounds(x_new)
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/interpolate.py", line 663, in _check_bounds
raise ValueError("A value in x_new is below the interpolation "
ValueError: A value in x_new is below the interpolation range.
|
ValueError
|
def crit3(self, prob, n):
"""
Returns interpolated quantiles, similar to ppf or isf
uses Rbf to interpolate critical values as function of `prob` and `n`
Parameters
----------
prob : array_like
probabilities corresponding to the definition of table columns
n : int or float
sample size, second parameter of the table
Returns
-------
ppf : array_like
critical values with same shape as prob, returns nan for arguments
that are outside of the table bounds
"""
prob = np.asarray(prob)
alpha = self.alpha
# vectorized
cond_ilow = prob > alpha[0]
cond_ihigh = prob < alpha[-1]
cond_interior = np.logical_or(cond_ilow, cond_ihigh)
# scalar
if prob.size == 1:
if cond_interior:
return self.polyrbf(n, prob)
else:
return np.nan
# vectorized
quantile = np.nan * np.ones(prob.shape) # nans for outside
quantile[cond_interior] = self.polyrbf(n, prob[cond_interior])
return quantile
|
def crit3(self, prob, n):
"""returns interpolated quantiles, similar to ppf or isf
uses Rbf to interpolate critical values as function of `prob` and `n`
Parameters
----------
prob : array_like
probabilities corresponding to the definition of table columns
n : int or float
sample size, second parameter of the table
Returns
-------
ppf : array_like
critical values with same shape as prob, returns nan for arguments
that are outside of the table bounds
"""
prob = np.asarray(prob)
alpha = self.alpha
# vectorized
cond_ilow = prob > alpha[0]
cond_ihigh = prob < alpha[-1]
cond_interior = np.logical_or(cond_ilow, cond_ihigh)
# scalar
if prob.size == 1:
if cond_interior:
return self.polyrbf(n, prob)
else:
return np.nan
# vectorized
quantile = np.nan * np.ones(prob.shape) # nans for outside
quantile[cond_interior] = self.polyrbf(n, prob[cond_interior])
return quantile
|
https://github.com/statsmodels/statsmodels/issues/5333
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/_lilliefors.py", line 344, in kstest_fit
pval = lilliefors_table.prob(d_ks, nobs)
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 120, in prob
critv = self._critvals(n)
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 99, in _critvals
return np.array([p(n) for p in self.polyn])
File "/home/testone/lib64/python3.6/site-packages/statsmodels/stats/tabledist.py", line 99, in <listcomp>
return np.array([p(n) for p in self.polyn])
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/polyint.py", line 79, in __call__
y = self._evaluate(x)
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/interpolate.py", line 634, in _evaluate
below_bounds, above_bounds = self._check_bounds(x_new)
File "/usr/lib64/python3.6/site-packages/scipy/interpolate/interpolate.py", line 663, in _check_bounds
raise ValueError("A value in x_new is below the interpolation "
ValueError: A value in x_new is below the interpolation range.
|
ValueError
|
def _normalize_dataframe(dataframe, index):
"""Take a pandas DataFrame and count the element present in the
given columns, return a hierarchical index on those columns
"""
# groupby the given keys, extract the same columns and count the element
# then collapse them with a mean
data = dataframe[index].dropna()
grouped = data.groupby(index, sort=False)
counted = grouped[index].count()
averaged = counted.mean(axis=1)
# Fill empty missing with 0, see GH5639
averaged = averaged.fillna(0.0)
return averaged
|
def _normalize_dataframe(dataframe, index):
"""Take a pandas DataFrame and count the element present in the
given columns, return a hierarchical index on those columns
"""
# groupby the given keys, extract the same columns and count the element
# then collapse them with a mean
data = dataframe[index].dropna()
grouped = data.groupby(index, sort=False)
counted = grouped[index].count()
averaged = counted.mean(axis=1)
return averaged
|
https://github.com/statsmodels/statsmodels/issues/5639
|
/home/nbuser/anaconda3_501/lib/python3.6/site-packages/statsmodels/graphics/mosaicplot.py:40: RuntimeWarning: invalid value encountered in less
if np.any(proportion < 0):
posx and posy should be finite values
posx and posy should be finite values
posx and posy should be finite values
posx and posy should be finite values
posx and posy should be finite values
posx and posy should be finite values
posx and posy should be finite values
posx and posy should be finite values
posx and posy should be finite values
posx and posy should be finite values
posx and posy should be finite values
posx and posy should be finite values
/home/nbuser/anaconda3_501/lib/python3.6/site-packages/matplotlib/transforms.py:402: RuntimeWarning: invalid value encountered in double_scalars
return points[1, 0] - points[0, 0]
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~/anaconda3_501/lib/python3.6/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 pass
340 else:
--> 341 return printer(obj)
342 # Finally look for special method names
343 method = get_real_method(obj, self.print_method)
~/anaconda3_501/lib/python3.6/site-packages/IPython/core/pylabtools.py in <lambda>(fig)
242
243 if 'png' in formats:
--> 244 png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
245 if 'retina' in formats or 'png2x' in formats:
246 png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
~/anaconda3_501/lib/python3.6/site-packages/IPython/core/pylabtools.py in print_figure(fig, fmt, bbox_inches, **kwargs)
126
127 bytes_io = BytesIO()
--> 128 fig.canvas.print_figure(bytes_io, **kw)
129 data = bytes_io.getvalue()
130 if fmt == 'svg':
~/anaconda3_501/lib/python3.6/site-packages/matplotlib/backend_bases.py in print_figure(self, filename, dpi, facecolor, edgecolor, orientation, format, bbox_inches, **kwargs)
2051 bbox_artists = kwargs.pop("bbox_extra_artists", None)
2052 bbox_inches = self.figure.get_tightbbox(renderer,
-> 2053 bbox_extra_artists=bbox_artists)
2054 pad = kwargs.pop("pad_inches", None)
2055 if pad is None:
~/anaconda3_501/lib/python3.6/site-packages/matplotlib/figure.py in get_tightbbox(self, renderer, bbox_extra_artists)
2271
2272 for a in artists:
-> 2273 bbox = a.get_tightbbox(renderer)
2274 if bbox is not None and (bbox.width != 0 or bbox.height != 0):
2275 bb.append(bbox)
~/anaconda3_501/lib/python3.6/site-packages/matplotlib/axes/_base.py in get_tightbbox(self, renderer, call_axes_locator, bbox_extra_artists)
4220
4221 for a in bbox_artists:
-> 4222 bbox = a.get_tightbbox(renderer)
4223 if bbox is not None and (bbox.width != 0 or bbox.height != 0):
4224 bb.append(bbox)
~/anaconda3_501/lib/python3.6/site-packages/matplotlib/artist.py in get_tightbbox(self, renderer)
269 """
270
--> 271 bbox = self.get_window_extent(renderer)
272 if self.get_clip_on():
273 clip_box = self.get_clip_box()
~/anaconda3_501/lib/python3.6/site-packages/matplotlib/spines.py in get_window_extent(self, renderer)
151 # correct:
152 self._adjust_location()
--> 153 return super().get_window_extent(renderer=renderer)
154
155 def get_path(self):
~/anaconda3_501/lib/python3.6/site-packages/matplotlib/patches.py in get_window_extent(self, renderer)
546
547 def get_window_extent(self, renderer=None):
--> 548 return self.get_path().get_extents(self.get_transform())
549
550
~/anaconda3_501/lib/python3.6/site-packages/matplotlib/path.py in get_extents(self, transform)
526 path = self
527 if transform is not None:
--> 528 transform = transform.frozen()
529 if not transform.is_affine:
530 path = self.transformed(transform)
~/anaconda3_501/lib/python3.6/site-packages/matplotlib/transforms.py in frozen(self)
2191
2192 def frozen(self):
-> 2193 return blended_transform_factory(self._x.frozen(), self._y.frozen())
2194 frozen.__doc__ = Transform.frozen.__doc__
2195
~/anaconda3_501/lib/python3.6/site-packages/matplotlib/transforms.py in blended_transform_factory(x_transform, y_transform)
2338 if (isinstance(x_transform, Affine2DBase)
2339 and isinstance(y_transform, Affine2DBase)):
-> 2340 return BlendedAffine2D(x_transform, y_transform)
2341 return BlendedGenericTransform(x_transform, y_transform)
2342
~/anaconda3_501/lib/python3.6/site-packages/matplotlib/transforms.py in __init__(self, x_transform, y_transform, **kwargs)
2279 is_correct = is_affine and is_separable
2280 if not is_correct:
-> 2281 raise ValueError("Both *x_transform* and *y_transform* must be 2D "
2282 "affine transforms")
2283
ValueError: Both *x_transform* and *y_transform* must be 2D affine transforms
|
ValueError
|
def _make_arma_exog(endog, exog, trend):
k_trend = 1 # overwritten if no constant
if exog is None and trend == "c": # constant only
exog = np.ones((len(endog), 1))
elif exog is not None and trend == "c": # constant plus exogenous
exog = add_trend(exog, trend="c", prepend=True, has_constant="raise")
elif exog is not None and trend == "nc":
# make sure it's not holding constant from last run
if exog.var() == 0:
exog = None
k_trend = 0
if trend == "nc":
k_trend = 0
return k_trend, exog
|
def _make_arma_exog(endog, exog, trend):
k_trend = 1 # overwritten if no constant
if exog is None and trend == "c": # constant only
exog = np.ones((len(endog), 1))
elif exog is not None and trend == "c": # constant plus exogenous
exog = add_trend(exog, trend="c", prepend=True)
elif exog is not None and trend == "nc":
# make sure it's not holding constant from last run
if exog.var() == 0:
exog = None
k_trend = 0
if trend == "nc":
k_trend = 0
return k_trend, exog
|
https://github.com/statsmodels/statsmodels/issues/3343
|
import pandas as pd
from statsmodels.tsa.arima_model import ARIMA
X = [0.5,0.5,0.5,0.5,0.5]
Y = [-0.011866,0.003380,0.015357,0.004451,-0.020889]
T = ['2000-01-01', '2000-01-02', '2000-01-03', '2000-01-04', '2000-01-05']
TDT = pd.to_datetime(T)
df = pd.DataFrame({'x': X, 'y': Y})
df.index = TDT
df
x y
2000-01-01 0.5 -0.011866
2000-01-02 0.5 0.003380
2000-01-03 0.5 0.015357
2000-01-04 0.5 0.004451
2000-01-05 0.5 -0.020889
m = ARIMA(endog=df.y, order=(1,1,0), exog=df.x)
r = m.fit()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.5/dist-packages/statsmodels/tsa/arima_model.py", line 1104, in fit
callback, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/statsmodels/tsa/arima_model.py", line 919, in fit
start_params = self._fit_start_params((k_ar, k_ma, k), method)
File "/usr/local/lib/python3.5/dist-packages/statsmodels/tsa/arima_model.py", line 563, in _fit_start_params
bounds=bounds, iprint=-1)
File "/usr/local/lib/python3.5/dist-packages/scipy/optimize/lbfgsb.py", line 193, in fmin_l_bfgs_b
**opts)
File "/usr/local/lib/python3.5/dist-packages/scipy/optimize/lbfgsb.py", line 328, in _minimize_lbfgsb
f, g = func_and_grad(x)
File "/usr/local/lib/python3.5/dist-packages/scipy/optimize/lbfgsb.py", line 273, in func_and_grad
f = fun(x, *args)
File "/usr/local/lib/python3.5/dist-packages/scipy/optimize/optimize.py", line 292, in function_wrapper
return function(*(wrapper_args + args))
File "/usr/local/lib/python3.5/dist-packages/statsmodels/tsa/arima_model.py", line 554, in <lambda>
func = lambda params: -self.loglike_css(params)
File "/usr/local/lib/python3.5/dist-packages/statsmodels/tsa/arima_model.py", line 788, in loglike_css
y -= dot(self.exog, newparams[:k])
ValueError: shapes (4,1) and (2,) not aligned: 1 (dim 1) != 2 (dim 0)
|
ValueError
|
def _maybe_convert_ynames_int(self, ynames):
# see if they're integers
issue_warning = False
msg = (
"endog contains values are that not int-like. Uses string "
"representation of value. Use integer-valued endog to "
"suppress this warning."
)
for i in ynames:
try:
if ynames[i] % 1 == 0:
ynames[i] = str(int(ynames[i]))
else:
issue_warning = True
ynames[i] = str(ynames[i])
except TypeError:
ynames[i] = str(ynames[i])
if issue_warning:
import warnings
warnings.warn(msg, SpecificationWarning)
return ynames
|
def _maybe_convert_ynames_int(self, ynames):
# see if they're integers
try:
for i in ynames:
if ynames[i] % 1 == 0:
ynames[i] = str(int(ynames[i]))
except TypeError:
pass
return ynames
|
https://github.com/statsmodels/statsmodels/issues/3960
|
result.summary()
ynames = ['='.join([yname, name]) for name in ynames]
TypeError: sequence item 1: expected str instance, numpy.float64 found
ynames = ['='.join([yname, name]) for name in ynames]
File "m:\...\statsmodels\discrete\discrete_model.py", line 3916, in <listcomp>
File "m:\...\statsmodels\discrete\discrete_model.py", line 3520, in summary
yname, yname_list = self._get_endog_name(yname, yname_list)
File "m:\...\statsmodels\discrete\discrete_model.py", line 3916, in _get_endog_name
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
|
TypeError
|
def _make_dictnames(tmp_arr, offset=0):
"""
Helper function to create a dictionary mapping a column number
to the name in tmp_arr.
"""
col_map = {}
for i, col_name in enumerate(tmp_arr):
col_map[i + offset] = col_name
return col_map
|
def _make_dictnames(tmp_arr, offset=0):
"""
Helper function to create a dictionary mapping a column number
to the name in tmp_arr.
"""
col_map = {}
for i, col_name in enumerate(tmp_arr):
col_map.update({i + offset: col_name})
return col_map
|
https://github.com/statsmodels/statsmodels/issues/1342
|
In [9]: sm.categorical(pd.DataFrame({'a':[1,2,12], 'b':['a','b','a']}), col='a')
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-9-5966a3ee6951> in <module>()
----> 1 sm.categorical(pd.DataFrame({'a':[1,2,12], 'b':['a','b','a']}), col='a')
/usr/local/lib/python2.7/dist-packages/statsmodels-0.6.0-py2.7-linux-x86_64.egg/statsmodels/tools/tools.pyc in categorical(data, col, dictnames, drop)
143 #TODO: add a NameValidator function
144 # catch recarrays and structured arrays
--> 145 if data.dtype.names or data.__class__ is np.recarray:
146 if not col and np.squeeze(data).ndim > 1:
147 raise IndexError("col is None and the input array is not 1d")
/usr/local/lib/python2.7/dist-packages/pandas-0.13.0_285_gfcfaa7d-py2.7-linux-x86_64.egg/pandas/core/generic.pyc in __getattr__(self, name)
1802 return self[name]
1803 raise AttributeError("'%s' object has no attribute '%s'" %
-> 1804 (type(self).__name__, name))
1805
1806 def __setattr__(self, name, value):
AttributeError: 'DataFrame' object has no attribute 'dtype'
|
AttributeError
|
def categorical(data, col=None, dictnames=False, drop=False):
"""
Returns a dummy matrix given an array of categorical variables.
Parameters
----------
data : array
A structured array, recarray, array, Series or DataFrame. This can be
either a 1d vector of the categorical variable or a 2d array with
the column specifying the categorical variable specified by the col
argument.
col : {str, int, None}
If data is a DataFrame col must in a column of data. If data is a
Series, col must be either the name of the Series or None. If data is a
structured array or a recarray, `col` can be a string that is the name
of the column that contains the variable. For all other
arrays `col` can be an int that is the (zero-based) column index
number. `col` can only be None for a 1d array. The default is None.
dictnames : bool, optional
If True, a dictionary mapping the column number to the categorical
name is returned. Used to have information about plain arrays.
drop : bool
Whether or not keep the categorical variable in the returned matrix.
Returns
-------
dummy_matrix, [dictnames, optional]
A matrix of dummy (indicator/binary) float variables for the
categorical data. If dictnames is True, then the dictionary
is returned as well.
Notes
-----
This returns a dummy variable for EVERY distinct variable. If a
a structured or recarray is provided, the names for the new variable is the
old variable name - underscore - category name. So if the a variable
'vote' had answers as 'yes' or 'no' then the returned array would have to
new variables-- 'vote_yes' and 'vote_no'. There is currently
no name checking.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
Univariate examples
>>> import string
>>> string_var = [string.ascii_lowercase[0:5], \
string.ascii_lowercase[5:10], \
string.ascii_lowercase[10:15], \
string.ascii_lowercase[15:20], \
string.ascii_lowercase[20:25]]
>>> string_var *= 5
>>> string_var = np.asarray(sorted(string_var))
>>> design = sm.tools.categorical(string_var, drop=True)
Or for a numerical categorical variable
>>> instr = np.floor(np.arange(10,60, step=2)/10)
>>> design = sm.tools.categorical(instr, drop=True)
With a structured array
>>> num = np.random.randn(25,2)
>>> struct_ar = np.zeros((25,1), dtype=[('var1', 'f4'),('var2', 'f4'), \
('instrument','f4'),('str_instr','a5')])
>>> struct_ar['var1'] = num[:,0][:,None]
>>> struct_ar['var2'] = num[:,1][:,None]
>>> struct_ar['instrument'] = instr[:,None]
>>> struct_ar['str_instr'] = string_var[:,None]
>>> design = sm.tools.categorical(struct_ar, col='instrument', drop=True)
Or
>>> design2 = sm.tools.categorical(struct_ar, col='str_instr', drop=True)
"""
# TODO: add a NameValidator function
if isinstance(col, (list, tuple)):
if len(col) == 1:
col = col[0]
else:
raise ValueError("Can only convert one column at a time")
if (
not isinstance(data, (pd.DataFrame, pd.Series))
and not isinstance(col, (string_types, int))
and col is not None
):
raise TypeError("col must be a str, int or None")
# Pull out a Series from a DataFrame if provided
if isinstance(data, pd.DataFrame):
if col is None:
raise TypeError("col must be a str or int when using a DataFrame")
elif col not in data:
raise ValueError("Column '{0}' not found in data".format(col))
data = data[col]
# Set col to None since we not have a Series
col = None
if isinstance(data, pd.Series):
if col is not None and data.name != col:
raise ValueError("data.name does not match col '{0}'".format(col))
data_cat = data.astype("category")
dummies = pd.get_dummies(data_cat)
col_map = {
i: cat for i, cat in enumerate(data_cat.cat.categories) if cat in dummies
}
if not drop:
dummies.columns = list(dummies.columns)
dummies = pd.concat([dummies, data], 1)
if dictnames:
return dummies, col_map
return dummies
# catch recarrays and structured arrays
elif data.dtype.names or data.__class__ is np.recarray:
if not col and np.squeeze(data).ndim > 1:
raise IndexError("col is None and the input array is not 1d")
if isinstance(col, (int, long)):
col = data.dtype.names[col]
if col is None and data.dtype.names and len(data.dtype.names) == 1:
col = data.dtype.names[0]
tmp_arr = np.unique(data[col])
# if the cols are shape (#,) vs (#,1) need to add an axis and flip
_swap = True
if data[col].ndim == 1:
tmp_arr = tmp_arr[:, None]
_swap = False
tmp_dummy = (tmp_arr == data[col]).astype(float)
if _swap:
tmp_dummy = np.squeeze(tmp_dummy).swapaxes(1, 0)
if not tmp_arr.dtype.names: # how do we get to this code path?
tmp_arr = [asstr2(item) for item in np.squeeze(tmp_arr)]
elif tmp_arr.dtype.names:
tmp_arr = [asstr2(item) for item in np.squeeze(tmp_arr.tolist())]
# prepend the varname and underscore, if col is numeric attribute
# lookup is lost for recarrays...
if col is None:
try:
col = data.dtype.names[0]
except:
col = "var"
# TODO: the above needs to be made robust because there could be many
# var_yes, var_no varaibles for instance.
tmp_arr = [col + "_" + item for item in tmp_arr]
# TODO: test this for rec and structured arrays!!!
if drop is True:
if len(data.dtype) <= 1:
if tmp_dummy.shape[0] < tmp_dummy.shape[1]:
tmp_dummy = np.squeeze(tmp_dummy).swapaxes(1, 0)
dt = lzip(tmp_arr, [tmp_dummy.dtype.str] * len(tmp_arr))
# preserve array type
return np.array(lmap(tuple, tmp_dummy.tolist()), dtype=dt).view(
type(data)
)
data = nprf.drop_fields(
data, col, usemask=False, asrecarray=type(data) is np.recarray
)
data = nprf.append_fields(
data,
tmp_arr,
data=tmp_dummy,
usemask=False,
asrecarray=type(data) is np.recarray,
)
return data
# Catch array-like for an error
elif not isinstance(data, np.ndarray):
raise NotImplementedError("Array-like objects are not supported")
else:
if isinstance(col, (int, long)):
offset = data.shape[1] # need error catching here?
tmp_arr = np.unique(data[:, col])
tmp_dummy = (tmp_arr[:, np.newaxis] == data[:, col]).astype(float)
tmp_dummy = tmp_dummy.swapaxes(1, 0)
if drop is True:
offset -= 1
data = np.delete(data, col, axis=1).astype(float)
data = np.column_stack((data, tmp_dummy))
if dictnames is True:
col_map = _make_dictnames(tmp_arr, offset)
return data, col_map
return data
elif col is None and np.squeeze(data).ndim == 1:
tmp_arr = np.unique(data)
tmp_dummy = (tmp_arr[:, None] == data).astype(float)
tmp_dummy = tmp_dummy.swapaxes(1, 0)
if drop is True:
if dictnames is True:
col_map = _make_dictnames(tmp_arr)
return tmp_dummy, col_map
return tmp_dummy
else:
data = np.column_stack((data, tmp_dummy))
if dictnames is True:
col_map = _make_dictnames(tmp_arr, offset=1)
return data, col_map
return data
else:
raise IndexError("The index %s is not understood" % col)
|
def categorical(
data,
col=None,
dictnames=False,
drop=False,
):
"""
Returns a dummy matrix given an array of categorical variables.
Parameters
----------
data : array
A structured array, recarray, or array. This can be either
a 1d vector of the categorical variable or a 2d array with
the column specifying the categorical variable specified by the col
argument.
col : 'string', int, or None
If data is a structured array or a recarray, `col` can be a string
that is the name of the column that contains the variable. For all
arrays `col` can be an int that is the (zero-based) column index
number. `col` can only be None for a 1d array. The default is None.
dictnames : bool, optional
If True, a dictionary mapping the column number to the categorical
name is returned. Used to have information about plain arrays.
drop : bool
Whether or not keep the categorical variable in the returned matrix.
Returns
-------
dummy_matrix, [dictnames, optional]
A matrix of dummy (indicator/binary) float variables for the
categorical data. If dictnames is True, then the dictionary
is returned as well.
Notes
-----
This returns a dummy variable for EVERY distinct variable. If a
a structured or recarray is provided, the names for the new variable is the
old variable name - underscore - category name. So if the a variable
'vote' had answers as 'yes' or 'no' then the returned array would have to
new variables-- 'vote_yes' and 'vote_no'. There is currently
no name checking.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
Univariate examples
>>> import string
>>> string_var = [string.ascii_lowercase[0:5], \
string.ascii_lowercase[5:10], \
string.ascii_lowercase[10:15], \
string.ascii_lowercase[15:20], \
string.ascii_lowercase[20:25]]
>>> string_var *= 5
>>> string_var = np.asarray(sorted(string_var))
>>> design = sm.tools.categorical(string_var, drop=True)
Or for a numerical categorical variable
>>> instr = np.floor(np.arange(10,60, step=2)/10)
>>> design = sm.tools.categorical(instr, drop=True)
With a structured array
>>> num = np.random.randn(25,2)
>>> struct_ar = np.zeros((25,1), dtype=[('var1', 'f4'),('var2', 'f4'), \
('instrument','f4'),('str_instr','a5')])
>>> struct_ar['var1'] = num[:,0][:,None]
>>> struct_ar['var2'] = num[:,1][:,None]
>>> struct_ar['instrument'] = instr[:,None]
>>> struct_ar['str_instr'] = string_var[:,None]
>>> design = sm.tools.categorical(struct_ar, col='instrument', drop=True)
Or
>>> design2 = sm.tools.categorical(struct_ar, col='str_instr', drop=True)
"""
if isinstance(col, (list, tuple)):
try:
assert len(col) == 1
col = col[0]
except:
raise ValueError("Can only convert one column at a time")
# TODO: add a NameValidator function
# catch recarrays and structured arrays
if data.dtype.names or data.__class__ is np.recarray:
if not col and np.squeeze(data).ndim > 1:
raise IndexError("col is None and the input array is not 1d")
if isinstance(col, (int, long)):
col = data.dtype.names[col]
if col is None and data.dtype.names and len(data.dtype.names) == 1:
col = data.dtype.names[0]
tmp_arr = np.unique(data[col])
# if the cols are shape (#,) vs (#,1) need to add an axis and flip
_swap = True
if data[col].ndim == 1:
tmp_arr = tmp_arr[:, None]
_swap = False
tmp_dummy = (tmp_arr == data[col]).astype(float)
if _swap:
tmp_dummy = np.squeeze(tmp_dummy).swapaxes(1, 0)
if not tmp_arr.dtype.names: # how do we get to this code path?
tmp_arr = [asstr2(item) for item in np.squeeze(tmp_arr)]
elif tmp_arr.dtype.names:
tmp_arr = [asstr2(item) for item in np.squeeze(tmp_arr.tolist())]
# prepend the varname and underscore, if col is numeric attribute
# lookup is lost for recarrays...
if col is None:
try:
col = data.dtype.names[0]
except:
col = "var"
# TODO: the above needs to be made robust because there could be many
# var_yes, var_no varaibles for instance.
tmp_arr = [col + "_" + item for item in tmp_arr]
# TODO: test this for rec and structured arrays!!!
if drop is True:
if len(data.dtype) <= 1:
if tmp_dummy.shape[0] < tmp_dummy.shape[1]:
tmp_dummy = np.squeeze(tmp_dummy).swapaxes(1, 0)
dt = lzip(tmp_arr, [tmp_dummy.dtype.str] * len(tmp_arr))
# preserve array type
return np.array(lmap(tuple, tmp_dummy.tolist()), dtype=dt).view(
type(data)
)
data = nprf.drop_fields(
data, col, usemask=False, asrecarray=type(data) is np.recarray
)
data = nprf.append_fields(
data,
tmp_arr,
data=tmp_dummy,
usemask=False,
asrecarray=type(data) is np.recarray,
)
return data
# handle ndarrays and catch array-like for an error
elif data.__class__ is np.ndarray or not isinstance(data, np.ndarray):
if not isinstance(data, np.ndarray):
raise NotImplementedError("Array-like objects are not supported")
if isinstance(col, (int, long)):
offset = data.shape[1] # need error catching here?
tmp_arr = np.unique(data[:, col])
tmp_dummy = (tmp_arr[:, np.newaxis] == data[:, col]).astype(float)
tmp_dummy = tmp_dummy.swapaxes(1, 0)
if drop is True:
offset -= 1
data = np.delete(data, col, axis=1).astype(float)
data = np.column_stack((data, tmp_dummy))
if dictnames is True:
col_map = _make_dictnames(tmp_arr, offset)
return data, col_map
return data
elif col is None and np.squeeze(data).ndim == 1:
tmp_arr = np.unique(data)
tmp_dummy = (tmp_arr[:, None] == data).astype(float)
tmp_dummy = tmp_dummy.swapaxes(1, 0)
if drop is True:
if dictnames is True:
col_map = _make_dictnames(tmp_arr)
return tmp_dummy, col_map
return tmp_dummy
else:
data = np.column_stack((data, tmp_dummy))
if dictnames is True:
col_map = _make_dictnames(tmp_arr, offset=1)
return data, col_map
return data
else:
raise IndexError("The index %s is not understood" % col)
|
https://github.com/statsmodels/statsmodels/issues/1342
|
In [9]: sm.categorical(pd.DataFrame({'a':[1,2,12], 'b':['a','b','a']}), col='a')
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-9-5966a3ee6951> in <module>()
----> 1 sm.categorical(pd.DataFrame({'a':[1,2,12], 'b':['a','b','a']}), col='a')
/usr/local/lib/python2.7/dist-packages/statsmodels-0.6.0-py2.7-linux-x86_64.egg/statsmodels/tools/tools.pyc in categorical(data, col, dictnames, drop)
143 #TODO: add a NameValidator function
144 # catch recarrays and structured arrays
--> 145 if data.dtype.names or data.__class__ is np.recarray:
146 if not col and np.squeeze(data).ndim > 1:
147 raise IndexError("col is None and the input array is not 1d")
/usr/local/lib/python2.7/dist-packages/pandas-0.13.0_285_gfcfaa7d-py2.7-linux-x86_64.egg/pandas/core/generic.pyc in __getattr__(self, name)
1802 return self[name]
1803 raise AttributeError("'%s' object has no attribute '%s'" %
-> 1804 (type(self).__name__, name))
1805
1806 def __setattr__(self, name, value):
AttributeError: 'DataFrame' object has no attribute 'dtype'
|
AttributeError
|
def _get_names(self, arr):
if isinstance(arr, DataFrame):
if isinstance(arr.columns, MultiIndex):
# Flatten MultiIndexes into "simple" column names
return [".".join((level for level in c if level)) for c in arr.columns]
else:
return list(arr.columns)
elif isinstance(arr, Series):
if arr.name:
return [arr.name]
else:
return
else:
try:
return arr.dtype.names
except AttributeError:
pass
return None
|
def _get_names(self, arr):
if isinstance(arr, DataFrame):
return list(arr.columns)
elif isinstance(arr, Series):
if arr.name:
return [arr.name]
else:
return
else:
try:
return arr.dtype.names
except AttributeError:
pass
return None
|
https://github.com/statsmodels/statsmodels/issues/5414
|
Traceback (most recent call last):
File "xxx/__init__.py", line 451, in do_algorithm_usecase
if af.compute():
File "xxx/algorithms/es.py", line 153, in compute
preds = self.fitted_model.predict(predict_from, predict_until)
File "xxx/myenv/lib/python3.7/site-packages/statsmodels/base/wrapper.py", line 95, in wrapper
obj = data.wrap_output(func(results, *args, **kwargs), how)
File "xxx/myenv/lib/python3.7/site-packages/statsmodels/base/data.py", line 416, in wrap_output
return self.attach_dates(obj)
File "xxx/myenv/lib/python3.7/site-packages/statsmodels/base/data.py", line 563, in attach_dates
columns=self.ynames)
File "xxx/myenv/lib/python3.7/site-packages/pandas/core/frame.py", line 379, in __init__
copy=copy)
File "xxx/myenv/lib/python3.7/site-packages/pandas/core/frame.py", line 536, in _init_ndarray
return create_block_manager_from_blocks([values], [columns, index])
File "xxx/myenv/lib/python3.7/site-packages/pandas/core/internals.py", line 4866, in create_block_manager_from_blocks
construction_error(tot_items, blocks[0].shape[1:], axes, e)
File "xxx/myenv/lib/python3.7/site-packages/pandas/core/internals.py", line 4843, in construction_error
passed, implied))
ValueError: Shape of passed values is (1, 3), indices imply (3, 3)
|
ValueError
|
def __init__(
self,
data,
ncomp=None,
standardize=True,
demean=True,
normalize=True,
gls=False,
weights=None,
method="svd",
missing=None,
tol=5e-8,
max_iter=1000,
tol_em=5e-8,
max_em_iter=100,
):
self._index = None
self._columns = []
if isinstance(data, pd.DataFrame):
self._index = data.index
self._columns = data.columns
self.data = np.asarray(data)
# Store inputs
self._gls = gls
self._normalize = normalize
self._tol = tol
if not 0 < self._tol < 1:
raise ValueError("tol must be strictly between 0 and 1")
self._max_iter = max_iter
self._max_em_iter = max_em_iter
self._tol_em = tol_em
# Prepare data
self._standardize = standardize
self._demean = demean
self._nobs, self._nvar = self.data.shape
if weights is None:
weights = np.ones(self._nvar)
else:
weights = np.array(weights).flatten()
if weights.shape[0] != self._nvar:
raise ValueError("weights should have nvar elements")
weights = weights / np.sqrt((weights**2.0).mean())
self.weights = weights
# Check ncomp against maximum
min_dim = min(self._nobs, self._nvar)
self._ncomp = min_dim if ncomp is None else ncomp
if self._ncomp > min_dim:
import warnings
warn = (
"The requested number of components is more than can be "
"computed from data. The maximum number of components is "
"the minimum of the number of observations or variables"
)
warnings.warn(warn, ValueWarning)
self._ncomp = min_dim
self._method = method
# Workaround to avoid instance methods in __dict__
if self._method not in ("eig", "svd", "nipals"):
raise ValueError("method {0} is not known.".format(method))
self.rows = np.arange(self._nobs)
self.cols = np.arange(self._nvar)
# Handle missing
self._missing = missing
self._adjusted_data = self.data
if missing is not None:
self._adjust_missing()
# Update size
self._nobs, self._nvar = self._adjusted_data.shape
if self._ncomp == np.min(self.data.shape):
self._ncomp = np.min(self._adjusted_data.shape)
elif self._ncomp > np.min(self._adjusted_data.shape):
raise ValueError(
"When adjusting for missing values, user "
"provided ncomp must be no larger than the "
"smallest dimension of the "
"missing-value-adjusted data size."
)
# Attributes and internal values
self._tss = 0.0
self._ess = None
self.transformed_data = None
self._mu = None
self._sigma = None
self._ess_indiv = None
self._tss_indiv = None
self.scores = self.factors = None
self.loadings = None
self.coeff = None
self.eigenvals = None
self.eigenvecs = None
self.projection = None
self.rsquare = None
self.ic = None
# Prepare data
self.transformed_data = self._prepare_data()
# Perform the PCA
self._pca()
if gls:
self._compute_gls_weights()
self.transformed_data = self._prepare_data()
self._pca()
# Final calculations
self._compute_rsquare_and_ic()
if self._index is not None:
self._to_pandas()
|
def __init__(
self,
data,
ncomp=None,
standardize=True,
demean=True,
normalize=True,
gls=False,
weights=None,
method="svd",
missing=None,
tol=5e-8,
max_iter=1000,
tol_em=5e-8,
max_em_iter=100,
):
self._index = None
self._columns = []
if isinstance(data, pd.DataFrame):
self._index = data.index
self._columns = data.columns
self.data = np.asarray(data)
# Store inputs
self._gls = gls
self._normalize = normalize
self._tol = tol
if not 0 < self._tol < 1:
raise ValueError("tol must be strictly between 0 and 1")
self._max_iter = max_iter
self._max_em_iter = max_em_iter
self._tol_em = tol_em
# Prepare data
self._standardize = standardize
self._demean = demean
self._nobs, self._nvar = self.data.shape
if weights is None:
weights = np.ones(self._nvar)
else:
weights = np.array(weights).flatten()
if weights.shape[0] != self._nvar:
raise ValueError("weights should have nvar elements")
weights = weights / np.sqrt((weights**2.0).mean())
self.weights = weights
# Check ncomp against maximum
min_dim = min(self._nobs, self._nvar)
self._ncomp = min_dim if ncomp is None else ncomp
if self._ncomp > min_dim:
import warnings
warn = (
"The requested number of components is more than can be "
"computed from data. The maximum number of components is "
"the minimum of the number of observations or variables"
)
warnings.warn(warn, ValueWarning)
self._ncomp = min_dim
self._method = method
if self._method == "eig":
self._compute_eig = self._compute_using_eig
elif self._method == "svd":
self._compute_eig = self._compute_using_svd
elif self._method == "nipals":
self._compute_eig = self._compute_using_nipals
else:
raise ValueError("method is not known.")
self.rows = np.arange(self._nobs)
self.cols = np.arange(self._nvar)
# Handle missing
self._missing = missing
self._adjusted_data = self.data
if missing is not None:
self._adjust_missing()
# Update size
self._nobs, self._nvar = self._adjusted_data.shape
if self._ncomp == np.min(self.data.shape):
self._ncomp = np.min(self._adjusted_data.shape)
elif self._ncomp > np.min(self._adjusted_data.shape):
raise ValueError(
"When adjusting for missing values, user "
"provided ncomp must be no larger than the "
"smallest dimension of the "
"missing-value-adjusted data size."
)
# Attributes and internal values
self._tss = 0.0
self._ess = None
self.transformed_data = None
self._mu = None
self._sigma = None
self._ess_indiv = None
self._tss_indiv = None
self.scores = self.factors = None
self.loadings = None
self.coeff = None
self.eigenvals = None
self.eigenvecs = None
self.projection = None
self.rsquare = None
self.ic = None
# Prepare data
self.transformed_data = self._prepare_data()
# Perform the PCA
self._pca()
if gls:
self._compute_gls_weights()
self.transformed_data = self._prepare_data()
self._pca()
# Final calculations
self._compute_rsquare_and_ic()
if self._index is not None:
self._to_pandas()
|
https://github.com/statsmodels/statsmodels/issues/4772
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\[...]\lib\site-packages\statsmodels\graphics\functional.py", line 32, in _pickle_method
if m.im_self is None:
AttributeError: 'function' object has no attribute 'im_self'
|
AttributeError
|
def _compute_bw(self, bw):
"""
Computes the bandwidth of the data.
Parameters
----------
bw: array_like or str
If array_like: user-specified bandwidth.
If a string, should be one of:
- cv_ml: cross validation maximum likelihood
- normal_reference: normal reference rule of thumb
- cv_ls: cross validation least squares
Notes
-----
The default values for bw is 'normal_reference'.
"""
if bw is None:
bw = "normal_reference"
if not isinstance(bw, string_types):
self._bw_method = "user-specified"
res = np.asarray(bw)
else:
# The user specified a bandwidth selection method
self._bw_method = bw
# Workaround to avoid instance methods in __dict__
if bw == "normal_reference":
bwfunc = self._normal_reference
elif bw == "cv_ml":
bwfunc = self._cv_ml
else: # bw == 'cv_ls'
bwfunc = self._cv_ls
res = bwfunc()
return res
|
def _compute_bw(self, bw):
"""
Computes the bandwidth of the data.
Parameters
----------
bw: array_like or str
If array_like: user-specified bandwidth.
If a string, should be one of:
- cv_ml: cross validation maximum likelihood
- normal_reference: normal reference rule of thumb
- cv_ls: cross validation least squares
Notes
-----
The default values for bw is 'normal_reference'.
"""
self.bw_func = dict(
normal_reference=self._normal_reference, cv_ml=self._cv_ml, cv_ls=self._cv_ls
)
if bw is None:
bw = "normal_reference"
if not isinstance(bw, string_types):
self._bw_method = "user-specified"
res = np.asarray(bw)
else:
# The user specified a bandwidth selection method
self._bw_method = bw
bwfunc = self.bw_func[bw]
res = bwfunc()
return res
|
https://github.com/statsmodels/statsmodels/issues/4772
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\[...]\lib\site-packages\statsmodels\graphics\functional.py", line 32, in _pickle_method
if m.im_self is None:
AttributeError: 'function' object has no attribute 'im_self'
|
AttributeError
|
def __init__(self, endog, exog, var_type, reg_type="ll", bw="cv_ls", defaults=None):
self.var_type = var_type
self.data_type = var_type
self.reg_type = reg_type
self.k_vars = len(self.var_type)
self.endog = _adjust_shape(endog, 1)
self.exog = _adjust_shape(exog, self.k_vars)
self.data = np.column_stack((self.endog, self.exog))
self.nobs = np.shape(self.exog)[0]
self.est = dict(lc=self._est_loc_constant, ll=self._est_loc_linear)
defaults = EstimatorSettings() if defaults is None else defaults
self._set_defaults(defaults)
if not isinstance(bw, string_types):
bw = np.asarray(bw)
if len(bw) != self.k_vars:
raise ValueError(
"bw must have the same dimension as the number of variables."
)
if not self.efficient:
self.bw = self._compute_reg_bw(bw)
else:
self.bw = self._compute_efficient(bw)
|
def __init__(self, endog, exog, var_type, reg_type="ll", bw="cv_ls", defaults=None):
self.var_type = var_type
self.data_type = var_type
self.reg_type = reg_type
self.k_vars = len(self.var_type)
self.endog = _adjust_shape(endog, 1)
self.exog = _adjust_shape(exog, self.k_vars)
self.data = np.column_stack((self.endog, self.exog))
self.nobs = np.shape(self.exog)[0]
self.bw_func = dict(cv_ls=self.cv_loo, aic=self.aic_hurvich)
self.est = dict(lc=self._est_loc_constant, ll=self._est_loc_linear)
defaults = EstimatorSettings() if defaults is None else defaults
self._set_defaults(defaults)
if not isinstance(bw, string_types):
bw = np.asarray(bw)
if len(bw) != self.k_vars:
raise ValueError(
"bw must have the same dimension as the number of variables."
)
if not self.efficient:
self.bw = self._compute_reg_bw(bw)
else:
self.bw = self._compute_efficient(bw)
|
https://github.com/statsmodels/statsmodels/issues/4772
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\[...]\lib\site-packages\statsmodels\graphics\functional.py", line 32, in _pickle_method
if m.im_self is None:
AttributeError: 'function' object has no attribute 'im_self'
|
AttributeError
|
def _compute_reg_bw(self, bw):
if not isinstance(bw, string_types):
self._bw_method = "user-specified"
return np.asarray(bw)
else:
# The user specified a bandwidth selection method e.g. 'cv_ls'
self._bw_method = bw
# Workaround to avoid instance methods in __dict__
if bw == "cv_ls":
res = self.cv_loo
else: # bw == 'aic'
res = self.aic_hurvich
X = np.std(self.exog, axis=0)
h0 = 1.06 * X * self.nobs ** (-1.0 / (4 + np.size(self.exog, axis=1)))
func = self.est[self.reg_type]
bw_estimated = optimize.fmin(
res, x0=h0, args=(func,), maxiter=1e3, maxfun=1e3, disp=0
)
return bw_estimated
|
def _compute_reg_bw(self, bw):
if not isinstance(bw, string_types):
self._bw_method = "user-specified"
return np.asarray(bw)
else:
# The user specified a bandwidth selection method e.g. 'cv_ls'
self._bw_method = bw
res = self.bw_func[bw]
X = np.std(self.exog, axis=0)
h0 = 1.06 * X * self.nobs ** (-1.0 / (4 + np.size(self.exog, axis=1)))
func = self.est[self.reg_type]
bw_estimated = optimize.fmin(
res, x0=h0, args=(func,), maxiter=1e3, maxfun=1e3, disp=0
)
return bw_estimated
|
https://github.com/statsmodels/statsmodels/issues/4772
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\[...]\lib\site-packages\statsmodels\graphics\functional.py", line 32, in _pickle_method
if m.im_self is None:
AttributeError: 'function' object has no attribute 'im_self'
|
AttributeError
|
def __init__(
self, endog, exog, var_type, reg_type, bw="cv_ls", censor_val=0, defaults=None
):
self.var_type = var_type
self.data_type = var_type
self.reg_type = reg_type
self.k_vars = len(self.var_type)
self.endog = _adjust_shape(endog, 1)
self.exog = _adjust_shape(exog, self.k_vars)
self.data = np.column_stack((self.endog, self.exog))
self.nobs = np.shape(self.exog)[0]
self.est = dict(lc=self._est_loc_constant, ll=self._est_loc_linear)
defaults = EstimatorSettings() if defaults is None else defaults
self._set_defaults(defaults)
self.censor_val = censor_val
if self.censor_val is not None:
self.censored(censor_val)
else:
self.W_in = np.ones((self.nobs, 1))
if not self.efficient:
self.bw = self._compute_reg_bw(bw)
else:
self.bw = self._compute_efficient(bw)
|
def __init__(
self, endog, exog, var_type, reg_type, bw="cv_ls", censor_val=0, defaults=None
):
self.var_type = var_type
self.data_type = var_type
self.reg_type = reg_type
self.k_vars = len(self.var_type)
self.endog = _adjust_shape(endog, 1)
self.exog = _adjust_shape(exog, self.k_vars)
self.data = np.column_stack((self.endog, self.exog))
self.nobs = np.shape(self.exog)[0]
self.bw_func = dict(cv_ls=self.cv_loo, aic=self.aic_hurvich)
self.est = dict(lc=self._est_loc_constant, ll=self._est_loc_linear)
defaults = EstimatorSettings() if defaults is None else defaults
self._set_defaults(defaults)
self.censor_val = censor_val
if self.censor_val is not None:
self.censored(censor_val)
else:
self.W_in = np.ones((self.nobs, 1))
if not self.efficient:
self.bw = self._compute_reg_bw(bw)
else:
self.bw = self._compute_efficient(bw)
|
https://github.com/statsmodels/statsmodels/issues/4772
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\[...]\lib\site-packages\statsmodels\graphics\functional.py", line 32, in _pickle_method
if m.im_self is None:
AttributeError: 'function' object has no attribute 'im_self'
|
AttributeError
|
def __init__(
self,
t=None,
F=None,
sd=None,
effect=None,
df_denom=None,
df_num=None,
alpha=0.05,
**kwds,
):
self.effect = effect # Let it be None for F
if F is not None:
self.distribution = "F"
self.fvalue = F
self.statistic = self.fvalue
self.df_denom = df_denom
self.df_num = df_num
self.dist = fdist
self.dist_args = (df_num, df_denom)
self.pvalue = fdist.sf(F, df_num, df_denom)
elif t is not None:
self.distribution = "t"
self.tvalue = t
self.statistic = t # generic alias
self.sd = sd
self.df_denom = df_denom
self.dist = student_t
self.dist_args = (df_denom,)
self.pvalue = self.dist.sf(np.abs(t), df_denom) * 2
elif "statistic" in kwds:
# TODO: currently targeted to normal distribution, and chi2
self.distribution = kwds["distribution"]
self.statistic = kwds["statistic"]
self.tvalue = value = kwds["statistic"] # keep alias
# TODO: for results instance we decided to use tvalues also for normal
self.sd = sd
self.dist = getattr(stats, self.distribution)
self.dist_args = kwds.get("dist_args", ())
if self.distribution is "chi2":
self.pvalue = self.dist.sf(self.statistic, df_denom)
self.df_denom = df_denom
else:
"normal"
self.pvalue = np.full_like(value, np.nan)
not_nan = ~np.isnan(value)
self.pvalue[not_nan] = self.dist.sf(np.abs(value[not_nan])) * 2
# cleanup
# should we return python scalar?
self.pvalue = np.squeeze(self.pvalue)
|
def __init__(
self,
t=None,
F=None,
sd=None,
effect=None,
df_denom=None,
df_num=None,
alpha=0.05,
**kwds,
):
self.effect = effect # Let it be None for F
if F is not None:
self.distribution = "F"
self.fvalue = F
self.statistic = self.fvalue
self.df_denom = df_denom
self.df_num = df_num
self.dist = fdist
self.dist_args = (df_num, df_denom)
self.pvalue = fdist.sf(F, df_num, df_denom)
elif t is not None:
self.distribution = "t"
self.tvalue = t
self.statistic = t # generic alias
self.sd = sd
self.df_denom = df_denom
self.dist = student_t
self.dist_args = (df_denom,)
self.pvalue = self.dist.sf(np.abs(t), df_denom) * 2
elif "statistic" in kwds:
# TODO: currently targeted to normal distribution, and chi2
self.distribution = kwds["distribution"]
self.statistic = kwds["statistic"]
self.tvalue = value = kwds["statistic"] # keep alias
# TODO: for results instance we decided to use tvalues also for normal
self.sd = sd
self.dist = getattr(stats, self.distribution)
self.dist_args = ()
if self.distribution is "chi2":
self.pvalue = self.dist.sf(self.statistic, df_denom)
else:
"normal"
self.pvalue = np.full_like(value, np.nan)
not_nan = ~np.isnan(value)
self.pvalue[not_nan] = self.dist.sf(np.abs(value[not_nan])) * 2
# cleanup
# should we return python scalar?
self.pvalue = np.squeeze(self.pvalue)
|
https://github.com/statsmodels/statsmodels/issues/4588
|
resols2r.wald_test(np.eye(len(resols2r.params)))
---------------------------------------------------------------------------
LinAlgError Traceback (most recent call last)
<ipython-input-18-702444fbfca8> in <module>()
----> 1 resols2r.wald_test(np.eye(len(resols2r.params)))
...\statsmodels\base\model.py in wald_test(self, r_matrix, cov_p, scale, invcov, use_f)
1665 "dimensions that are asymptotically "
1666 "non-normal")
-> 1667 invcov = np.linalg.inv(cov_p)
1668
1669 if (hasattr(self, 'mle_settings') and
...\python-3.4.4.amd64\lib\site-packages\numpy\linalg\linalg.py in inv(a)
524 signature = 'D->D' if isComplexType(t) else 'd->d'
525 extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
--> 526 ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
527 return wrap(ainv.astype(result_t, copy=False))
528
...\python-3.4.4.amd64\lib\site-packages\numpy\linalg\linalg.py in _raise_linalgerror_singular(err, flag)
88
89 def _raise_linalgerror_singular(err, flag):
---> 90 raise LinAlgError("Singular matrix")
91
92 def _raise_linalgerror_nonposdef(err, flag):
LinAlgError: Singular matrix
|
LinAlgError
|
def summary(self, xname=None, alpha=0.05, title=None):
"""Summarize the Results of the hypothesis test
Parameters
-----------
xname : list of strings, optional
Default is `c_##` for ## in p the number of regressors
alpha : float
significance level for the confidence intervals. Default is
alpha = 0.05 which implies a confidence level of 95%.
title : string, optional
Title for the params table. If not None, then this replaces the
default title
Returns
-------
smry : string or Summary instance
This contains a parameter results table in the case of t or z test
in the same form as the parameter results table in the model
results summary.
For F or Wald test, the return is a string.
"""
if self.effect is not None:
# TODO: should also add some extra information, e.g. robust cov ?
# TODO: can we infer names for constraints, xname in __init__ ?
if title is None:
title = "Test for Constraints"
elif title == "":
# don't add any title,
# I think SimpleTable skips on None - check
title = None
# we have everything for a params table
use_t = self.distribution == "t"
yname = "constraints" # Not used in params_frame
if xname is None:
xname = ["c%d" % ii for ii in range(len(self.effect))]
from statsmodels.iolib.summary import summary_params
pvalues = np.atleast_1d(self.pvalue)
summ = summary_params(
(self, self.effect, self.sd, self.statistic, pvalues, self.conf_int(alpha)),
yname=yname,
xname=xname,
use_t=use_t,
title=title,
alpha=alpha,
)
return summ
elif hasattr(self, "fvalue"):
# TODO: create something nicer for these casee
return "<F test: F=%s, p=%s, df_denom=%d, df_num=%d>" % (
repr(self.fvalue),
self.pvalue,
self.df_denom,
self.df_num,
)
elif self.distribution == "chi2":
return "<Wald test (%s): statistic=%s, p-value=%s, df_denom=%d>" % (
self.distribution,
self.statistic,
self.pvalue,
self.df_denom,
)
else:
# generic
return "<Wald test: statistic=%s, p-value=%s>" % (self.statistic, self.pvalue)
|
def summary(self, xname=None, alpha=0.05, title=None):
"""Summarize the Results of the hypothesis test
Parameters
-----------
xname : list of strings, optional
Default is `c_##` for ## in p the number of regressors
alpha : float
significance level for the confidence intervals. Default is
alpha = 0.05 which implies a confidence level of 95%.
title : string, optional
Title for the params table. If not None, then this replaces the
default title
Returns
-------
smry : string or Summary instance
This contains a parameter results table in the case of t or z test
in the same form as the parameter results table in the model
results summary.
For F or Wald test, the return is a string.
"""
if self.effect is not None:
# TODO: should also add some extra information, e.g. robust cov ?
# TODO: can we infer names for constraints, xname in __init__ ?
if title is None:
title = "Test for Constraints"
elif title == "":
# don't add any title,
# I think SimpleTable skips on None - check
title = None
# we have everything for a params table
use_t = self.distribution == "t"
yname = "constraints" # Not used in params_frame
if xname is None:
xname = ["c%d" % ii for ii in range(len(self.effect))]
from statsmodels.iolib.summary import summary_params
pvalues = np.atleast_1d(self.pvalue)
summ = summary_params(
(self, self.effect, self.sd, self.statistic, pvalues, self.conf_int(alpha)),
yname=yname,
xname=xname,
use_t=use_t,
title=title,
alpha=alpha,
)
return summ
elif hasattr(self, "fvalue"):
# TODO: create something nicer for these casee
return "<F test: F=%s, p=%s, df_denom=%d, df_num=%d>" % (
repr(self.fvalue),
self.pvalue,
self.df_denom,
self.df_num,
)
else:
# generic
return "<Wald test: statistic=%s, p-value=%s>" % (self.statistic, self.pvalue)
|
https://github.com/statsmodels/statsmodels/issues/4588
|
resols2r.wald_test(np.eye(len(resols2r.params)))
---------------------------------------------------------------------------
LinAlgError Traceback (most recent call last)
<ipython-input-18-702444fbfca8> in <module>()
----> 1 resols2r.wald_test(np.eye(len(resols2r.params)))
...\statsmodels\base\model.py in wald_test(self, r_matrix, cov_p, scale, invcov, use_f)
1665 "dimensions that are asymptotically "
1666 "non-normal")
-> 1667 invcov = np.linalg.inv(cov_p)
1668
1669 if (hasattr(self, 'mle_settings') and
...\python-3.4.4.amd64\lib\site-packages\numpy\linalg\linalg.py in inv(a)
524 signature = 'D->D' if isComplexType(t) else 'd->d'
525 extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
--> 526 ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
527 return wrap(ainv.astype(result_t, copy=False))
528
...\python-3.4.4.amd64\lib\site-packages\numpy\linalg\linalg.py in _raise_linalgerror_singular(err, flag)
88
89 def _raise_linalgerror_singular(err, flag):
---> 90 raise LinAlgError("Singular matrix")
91
92 def _raise_linalgerror_nonposdef(err, flag):
LinAlgError: Singular matrix
|
LinAlgError
|
def _fit_start_params_hr(self, order, start_ar_lags=None):
"""
Get starting parameters for fit.
Parameters
----------
order : iterable
(p,q,k) - AR lags, MA lags, and number of exogenous variables
including the constant.
start_ar_lags : int, optional
If start_ar_lags is not None, rather than fitting an AR process
according to best BIC, fits an AR process with a lag length equal
to start_ar_lags.
Returns
-------
start_params : array
A first guess at the starting parameters.
Notes
-----
If necessary, fits an AR process with the laglength start_ar_lags, or
selected according to best BIC if start_ar_lags is None. Obtain the
residuals. Then fit an ARMA(p,q) model via OLS using these residuals
for a first approximation. Uses a separate OLS regression to find the
coefficients of exogenous variables.
References
----------
Hannan, E.J. and Rissanen, J. 1982. "Recursive estimation of mixed
autoregressive-moving average order." `Biometrika`. 69.1.
Durbin, J. 1960. "The Fitting of Time-Series Models."
`Review of the International Statistical Institute`. Vol. 28, No. 3
"""
p, q, k = order
start_params = zeros((p + q + k))
# make copy of endog because overwritten
endog = np.array(self.endog, np.float64)
exog = self.exog
if k != 0:
ols_params = OLS(endog, exog).fit().params
start_params[:k] = ols_params
endog -= np.dot(exog, ols_params).squeeze()
if q != 0:
if p != 0:
# make sure we don't run into small data problems in AR fit
nobs = len(endog)
if start_ar_lags is None:
maxlag = int(round(12 * (nobs / 100.0) ** (1 / 4.0)))
if maxlag >= nobs:
maxlag = nobs - 1
armod = AR(endog).fit(ic="bic", trend="nc", maxlag=maxlag)
else:
if start_ar_lags >= nobs:
start_ar_lags = nobs - 1
armod = AR(endog).fit(trend="nc", maxlag=start_ar_lags)
arcoefs_tmp = armod.params
p_tmp = armod.k_ar
# it's possible in small samples that optimal lag-order
# doesn't leave enough obs. No consistent way to fix.
if p_tmp + q >= len(endog):
raise ValueError(
"Proper starting parameters cannot"
" be found for this order with this "
"number of observations. Use the "
"start_params argument, or set "
"start_ar_lags to an integer less than "
"len(endog) - q."
)
resid = endog[p_tmp:] - np.dot(
lagmat(endog, p_tmp, trim="both"), arcoefs_tmp
)
if p < p_tmp + q:
endog_start = p_tmp + q - p
resid_start = 0
else:
endog_start = 0
resid_start = p - p_tmp - q
lag_endog = lagmat(endog, p, "both")[endog_start:]
lag_resid = lagmat(resid, q, "both")[resid_start:]
# stack ar lags and resids
X = np.column_stack((lag_endog, lag_resid))
coefs = OLS(endog[max(p_tmp + q, p) :], X).fit().params
start_params[k : k + p + q] = coefs
else:
start_params[k + p : k + p + q] = yule_walker(endog, order=q)[0]
if q == 0 and p != 0:
arcoefs = yule_walker(endog, order=p)[0]
start_params[k : k + p] = arcoefs
# check AR coefficients
if p and not np.all(np.abs(np.roots(np.r_[1, -start_params[k : k + p]])) < 1):
raise ValueError(
"The computed initial AR coefficients are not "
"stationary\nYou should induce stationarity, "
"choose a different model order, or you can\n"
"pass your own start_params."
)
# check MA coefficients
elif q and not np.all(np.abs(np.roots(np.r_[1, start_params[k + p :]])) < 1):
raise ValueError(
"The computed initial MA coefficients are not "
"invertible\nYou should induce invertibility, "
"choose a different model order, or you can\n"
"pass your own start_params."
)
# check MA coefficients
return start_params
|
def _fit_start_params_hr(self, order, start_ar_lags=None):
"""
Get starting parameters for fit.
Parameters
----------
order : iterable
(p,q,k) - AR lags, MA lags, and number of exogenous variables
including the constant.
start_ar_lags : int, optional
If start_ar_lags is not None, rather than fitting an AR process
according to best BIC, fits an AR process with a lag length equal
to start_ar_lags.
Returns
-------
start_params : array
A first guess at the starting parameters.
Notes
-----
If necessary, fits an AR process with the laglength start_ar_lags, or
selected according to best BIC if start_ar_lags is None. Obtain the
residuals. Then fit an ARMA(p,q) model via OLS using these residuals
for a first approximation. Uses a separate OLS regression to find the
coefficients of exogenous variables.
References
----------
Hannan, E.J. and Rissanen, J. 1982. "Recursive estimation of mixed
autoregressive-moving average order." `Biometrika`. 69.1.
Durbin, J. 1960. "The Fitting of Time-Series Models."
`Review of the International Statistical Institute`. Vol. 28, No. 3
"""
p, q, k = order
start_params = zeros((p + q + k))
endog = self.endog.copy() # copy because overwritten
exog = self.exog
if k != 0:
ols_params = GLS(endog, exog).fit().params
start_params[:k] = ols_params
endog -= np.dot(exog, ols_params).squeeze()
if q != 0:
if p != 0:
# make sure we don't run into small data problems in AR fit
nobs = len(endog)
if start_ar_lags is None:
maxlag = int(round(12 * (nobs / 100.0) ** (1 / 4.0)))
if maxlag >= nobs:
maxlag = nobs - 1
armod = AR(endog).fit(ic="bic", trend="nc", maxlag=maxlag)
else:
if start_ar_lags >= nobs:
start_ar_lags = nobs - 1
armod = AR(endog).fit(trend="nc", maxlag=start_ar_lags)
arcoefs_tmp = armod.params
p_tmp = armod.k_ar
# it's possible in small samples that optimal lag-order
# doesn't leave enough obs. No consistent way to fix.
if p_tmp + q >= len(endog):
raise ValueError(
"Proper starting parameters cannot"
" be found for this order with this "
"number of observations. Use the "
"start_params argument, or set "
"start_ar_lags to an integer less than "
"len(endog) - q."
)
resid = endog[p_tmp:] - np.dot(
lagmat(endog, p_tmp, trim="both"), arcoefs_tmp
)
if p < p_tmp + q:
endog_start = p_tmp + q - p
resid_start = 0
else:
endog_start = 0
resid_start = p - p_tmp - q
lag_endog = lagmat(endog, p, "both")[endog_start:]
lag_resid = lagmat(resid, q, "both")[resid_start:]
# stack ar lags and resids
X = np.column_stack((lag_endog, lag_resid))
coefs = GLS(endog[max(p_tmp + q, p) :], X).fit().params
start_params[k : k + p + q] = coefs
else:
start_params[k + p : k + p + q] = yule_walker(endog, order=q)[0]
if q == 0 and p != 0:
arcoefs = yule_walker(endog, order=p)[0]
start_params[k : k + p] = arcoefs
# check AR coefficients
if p and not np.all(np.abs(np.roots(np.r_[1, -start_params[k : k + p]])) < 1):
raise ValueError(
"The computed initial AR coefficients are not "
"stationary\nYou should induce stationarity, "
"choose a different model order, or you can\n"
"pass your own start_params."
)
# check MA coefficients
elif q and not np.all(np.abs(np.roots(np.r_[1, start_params[k + p :]])) < 1):
raise ValueError(
"The computed initial MA coefficients are not "
"invertible\nYou should induce invertibility, "
"choose a different model order, or you can\n"
"pass your own start_params."
)
# check MA coefficients
return start_params
|
https://github.com/statsmodels/statsmodels/issues/3504
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-51-964a74d33f5a> in <module>()
5
6 model = sm.tsa.arima_model.ARIMA(ts, order=(0, 2, 0))
----> 7 fitted = model.fit(disp=-1)
8
9
/usr/local/lib/python3.5/dist-packages/statsmodels/tsa/arima_model.py in fit(self, start_params, trend, method, transparams, solver, maxiter, full_output, disp, callback, start_ar_lags, **kwargs)
1149 method, transparams, solver,
1150 maxiter, full_output, disp,
-> 1151 callback, start_ar_lags, **kwargs)
1152 normalized_cov_params = None # TODO: fix this?
1153 arima_fit = ARIMAResults(self, mlefit._results.params,
/usr/local/lib/python3.5/dist-packages/statsmodels/tsa/arima_model.py in fit(self, start_params, trend, method, transparams, solver, maxiter, full_output, disp, callback, start_ar_lags, **kwargs)
954 else: # estimate starting parameters
955 start_params = self._fit_start_params((k_ar, k_ma, k), method,
--> 956 start_ar_lags)
957
958 if transparams: # transform initial parameters to ensure invertibility
/usr/local/lib/python3.5/dist-packages/statsmodels/tsa/arima_model.py in _fit_start_params(self, order, method, start_ar_lags)
572 def _fit_start_params(self, order, method, start_ar_lags=None):
573 if method != 'css-mle': # use Hannan-Rissanen to get start params
--> 574 start_params = self._fit_start_params_hr(order, start_ar_lags)
575 else: # use CSS to get start params
576 func = lambda params: -self.loglike_css(params)
/usr/local/lib/python3.5/dist-packages/statsmodels/tsa/arima_model.py in _fit_start_params_hr(self, order, start_ar_lags)
506 ols_params = GLS(endog, exog).fit().params
507 start_params[:k] = ols_params
--> 508 endog -= np.dot(exog, ols_params).squeeze()
509 if q != 0:
510 if p != 0:
TypeError: Cannot cast ufunc subtract output from dtype('float64') to dtype('int64') with casting rule 'same_kind'
|
TypeError
|
def plot_simultaneous(
self, comparison_name=None, ax=None, figsize=(10, 6), xlabel=None, ylabel=None
):
"""Plot a universal confidence interval of each group mean
Visiualize significant differences in a plot with one confidence
interval per group instead of all pairwise confidence intervals.
Parameters
----------
comparison_name : string, optional
if provided, plot_intervals will color code all groups that are
significantly different from the comparison_name red, and will
color code insignificant groups gray. Otherwise, all intervals will
just be plotted in black.
ax : matplotlib axis, optional
An axis handle on which to attach the plot.
figsize : tuple, optional
tuple for the size of the figure generated
xlabel : string, optional
Name to be displayed on x axis
ylabel : string, optional
Name to be displayed on y axis
Returns
-------
fig : Matplotlib Figure object
handle to figure object containing interval plots
Notes
-----
Multiple comparison tests are nice, but lack a good way to be
visualized. If you have, say, 6 groups, showing a graph of the means
between each group will require 15 confidence intervals.
Instead, we can visualize inter-group differences with a single
interval for each group mean. Hochberg et al. [1] first proposed this
idea and used Tukey's Q critical value to compute the interval widths.
Unlike plotting the differences in the means and their respective
confidence intervals, any two pairs can be compared for significance
by looking for overlap.
References
----------
.. [*] Hochberg, Y., and A. C. Tamhane. Multiple Comparison Procedures.
Hoboken, NJ: John Wiley & Sons, 1987.
Examples
--------
>>> from statsmodels.examples.try_tukey_hsd import cylinders, cyl_labels
>>> from statsmodels.stats.multicomp import MultiComparison
>>> cardata = MultiComparison(cylinders, cyl_labels)
>>> results = cardata.tukeyhsd()
>>> results.plot_simultaneous()
<matplotlib.figure.Figure at 0x...>
This example shows an example plot comparing significant differences
in group means. Significant differences at the alpha=0.05 level can be
identified by intervals that do not overlap (i.e. USA vs Japan,
USA vs Germany).
>>> results.plot_simultaneous(comparison_name="USA")
<matplotlib.figure.Figure at 0x...>
Optionally provide one of the group names to color code the plot to
highlight group means different from comparison_name.
"""
fig, ax1 = utils.create_mpl_ax(ax)
if figsize is not None:
fig.set_size_inches(figsize)
if getattr(self, "halfwidths", None) is None:
self._simultaneous_ci()
means = self._multicomp.groupstats.groupmean
sigidx = []
nsigidx = []
minrange = [means[i] - self.halfwidths[i] for i in range(len(means))]
maxrange = [means[i] + self.halfwidths[i] for i in range(len(means))]
if comparison_name is None:
ax1.errorbar(
means,
lrange(len(means)),
xerr=self.halfwidths,
marker="o",
linestyle="None",
color="k",
ecolor="k",
)
else:
if comparison_name not in self.groupsunique:
raise ValueError("comparison_name not found in group names.")
midx = np.where(self.groupsunique == comparison_name)[0][0]
for i in range(len(means)):
if self.groupsunique[i] == comparison_name:
continue
if min(maxrange[i], maxrange[midx]) - max(minrange[i], minrange[midx]) < 0:
sigidx.append(i)
else:
nsigidx.append(i)
# Plot the master comparison
ax1.errorbar(
means[midx],
midx,
xerr=self.halfwidths[midx],
marker="o",
linestyle="None",
color="b",
ecolor="b",
)
ax1.plot(
[minrange[midx]] * 2,
[-1, self._multicomp.ngroups],
linestyle="--",
color="0.7",
)
ax1.plot(
[maxrange[midx]] * 2,
[-1, self._multicomp.ngroups],
linestyle="--",
color="0.7",
)
# Plot those that are significantly different
if len(sigidx) > 0:
ax1.errorbar(
means[sigidx],
sigidx,
xerr=self.halfwidths[sigidx],
marker="o",
linestyle="None",
color="r",
ecolor="r",
)
# Plot those that are not significantly different
if len(nsigidx) > 0:
ax1.errorbar(
means[nsigidx],
nsigidx,
xerr=self.halfwidths[nsigidx],
marker="o",
linestyle="None",
color="0.5",
ecolor="0.5",
)
ax1.set_title("Multiple Comparisons Between All Pairs (Tukey)")
r = np.max(maxrange) - np.min(minrange)
ax1.set_ylim([-1, self._multicomp.ngroups])
ax1.set_xlim([np.min(minrange) - r / 10.0, np.max(maxrange) + r / 10.0])
ax1.set_yticklabels(np.insert(self.groupsunique.astype(str), 0, ""))
ax1.set_yticks(np.arange(-1, len(means) + 1))
ax1.set_xlabel(xlabel if xlabel is not None else "")
ax1.set_ylabel(ylabel if ylabel is not None else "")
return fig
|
def plot_simultaneous(
self, comparison_name=None, ax=None, figsize=(10, 6), xlabel=None, ylabel=None
):
"""Plot a universal confidence interval of each group mean
Visiualize significant differences in a plot with one confidence
interval per group instead of all pairwise confidence intervals.
Parameters
----------
comparison_name : string, optional
if provided, plot_intervals will color code all groups that are
significantly different from the comparison_name red, and will
color code insignificant groups gray. Otherwise, all intervals will
just be plotted in black.
ax : matplotlib axis, optional
An axis handle on which to attach the plot.
figsize : tuple, optional
tuple for the size of the figure generated
xlabel : string, optional
Name to be displayed on x axis
ylabel : string, optional
Name to be displayed on y axis
Returns
-------
fig : Matplotlib Figure object
handle to figure object containing interval plots
Notes
-----
Multiple comparison tests are nice, but lack a good way to be
visualized. If you have, say, 6 groups, showing a graph of the means
between each group will require 15 confidence intervals.
Instead, we can visualize inter-group differences with a single
interval for each group mean. Hochberg et al. [1] first proposed this
idea and used Tukey's Q critical value to compute the interval widths.
Unlike plotting the differences in the means and their respective
confidence intervals, any two pairs can be compared for significance
by looking for overlap.
References
----------
.. [*] Hochberg, Y., and A. C. Tamhane. Multiple Comparison Procedures.
Hoboken, NJ: John Wiley & Sons, 1987.
Examples
--------
>>> from statsmodels.examples.try_tukey_hsd import cylinders, cyl_labels
>>> from statsmodels.stats.multicomp import MultiComparison
>>> cardata = MultiComparison(cylinders, cyl_labels)
>>> results = cardata.tukeyhsd()
>>> results.plot_simultaneous()
<matplotlib.figure.Figure at 0x...>
This example shows an example plot comparing significant differences
in group means. Significant differences at the alpha=0.05 level can be
identified by intervals that do not overlap (i.e. USA vs Japan,
USA vs Germany).
>>> results.plot_simultaneous(comparison_name="USA")
<matplotlib.figure.Figure at 0x...>
Optionally provide one of the group names to color code the plot to
highlight group means different from comparison_name.
"""
fig, ax1 = utils.create_mpl_ax(ax)
if figsize is not None:
fig.set_size_inches(figsize)
if getattr(self, "halfwidths", None) is None:
self._simultaneous_ci()
means = self._multicomp.groupstats.groupmean
sigidx = []
nsigidx = []
minrange = [means[i] - self.halfwidths[i] for i in range(len(means))]
maxrange = [means[i] + self.halfwidths[i] for i in range(len(means))]
if comparison_name is None:
ax1.errorbar(
means,
lrange(len(means)),
xerr=self.halfwidths,
marker="o",
linestyle="None",
color="k",
ecolor="k",
)
else:
if comparison_name not in self.groupsunique:
raise ValueError("comparison_name not found in group names.")
midx = np.where(self.groupsunique == comparison_name)[0]
for i in range(len(means)):
if self.groupsunique[i] == comparison_name:
continue
if min(maxrange[i], maxrange[midx]) - max(minrange[i], minrange[midx]) < 0:
sigidx.append(i)
else:
nsigidx.append(i)
# Plot the master comparison
ax1.errorbar(
means[midx],
midx,
xerr=self.halfwidths[midx],
marker="o",
linestyle="None",
color="b",
ecolor="b",
)
ax1.plot(
[minrange[midx]] * 2,
[-1, self._multicomp.ngroups],
linestyle="--",
color="0.7",
)
ax1.plot(
[maxrange[midx]] * 2,
[-1, self._multicomp.ngroups],
linestyle="--",
color="0.7",
)
# Plot those that are significantly different
if len(sigidx) > 0:
ax1.errorbar(
means[sigidx],
sigidx,
xerr=self.halfwidths[sigidx],
marker="o",
linestyle="None",
color="r",
ecolor="r",
)
# Plot those that are not significantly different
if len(nsigidx) > 0:
ax1.errorbar(
means[nsigidx],
nsigidx,
xerr=self.halfwidths[nsigidx],
marker="o",
linestyle="None",
color="0.5",
ecolor="0.5",
)
ax1.set_title("Multiple Comparisons Between All Pairs (Tukey)")
r = np.max(maxrange) - np.min(minrange)
ax1.set_ylim([-1, self._multicomp.ngroups])
ax1.set_xlim([np.min(minrange) - r / 10.0, np.max(maxrange) + r / 10.0])
ax1.set_yticklabels(np.insert(self.groupsunique.astype(str), 0, ""))
ax1.set_yticks(np.arange(-1, len(means) + 1))
ax1.set_xlabel(xlabel if xlabel is not None else "")
ax1.set_ylabel(ylabel if ylabel is not None else "")
return fig
|
https://github.com/statsmodels/statsmodels/issues/3584
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-1-35117e389add> in <module>()
35 alpha=0.05) # Significance level
36
---> 37 tukey.plot_simultaneous(comparison_name = 'white') # Plot group confidence intervals
38 tukey.summary()
39 print(np.version.version)
c:\Anaconda3\lib\site-packages\statsmodels\sandbox\stats\multicomp.py in plot_simultaneous(self, comparison_name, ax, figsize, xlabel, ylabel)
733 if self.groupsunique[i] == comparison_name:
734 continue
--> 735 if (min(maxrange[i], maxrange[midx]) -
736 max(minrange[i], minrange[midx]) < 0):
737 sigidx.append(i)
TypeError: only integer scalar arrays can be converted to a scalar index
|
TypeError
|
def medcouple(y, axis=0):
"""
Calculates the medcouple robust measure of skew.
Parameters
----------
y : array-like
axis : int or None, optional
Axis along which the medcouple statistic is computed. If `None`, the
entire array is used.
Returns
-------
mc : ndarray
The medcouple statistic with the same shape as `y`, with the specified
axis removed.
Notes
-----
The current algorithm requires a O(N**2) memory allocations, and so may
not work for very large arrays (N>10000).
.. [*] M. Huberta and E. Vandervierenb, "An adjusted boxplot for skewed
distributions" Computational Statistics & Data Analysis, vol. 52, pp.
5186-5201, August 2008.
"""
y = np.asarray(y, dtype=np.double) # GH 4243
if axis is None:
return _medcouple_1d(y.ravel())
return np.apply_along_axis(_medcouple_1d, axis, y)
|
def medcouple(y, axis=0):
"""
Calculates the medcouple robust measure of skew.
Parameters
----------
y : array-like
axis : int or None, optional
Axis along which the medcouple statistic is computed. If `None`, the
entire array is used.
Returns
-------
mc : ndarray
The medcouple statistic with the same shape as `y`, with the specified
axis removed.
Notes
-----
The current algorithm requires a O(N**2) memory allocations, and so may
not work for very large arrays (N>10000).
.. [*] M. Huberta and E. Vandervierenb, "An adjusted boxplot for skewed
distributions" Computational Statistics & Data Analysis, vol. 52, pp.
5186-5201, August 2008.
"""
if axis is None:
return _medcouple_1d(y.ravel())
return np.apply_along_axis(_medcouple_1d, axis, y)
|
https://github.com/statsmodels/statsmodels/issues/4243
|
from statsmodels.stats.stattools import medcouple
medcouple(np.arange(9))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...\statsmodels\stats\stattools.py", line 451, in medcouple
return np.apply_along_axis(_medcouple_1d, axis, y)
File "C:\...\python-3.4.4.amd64\lib\site-packages\numpy\lib\shape_base.py", line 91, in apply_along_axis
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
File "...\statsmodels\stats\stattools.py", line 417, in _medcouple_1d
standardization[is_zero] = np.inf
OverflowError: cannot convert float infinity to integer
medcouple(np.arange(9)*1.0)
array(0.0)
medcouple(np.random.randn(9))
array(0.03126347622708405)
medcouple(np.random.randn(8))
array(0.0072095663649526616)
|
OverflowError
|
def summary2(self, alpha=0.05, float_format="%.4f"):
"""Experimental function to summarize regression results
Parameters
-----------
alpha : float
significance level for the confidence intervals
float_format: string
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary
results
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_dict(summary2.summary_model(self))
# One data frame per value of endog
eqn = self.params.shape[1]
confint = self.conf_int(alpha)
for i in range(eqn):
coefs = summary2.summary_params(
(
self,
self.params[:, i],
self.bse[:, i],
self.tvalues[:, i],
self.pvalues[:, i],
confint[i],
),
alpha=alpha,
)
# Header must show value of endog
level_str = self.model.endog_names + " = " + str(i)
coefs[level_str] = coefs.index
coefs = coefs.iloc[:, [-1, 0, 1, 2, 3, 4, 5]]
smry.add_df(coefs, index=False, header=True, float_format=float_format)
smry.add_title(results=self)
return smry
|
def summary2(self, alpha=0.05, float_format="%.4f"):
"""Experimental function to summarize regression results
Parameters
-----------
alpha : float
significance level for the confidence intervals
float_format: string
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary
results
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_dict(summary2.summary_model(self))
# One data frame per value of endog
eqn = self.params.shape[1]
confint = self.conf_int(alpha)
for i in range(eqn):
coefs = summary2.summary_params(
self,
alpha,
self.params[:, i],
self.bse[:, i],
self.tvalues[:, i],
self.pvalues[:, i],
confint[i],
)
# Header must show value of endog
level_str = self.model.endog_names + " = " + str(i)
coefs[level_str] = coefs.index
coefs = coefs.iloc[:, [-1, 0, 1, 2, 3, 4, 5]]
smry.add_df(coefs, index=False, header=True, float_format=float_format)
smry.add_title(results=self)
return smry
|
https://github.com/statsmodels/statsmodels/issues/3651
|
======================================================================
ERROR: statsmodels.discrete.tests.test_discrete.test_mnlogit_factor
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python2.7/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python2.7/site-packages/statsmodels-0.8.0-py2.7-linux-x86_64.egg/statsmodels/discrete/tests/test_discrete.py", line 1436, in test_mnlogit_factor
summary2 = res.summary2()
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python2.7/site-packages/statsmodels-0.8.0-py2.7-linux-x86_64.egg/statsmodels/discrete/discrete_model.py", line 3028, in summary2
confint[i])
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python2.7/site-packages/statsmodels-0.8.0-py2.7-linux-x86_64.egg/statsmodels/iolib/summary2.py", line 339, in summary_params
data = np.hstack([data, conf_int])
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python2.7/site-packages/numpy/core/shape_base.py", line 277, in hstack
return _nx.concatenate(arrs, 1)
ValueError: all the input array dimensions except for the concatenation axis must match exactly
|
ValueError
|
def summary_params(
results,
yname=None,
xname=None,
alpha=0.05,
use_t=True,
skip_header=False,
float_format="%.4f",
):
"""create a summary table of parameters from results instance
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
skip_headers : bool
If false (default), then the header row is added. If true, then no
header row is added.
float_format : string
float formatting options (e.g. ".3g")
Returns
-------
params_table : SimpleTable instance
"""
if isinstance(results, tuple):
results, params, bse, tvalues, pvalues, conf_int = results
else:
params = results.params
bse = results.bse
tvalues = results.tvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
data = np.array([params, bse, tvalues, pvalues]).T
data = np.hstack([data, conf_int])
data = pd.DataFrame(data)
if use_t:
data.columns = [
"Coef.",
"Std.Err.",
"t",
"P>|t|",
"[" + str(alpha / 2),
str(1 - alpha / 2) + "]",
]
else:
data.columns = [
"Coef.",
"Std.Err.",
"z",
"P>|z|",
"[" + str(alpha / 2),
str(1 - alpha / 2) + "]",
]
if not xname:
data.index = results.model.exog_names
else:
data.index = xname
return data
|
def summary_params(
results,
yname=None,
xname=None,
alpha=0.05,
use_t=True,
skip_header=False,
float_format="%.4f",
):
"""create a summary table of parameters from results instance
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
skip_headers : bool
If false (default), then the header row is added. If true, then no
header row is added.
float_format : string
float formatting options (e.g. ".3g")
Returns
-------
params_table : SimpleTable instance
"""
if isinstance(results, tuple):
results, params, std_err, tvalues, pvalues, conf_int = results
else:
params = results.params
bse = results.bse
tvalues = results.tvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
data = np.array([params, bse, tvalues, pvalues]).T
data = np.hstack([data, conf_int])
data = pd.DataFrame(data)
if use_t:
data.columns = [
"Coef.",
"Std.Err.",
"t",
"P>|t|",
"[" + str(alpha / 2),
str(1 - alpha / 2) + "]",
]
else:
data.columns = [
"Coef.",
"Std.Err.",
"z",
"P>|z|",
"[" + str(alpha / 2),
str(1 - alpha / 2) + "]",
]
if not xname:
data.index = results.model.exog_names
else:
data.index = xname
return data
|
https://github.com/statsmodels/statsmodels/issues/3651
|
======================================================================
ERROR: statsmodels.discrete.tests.test_discrete.test_mnlogit_factor
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python2.7/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python2.7/site-packages/statsmodels-0.8.0-py2.7-linux-x86_64.egg/statsmodels/discrete/tests/test_discrete.py", line 1436, in test_mnlogit_factor
summary2 = res.summary2()
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python2.7/site-packages/statsmodels-0.8.0-py2.7-linux-x86_64.egg/statsmodels/discrete/discrete_model.py", line 3028, in summary2
confint[i])
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python2.7/site-packages/statsmodels-0.8.0-py2.7-linux-x86_64.egg/statsmodels/iolib/summary2.py", line 339, in summary_params
data = np.hstack([data, conf_int])
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python2.7/site-packages/numpy/core/shape_base.py", line 277, in hstack
return _nx.concatenate(arrs, 1)
ValueError: all the input array dimensions except for the concatenation axis must match exactly
|
ValueError
|
def acorr_ljungbox(x, lags=None, boxpierce=False):
"""
Ljung-Box test for no autocorrelation
Parameters
----------
x : array_like, 1d
data series, regression residuals when used as diagnostic test
lags : None, int or array_like
If lags is an integer then this is taken to be the largest lag
that is included, the test result is reported for all smaller lag length.
If lags is a list or array, then all lags are included up to the largest
lag in the list, however only the tests for the lags in the list are
reported.
If lags is None, then the default maxlag is 'min((nobs // 2 - 2), 40)'
boxpierce : {False, True}
If true, then additional to the results of the Ljung-Box test also the
Box-Pierce test results are returned
Returns
-------
lbvalue : float or array
test statistic
pvalue : float or array
p-value based on chi-square distribution
bpvalue : (optionsal), float or array
test statistic for Box-Pierce test
bppvalue : (optional), float or array
p-value based for Box-Pierce test on chi-square distribution
Notes
-----
Ljung-Box and Box-Pierce statistic differ in their scaling of the
autocorrelation function. Ljung-Box test is reported to have better
small sample properties.
TODO: could be extended to work with more than one series
1d or nd ? axis ? ravel ?
needs more testing
*Verification*
Looks correctly sized in Monte Carlo studies.
not yet compared to verified values
Examples
--------
see example script
References
----------
Greene
Wikipedia
"""
x = np.asarray(x)
nobs = x.shape[0]
if lags is None:
lags = np.arange(1, min((nobs // 2 - 2), 40) + 1)
elif isinstance(lags, (int, long)):
lags = np.arange(1, lags + 1)
lags = np.asarray(lags)
maxlag = max(lags)
acfx = acf(x, nlags=maxlag) # normalize by nobs not (nobs-nlags)
# SS: unbiased=False is default now
acf2norm = acfx[1 : maxlag + 1] ** 2 / (nobs - np.arange(1, maxlag + 1))
qljungbox = nobs * (nobs + 2) * np.cumsum(acf2norm)[lags - 1]
pval = stats.chi2.sf(qljungbox, lags)
if not boxpierce:
return qljungbox, pval
else:
qboxpierce = nobs * np.cumsum(acfx[1 : maxlag + 1] ** 2)[lags - 1]
pvalbp = stats.chi2.sf(qboxpierce, lags)
return qljungbox, pval, qboxpierce, pvalbp
|
def acorr_ljungbox(x, lags=None, boxpierce=False):
"""Ljung-Box test for no autocorrelation
Parameters
----------
x : array_like, 1d
data series, regression residuals when used as diagnostic test
lags : None, int or array_like
If lags is an integer then this is taken to be the largest lag
that is included, the test result is reported for all smaller lag length.
If lags is a list or array, then all lags are included up to the largest
lag in the list, however only the tests for the lags in the list are
reported.
If lags is None, then the default maxlag is 12*(nobs/100)^{1/4}
boxpierce : {False, True}
If true, then additional to the results of the Ljung-Box test also the
Box-Pierce test results are returned
Returns
-------
lbvalue : float or array
test statistic
pvalue : float or array
p-value based on chi-square distribution
bpvalue : (optionsal), float or array
test statistic for Box-Pierce test
bppvalue : (optional), float or array
p-value based for Box-Pierce test on chi-square distribution
Notes
-----
Ljung-Box and Box-Pierce statistic differ in their scaling of the
autocorrelation function. Ljung-Box test is reported to have better
small sample properties.
TODO: could be extended to work with more than one series
1d or nd ? axis ? ravel ?
needs more testing
''Verification''
Looks correctly sized in Monte Carlo studies.
not yet compared to verified values
Examples
--------
see example script
References
----------
Greene
Wikipedia
"""
x = np.asarray(x)
nobs = x.shape[0]
if lags is None:
lags = lrange(1, 41) # TODO: check default; SS: changed to 40
elif isinstance(lags, (int, long)):
lags = lrange(1, lags + 1)
maxlag = max(lags)
lags = np.asarray(lags)
acfx = acf(x, nlags=maxlag) # normalize by nobs not (nobs-nlags)
# SS: unbiased=False is default now
# acf2norm = acfx[1:maxlag+1]**2 / (nobs - np.arange(1,maxlag+1))
acf2norm = acfx[1 : maxlag + 1] ** 2 / (nobs - np.arange(1, maxlag + 1))
qljungbox = nobs * (nobs + 2) * np.cumsum(acf2norm)[lags - 1]
pval = stats.chi2.sf(qljungbox, lags)
if not boxpierce:
return qljungbox, pval
else:
qboxpierce = nobs * np.cumsum(acfx[1 : maxlag + 1] ** 2)[lags - 1]
pvalbp = stats.chi2.sf(qboxpierce, lags)
return qljungbox, pval, qboxpierce, pvalbp
|
https://github.com/statsmodels/statsmodels/issues/3229
|
from statsmodels.stats import diagnostic as diag
diag.acorr_ljungbox(np.random.random(50))[0].shape
(40,)
diag.acorr_ljungbox(np.random.random(20), lags=5)
(array([ 0.36718151, 1.02009595, 1.23734092, 3.75338034, 4.35387236]), array([ 0.54454461, 0.60046677, 0.74406305, 0.44040973, 0.49966951]))
diag.acorr_ljungbox(np.random.random(20))
Traceback (most recent call last):
File "<pyshell#543>", line 1, in <module>
diag.acorr_ljungbox(np.random.random(20))
File "m:\josef_new\eclipse_ws\statsmodels\statsmodels_py34_pr\statsmodels\sandbox\stats\diagnostic.py", line 295, in acorr_ljungbox
acf2norm = acfx[1:maxlag+1]**2 / (nobs - np.arange(1,maxlag+1))
ValueError: operands could not be broadcast together with shapes (19,) (40,)
|
ValueError
|
def acorr_ljungbox(x, lags=None, boxpierce=False):
"""
Ljung-Box test for no autocorrelation
Parameters
----------
x : array_like, 1d
data series, regression residuals when used as diagnostic test
lags : None, int or array_like
If lags is an integer then this is taken to be the largest lag
that is included, the test result is reported for all smaller lag length.
If lags is a list or array, then all lags are included up to the largest
lag in the list, however only the tests for the lags in the list are
reported.
If lags is None, then the default maxlag is 'min((nobs // 2 - 2), 40)'
boxpierce : {False, True}
If true, then additional to the results of the Ljung-Box test also the
Box-Pierce test results are returned
Returns
-------
lbvalue : float or array
test statistic
pvalue : float or array
p-value based on chi-square distribution
bpvalue : (optionsal), float or array
test statistic for Box-Pierce test
bppvalue : (optional), float or array
p-value based for Box-Pierce test on chi-square distribution
Notes
-----
Ljung-Box and Box-Pierce statistic differ in their scaling of the
autocorrelation function. Ljung-Box test is reported to have better
small sample properties.
TODO: could be extended to work with more than one series
1d or nd ? axis ? ravel ?
needs more testing
*Verification*
Looks correctly sized in Monte Carlo studies.
not yet compared to verified values
Examples
--------
see example script
References
----------
Greene
Wikipedia
"""
x = np.asarray(x)
nobs = x.shape[0]
if lags is None:
lags = np.arange(1, min((nobs // 2 - 2), 40) + 1)
elif isinstance(lags, (int, long)):
lags = np.arange(1, lags + 1)
lags = np.asarray(lags)
maxlag = max(lags)
acfx = acf(x, nlags=maxlag) # normalize by nobs not (nobs-nlags)
# SS: unbiased=False is default now
acf2norm = acfx[1 : maxlag + 1] ** 2 / (nobs - np.arange(1, maxlag + 1))
qljungbox = nobs * (nobs + 2) * np.cumsum(acf2norm)[lags - 1]
pval = stats.chi2.sf(qljungbox, lags)
if not boxpierce:
return qljungbox, pval
else:
qboxpierce = nobs * np.cumsum(acfx[1 : maxlag + 1] ** 2)[lags - 1]
pvalbp = stats.chi2.sf(qboxpierce, lags)
return qljungbox, pval, qboxpierce, pvalbp
|
def acorr_ljungbox(x, lags=None, boxpierce=False):
"""Ljung-Box test for no autocorrelation
Parameters
----------
x : array_like, 1d
data series, regression residuals when used as diagnostic test
lags : None, int or array_like
If lags is an integer then this is taken to be the largest lag
that is included, the test result is reported for all smaller lag length.
If lags is a list or array, then all lags are included up to the largest
lag in the list, however only the tests for the lags in the list are
reported.
If lags is None, then the default maxlag is 'min((nobs // 2 - 2), 40)'
boxpierce : {False, True}
If true, then additional to the results of the Ljung-Box test also the
Box-Pierce test results are returned
Returns
-------
lbvalue : float or array
test statistic
pvalue : float or array
p-value based on chi-square distribution
bpvalue : (optionsal), float or array
test statistic for Box-Pierce test
bppvalue : (optional), float or array
p-value based for Box-Pierce test on chi-square distribution
Notes
-----
Ljung-Box and Box-Pierce statistic differ in their scaling of the
autocorrelation function. Ljung-Box test is reported to have better
small sample properties.
TODO: could be extended to work with more than one series
1d or nd ? axis ? ravel ?
needs more testing
''Verification''
Looks correctly sized in Monte Carlo studies.
not yet compared to verified values
Examples
--------
see example script
References
----------
Greene
Wikipedia
"""
x = np.asarray(x)
nobs = x.shape[0]
if lags is None:
lags = lrange(1, min((nobs // 2 - 2), 40) + 1)
elif isinstance(lags, (int, long)):
lags = lrange(1, lags + 1)
maxlag = lags[-1]
lags = np.asarray(lags)
acfx = acf(x, nlags=maxlag) # normalize by nobs not (nobs-nlags)
# SS: unbiased=False is default now
acf2norm = acfx[1 : maxlag + 1] ** 2 / (nobs - np.arange(1, maxlag + 1))
qljungbox = nobs * (nobs + 2) * np.cumsum(acf2norm)[lags - 1]
pval = stats.chi2.sf(qljungbox, lags)
if not boxpierce:
return qljungbox, pval
else:
qboxpierce = nobs * np.cumsum(acfx[1 : maxlag + 1] ** 2)[lags - 1]
pvalbp = stats.chi2.sf(qboxpierce, lags)
return qljungbox, pval, qboxpierce, pvalbp
|
https://github.com/statsmodels/statsmodels/issues/3229
|
from statsmodels.stats import diagnostic as diag
diag.acorr_ljungbox(np.random.random(50))[0].shape
(40,)
diag.acorr_ljungbox(np.random.random(20), lags=5)
(array([ 0.36718151, 1.02009595, 1.23734092, 3.75338034, 4.35387236]), array([ 0.54454461, 0.60046677, 0.74406305, 0.44040973, 0.49966951]))
diag.acorr_ljungbox(np.random.random(20))
Traceback (most recent call last):
File "<pyshell#543>", line 1, in <module>
diag.acorr_ljungbox(np.random.random(20))
File "m:\josef_new\eclipse_ws\statsmodels\statsmodels_py34_pr\statsmodels\sandbox\stats\diagnostic.py", line 295, in acorr_ljungbox
acf2norm = acfx[1:maxlag+1]**2 / (nobs - np.arange(1,maxlag+1))
ValueError: operands could not be broadcast together with shapes (19,) (40,)
|
ValueError
|
def __getstate__(self):
# remove unpicklable methods
mle_settings = getattr(self, "mle_settings", None)
if mle_settings is not None:
if "callback" in mle_settings:
mle_settings["callback"] = None
if "cov_params_func" in mle_settings:
mle_settings["cov_params_func"] = None
return self.__dict__
|
def __getstate__(self):
try:
# remove unpicklable callback
self.mle_settings["callback"] = None
except (AttributeError, KeyError):
pass
return self.__dict__
|
https://github.com/statsmodels/statsmodels/issues/2685
|
Traceback (most recent call last):
File "statsmodels/base/tests/test_shrink_pickle.py", line 290, in <module>
tt.test_remove_data_pickle()
File "statsmodels/base/tests/test_shrink_pickle.py", line 68, in test_remove_data_pickle
res, l = check_pickle(results._results)
File "statsmodels/base/tests/test_shrink_pickle.py", line 27, in check_pickle
cPickle.dump(obj, fh, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.PicklingError: Can't pickle <type 'instancemethod'>: attribute lookup __builtin__.instancemethod failed
|
cPickle.PicklingError
|
def impute_pmm(self, vname):
"""
Use predictive mean matching to impute missing values.
Notes
-----
The `perturb_params` method must be called first to define the
model.
"""
k_pmm = self.k_pmm
endog_obs, exog_obs, exog_miss, predict_obs_kwds, predict_miss_kwds = (
self.get_split_data(vname)
)
# Predict imputed variable for both missing and non-missing
# observations
model = self.models[vname]
pendog_obs = model.predict(self.params[vname], exog_obs, **predict_obs_kwds)
pendog_miss = model.predict(self.params[vname], exog_miss, **predict_miss_kwds)
pendog_obs = self._get_predicted(pendog_obs)
pendog_miss = self._get_predicted(pendog_miss)
# Jointly sort the observed and predicted endog values for the
# cases with observed values.
ii = np.argsort(pendog_obs)
endog_obs = endog_obs[ii]
pendog_obs = pendog_obs[ii]
# Find the closest match to the predicted endog values for
# cases with missing endog values.
ix = np.searchsorted(pendog_obs, pendog_miss)
# Get the indices for the closest k_pmm values on
# either side of the closest index.
ixm = ix[:, None] + np.arange(-k_pmm, k_pmm)[None, :]
# Account for boundary effects
msk = np.nonzero((ixm < 0) | (ixm > len(endog_obs) - 1))
ixm = np.clip(ixm, 0, len(endog_obs) - 1)
# Get the distances
dx = pendog_miss[:, None] - pendog_obs[ixm]
dx = np.abs(dx)
dx[msk] = np.inf
# Closest positions in ix, row-wise.
dxi = np.argsort(dx, 1)[:, 0:k_pmm]
# Choose a column for each row.
ir = np.random.randint(0, k_pmm, len(pendog_miss))
# Unwind the indices
jj = np.arange(dxi.shape[0])
ix = dxi[[jj, ir]]
iz = ixm[[jj, ix]]
imputed_miss = np.array(endog_obs[iz]).squeeze()
self._store_changes(vname, imputed_miss)
|
def impute_pmm(self, vname):
"""
Use predictive mean matching to impute missing values.
Notes
-----
The `perturb_params` method must be called first to define the
model.
"""
k_pmm = self.k_pmm
endog_obs, exog_obs, exog_miss, predict_obs_kwds, predict_miss_kwds = (
self.get_split_data(vname)
)
# Predict imputed variable for both missing and non-missing
# observations
model = self.models[vname]
pendog_obs = model.predict(self.params[vname], exog_obs, **predict_obs_kwds)
pendog_miss = model.predict(self.params[vname], exog_miss, **predict_miss_kwds)
pendog_obs = self._get_predicted(pendog_obs)
pendog_miss = self._get_predicted(pendog_miss)
# Jointly sort the observed and predicted endog values for the
# cases with observed values.
ii = np.argsort(pendog_obs)
endog_obs = endog_obs[ii]
pendog_obs = pendog_obs[ii]
# Find the closest match to the predicted endog values for
# cases with missing endog values.
ix = np.searchsorted(pendog_obs, pendog_miss)
# Get the indices for the closest k_pmm values on
# either side of the closest index.
ixm = ix[:, None] + np.arange(-k_pmm, k_pmm)[None, :]
# Account for boundary effects
msk = np.nonzero((ixm < 0) | (ixm > len(endog_obs) - 1))
ixm = np.clip(ixm, 0, len(endog_obs) - 1)
# Get the distances
dx = pendog_miss[:, None] - pendog_obs[ixm]
dx = np.abs(dx)
dx[msk] = np.inf
# Closest positions in ix, row-wise.
dxi = np.argsort(dx, 1)[:, 0:k_pmm]
# Choose a column for each row.
ir = np.random.randint(0, k_pmm, len(pendog_miss))
# Unwind the indices
jj = np.arange(dxi.shape[0])
ix = dxi[[jj, ir]]
iz = ixm[[jj, ix]]
imputed_miss = np.array(endog_obs[iz])
self._store_changes(vname, imputed_miss)
|
https://github.com/statsmodels/statsmodels/issues/3754
|
======================================================================
ERROR: statsmodels.imputation.tests.test_mice.TestMICE.test_MICE
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python3.6/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python3.6/site-packages/statsmodels-0.8.0-py3.6-linux-x86_64.egg/statsmodels/imputation/tests/test_mice.py", line 287, in test_MICE
result = mi.fit(1, 3)
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python3.6/site-packages/statsmodels-0.8.0-py3.6-linux-x86_64.egg/statsmodels/imputation/mice.py", line 1228, in fit
self.data.update_all(n_burnin)
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python3.6/site-packages/statsmodels-0.8.0-py3.6-linux-x86_64.egg/statsmodels/imputation/mice.py", line 415, in update_all
self.update(vname)
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python3.6/site-packages/statsmodels-0.8.0-py3.6-linux-x86_64.egg/statsmodels/imputation/mice.py", line 1005, in update
self.impute(vname)
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python3.6/site-packages/statsmodels-0.8.0-py3.6-linux-x86_64.egg/statsmodels/imputation/mice.py", line 988, in impute
self.impute_pmm(vname)
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python3.6/site-packages/statsmodels-0.8.0-py3.6-linux-x86_64.egg/statsmodels/imputation/mice.py", line 1080, in impute_pmm
self._store_changes(vname, imputed_miss)
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python3.6/site-packages/statsmodels-0.8.0-py3.6-linux-x86_64.egg/statsmodels/imputation/mice.py", line 395, in _store_changes
self.data[col].iloc[ix] = vals
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python3.6/site-packages/pandas/core/indexing.py", line 179, in __setitem__
self._setitem_with_indexer(indexer, value)
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python3.6/site-packages/pandas/core/indexing.py", line 619, in _setitem_with_indexer
value=value)
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python3.6/site-packages/pandas/core/internals.py", line 3203, in setitem
return self.apply('setitem', **kwargs)
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python3.6/site-packages/pandas/core/internals.py", line 3091, in apply
applied = getattr(b, f)(**kwargs)
File "/home/travis/miniconda2/envs/statsmodels-test/lib/python3.6/site-packages/pandas/core/internals.py", line 758, in setitem
values[indexer] = value
ValueError: shape mismatch: value array of shape (5,1) could not be broadcast to indexing result of shape (5,)
|
ValueError
|
def plot_distribution(self, ax=None, exog_values=None):
"""
Plot the fitted probabilities of endog in an nominal model,
for specifed values of the predictors.
Parameters
----------
ax : Matplotlib axes instance
An axes on which to draw the graph. If None, new
figure and axes objects are created
exog_values : array-like
A list of dictionaries, with each dictionary mapping
variable names to values at which the variable is held
fixed. The values P(endog=y | exog) are plotted for all
possible values of y, at the given exog value. Variables
not included in a dictionary are held fixed at the mean
value.
Example:
--------
We have a model with covariates 'age' and 'sex', and wish to
plot the probabilities P(endog=y | exog) for males (sex=0) and
for females (sex=1), as separate paths on the plot. Since
'age' is not included below in the map, it is held fixed at
its mean value.
>>> ex = [{"sex": 1}, {"sex": 0}]
>>> rslt.distribution_plot(exog_values=ex)
"""
from statsmodels.graphics import utils as gutils
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# If no covariate patterns are specified, create one with all
# variables set to their mean values.
if exog_values is None:
exog_values = [
{},
]
link = self.model.family.link.inverse
ncut = self.model.family.ncut
k = int(self.model.exog.shape[1] / ncut)
exog_means = self.model.exog.mean(0)[0:k]
exog_names = self.model.exog_names[0:k]
exog_names = [x.split("[")[0] for x in exog_names]
params = np.reshape(self.params, (ncut, len(self.params) // ncut))
for ev in exog_values:
exog = exog_means.copy()
for k in ev.keys():
if k not in exog_names:
raise ValueError("%s is not a variable in the model" % k)
ii = exog_names.index(k)
exog[ii] = ev[k]
lpr = np.dot(params, exog)
pr = link(lpr)
pr = np.r_[pr, 1 - pr.sum()]
ax.plot(self.model.endog_values, pr, "o-")
ax.set_xlabel("Response value")
ax.set_ylabel("Probability")
ax.set_xticks(self.model.endog_values)
ax.set_xticklabels(self.model.endog_values)
ax.set_ylim(0, 1)
return fig
|
def plot_distribution(self, ax=None, exog_values=None):
"""
Plot the fitted probabilities of endog in an nominal model,
for specifed values of the predictors.
Parameters
----------
ax : Matplotlib axes instance
An axes on which to draw the graph. If None, new
figure and axes objects are created
exog_values : array-like
A list of dictionaries, with each dictionary mapping
variable names to values at which the variable is held
fixed. The values P(endog=y | exog) are plotted for all
possible values of y, at the given exog value. Variables
not included in a dictionary are held fixed at the mean
value.
Example:
--------
We have a model with covariates 'age' and 'sex', and wish to
plot the probabilities P(endog=y | exog) for males (sex=0) and
for females (sex=1), as separate paths on the plot. Since
'age' is not included below in the map, it is held fixed at
its mean value.
>>> ex = [{"sex": 1}, {"sex": 0}]
>>> rslt.distribution_plot(exog_values=ex)
"""
from statsmodels.graphics import utils as gutils
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# If no covariate patterns are specified, create one with all
# variables set to their mean values.
if exog_values is None:
exog_values = [
{},
]
link = self.model.family.link.inverse
ncut = self.model.family.ncut
k = int(self.model.exog.shape[1] / ncut)
exog_means = self.model.exog.mean(0)[0:k]
exog_names = self.model.exog_names[0:k]
exog_names = [x.split("[")[0] for x in exog_names]
params = np.reshape(self.params, (ncut, len(self.params) / ncut))
for ev in exog_values:
exog = exog_means.copy()
for k in ev.keys():
if k not in exog_names:
raise ValueError("%s is not a variable in the model" % k)
ii = exog_names.index(k)
exog[ii] = ev[k]
lpr = np.dot(params, exog)
pr = link(lpr)
pr = np.r_[pr, 1 - pr.sum()]
ax.plot(self.model.endog_values, pr, "o-")
ax.set_xlabel("Response value")
ax.set_ylabel("Probability")
ax.set_xticks(self.model.endog_values)
ax.set_xticklabels(self.model.endog_values)
ax.set_ylim(0, 1)
return fig
|
https://github.com/statsmodels/statsmodels/issues/3332
|
======================================================================
ERROR: statsmodels.genmod.tests.test_gee.TestGEE.test_nominal_plot
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/usr/lib/python2.7/dist-packages/numpy/testing/decorators.py", line 147, in skipper_func
return f(*args, **kwargs)
File "/<<BUILDDIR>>/statsmodels-0.8.0~rc1+git59-gef47cd9/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/genmod/tests/test_gee.py", line 187, in test_nominal_plot
fig = result.plot_distribution()
File "/<<BUILDDIR>>/statsmodels-0.8.0~rc1+git59-gef47cd9/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/genmod/generalized_estimating_equations.py", line 2395, in plot_distribution
(ncut, len(self.params) / ncut))
File "/usr/lib/python2.7/dist-packages/numpy/core/fromnumeric.py", line 232, in reshape
return _wrapfunc(a, 'reshape', newshape, order=order)
File "/usr/lib/python2.7/dist-packages/numpy/core/fromnumeric.py", line 67, in _wrapfunc
return _wrapit(obj, method, *args, **kwds)
File "/usr/lib/python2.7/dist-packages/numpy/core/fromnumeric.py", line 47, in _wrapit
result = getattr(asarray(obj), method)(*args, **kwds)
TypeError: 'float' object cannot be interpreted as an index
|
TypeError
|
def predict(self, exog=None, transform=True, *args, **kwargs):
"""
Call self.model.predict with self.params as the first argument.
Parameters
----------
exog : array-like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
args, kwargs :
Some models can take additional arguments or keywords, see the
predict method of the model for the details.
Returns
-------
prediction : ndarray, pandas.Series or pandas.DataFrame
See self.model.predict
"""
import pandas as pd
exog_index = exog.index if _is_using_pandas(exog, None) else None
if transform and hasattr(self.model, "formula") and exog is not None:
from patsy import dmatrix
exog = pd.DataFrame(exog) # user may pass series, if one predictor
if exog_index is None: # user passed in a dictionary
exog_index = exog.index
exog = dmatrix(
self.model.data.design_info.builder, exog, return_type="dataframe"
)
if len(exog) < len(exog_index):
# missing values, rows have been dropped
if exog_index is not None:
exog = exog.reindex(exog_index)
else:
import warnings
warnings.warn("nan rows have been dropped", ValueWarning)
if exog is not None:
exog = np.asarray(exog)
if exog.ndim == 1 and (
self.model.exog.ndim == 1 or self.model.exog.shape[1] == 1
):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
predict_results = self.model.predict(self.params, exog, *args, **kwargs)
if exog_index is not None and not hasattr(predict_results, "predicted_values"):
if predict_results.ndim == 1:
return pd.Series(predict_results, index=exog_index)
else:
return pd.DataFrame(predict_results, index=exog_index)
else:
return predict_results
|
def predict(self, exog=None, transform=True, *args, **kwargs):
"""
Call self.model.predict with self.params as the first argument.
Parameters
----------
exog : array-like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
args, kwargs :
Some models can take additional arguments or keywords, see the
predict method of the model for the details.
Returns
-------
prediction : ndarray, pandas.Series or pandas.DataFrame
See self.model.predict
"""
import pandas as pd
exog_index = exog.index if _is_using_pandas(exog, None) else None
if transform and hasattr(self.model, "formula") and exog is not None:
from patsy import dmatrix
exog = dmatrix(
self.model.data.design_info.builder, exog, return_type="dataframe"
)
if len(exog) < len(exog_index):
# missing values, rows have been dropped
if exog_index is not None:
exog = exog.reindex(exog_index)
else:
import warnings
warnings.warn("nan rows have been dropped", ValueWarning)
if exog is not None:
exog = np.asarray(exog)
if exog.ndim == 1 and (
self.model.exog.ndim == 1 or self.model.exog.shape[1] == 1
):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
predict_results = self.model.predict(self.params, exog, *args, **kwargs)
if exog_index is not None and not hasattr(predict_results, "predicted_values"):
if predict_results.ndim == 1:
return pd.Series(predict_results, index=exog_index)
else:
return pd.DataFrame(predict_results, index=exog_index)
else:
return predict_results
|
https://github.com/statsmodels/statsmodels/issues/3182
|
res.predict({'temp': x_p})
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "m:\josef_new\eclipse_ws\statsmodels\statsmodels_py34_pr\statsmodels\base\model.py", line 774, in predict
if len(exog) < len(exog_index):
TypeError: object of type 'NoneType' has no len()
|
TypeError
|
def predict(self, exog=None, transform=True, *args, **kwargs):
"""
Call self.model.predict with self.params as the first argument.
Parameters
----------
exog : array-like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
args, kwargs :
Some models can take additional arguments or keywords, see the
predict method of the model for the details.
Returns
-------
prediction : ndarray, pandas.Series or pandas.DataFrame
See self.model.predict
"""
import pandas as pd
exog_index = exog.index if _is_using_pandas(exog, None) else None
if transform and hasattr(self.model, "formula") and exog is not None:
from patsy import dmatrix
exog = pd.DataFrame(exog) # user may pass series, if one predictor
if exog_index is None: # user passed in a dictionary
exog_index = exog.index
exog = dmatrix(
self.model.data.design_info.builder, exog, return_type="dataframe"
)
if len(exog) < len(exog_index):
# missing values, rows have been dropped
if exog_index is not None:
exog = exog.reindex(exog_index)
else:
import warnings
warnings.warn("nan rows have been dropped", ValueWarning)
if exog is not None:
exog = np.asarray(exog)
if exog.ndim == 1 and (
self.model.exog.ndim == 1 or self.model.exog.shape[1] == 1
):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
predict_results = self.model.predict(self.params, exog, *args, **kwargs)
if exog_index is not None and not hasattr(predict_results, "predicted_values"):
if predict_results.ndim == 1:
return pd.Series(predict_results, index=exog_index)
else:
return pd.DataFrame(predict_results, index=exog_index)
else:
return predict_results
|
def predict(self, exog=None, transform=True, *args, **kwargs):
"""
Call self.model.predict with self.params as the first argument.
Parameters
----------
exog : array-like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
args, kwargs :
Some models can take additional arguments or keywords, see the
predict method of the model for the details.
Returns
-------
prediction : ndarray, pandas.Series or pandas.DataFrame
See self.model.predict
"""
import pandas as pd
exog_index = exog.index if _is_using_pandas(exog, None) else None
if transform and hasattr(self.model, "formula") and exog is not None:
from patsy import dmatrix
exog = pd.DataFrame(exog) # user may pass series, if one predictor
exog = dmatrix(
self.model.data.design_info.builder, exog, return_type="dataframe"
)
if len(exog) < len(exog_index):
# missing values, rows have been dropped
if exog_index is not None:
exog = exog.reindex(exog_index)
else:
import warnings
warnings.warn("nan rows have been dropped", ValueWarning)
if exog is not None:
exog = np.asarray(exog)
if exog.ndim == 1 and (
self.model.exog.ndim == 1 or self.model.exog.shape[1] == 1
):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
predict_results = self.model.predict(self.params, exog, *args, **kwargs)
if exog_index is not None and not hasattr(predict_results, "predicted_values"):
if predict_results.ndim == 1:
return pd.Series(predict_results, index=exog_index)
else:
return pd.DataFrame(predict_results, index=exog_index)
else:
return predict_results
|
https://github.com/statsmodels/statsmodels/issues/3182
|
res.predict({'temp': x_p})
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "m:\josef_new\eclipse_ws\statsmodels\statsmodels_py34_pr\statsmodels\base\model.py", line 774, in predict
if len(exog) < len(exog_index):
TypeError: object of type 'NoneType' has no len()
|
TypeError
|
def _get_index_loc(self, key, base_index=None):
"""
Get the location of a specific key in an index
Parameters
----------
key : label
The key for which to find the location
base_index : pd.Index, optional
Optionally the base index to search. If None, the model's index is
searched.
Returns
-------
loc : int
The location of the key
index : pd.Index
The index including the key; this is a copy of the original index
unless the index had to be expanded to accomodate `key`.
index_was_expanded : bool
Whether or not the index was expanded to accomodate `key`.
Notes
-----
If `key` is past the end of of the given index, and the index is either
an Int64Index or a date index, this function extends the index up to
and including key, and then returns the location in the new index.
"""
if base_index is None:
base_index = self._index
index = base_index
date_index = isinstance(base_index, (PeriodIndex, DatetimeIndex))
index_class = type(base_index)
nobs = len(index)
# Special handling for Int64Index
if (
isinstance(index, Int64Index)
and not date_index
and isinstance(key, (int, long, np.integer))
):
# Negative indices (that lie in the Index)
if key < 0 and -key <= nobs:
key = nobs + key
# Out-of-sample (note that we include key itself in the new index)
elif key > base_index[-1]:
index = Int64Index(np.arange(base_index[0], int(key + 1)))
# Special handling for date indexes
if date_index:
# Integer key (i.e. already given a location)
if isinstance(key, (int, long, np.integer)):
# Negative indices (that lie in the Index)
if key < 0 and -key < nobs:
key = index[nobs + key]
# Out-of-sample (note that we include key itself in the new
# index)
elif key > len(base_index) - 1:
index = index_class(
start=base_index[0], periods=int(key + 1), freq=base_index.freq
)
key = index[-1]
else:
key = index[key]
# Other key types (i.e. string date or some datetime-like object)
else:
# Covert the key to the appropriate date-like object
if index_class is PeriodIndex:
date_key = Period(key, freq=base_index.freq)
else:
date_key = Timestamp(key)
# Out-of-sample
if date_key > base_index[-1]:
# First create an index that may not always include `key`
index = index_class(
start=base_index[0], end=date_key, freq=base_index.freq
)
# Now make sure we include `key`
if not index[-1] == date_key:
index = index_class(
start=base_index[0],
periods=len(index) + 1,
freq=base_index.freq,
)
# Get the location (note that get_loc will throw a KeyError if key is
# invalid)
loc = index.get_loc(key)
# Check if we now have a modified index
index_was_expanded = index is not base_index
# (Never return the actual index object)
if not index_was_expanded:
index = index.copy()
# Return the index through the end of the loc / slice
if isinstance(loc, slice):
end = loc.stop
else:
end = loc
return loc, index[: end + 1], index_was_expanded
|
def _get_index_loc(self, key, base_index=None):
"""
Get the location of a specific key in an index
Parameters
----------
key : label
The key for which to find the location
base_index : pd.Index, optional
Optionally the base index to search. If None, the model's index is
searched.
Returns
-------
loc : int
The location of the key
index : pd.Index
The index including the key; this is a copy of the original index
unless the index had to be expanded to accomodate `key`.
index_was_expanded : bool
Whether or not the index was expanded to accomodate `key`.
Notes
-----
If `key` is past the end of of the given index, and the index is either
an Int64Index or a date index, this function extends the index up to
and including key, and then returns the location in the new index.
"""
if base_index is None:
base_index = self._index
index = base_index
date_index = isinstance(base_index, (PeriodIndex, DatetimeIndex))
index_class = type(base_index)
nobs = len(index)
# Special handling for Int64Index
if (
isinstance(index, Int64Index)
and not date_index
and isinstance(key, (int, long, np.integer))
):
# Negative indices (that lie in the Index)
if key < 0 and -key <= nobs:
key = nobs + key
# Out-of-sample (note that we include key itself in the new index)
elif key > base_index[-1]:
index = Int64Index(np.arange(base_index[0], key + 1))
# Special handling for date indexes
if date_index:
# Integer key (i.e. already given a location)
if isinstance(key, (int, long, np.integer)):
# Negative indices (that lie in the Index)
if key < 0 and -key < nobs:
key = index[nobs + key]
# Out-of-sample (note that we include key itself in the new
# index)
elif key > len(base_index) - 1:
index = index_class(
start=base_index[0], periods=key + 1, freq=base_index.freq
)
key = index[-1]
else:
key = index[key]
# Other key types (i.e. string date or some datetime-like object)
else:
# Covert the key to the appropriate date-like object
if index_class is PeriodIndex:
date_key = Period(key, freq=base_index.freq)
else:
date_key = Timestamp(key)
# Out-of-sample
if date_key > base_index[-1]:
# First create an index that may not always include `key`
index = index_class(
start=base_index[0], end=date_key, freq=base_index.freq
)
# Now make sure we include `key`
if not index[-1] == date_key:
index = index_class(
start=base_index[0],
periods=len(index) + 1,
freq=base_index.freq,
)
# Get the location (note that get_loc will throw a KeyError if key is
# invalid)
loc = index.get_loc(key)
# Check if we now have a modified index
index_was_expanded = index is not base_index
# (Never return the actual index object)
if not index_was_expanded:
index = index.copy()
# Return the index through the end of the loc / slice
if isinstance(loc, slice):
end = loc.stop
else:
end = loc
return loc, index[: end + 1], index_was_expanded
|
https://github.com/statsmodels/statsmodels/issues/3349
|
======================================================================
ERROR: statsmodels.tsa.statespace.tests.test_sarimax.test_misc_exog
----------------------------------------------------------------------
Traceback (most recent call last):
File "C:\Py\lib\site-packages\nose\case.py", line 197, in runTest
self.test(*self.arg)
File "C:\projects\statsmodels\statsmodels\tsa\statespace\tests\test_sarimax.py", line 1982, in test_misc_exog
res.forecast(steps=1, exog=oos_exog)
File "C:\projects\statsmodels\statsmodels\base\wrapper.py", line 95, in wrapper
obj = data.wrap_output(func(results, *args, **kwargs), how)
File "C:\projects\statsmodels\statsmodels\tsa\statespace\mlemodel.py", line 2401, in forecast
return self.predict(start=self.nobs, end=end, **kwargs)
File "C:\projects\statsmodels\statsmodels\tsa\statespace\mlemodel.py", line 2374, in predict
prediction_results = self.get_prediction(start, end, dynamic, **kwargs)
File "C:\projects\statsmodels\statsmodels\tsa\statespace\sarimax.py", line 1889, in get_prediction
self.model._get_prediction_index(start, end, index, silent=True))
File "C:\projects\statsmodels\statsmodels\tsa\base\tsa_model.py", line 447, in _get_prediction_index
start, start_index, start_oos = self._get_index_label_loc(start)
File "C:\projects\statsmodels\statsmodels\tsa\base\tsa_model.py", line 383, in _get_index_label_loc
self._get_index_loc(key, base_index))
File "C:\projects\statsmodels\statsmodels\tsa\base\tsa_model.py", line 308, in _get_index_loc
freq=base_index.freq)
File "C:\Py\lib\site-packages\pandas\util\decorators.py", line 91, in wrapper
return func(*args, **kwargs)
File "C:\Py\lib\site-packages\pandas\tseries\index.py", line 301, in __new__
ambiguous=ambiguous)
File "C:\Py\lib\site-packages\pandas\tseries\index.py", line 506, in _generate
index = _generate_regular_range(start, end, periods, offset)
File "C:\Py\lib\site-packages\pandas\tseries\index.py", line 1977, in _generate_regular_range
dates = list(xdr)
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 2894, in generate_range
end = start + (periods - 1) * offset
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 395, in __radd__
return self.__add__(other)
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 390, in __add__
return self.apply(other)
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 70, in wrapper
result = func(self, other)
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 2041, in apply
other = other + relativedelta(months=3 * n - monthsSince, day=1)
File "C:\Py\lib\site-packages\dateutil\relativedelta.py", line 383, in __radd__
return self.__add__(other)
File "C:\Py\lib\site-packages\dateutil\relativedelta.py", line 365, in __add__
ret = (other.replace(**repl)
File "pandas\tslib.pyx", line 697, in pandas.tslib.Timestamp.replace (pandas\tslib.c:14489)
File "pandas\tslib.pyx", line 691, in pandas.tslib.Timestamp.replace.validate (pandas\tslib.c:14143)
ValueError: value must be an integer, received <type 'long'> for year
======================================================================
ERROR: statsmodels.tsa.statespace.tests.test_structural.test_misc_exog
----------------------------------------------------------------------
Traceback (most recent call last):
File "C:\Py\lib\site-packages\nose\case.py", line 197, in runTest
self.test(*self.arg)
File "C:\projects\statsmodels\statsmodels\tsa\statespace\tests\test_structural.py", line 334, in test_misc_exog
res.forecast(steps=1, exog=oos_exog)
File "C:\projects\statsmodels\statsmodels\base\wrapper.py", line 95, in wrapper
obj = data.wrap_output(func(results, *args, **kwargs), how)
File "C:\projects\statsmodels\statsmodels\tsa\statespace\mlemodel.py", line 2401, in forecast
return self.predict(start=self.nobs, end=end, **kwargs)
File "C:\projects\statsmodels\statsmodels\tsa\statespace\mlemodel.py", line 2374, in predict
prediction_results = self.get_prediction(start, end, dynamic, **kwargs)
File "C:\projects\statsmodels\statsmodels\tsa\statespace\structural.py", line 1486, in get_prediction
self.model._get_prediction_index(start, end, index, silent=True))
File "C:\projects\statsmodels\statsmodels\tsa\base\tsa_model.py", line 447, in _get_prediction_index
start, start_index, start_oos = self._get_index_label_loc(start)
File "C:\projects\statsmodels\statsmodels\tsa\base\tsa_model.py", line 383, in _get_index_label_loc
self._get_index_loc(key, base_index))
File "C:\projects\statsmodels\statsmodels\tsa\base\tsa_model.py", line 308, in _get_index_loc
freq=base_index.freq)
File "C:\Py\lib\site-packages\pandas\util\decorators.py", line 91, in wrapper
return func(*args, **kwargs)
File "C:\Py\lib\site-packages\pandas\tseries\index.py", line 301, in __new__
ambiguous=ambiguous)
File "C:\Py\lib\site-packages\pandas\tseries\index.py", line 506, in _generate
index = _generate_regular_range(start, end, periods, offset)
File "C:\Py\lib\site-packages\pandas\tseries\index.py", line 1977, in _generate_regular_range
dates = list(xdr)
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 2894, in generate_range
end = start + (periods - 1) * offset
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 395, in __radd__
return self.__add__(other)
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 390, in __add__
return self.apply(other)
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 70, in wrapper
result = func(self, other)
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 2041, in apply
other = other + relativedelta(months=3 * n - monthsSince, day=1)
File "C:\Py\lib\site-packages\dateutil\relativedelta.py", line 383, in __radd__
return self.__add__(other)
File "C:\Py\lib\site-packages\dateutil\relativedelta.py", line 365, in __add__
ret = (other.replace(**repl)
File "pandas\tslib.pyx", line 697, in pandas.tslib.Timestamp.replace (pandas\tslib.c:14489)
File "pandas\tslib.pyx", line 691, in pandas.tslib.Timestamp.replace.validate (pandas\tslib.c:14143)
ValueError: value must be an integer, received <type 'long'> for year
======================================================================
ERROR: statsmodels.tsa.statespace.tests.test_varmax.TestVAR_exog.test_forecast
----------------------------------------------------------------------
Traceback (most recent call last):
File "C:\Py\lib\site-packages\nose\case.py", line 197, in runTest
self.test(*self.arg)
File "C:\projects\statsmodels\statsmodels\tsa\statespace\tests\test_varmax.py", line 437, in test_forecast
desired = self.results.forecast(steps=16, exog=exog)
File "C:\projects\statsmodels\statsmodels\base\wrapper.py", line 95, in wrapper
obj = data.wrap_output(func(results, *args, **kwargs), how)
File "C:\projects\statsmodels\statsmodels\tsa\statespace\mlemodel.py", line 2401, in forecast
return self.predict(start=self.nobs, end=end, **kwargs)
File "C:\projects\statsmodels\statsmodels\tsa\statespace\mlemodel.py", line 2374, in predict
prediction_results = self.get_prediction(start, end, dynamic, **kwargs)
File "C:\projects\statsmodels\statsmodels\tsa\statespace\varmax.py", line 781, in get_prediction
self.model._get_prediction_index(start, end, index, silent=True))
File "C:\projects\statsmodels\statsmodels\tsa\base\tsa_model.py", line 447, in _get_prediction_index
start, start_index, start_oos = self._get_index_label_loc(start)
File "C:\projects\statsmodels\statsmodels\tsa\base\tsa_model.py", line 383, in _get_index_label_loc
self._get_index_loc(key, base_index))
File "C:\projects\statsmodels\statsmodels\tsa\base\tsa_model.py", line 308, in _get_index_loc
freq=base_index.freq)
File "C:\Py\lib\site-packages\pandas\util\decorators.py", line 91, in wrapper
return func(*args, **kwargs)
File "C:\Py\lib\site-packages\pandas\tseries\index.py", line 301, in __new__
ambiguous=ambiguous)
File "C:\Py\lib\site-packages\pandas\tseries\index.py", line 506, in _generate
index = _generate_regular_range(start, end, periods, offset)
File "C:\Py\lib\site-packages\pandas\tseries\index.py", line 1977, in _generate_regular_range
dates = list(xdr)
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 2894, in generate_range
end = start + (periods - 1) * offset
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 395, in __radd__
return self.__add__(other)
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 390, in __add__
return self.apply(other)
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 70, in wrapper
result = func(self, other)
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 2041, in apply
other = other + relativedelta(months=3 * n - monthsSince, day=1)
File "C:\Py\lib\site-packages\dateutil\relativedelta.py", line 383, in __radd__
return self.__add__(other)
File "C:\Py\lib\site-packages\dateutil\relativedelta.py", line 365, in __add__
ret = (other.replace(**repl)
File "pandas\tslib.pyx", line 697, in pandas.tslib.Timestamp.replace (pandas\tslib.c:14489)
File "pandas\tslib.pyx", line 691, in pandas.tslib.Timestamp.replace.validate (pandas\tslib.c:14143)
ValueError: value must be an integer, received <type 'long'> for year
======================================================================
ERROR: statsmodels.tsa.statespace.tests.test_varmax.TestVAR_exog2.test_forecast
----------------------------------------------------------------------
Traceback (most recent call last):
File "C:\Py\lib\site-packages\nose\case.py", line 197, in runTest
self.test(*self.arg)
File "C:\projects\statsmodels\statsmodels\tsa\statespace\tests\test_varmax.py", line 528, in test_forecast
desired = self.results.forecast(steps=16, exog=exog)
File "C:\projects\statsmodels\statsmodels\base\wrapper.py", line 95, in wrapper
obj = data.wrap_output(func(results, *args, **kwargs), how)
File "C:\projects\statsmodels\statsmodels\tsa\statespace\mlemodel.py", line 2401, in forecast
return self.predict(start=self.nobs, end=end, **kwargs)
File "C:\projects\statsmodels\statsmodels\tsa\statespace\mlemodel.py", line 2374, in predict
prediction_results = self.get_prediction(start, end, dynamic, **kwargs)
File "C:\projects\statsmodels\statsmodels\tsa\statespace\varmax.py", line 781, in get_prediction
self.model._get_prediction_index(start, end, index, silent=True))
File "C:\projects\statsmodels\statsmodels\tsa\base\tsa_model.py", line 447, in _get_prediction_index
start, start_index, start_oos = self._get_index_label_loc(start)
File "C:\projects\statsmodels\statsmodels\tsa\base\tsa_model.py", line 383, in _get_index_label_loc
self._get_index_loc(key, base_index))
File "C:\projects\statsmodels\statsmodels\tsa\base\tsa_model.py", line 308, in _get_index_loc
freq=base_index.freq)
File "C:\Py\lib\site-packages\pandas\util\decorators.py", line 91, in wrapper
return func(*args, **kwargs)
File "C:\Py\lib\site-packages\pandas\tseries\index.py", line 301, in __new__
ambiguous=ambiguous)
File "C:\Py\lib\site-packages\pandas\tseries\index.py", line 506, in _generate
index = _generate_regular_range(start, end, periods, offset)
File "C:\Py\lib\site-packages\pandas\tseries\index.py", line 1977, in _generate_regular_range
dates = list(xdr)
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 2894, in generate_range
end = start + (periods - 1) * offset
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 395, in __radd__
return self.__add__(other)
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 390, in __add__
return self.apply(other)
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 70, in wrapper
result = func(self, other)
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 2041, in apply
other = other + relativedelta(months=3 * n - monthsSince, day=1)
File "C:\Py\lib\site-packages\dateutil\relativedelta.py", line 383, in __radd__
return self.__add__(other)
File "C:\Py\lib\site-packages\dateutil\relativedelta.py", line 365, in __add__
ret = (other.replace(**repl)
File "pandas\tslib.pyx", line 697, in pandas.tslib.Timestamp.replace (pandas\tslib.c:14489)
File "pandas\tslib.pyx", line 691, in pandas.tslib.Timestamp.replace.validate (pandas\tslib.c:14143)
ValueError: value must be an integer, received <type 'long'> for year
======================================================================
ERROR: statsmodels.tsa.statespace.tests.test_varmax.test_misc_exog
----------------------------------------------------------------------
Traceback (most recent call last):
File "C:\Py\lib\site-packages\nose\case.py", line 197, in runTest
self.test(*self.arg)
File "C:\projects\statsmodels\statsmodels\tsa\statespace\tests\test_varmax.py", line 817, in test_misc_exog
res.forecast(steps=1, exog=oos_exog)
File "C:\projects\statsmodels\statsmodels\base\wrapper.py", line 95, in wrapper
obj = data.wrap_output(func(results, *args, **kwargs), how)
File "C:\projects\statsmodels\statsmodels\tsa\statespace\mlemodel.py", line 2401, in forecast
return self.predict(start=self.nobs, end=end, **kwargs)
File "C:\projects\statsmodels\statsmodels\tsa\statespace\mlemodel.py", line 2374, in predict
prediction_results = self.get_prediction(start, end, dynamic, **kwargs)
File "C:\projects\statsmodels\statsmodels\tsa\statespace\varmax.py", line 781, in get_prediction
self.model._get_prediction_index(start, end, index, silent=True))
File "C:\projects\statsmodels\statsmodels\tsa\base\tsa_model.py", line 447, in _get_prediction_index
start, start_index, start_oos = self._get_index_label_loc(start)
File "C:\projects\statsmodels\statsmodels\tsa\base\tsa_model.py", line 383, in _get_index_label_loc
self._get_index_loc(key, base_index))
File "C:\projects\statsmodels\statsmodels\tsa\base\tsa_model.py", line 308, in _get_index_loc
freq=base_index.freq)
File "C:\Py\lib\site-packages\pandas\util\decorators.py", line 91, in wrapper
return func(*args, **kwargs)
File "C:\Py\lib\site-packages\pandas\tseries\index.py", line 301, in __new__
ambiguous=ambiguous)
File "C:\Py\lib\site-packages\pandas\tseries\index.py", line 506, in _generate
index = _generate_regular_range(start, end, periods, offset)
File "C:\Py\lib\site-packages\pandas\tseries\index.py", line 1977, in _generate_regular_range
dates = list(xdr)
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 2894, in generate_range
end = start + (periods - 1) * offset
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 395, in __radd__
return self.__add__(other)
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 390, in __add__
return self.apply(other)
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 70, in wrapper
result = func(self, other)
File "C:\Py\lib\site-packages\pandas\tseries\offsets.py", line 2041, in apply
other = other + relativedelta(months=3 * n - monthsSince, day=1)
File "C:\Py\lib\site-packages\dateutil\relativedelta.py", line 383, in __radd__
return self.__add__(other)
File "C:\Py\lib\site-packages\dateutil\relativedelta.py", line 365, in __add__
ret = (other.replace(**repl)
File "pandas\tslib.pyx", line 697, in pandas.tslib.Timestamp.replace (pandas\tslib.c:14489)
File "pandas\tslib.pyx", line 691, in pandas.tslib.Timestamp.replace.validate (pandas\tslib.c:14143)
ValueError: value must be an integer, received <type 'long'> for year
|
ValueError
|
def fit(
self,
start_params=None,
method="newton",
maxiter=100,
full_output=True,
disp=True,
fargs=(),
callback=None,
retall=False,
skip_hessian=False,
**kwargs,
):
"""
Fit method for likelihood based models
Parameters
----------
start_params : array-like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
method : str, optional
The `method` determines which solver from `scipy.optimize`
is used, and it can be chosen from among the following strings:
- 'newton' for Newton-Raphson, 'nm' for Nelder-Mead
- 'bfgs' for Broyden-Fletcher-Goldfarb-Shanno (BFGS)
- 'lbfgs' for limited-memory BFGS with optional box constraints
- 'powell' for modified Powell's method
- 'cg' for conjugate gradient
- 'ncg' for Newton-conjugate gradient
- 'basinhopping' for global basin-hopping solver
The explicit arguments in `fit` are passed to the solver,
with the exception of the basin-hopping solver. Each
solver has several optional arguments that are not the same across
solvers. See the notes section below (or scipy.optimize) for the
available arguments and for the list of explicit arguments that the
basin-hopping solver supports.
maxiter : int, optional
The maximum number of iterations to perform.
full_output : bool, optional
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : bool, optional
Set to True to print convergence messages.
fargs : tuple, optional
Extra arguments passed to the likelihood function, i.e.,
loglike(x,*args)
callback : callable callback(xk), optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool, optional
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
skip_hessian : bool, optional
If False (default), then the negative inverse hessian is calculated
after the optimization. If True, then the hessian will not be
calculated. However, it will be available in methods that use the
hessian in the optimization (currently only with `"newton"`).
kwargs : keywords
All kwargs are passed to the chosen solver with one exception. The
following keyword controls what happens after the fit::
warn_convergence : bool, optional
If True, checks the model for the converged flag. If the
converged flag is False, a ConvergenceWarning is issued.
Notes
-----
The 'basinhopping' solver ignores `maxiter`, `retall`, `full_output`
explicit arguments.
Optional arguments for solvers (see returned Results.mle_settings)::
'newton'
tol : float
Relative error in params acceptable for convergence.
'nm' -- Nelder Mead
xtol : float
Relative error in params acceptable for convergence
ftol : float
Relative error in loglike(params) acceptable for
convergence
maxfun : int
Maximum number of function evaluations to make.
'bfgs'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.Inf is max, -np.Inf is min)
epsilon
If fprime is approximated, use this value for the step
size. Only relevant if LikelihoodModel.score is None.
'lbfgs'
m : int
This many terms are used for the Hessian approximation.
factr : float
A stop condition that is a variant of relative error.
pgtol : float
A stop condition that uses the projected gradient.
epsilon
If fprime is approximated, use this value for the step
size. Only relevant if LikelihoodModel.score is None.
maxfun : int
Maximum number of function evaluations to make.
bounds : sequence
(min, max) pairs for each element in x,
defining the bounds on that parameter.
Use None for one of min or max when there is no bound
in that direction.
'cg'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.Inf is max, -np.Inf is min)
epsilon : float
If fprime is approximated, use this value for the step
size. Can be scalar or vector. Only relevant if
Likelihoodmodel.score is None.
'ncg'
fhess_p : callable f'(x,*args)
Function which computes the Hessian of f times an arbitrary
vector, p. Should only be supplied if
LikelihoodModel.hessian is None.
avextol : float
Stop when the average relative error in the minimizer
falls below this amount.
epsilon : float or ndarray
If fhess is approximated, use this value for the step size.
Only relevant if Likelihoodmodel.hessian is None.
'powell'
xtol : float
Line-search error tolerance
ftol : float
Relative error in loglike(params) for acceptable for
convergence.
maxfun : int
Maximum number of function evaluations to make.
start_direc : ndarray
Initial direction set.
'basinhopping'
niter : integer
The number of basin hopping iterations.
niter_success : integer
Stop the run if the global minimum candidate remains the
same for this number of iterations.
T : float
The "temperature" parameter for the accept or reject
criterion. Higher "temperatures" mean that larger jumps
in function value will be accepted. For best results
`T` should be comparable to the separation (in function
value) between local minima.
stepsize : float
Initial step size for use in the random displacement.
interval : integer
The interval for how often to update the `stepsize`.
minimizer : dict
Extra keyword arguments to be passed to the minimizer
`scipy.optimize.minimize()`, for example 'method' - the
minimization method (e.g. 'L-BFGS-B'), or 'tol' - the
tolerance for termination. Other arguments are mapped from
explicit argument of `fit`:
- `args` <- `fargs`
- `jac` <- `score`
- `hess` <- `hess`
"""
Hinv = None # JP error if full_output=0, Hinv not defined
if start_params is None:
if hasattr(self, "start_params"):
start_params = self.start_params
elif self.exog is not None:
# fails for shape (K,)?
start_params = [0] * self.exog.shape[1]
else:
raise ValueError("If exog is None, then start_params should be specified")
# TODO: separate args from nonarg taking score and hessian, ie.,
# user-supplied and numerically evaluated estimate frprime doesn't take
# args in most (any?) of the optimize function
nobs = self.endog.shape[0]
f = lambda params, *args: -self.loglike(params, *args) / nobs
score = lambda params, *args: -self.score(params, *args) / nobs
try:
hess = lambda params, *args: -self.hessian(params, *args) / nobs
except:
hess = None
if method == "newton":
score = lambda params, *args: self.score(params, *args) / nobs
hess = lambda params, *args: self.hessian(params, *args) / nobs
# TODO: why are score and hess positive?
warn_convergence = kwargs.pop("warn_convergence", True)
optimizer = Optimizer()
xopt, retvals, optim_settings = optimizer._fit(
f,
score,
start_params,
fargs,
kwargs,
hessian=hess,
method=method,
disp=disp,
maxiter=maxiter,
callback=callback,
retall=retall,
full_output=full_output,
)
# NOTE: this is for fit_regularized and should be generalized
cov_params_func = kwargs.setdefault("cov_params_func", None)
if cov_params_func:
Hinv = cov_params_func(self, xopt, retvals)
elif method == "newton" and full_output:
Hinv = np.linalg.inv(-retvals["Hessian"]) / nobs
elif not skip_hessian:
H = -1 * self.hessian(xopt)
invertible = False
if np.all(np.isfinite(H)):
eigvals, eigvecs = np.linalg.eigh(H)
if np.min(eigvals) > 0:
invertible = True
if invertible:
Hinv = eigvecs.dot(np.diag(1.0 / eigvals)).dot(eigvecs.T)
Hinv = np.asfortranarray((Hinv + Hinv.T) / 2.0)
else:
from warnings import warn
warn(
"Inverting hessian failed, no bse or cov_params available",
HessianInversionWarning,
)
Hinv = None
if "cov_type" in kwargs:
cov_kwds = kwargs.get("cov_kwds", {})
kwds = {"cov_type": kwargs["cov_type"], "cov_kwds": cov_kwds}
else:
kwds = {}
if "use_t" in kwargs:
kwds["use_t"] = kwargs["use_t"]
# prints for debugging
# print('kwargs inLikelihoodModel.fit', kwargs)
# print('kwds inLikelihoodModel.fit', kwds)
# TODO: add Hessian approximation and change the above if needed
mlefit = LikelihoodModelResults(self, xopt, Hinv, scale=1.0, **kwds)
# TODO: hardcode scale?
if isinstance(retvals, dict):
mlefit.mle_retvals = retvals
if warn_convergence and not retvals["converged"]:
from warnings import warn
from statsmodels.tools.sm_exceptions import ConvergenceWarning
warn(
"Maximum Likelihood optimization failed to converge. Check mle_retvals",
ConvergenceWarning,
)
mlefit.mle_settings = optim_settings
return mlefit
|
def fit(
self,
start_params=None,
method="newton",
maxiter=100,
full_output=True,
disp=True,
fargs=(),
callback=None,
retall=False,
skip_hessian=False,
**kwargs,
):
"""
Fit method for likelihood based models
Parameters
----------
start_params : array-like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
method : str, optional
The `method` determines which solver from `scipy.optimize`
is used, and it can be chosen from among the following strings:
- 'newton' for Newton-Raphson, 'nm' for Nelder-Mead
- 'bfgs' for Broyden-Fletcher-Goldfarb-Shanno (BFGS)
- 'lbfgs' for limited-memory BFGS with optional box constraints
- 'powell' for modified Powell's method
- 'cg' for conjugate gradient
- 'ncg' for Newton-conjugate gradient
- 'basinhopping' for global basin-hopping solver
The explicit arguments in `fit` are passed to the solver,
with the exception of the basin-hopping solver. Each
solver has several optional arguments that are not the same across
solvers. See the notes section below (or scipy.optimize) for the
available arguments and for the list of explicit arguments that the
basin-hopping solver supports.
maxiter : int, optional
The maximum number of iterations to perform.
full_output : bool, optional
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : bool, optional
Set to True to print convergence messages.
fargs : tuple, optional
Extra arguments passed to the likelihood function, i.e.,
loglike(x,*args)
callback : callable callback(xk), optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool, optional
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
skip_hessian : bool, optional
If False (default), then the negative inverse hessian is calculated
after the optimization. If True, then the hessian will not be
calculated. However, it will be available in methods that use the
hessian in the optimization (currently only with `"newton"`).
kwargs : keywords
All kwargs are passed to the chosen solver with one exception. The
following keyword controls what happens after the fit::
warn_convergence : bool, optional
If True, checks the model for the converged flag. If the
converged flag is False, a ConvergenceWarning is issued.
Notes
-----
The 'basinhopping' solver ignores `maxiter`, `retall`, `full_output`
explicit arguments.
Optional arguments for solvers (see returned Results.mle_settings)::
'newton'
tol : float
Relative error in params acceptable for convergence.
'nm' -- Nelder Mead
xtol : float
Relative error in params acceptable for convergence
ftol : float
Relative error in loglike(params) acceptable for
convergence
maxfun : int
Maximum number of function evaluations to make.
'bfgs'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.Inf is max, -np.Inf is min)
epsilon
If fprime is approximated, use this value for the step
size. Only relevant if LikelihoodModel.score is None.
'lbfgs'
m : int
This many terms are used for the Hessian approximation.
factr : float
A stop condition that is a variant of relative error.
pgtol : float
A stop condition that uses the projected gradient.
epsilon
If fprime is approximated, use this value for the step
size. Only relevant if LikelihoodModel.score is None.
maxfun : int
Maximum number of function evaluations to make.
bounds : sequence
(min, max) pairs for each element in x,
defining the bounds on that parameter.
Use None for one of min or max when there is no bound
in that direction.
'cg'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.Inf is max, -np.Inf is min)
epsilon : float
If fprime is approximated, use this value for the step
size. Can be scalar or vector. Only relevant if
Likelihoodmodel.score is None.
'ncg'
fhess_p : callable f'(x,*args)
Function which computes the Hessian of f times an arbitrary
vector, p. Should only be supplied if
LikelihoodModel.hessian is None.
avextol : float
Stop when the average relative error in the minimizer
falls below this amount.
epsilon : float or ndarray
If fhess is approximated, use this value for the step size.
Only relevant if Likelihoodmodel.hessian is None.
'powell'
xtol : float
Line-search error tolerance
ftol : float
Relative error in loglike(params) for acceptable for
convergence.
maxfun : int
Maximum number of function evaluations to make.
start_direc : ndarray
Initial direction set.
'basinhopping'
niter : integer
The number of basin hopping iterations.
niter_success : integer
Stop the run if the global minimum candidate remains the
same for this number of iterations.
T : float
The "temperature" parameter for the accept or reject
criterion. Higher "temperatures" mean that larger jumps
in function value will be accepted. For best results
`T` should be comparable to the separation (in function
value) between local minima.
stepsize : float
Initial step size for use in the random displacement.
interval : integer
The interval for how often to update the `stepsize`.
minimizer : dict
Extra keyword arguments to be passed to the minimizer
`scipy.optimize.minimize()`, for example 'method' - the
minimization method (e.g. 'L-BFGS-B'), or 'tol' - the
tolerance for termination. Other arguments are mapped from
explicit argument of `fit`:
- `args` <- `fargs`
- `jac` <- `score`
- `hess` <- `hess`
"""
Hinv = None # JP error if full_output=0, Hinv not defined
if start_params is None:
if hasattr(self, "start_params"):
start_params = self.start_params
elif self.exog is not None:
# fails for shape (K,)?
start_params = [0] * self.exog.shape[1]
else:
raise ValueError("If exog is None, then start_params should be specified")
# TODO: separate args from nonarg taking score and hessian, ie.,
# user-supplied and numerically evaluated estimate frprime doesn't take
# args in most (any?) of the optimize function
nobs = self.endog.shape[0]
f = lambda params, *args: -self.loglike(params, *args) / nobs
score = lambda params, *args: -self.score(params, *args) / nobs
try:
hess = lambda params, *args: -self.hessian(params, *args) / nobs
except:
hess = None
if method == "newton":
score = lambda params, *args: self.score(params, *args) / nobs
hess = lambda params, *args: self.hessian(params, *args) / nobs
# TODO: why are score and hess positive?
warn_convergence = kwargs.pop("warn_convergence", True)
optimizer = Optimizer()
xopt, retvals, optim_settings = optimizer._fit(
f,
score,
start_params,
fargs,
kwargs,
hessian=hess,
method=method,
disp=disp,
maxiter=maxiter,
callback=callback,
retall=retall,
full_output=full_output,
)
# NOTE: this is for fit_regularized and should be generalized
cov_params_func = kwargs.setdefault("cov_params_func", None)
if cov_params_func:
Hinv = cov_params_func(self, xopt, retvals)
elif method == "newton" and full_output:
Hinv = np.linalg.inv(-retvals["Hessian"]) / nobs
elif not skip_hessian:
try:
Hinv = np.linalg.inv(-1 * self.hessian(xopt))
except:
# might want custom warning ResultsWarning? NumericalWarning?
from warnings import warn
warndoc = "Inverting hessian failed, no bse or cov_params available"
warn(warndoc, RuntimeWarning)
Hinv = None
if "cov_type" in kwargs:
cov_kwds = kwargs.get("cov_kwds", {})
kwds = {"cov_type": kwargs["cov_type"], "cov_kwds": cov_kwds}
else:
kwds = {}
if "use_t" in kwargs:
kwds["use_t"] = kwargs["use_t"]
# prints for debugging
# print('kwargs inLikelihoodModel.fit', kwargs)
# print('kwds inLikelihoodModel.fit', kwds)
# TODO: add Hessian approximation and change the above if needed
mlefit = LikelihoodModelResults(self, xopt, Hinv, scale=1.0, **kwds)
# TODO: hardcode scale?
if isinstance(retvals, dict):
mlefit.mle_retvals = retvals
if warn_convergence and not retvals["converged"]:
from warnings import warn
from statsmodels.tools.sm_exceptions import ConvergenceWarning
warn(
"Maximum Likelihood optimization failed to converge. Check mle_retvals",
ConvergenceWarning,
)
mlefit.mle_settings = optim_settings
return mlefit
|
https://github.com/statsmodels/statsmodels/issues/3098
|
Traceback (most recent call last):
File "C:\git\statsmodels\statsmodels\base\model.py", line 447, in fit
H = -1 * self.hessian(xopt)
File "C:\git\statsmodels\statsmodels\tsa\arima_model.py", line 593, in hessian
return approx_hess_cs(params, self.loglike, args=(False,))
File "C:\git\statsmodels\statsmodels\tools\numdiff.py", line 243, in approx_hess_cs
**kwargs)).imag/2./hess[i, j]
File "C:\git\statsmodels\statsmodels\tsa\arima_model.py", line 776, in loglike
return self.loglike_kalman(params, set_sigma2)
File "C:\git\statsmodels\statsmodels\tsa\arima_model.py", line 786, in loglike_kalman
return KalmanFilter.loglike(params, self, set_sigma2)
File "C:\git\statsmodels\statsmodels\tsa\kalmanf\kalmanfilter.py", line 666, in loglike
R_mat, T_mat)
File "statsmodels\tsa\kalmanf\kalman_loglike.pyx", line 359, in statsmodels.tsa.kalmanf.kalman_loglike.kalman_loglike_complex (statsmodels/tsa/kalmanf/kalman_loglike.c:6432)
v, F, loglikelihood = kalman_filter_complex(y,k,p,q,r,nobs,Z_mat,R_mat,T_mat)
File "statsmodels\tsa\kalmanf\kalman_loglike.pyx", line 229, in statsmodels.tsa.kalmanf.kalman_loglike.kalman_filter_complex (statsmodels/tsa/kalmanf/kalman_loglike.c:4956)
dot(R_mat,R_mat.T).ravel('F')).reshape(r,r, order='F'))
File "statsmodels\tsa\kalmanf\stringsource", line 644, in View.MemoryView.memoryview_cwrapper (statsmodels/tsa/kalmanf/kalman_loglike.c:14545)
File "statsmodels\tsa\kalmanf\stringsource", line 345, in View.MemoryView.memoryview.__cinit__ (statsmodels/tsa/kalmanf/kalman_loglike.c:10820)
ValueError: ndarray is not Fortran contiguous
|
ValueError
|
def initialize(self):
if not self.score: # right now score is not optional
self.score = approx_fprime
if not self.hessian:
pass
else: # can use approx_hess_p if we have a gradient
if not self.hessian:
pass
# Initialize is called by
# statsmodels.model.LikelihoodModel.__init__
# and should contain any preprocessing that needs to be done for a model
if self.exog is not None:
# assume constant
er = np_matrix_rank(self.exog)
self.df_model = float(er - 1)
self.df_resid = float(self.exog.shape[0] - er)
else:
self.df_model = np.nan
self.df_resid = np.nan
super(GenericLikelihoodModel, self).initialize()
|
def initialize(self):
if not self.score: # right now score is not optional
self.score = approx_fprime
if not self.hessian:
pass
else: # can use approx_hess_p if we have a gradient
if not self.hessian:
pass
# Initialize is called by
# statsmodels.model.LikelihoodModel.__init__
# and should contain any preprocessing that needs to be done for a model
from statsmodels.tools import tools
if self.exog is not None:
# assume constant
er = np_matrix_rank(self.exog)
self.df_model = float(er - 1)
self.df_resid = float(self.exog.shape[0] - er)
else:
self.df_model = np.nan
self.df_resid = np.nan
super(GenericLikelihoodModel, self).initialize()
|
https://github.com/statsmodels/statsmodels/issues/3098
|
Traceback (most recent call last):
File "C:\git\statsmodels\statsmodels\base\model.py", line 447, in fit
H = -1 * self.hessian(xopt)
File "C:\git\statsmodels\statsmodels\tsa\arima_model.py", line 593, in hessian
return approx_hess_cs(params, self.loglike, args=(False,))
File "C:\git\statsmodels\statsmodels\tools\numdiff.py", line 243, in approx_hess_cs
**kwargs)).imag/2./hess[i, j]
File "C:\git\statsmodels\statsmodels\tsa\arima_model.py", line 776, in loglike
return self.loglike_kalman(params, set_sigma2)
File "C:\git\statsmodels\statsmodels\tsa\arima_model.py", line 786, in loglike_kalman
return KalmanFilter.loglike(params, self, set_sigma2)
File "C:\git\statsmodels\statsmodels\tsa\kalmanf\kalmanfilter.py", line 666, in loglike
R_mat, T_mat)
File "statsmodels\tsa\kalmanf\kalman_loglike.pyx", line 359, in statsmodels.tsa.kalmanf.kalman_loglike.kalman_loglike_complex (statsmodels/tsa/kalmanf/kalman_loglike.c:6432)
v, F, loglikelihood = kalman_filter_complex(y,k,p,q,r,nobs,Z_mat,R_mat,T_mat)
File "statsmodels\tsa\kalmanf\kalman_loglike.pyx", line 229, in statsmodels.tsa.kalmanf.kalman_loglike.kalman_filter_complex (statsmodels/tsa/kalmanf/kalman_loglike.c:4956)
dot(R_mat,R_mat.T).ravel('F')).reshape(r,r, order='F'))
File "statsmodels\tsa\kalmanf\stringsource", line 644, in View.MemoryView.memoryview_cwrapper (statsmodels/tsa/kalmanf/kalman_loglike.c:14545)
File "statsmodels\tsa\kalmanf\stringsource", line 345, in View.MemoryView.memoryview.__cinit__ (statsmodels/tsa/kalmanf/kalman_loglike.c:10820)
ValueError: ndarray is not Fortran contiguous
|
ValueError
|
def predict(self, exog=None, transform=True, *args, **kwargs):
"""
Call self.model.predict with self.params as the first argument.
Parameters
----------
exog : array-like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
args, kwargs :
Some models can take additional arguments or keywords, see the
predict method of the model for the details.
Returns
-------
prediction : ndarray, pandas.Series or pandas.DataFrame
See self.model.predict
"""
import pandas as pd
exog_index = exog.index if _is_using_pandas(exog, None) else None
if transform and hasattr(self.model, "formula") and exog is not None:
from patsy import dmatrix
exog = dmatrix(
self.model.data.design_info.builder, exog, return_type="dataframe"
)
if len(exog) < len(exog_index):
# missing values, rows have been dropped
if exog_index is not None:
exog = exog.reindex(exog_index)
else:
import warnings
warnings.warn("nan rows have been dropped", ValueWarning)
if exog is not None:
exog = np.asarray(exog)
if exog.ndim == 1 and (
self.model.exog.ndim == 1 or self.model.exog.shape[1] == 1
):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
predict_results = self.model.predict(self.params, exog, *args, **kwargs)
if exog_index is not None and not hasattr(predict_results, "predicted_values"):
if predict_results.ndim == 1:
return pd.Series(predict_results, index=exog_index)
else:
return pd.DataFrame(predict_results, index=exog_index)
else:
return predict_results
|
def predict(self, exog=None, transform=True, *args, **kwargs):
"""
Call self.model.predict with self.params as the first argument.
Parameters
----------
exog : array-like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
args, kwargs :
Some models can take additional arguments or keywords, see the
predict method of the model for the details.
Returns
-------
prediction : ndarray, pandas.Series or pandas.DataFrame
See self.model.predict
"""
import pandas as pd
exog_index = exog.index if _is_using_pandas(exog, None) else None
if transform and hasattr(self.model, "formula") and exog is not None:
from patsy import dmatrix
exog = dmatrix(self.model.data.design_info.builder, exog)
if exog is not None:
exog = np.asarray(exog)
if exog.ndim == 1 and (
self.model.exog.ndim == 1 or self.model.exog.shape[1] == 1
):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
predict_results = self.model.predict(self.params, exog, *args, **kwargs)
if exog_index is not None and not hasattr(predict_results, "predicted_values"):
if predict_results.ndim == 1:
return pd.Series(predict_results, index=exog_index)
else:
return pd.DataFrame(predict_results, index=exog_index)
else:
return predict_results
|
https://github.com/statsmodels/statsmodels/issues/3087
|
Traceback (most recent call last):
File "E:\josef_new_notebook\scripts\bug_missing_formula.py", line 10, in <modu
le>
test3 = result3.predict(df3) # Fails
File "E:\josef_new_notebook\git\statsmodels_py34_pr\statsmodels\base\model.py"
, line 767, in predict
return pd.Series(predict_results, index=exog_index)
File "C:\Users\josef\Downloads\WinPython-64bit-3.4.4.2\python-3.4.4.amd64\lib\
site-packages\pandas\core\series.py", line 228, in __init__
data = SingleBlockManager(data, index, fastpath=True)
File "C:\Users\josef\Downloads\WinPython-64bit-3.4.4.2\python-3.4.4.amd64\lib\
site-packages\pandas\core\internals.py", line 3752, in __init__
fastpath=True)
File "C:\Users\josef\Downloads\WinPython-64bit-3.4.4.2\python-3.4.4.amd64\lib\
site-packages\pandas\core\internals.py", line 2461, in make_block
return klass(values, ndim=ndim, fastpath=fastpath, placement=placement)
File "C:\Users\josef\Downloads\WinPython-64bit-3.4.4.2\python-3.4.4.amd64\lib\
site-packages\pandas\core\internals.py", line 84, in __init__
len(self.mgr_locs)))
ValueError: Wrong number of items passed 5, placement implies 6
|
ValueError
|
def plot_corr(
dcorr,
xnames=None,
ynames=None,
title=None,
normcolor=False,
ax=None,
cmap="RdYlBu_r",
):
"""Plot correlation of many variables in a tight color grid.
Parameters
----------
dcorr : ndarray
Correlation matrix, square 2-D array.
xnames : list of str, optional
Labels for the horizontal axis. If not given (None), then the
matplotlib defaults (integers) are used. If it is an empty list, [],
then no ticks and labels are added.
ynames : list of str, optional
Labels for the vertical axis. Works the same way as `xnames`.
If not given, the same names as for `xnames` are re-used.
title : str, optional
The figure title. If None, the default ('Correlation Matrix') is used.
If ``title=''``, then no title is added.
normcolor : bool or tuple of scalars, optional
If False (default), then the color coding range corresponds to the
range of `dcorr`. If True, then the color range is normalized to
(-1, 1). If this is a tuple of two numbers, then they define the range
for the color bar.
ax : Matplotlib AxesSubplot instance, optional
If `ax` is None, then a figure is created. If an axis instance is
given, then only the main plot but not the colorbar is created.
cmap : str or Matplotlib Colormap instance, optional
The colormap for the plot. Can be any valid Matplotlib Colormap
instance or name.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> import statsmodels.graphics.api as smg
>>> hie_data = sm.datasets.randhie.load_pandas()
>>> corr_matrix = np.corrcoef(hie_data.data.T)
>>> smg.plot_corr(corr_matrix, xnames=hie_data.names)
>>> plt.show()
"""
if ax is None:
create_colorbar = True
else:
create_colorbar = False
fig, ax = utils.create_mpl_ax(ax)
import matplotlib as mpl
from matplotlib import cm
nvars = dcorr.shape[0]
if ynames is None:
ynames = xnames
if title is None:
title = "Correlation Matrix"
if isinstance(normcolor, tuple):
vmin, vmax = normcolor
elif normcolor:
vmin, vmax = -1.0, 1.0
else:
vmin, vmax = None, None
axim = ax.imshow(
dcorr,
cmap=cmap,
interpolation="nearest",
extent=(0, nvars, 0, nvars),
vmin=vmin,
vmax=vmax,
)
# create list of label positions
labelPos = np.arange(0, nvars) + 0.5
if not ynames is None:
ax.set_yticks(labelPos)
ax.set_yticks(labelPos[:-1] + 0.5, minor=True)
ax.set_yticklabels(ynames[::-1], fontsize="small", horizontalalignment="right")
elif ynames == []:
ax.set_yticks([])
if not xnames is None:
ax.set_xticks(labelPos)
ax.set_xticks(labelPos[:-1] + 0.5, minor=True)
ax.set_xticklabels(
xnames, fontsize="small", rotation=45, horizontalalignment="right"
)
elif xnames == []:
ax.set_xticks([])
if not title == "":
ax.set_title(title)
if create_colorbar:
fig.colorbar(axim, use_gridspec=True)
fig.tight_layout()
ax.tick_params(which="minor", length=0)
ax.tick_params(direction="out", top=False, right=False)
try:
ax.grid(True, which="minor", linestyle="-", color="w", lw=1)
except AttributeError:
# Seems to fail for axes created with AxesGrid. MPL bug?
pass
return fig
|
def plot_corr(
dcorr,
xnames=None,
ynames=None,
title=None,
normcolor=False,
ax=None,
cmap="RdYlBu_r",
):
"""Plot correlation of many variables in a tight color grid.
Parameters
----------
dcorr : ndarray
Correlation matrix, square 2-D array.
xnames : list of str, optional
Labels for the horizontal axis. If not given (None), then the
matplotlib defaults (integers) are used. If it is an empty list, [],
then no ticks and labels are added.
ynames : list of str, optional
Labels for the vertical axis. Works the same way as `xnames`.
If not given, the same names as for `xnames` are re-used.
title : str, optional
The figure title. If None, the default ('Correlation Matrix') is used.
If ``title=''``, then no title is added.
normcolor : bool or tuple of scalars, optional
If False (default), then the color coding range corresponds to the
range of `dcorr`. If True, then the color range is normalized to
(-1, 1). If this is a tuple of two numbers, then they define the range
for the color bar.
ax : Matplotlib AxesSubplot instance, optional
If `ax` is None, then a figure is created. If an axis instance is
given, then only the main plot but not the colorbar is created.
cmap : str or Matplotlib Colormap instance, optional
The colormap for the plot. Can be any valid Matplotlib Colormap
instance or name.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> import statsmodels.graphics.api as smg
>>> hie_data = sm.datasets.randhie.load_pandas()
>>> corr_matrix = np.corrcoef(hie_data.data.T)
>>> smg.plot_corr(corr_matrix, xnames=hie_data.names)
>>> plt.show()
"""
if ax is None:
create_colorbar = True
else:
create_colorbar = False
fig, ax = utils.create_mpl_ax(ax)
import matplotlib as mpl
from matplotlib import cm
nvars = dcorr.shape[0]
if ynames is None:
ynames = xnames
if title is None:
title = "Correlation Matrix"
if isinstance(normcolor, tuple):
vmin, vmax = normcolor
elif normcolor:
vmin, vmax = -1.0, 1.0
else:
vmin, vmax = None, None
axim = ax.imshow(
dcorr,
cmap=cmap,
interpolation="nearest",
extent=(0, nvars, 0, nvars),
vmin=vmin,
vmax=vmax,
)
# create list of label positions
labelPos = np.arange(0, nvars) + 0.5
if ynames:
ax.set_yticks(labelPos)
ax.set_yticks(labelPos[:-1] + 0.5, minor=True)
ax.set_yticklabels(ynames[::-1], fontsize="small", horizontalalignment="right")
elif ynames == []:
ax.set_yticks([])
if xnames:
ax.set_xticks(labelPos)
ax.set_xticks(labelPos[:-1] + 0.5, minor=True)
ax.set_xticklabels(
xnames, fontsize="small", rotation=45, horizontalalignment="right"
)
elif xnames == []:
ax.set_xticks([])
if not title == "":
ax.set_title(title)
if create_colorbar:
fig.colorbar(axim, use_gridspec=True)
fig.tight_layout()
ax.tick_params(which="minor", length=0)
ax.tick_params(direction="out", top=False, right=False)
try:
ax.grid(True, which="minor", linestyle="-", color="w", lw=1)
except AttributeError:
# Seems to fail for axes created with AxesGrid. MPL bug?
pass
return fig
|
https://github.com/statsmodels/statsmodels/issues/2510
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-24-f81f43c530b6> in <module>()
1 corr_columns = df_business[...]
2 corr_matrix = corr_columns.corr()
----> 3 smg.plot_corr(corr_matrix, xnames=corr_columns.columns)
4 plt.show()
C:\portabel\miniconda\envs\sc\lib\site-packages\statsmodels\graphics\correlation.pyc in plot_corr(dcorr, xnames, ynames, title, normcolor, ax, cmap)
89 labelPos = np.arange(0, nvars) + 0.5
90
---> 91 if ynames:
92 ax.set_yticks(labelPos)
93 ax.set_yticks(labelPos[:-1]+0.5, minor=True)
C:\portabel\miniconda\envs\sc\lib\site-packages\pandas\core\index.pyc in __nonzero__(self)
1039 raise ValueError("The truth value of a {0} is ambiguous. "
1040 "Use a.empty, a.bool(), a.item(), a.any() or a.all()."
-> 1041 .format(self.__class__.__name__))
1042
1043 __bool__ = __nonzero__
ValueError: The truth value of a Index is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all().
|
ValueError
|
def fit(self, maxlags=None, method="ols", ic=None, trend="c", verbose=False):
"""
Fit the VAR model
Parameters
----------
maxlags : int
Maximum number of lags to check for order selection, defaults to
12 * (nobs/100.)**(1./4), see select_order function
method : {'ols'}
Estimation method to use
ic : {'aic', 'fpe', 'hqic', 'bic', None}
Information criterion to use for VAR order selection.
aic : Akaike
fpe : Final prediction error
hqic : Hannan-Quinn
bic : Bayesian a.k.a. Schwarz
verbose : bool, default False
Print order selection output to the screen
trend, str {"c", "ct", "ctt", "nc"}
"c" - add constant
"ct" - constant and trend
"ctt" - constant, linear and quadratic trend
"nc" - co constant, no trend
Note that these are prepended to the columns of the dataset.
Notes
-----
Lutkepohl pp. 146-153
Returns
-------
est : VARResults
"""
lags = maxlags
if trend not in ["c", "ct", "ctt", "nc"]:
raise ValueError("trend '{}' not supported for VAR".format(trend))
if ic is not None:
selections = self.select_order(maxlags=maxlags, verbose=verbose)
if ic not in selections:
raise Exception(
"%s not recognized, must be among %s" % (ic, sorted(selections))
)
lags = selections[ic]
if verbose:
print("Using %d based on %s criterion" % (lags, ic))
else:
if lags is None:
lags = 1
k_trend = util.get_trendorder(trend)
self.exog_names = util.make_lag_names(self.endog_names, lags, k_trend)
self.nobs = len(self.endog) - lags
return self._estimate_var(lags, trend=trend)
|
def fit(self, maxlags=None, method="ols", ic=None, trend="c", verbose=False):
"""
Fit the VAR model
Parameters
----------
maxlags : int
Maximum number of lags to check for order selection, defaults to
12 * (nobs/100.)**(1./4), see select_order function
method : {'ols'}
Estimation method to use
ic : {'aic', 'fpe', 'hqic', 'bic', None}
Information criterion to use for VAR order selection.
aic : Akaike
fpe : Final prediction error
hqic : Hannan-Quinn
bic : Bayesian a.k.a. Schwarz
verbose : bool, default False
Print order selection output to the screen
trend, str {"c", "ct", "ctt", "nc"}
"c" - add constant
"ct" - constant and trend
"ctt" - constant, linear and quadratic trend
"nc" - co constant, no trend
Note that these are prepended to the columns of the dataset.
Notes
-----
Lutkepohl pp. 146-153
Returns
-------
est : VARResults
"""
lags = maxlags
if ic is not None:
selections = self.select_order(maxlags=maxlags, verbose=verbose)
if ic not in selections:
raise Exception(
"%s not recognized, must be among %s" % (ic, sorted(selections))
)
lags = selections[ic]
if verbose:
print("Using %d based on %s criterion" % (lags, ic))
else:
if lags is None:
lags = 1
k_trend = util.get_trendorder(trend)
self.exog_names = util.make_lag_names(self.endog_names, lags, k_trend)
self.nobs = len(self.endog) - lags
return self._estimate_var(lags, trend=trend)
|
https://github.com/statsmodels/statsmodels/issues/2271
|
model_t = VAR(data_t)
results = model.fit(4, trend = 't')
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-98-f7328095d596> in <module>()
1 model_t = VAR(data_t)
----> 2 results = model.fit(4, trend = 't')
e:\josef\eclipsegworkspace\statsmodels-git\statsmodels-all-new2_py27\statsmodels\statsmodels\tsa\vector_ar\var_model.py in fit(self, maxlags, method, ic, trend, verbose)
429 lags = 1
430
--> 431 k_trend = util.get_trendorder(trend)
432 self.exog_names = util.make_lag_names(self.endog_names, lags, k_trend)
433 self.nobs = len(self.endog) - lags
e:\josef\eclipsegworkspace\statsmodels-git\statsmodels-all-new2_py27\statsmodels\statsmodels\tsa\vector_ar\util.pyc in get_trendorder(trend)
45 elif trend == 'ctt':
46 trendorder = 3
---> 47 return trendorder
48
49 def make_lag_names(names, lag_order, trendorder=1):
UnboundLocalError: local variable 'trendorder' referenced before assignment
|
UnboundLocalError
|
def _cov_alpha(self):
"""
Estimated covariance matrix of model coefficients ex intercept
"""
# drop intercept and trend
return self.cov_params[self.k_trend * self.neqs :, self.k_trend * self.neqs :]
|
def _cov_alpha(self):
"""
Estimated covariance matrix of model coefficients ex intercept
"""
# drop intercept
return self.cov_params[self.neqs :, self.neqs :]
|
https://github.com/statsmodels/statsmodels/issues/1636
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/dist-packages/statsmodels/tsa/vector_ar/irf.py", line 138, in plot
stderr = self.cov(orth=orth)
File "/usr/lib/python2.7/dist-packages/statsmodels/tsa/vector_ar/irf.py", line 264, in cov
covs[i] = chain_dot(Gi, self.cov_a, Gi.T)
File "/usr/lib/python2.7/dist-packages/statsmodels/tools/tools.py", line 460, in chain_dot
return reduce(lambda x, y: np.dot(y, x), arrs[::-1])
File "/usr/lib/python2.7/dist-packages/statsmodels/tools/tools.py", line 460, in <lambda>
return reduce(lambda x, y: np.dot(y, x), arrs[::-1])
ValueError: matrices are not aligned
|
ValueError
|
def __init__(self, endog, exog=None, missing="none", hasconst=None, **kwargs):
if "design_info" in kwargs:
self.design_info = kwargs.pop("design_info")
if missing != "none":
arrays, nan_idx = self.handle_missing(endog, exog, missing, **kwargs)
self.missing_row_idx = nan_idx
self.__dict__.update(arrays) # attach all the data arrays
self.orig_endog = self.endog
self.orig_exog = self.exog
self.endog, self.exog = self._convert_endog_exog(self.endog, self.exog)
else:
self.__dict__.update(kwargs) # attach the extra arrays anyway
self.orig_endog = endog
self.orig_exog = exog
self.endog, self.exog = self._convert_endog_exog(endog, exog)
# this has side-effects, attaches k_constant and const_idx
self._handle_constant(hasconst)
self._check_integrity()
self._cache = resettable_cache()
|
def __init__(self, endog, exog=None, missing="none", hasconst=None, **kwargs):
if missing != "none":
arrays, nan_idx = self.handle_missing(endog, exog, missing, **kwargs)
self.missing_row_idx = nan_idx
self.__dict__.update(arrays) # attach all the data arrays
self.orig_endog = self.endog
self.orig_exog = self.exog
self.endog, self.exog = self._convert_endog_exog(self.endog, self.exog)
else:
self.__dict__.update(kwargs) # attach the extra arrays anyway
self.orig_endog = endog
self.orig_exog = exog
self.endog, self.exog = self._convert_endog_exog(endog, exog)
# this has side-effects, attaches k_constant and const_idx
self._handle_constant(hasconst)
self._check_integrity()
self._cache = resettable_cache()
|
https://github.com/statsmodels/statsmodels/issues/2171
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/dist-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 540, in runfile
execfile(filename, namespace)
File "/home/adrian/Desktop/ToDo/statsmodels_debugging/OLS.py", line 34, in <module>
fit.predict( exog=data[:-1] ) # fails
File "/usr/local/lib/python2.7/dist-packages/statsmodels-0.6.1-py2.7-linux-i686.egg/statsmodels/base/model.py", line 739, in predict
exog = dmatrix(self.model.data.orig_exog.design_info.builder,
File "/usr/local/lib/python2.7/dist-packages/pandas/core/generic.py", line 1936, in __getattr__
(type(self).__name__, name))
AttributeError: 'DataFrame' object has no attribute 'design_info'
|
AttributeError
|
def _handle_data(self, endog, exog, missing, hasconst, **kwargs):
data = handle_data(endog, exog, missing, hasconst, **kwargs)
# kwargs arrays could have changed, easier to just attach here
for key in kwargs:
if key == "design_info": # leave this attached to data
continue
# pop so we don't start keeping all these twice or references
try:
setattr(self, key, data.__dict__.pop(key))
except KeyError: # panel already pops keys in data handling
pass
return data
|
def _handle_data(self, endog, exog, missing, hasconst, **kwargs):
data = handle_data(endog, exog, missing, hasconst, **kwargs)
# kwargs arrays could have changed, easier to just attach here
for key in kwargs:
# pop so we don't start keeping all these twice or references
try:
setattr(self, key, data.__dict__.pop(key))
except KeyError: # panel already pops keys in data handling
pass
return data
|
https://github.com/statsmodels/statsmodels/issues/2171
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/dist-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 540, in runfile
execfile(filename, namespace)
File "/home/adrian/Desktop/ToDo/statsmodels_debugging/OLS.py", line 34, in <module>
fit.predict( exog=data[:-1] ) # fails
File "/usr/local/lib/python2.7/dist-packages/statsmodels-0.6.1-py2.7-linux-i686.egg/statsmodels/base/model.py", line 739, in predict
exog = dmatrix(self.model.data.orig_exog.design_info.builder,
File "/usr/local/lib/python2.7/dist-packages/pandas/core/generic.py", line 1936, in __getattr__
(type(self).__name__, name))
AttributeError: 'DataFrame' object has no attribute 'design_info'
|
AttributeError
|
def from_formula(cls, formula, data, subset=None, *args, **kwargs):
"""
Create a Model from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
data : array-like
The data for the model. See Notes.
subset : array-like
An array-like object of booleans, integers, or index values that
indicate the subset of df to use in the model. Assumes df is a
`pandas.DataFrame`
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
model : Model instance
Notes
------
data must define __getitem__ with the keys in the formula terms
args and kwargs are passed on to the model instantiation. E.g.,
a numpy structured or rec array, a dictionary, or a pandas DataFrame.
"""
# TODO: provide a docs template for args/kwargs from child models
# TODO: subset could use syntax. issue #469.
if subset is not None:
data = data.ix[subset]
eval_env = kwargs.pop("eval_env", None)
if eval_env is None:
eval_env = 2
elif eval_env == -1:
from patsy import EvalEnvironment
eval_env = EvalEnvironment({})
else:
eval_env += 1 # we're going down the stack again
missing = kwargs.get("missing", "drop")
if missing == "none": # with patys it's drop or raise. let's raise.
missing = "raise"
((endog, exog), missing_idx, design_info) = handle_formula_data(
data, None, formula, depth=eval_env, missing=missing
)
kwargs.update(
{"missing_idx": missing_idx, "missing": missing, "design_info": design_info}
)
mod = cls(endog, exog, *args, **kwargs)
mod.formula = formula
# since we got a dataframe, attach the original
mod.data.frame = data
return mod
|
def from_formula(cls, formula, data, subset=None, *args, **kwargs):
"""
Create a Model from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
data : array-like
The data for the model. See Notes.
subset : array-like
An array-like object of booleans, integers, or index values that
indicate the subset of df to use in the model. Assumes df is a
`pandas.DataFrame`
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
model : Model instance
Notes
------
data must define __getitem__ with the keys in the formula terms
args and kwargs are passed on to the model instantiation. E.g.,
a numpy structured or rec array, a dictionary, or a pandas DataFrame.
"""
# TODO: provide a docs template for args/kwargs from child models
# TODO: subset could use syntax. issue #469.
if subset is not None:
data = data.ix[subset]
eval_env = kwargs.pop("eval_env", None)
if eval_env is None:
eval_env = 2
elif eval_env == -1:
from patsy import EvalEnvironment
eval_env = EvalEnvironment({})
else:
eval_env += 1 # we're going down the stack again
missing = kwargs.get("missing", "drop")
if missing == "none": # with patys it's drop or raise. let's raise.
missing = "raise"
(endog, exog), missing_idx = handle_formula_data(
data, None, formula, depth=eval_env, missing=missing
)
kwargs.update({"missing_idx": missing_idx, "missing": missing})
mod = cls(endog, exog, *args, **kwargs)
mod.formula = formula
# since we got a dataframe, attach the original
mod.data.frame = data
return mod
|
https://github.com/statsmodels/statsmodels/issues/2171
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/dist-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 540, in runfile
execfile(filename, namespace)
File "/home/adrian/Desktop/ToDo/statsmodels_debugging/OLS.py", line 34, in <module>
fit.predict( exog=data[:-1] ) # fails
File "/usr/local/lib/python2.7/dist-packages/statsmodels-0.6.1-py2.7-linux-i686.egg/statsmodels/base/model.py", line 739, in predict
exog = dmatrix(self.model.data.orig_exog.design_info.builder,
File "/usr/local/lib/python2.7/dist-packages/pandas/core/generic.py", line 1936, in __getattr__
(type(self).__name__, name))
AttributeError: 'DataFrame' object has no attribute 'design_info'
|
AttributeError
|
def predict(self, exog=None, transform=True, *args, **kwargs):
"""
Call self.model.predict with self.params as the first argument.
Parameters
----------
exog : array-like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
args, kwargs :
Some models can take additional arguments or keywords, see the
predict method of the model for the details.
Returns
-------
prediction : ndarray or pandas.Series
See self.model.predict
"""
if transform and hasattr(self.model, "formula") and exog is not None:
from patsy import dmatrix
exog = dmatrix(self.model.data.design_info.builder, exog)
if exog is not None:
exog = np.asarray(exog)
if exog.ndim == 1 and (
self.model.exog.ndim == 1 or self.model.exog.shape[1] == 1
):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
return self.model.predict(self.params, exog, *args, **kwargs)
|
def predict(self, exog=None, transform=True, *args, **kwargs):
"""
Call self.model.predict with self.params as the first argument.
Parameters
----------
exog : array-like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
args, kwargs :
Some models can take additional arguments or keywords, see the
predict method of the model for the details.
Returns
-------
prediction : ndarray or pandas.Series
See self.model.predict
"""
if transform and hasattr(self.model, "formula") and exog is not None:
from patsy import dmatrix
exog = dmatrix(self.model.data.orig_exog.design_info.builder, exog)
if exog is not None:
exog = np.asarray(exog)
if exog.ndim == 1 and (
self.model.exog.ndim == 1 or self.model.exog.shape[1] == 1
):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
return self.model.predict(self.params, exog, *args, **kwargs)
|
https://github.com/statsmodels/statsmodels/issues/2171
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/dist-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 540, in runfile
execfile(filename, namespace)
File "/home/adrian/Desktop/ToDo/statsmodels_debugging/OLS.py", line 34, in <module>
fit.predict( exog=data[:-1] ) # fails
File "/usr/local/lib/python2.7/dist-packages/statsmodels-0.6.1-py2.7-linux-i686.egg/statsmodels/base/model.py", line 739, in predict
exog = dmatrix(self.model.data.orig_exog.design_info.builder,
File "/usr/local/lib/python2.7/dist-packages/pandas/core/generic.py", line 1936, in __getattr__
(type(self).__name__, name))
AttributeError: 'DataFrame' object has no attribute 'design_info'
|
AttributeError
|
def handle_formula_data(Y, X, formula, depth=0, missing="drop"):
"""
Returns endog, exog, and the model specification from arrays and formula
Parameters
----------
Y : array-like
Either endog (the LHS) of a model specification or all of the data.
Y must define __getitem__ for now.
X : array-like
Either exog or None. If all the data for the formula is provided in
Y then you must explicitly set X to None.
formula : str or patsy.model_desc
You can pass a handler by import formula_handler and adding a
key-value pair where the key is the formula object class and
the value is a function that returns endog, exog, formula object
Returns
-------
endog : array-like
Should preserve the input type of Y,X
exog : array-like
Should preserve the input type of Y,X. Could be None.
"""
# half ass attempt to handle other formula objects
if isinstance(formula, tuple(iterkeys(formula_handler))):
return formula_handler[type(formula)]
na_action = NAAction(on_NA=missing)
if X is not None:
if data_util._is_using_pandas(Y, X):
result = dmatrices(
formula, (Y, X), depth, return_type="dataframe", NA_action=na_action
)
else:
result = dmatrices(
formula, (Y, X), depth, return_type="dataframe", NA_action=na_action
)
else:
if data_util._is_using_pandas(Y, None):
result = dmatrices(
formula, Y, depth, return_type="dataframe", NA_action=na_action
)
else:
result = dmatrices(
formula, Y, depth, return_type="dataframe", NA_action=na_action
)
# if missing == 'raise' there's not missing_mask
missing_mask = getattr(na_action, "missing_mask", None)
if not np.any(missing_mask):
missing_mask = None
if len(result) > 1: # have RHS design
design_info = result[1].design_info # detach it from DataFrame
else:
design_info = None
# NOTE: is there ever a case where we'd need LHS design_info?
return result, missing_mask, design_info
|
def handle_formula_data(Y, X, formula, depth=0, missing="drop"):
"""
Returns endog, exog, and the model specification from arrays and formula
Parameters
----------
Y : array-like
Either endog (the LHS) of a model specification or all of the data.
Y must define __getitem__ for now.
X : array-like
Either exog or None. If all the data for the formula is provided in
Y then you must explicitly set X to None.
formula : str or patsy.model_desc
You can pass a handler by import formula_handler and adding a
key-value pair where the key is the formula object class and
the value is a function that returns endog, exog, formula object
Returns
-------
endog : array-like
Should preserve the input type of Y,X
exog : array-like
Should preserve the input type of Y,X. Could be None.
"""
# half ass attempt to handle other formula objects
if isinstance(formula, tuple(iterkeys(formula_handler))):
return formula_handler[type(formula)]
na_action = NAAction(on_NA=missing)
if X is not None:
if data_util._is_using_pandas(Y, X):
result = dmatrices(
formula, (Y, X), depth, return_type="dataframe", NA_action=na_action
)
else:
result = dmatrices(
formula, (Y, X), depth, return_type="dataframe", NA_action=na_action
)
else:
if data_util._is_using_pandas(Y, None):
result = dmatrices(
formula, Y, depth, return_type="dataframe", NA_action=na_action
)
else:
result = dmatrices(
formula, Y, depth, return_type="dataframe", NA_action=na_action
)
# if missing == 'raise' there's not missing_mask
missing_mask = getattr(na_action, "missing_mask", None)
if not np.any(missing_mask):
missing_mask = None
return result, missing_mask
|
https://github.com/statsmodels/statsmodels/issues/2171
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/dist-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 540, in runfile
execfile(filename, namespace)
File "/home/adrian/Desktop/ToDo/statsmodels_debugging/OLS.py", line 34, in <module>
fit.predict( exog=data[:-1] ) # fails
File "/usr/local/lib/python2.7/dist-packages/statsmodels-0.6.1-py2.7-linux-i686.egg/statsmodels/base/model.py", line 739, in predict
exog = dmatrix(self.model.data.orig_exog.design_info.builder,
File "/usr/local/lib/python2.7/dist-packages/pandas/core/generic.py", line 1936, in __getattr__
(type(self).__name__, name))
AttributeError: 'DataFrame' object has no attribute 'design_info'
|
AttributeError
|
def anova_single(model, **kwargs):
"""
ANOVA table for one fitted linear model.
Parameters
----------
model : fitted linear model results instance
A fitted linear model
typ : int or str {1,2,3} or {"I","II","III"}
Type of sum of squares to use.
**kwargs**
scale : float
Estimate of variance, If None, will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
Notes
-----
Use of this function is discouraged. Use anova_lm instead.
"""
test = kwargs.get("test", "F")
scale = kwargs.get("scale", None)
typ = kwargs.get("typ", 1)
robust = kwargs.get("robust", None)
if robust:
robust = robust.lower()
endog = model.model.endog
exog = model.model.exog
nobs = exog.shape[0]
response_name = model.model.endog_names
design_info = model.model.data.design_info
exog_names = model.model.exog_names
# +1 for resids
n_rows = len(design_info.terms) - _has_intercept(design_info) + 1
pr_test = "PR(>%s)" % test
names = ["df", "sum_sq", "mean_sq", test, pr_test]
table = DataFrame(np.zeros((n_rows, 5)), columns=names)
if typ in [1, "I"]:
return anova1_lm_single(
model, endog, exog, nobs, design_info, table, n_rows, test, pr_test, robust
)
elif typ in [2, "II"]:
return anova2_lm_single(model, design_info, n_rows, test, pr_test, robust)
elif typ in [3, "III"]:
return anova3_lm_single(model, design_info, n_rows, test, pr_test, robust)
elif typ in [4, "IV"]:
raise NotImplemented("Type IV not yet implemented")
else: # pragma: no cover
raise ValueError("Type %s not understood" % str(typ))
|
def anova_single(model, **kwargs):
"""
ANOVA table for one fitted linear model.
Parameters
----------
model : fitted linear model results instance
A fitted linear model
typ : int or str {1,2,3} or {"I","II","III"}
Type of sum of squares to use.
**kwargs**
scale : float
Estimate of variance, If None, will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
Notes
-----
Use of this function is discouraged. Use anova_lm instead.
"""
test = kwargs.get("test", "F")
scale = kwargs.get("scale", None)
typ = kwargs.get("typ", 1)
robust = kwargs.get("robust", None)
if robust:
robust = robust.lower()
endog = model.model.endog
exog = model.model.exog
nobs = exog.shape[0]
response_name = model.model.endog_names
design_info = model.model.data.orig_exog.design_info
exog_names = model.model.exog_names
# +1 for resids
n_rows = len(design_info.terms) - _has_intercept(design_info) + 1
pr_test = "PR(>%s)" % test
names = ["df", "sum_sq", "mean_sq", test, pr_test]
table = DataFrame(np.zeros((n_rows, 5)), columns=names)
if typ in [1, "I"]:
return anova1_lm_single(
model, endog, exog, nobs, design_info, table, n_rows, test, pr_test, robust
)
elif typ in [2, "II"]:
return anova2_lm_single(model, design_info, n_rows, test, pr_test, robust)
elif typ in [3, "III"]:
return anova3_lm_single(model, design_info, n_rows, test, pr_test, robust)
elif typ in [4, "IV"]:
raise NotImplemented("Type IV not yet implemented")
else: # pragma: no cover
raise ValueError("Type %s not understood" % str(typ))
|
https://github.com/statsmodels/statsmodels/issues/2171
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/dist-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 540, in runfile
execfile(filename, namespace)
File "/home/adrian/Desktop/ToDo/statsmodels_debugging/OLS.py", line 34, in <module>
fit.predict( exog=data[:-1] ) # fails
File "/usr/local/lib/python2.7/dist-packages/statsmodels-0.6.1-py2.7-linux-i686.egg/statsmodels/base/model.py", line 739, in predict
exog = dmatrix(self.model.data.orig_exog.design_info.builder,
File "/usr/local/lib/python2.7/dist-packages/pandas/core/generic.py", line 1936, in __getattr__
(type(self).__name__, name))
AttributeError: 'DataFrame' object has no attribute 'design_info'
|
AttributeError
|
def simultaneous_ci(q_crit, var, groupnobs, pairindices=None):
"""Compute simultaneous confidence intervals for comparison of means.
q_crit value is generated from tukey hsd test. Variance is considered
across all groups. Returned halfwidths can be thought of as uncertainty
intervals around each group mean. They allow for simultaneous
comparison of pairwise significance among any pairs (by checking for
overlap)
Parameters
----------
q_crit : float
The Q critical value studentized range statistic from Tukey's HSD
var : float
The group variance
groupnobs : array-like object
Number of observations contained in each group.
pairindices : tuple of lists, optional
Indices corresponding to the upper triangle of matrix. Computed
here if not supplied
Returns
-------
halfwidths : ndarray
Half the width of each confidence interval for each group given in
groupnobs
See Also
--------
MultiComparison : statistics class providing significance tests
tukeyhsd : among other things, computes q_crit value
References
----------
.. [1] Hochberg, Y., and A. C. Tamhane. Multiple Comparison Procedures.
Hoboken, NJ: John Wiley & Sons, 1987.)
"""
# Set initial variables
ng = len(groupnobs)
if pairindices is None:
pairindices = np.triu_indices(ng, 1)
# Compute dij for all pairwise comparisons ala hochberg p. 95
gvar = var / groupnobs
d12 = np.sqrt(gvar[pairindices[0]] + gvar[pairindices[1]])
# Create the full d matrix given all known dij vals
d = np.zeros((ng, ng))
d[pairindices] = d12
d = d + d.conj().T
# Compute the two global sums from hochberg eq 3.32
sum1 = np.sum(d12)
sum2 = np.sum(d, axis=0)
if ng > 2:
w = ((ng - 1.0) * sum2 - sum1) / ((ng - 1.0) * (ng - 2.0))
else:
w = sum1 * np.ones(2, 1) / 2.0
return (q_crit / np.sqrt(2)) * w
|
def simultaneous_ci(q_crit, var, groupnobs, pairindices=None):
"""Compute simultaneous confidence intervals for comparison of means.
q_crit value is generated from tukey hsd test. Variance is considered
across all groups. Returned halfwidths can be thought of as uncertainty
intervals around each group mean. They allow for simultaneous
comparison of pairwise significance among any pairs (by checking for
overlap)
Parameters
----------
q_crit : float
The Q critical value studentized range statistic from Tukey's HSD
var : float
The group variance
groupnobs : array-like object
Number of observations contained in each group.
pairindices : tuple of lists, optional
Indices corresponding to the upper triangle of matrix. Computed
here if not supplied
Returns
-------
halfwidths : ndarray
Half the width of each confidence interval for each group given in
groupnobs
See Also
--------
MultiComparison : statistics class providing significance tests
tukeyhsd : among other things, computes q_crit value
References
----------
.. [1] Hochberg, Y., and A. C. Tamhane. Multiple Comparison Procedures.
Hoboken, NJ: John Wiley & Sons, 1987.)
"""
# Set initial variables
ng = len(groupnobs)
if pairindices is None:
pairindices = np.triu_indices(ng, 1)
# Compute dij for all pairwise comparisons ala hochberg p. 95
gvar = var / groupnobs
d12 = np.sqrt(gvar[pairindices[0]] + gvar[pairindices[1]])
# Create the full d matrix given all known dij vals
d = np.zeros((ng, ng))
d[pairindices] = d12
d = d + d.conj().T
# Compute the two global sums from hochberg eq 3.32
sum1 = np.sum(d12)
sum2 = np.sum(d, axis=0)
if ng > 2:
w = ((ng - 1.0) * sum2 - sum1) / ((ng - 1.0) * (ng - 2.0))
else:
w = sum1 * ones(2, 1) / 2.0
return (q_crit / np.sqrt(2)) * w
|
https://github.com/statsmodels/statsmodels/issues/2065
|
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-30-234737c8d0af> in <module>()
----> 1 tuk.plot_simultaneous()
/home/thauck/.virtualenvs/zues/local/lib/python2.7/site-packages/statsmodels/sandbox/stats/multicomp.pyc in plot_simultaneous(self, comparison_name, ax, figsize, xlabel, ylabel)
712 fig.set_size_inches(figsize)
713 if getattr(self, 'halfwidths', None) is None:
--> 714 self._simultaneous_ci()
715 means = self._multicomp.groupstats.groupmean
716
/home/thauck/.virtualenvs/zues/local/lib/python2.7/site-packages/statsmodels/sandbox/stats/multicomp.pyc in _simultaneous_ci(self)
640 self.halfwidths = simultaneous_ci(self.q_crit, self.variance,
641 self._multicomp.groupstats.groupnobs,
--> 642 self._multicomp.pairindices)
643
644 def plot_simultaneous(self, comparison_name=None, ax=None, figsize=(10,6),
/home/thauck/.virtualenvs/zues/local/lib/python2.7/site-packages/statsmodels/sandbox/stats/multicomp.pyc in simultaneous_ci(q_crit, var, groupnobs, pairindices)
1321 w = ((ng-1.) * sum2 - sum1) / ((ng - 1.) * (ng - 2.))
1322 else:
-> 1323 w = sum1 * ones(2, 1) / 2.
1324
1325 return (q_crit / np.sqrt(2))*w
NameError: global name 'ones' is not defined
|
NameError
|
def __init__(
self,
endog,
exog,
groups,
time=None,
family=None,
cov_struct=None,
missing="none",
offset=None,
dep_data=None,
constraint=None,
update_dep=True,
):
self.missing = missing
self.dep_data = dep_data
self.constraint = constraint
self.update_dep = update_dep
groups = np.array(groups) # in case groups is pandas
# Pass groups, time, offset, and dep_data so they are
# processed for missing data along with endog and exog.
# Calling super creates self.exog, self.endog, etc. as
# ndarrays and the original exog, endog, etc. are
# self.data.endog, etc.
super(GEE, self).__init__(
endog,
exog,
groups=groups,
time=time,
offset=offset,
dep_data=dep_data,
missing=missing,
)
self._init_keys.extend(["update_dep", "constraint", "family", "cov_struct"])
# Handle the family argument
if family is None:
family = families.Gaussian()
else:
if not issubclass(family.__class__, families.Family):
raise ValueError("GEE: `family` must be a genmod family instance")
self.family = family
# Handle the cov_struct argument
if cov_struct is None:
cov_struct = dependence_structures.Independence()
else:
if not issubclass(cov_struct.__class__, CovStruct):
raise ValueError("GEE: `cov_struct` must be a genmod cov_struct instance")
self.cov_struct = cov_struct
if offset is None:
self.offset = np.zeros(self.exog.shape[0], dtype=np.float64)
else:
self.offset = offset
# Handle the constraint
self.constraint = None
if constraint is not None:
if len(constraint) != 2:
raise ValueError("GEE: `constraint` must be a 2-tuple.")
if constraint[0].shape[1] != self.exog.shape[1]:
raise ValueError(
"GEE: the left hand side of the "
"constraint must have the same number of columns "
"as the exog matrix."
)
self.constraint = ParameterConstraint(constraint[0], constraint[1], self.exog)
self.offset += self.constraint.offset_increment()
self.exog = self.constraint.reduced_exog()
# Convert the data to the internal representation, which is a
# list of arrays, corresponding to the clusters.
group_labels = sorted(set(self.groups))
group_indices = dict((s, []) for s in group_labels)
for i in range(len(self.endog)):
group_indices[self.groups[i]].append(i)
for k in iterkeys(group_indices):
group_indices[k] = np.asarray(group_indices[k])
self.group_indices = group_indices
self.group_labels = group_labels
self.endog_li = self.cluster_list(self.endog)
self.exog_li = self.cluster_list(self.exog)
self.num_group = len(self.endog_li)
# Time defaults to a 1d grid with equal spacing
if self.time is not None:
self.time = np.asarray(self.time, np.float64)
if self.time.ndim == 1:
self.time = self.time[:, None]
self.time_li = self.cluster_list(self.time)
else:
self.time_li = [
np.arange(len(y), dtype=np.float64)[:, None] for y in self.endog_li
]
self.time = np.concatenate(self.time_li)
self.offset_li = self.cluster_list(self.offset)
if constraint is not None:
self.constraint.exog_fulltrans_li = self.cluster_list(
self.constraint.exog_fulltrans
)
self.family = family
self.cov_struct.initialize(self)
# Total sample size
group_ns = [len(y) for y in self.endog_li]
self.nobs = sum(group_ns)
# mean_deriv is the derivative of E[endog|exog] with respect
# to params
try:
# This custom mean_deriv is currently only used for the
# multinomial logit model
self.mean_deriv = self.family.link.mean_deriv
except AttributeError:
# Otherwise it can be obtained easily from inverse_deriv
mean_deriv_lpr = self.family.link.inverse_deriv
def mean_deriv(exog, lpr):
dmat = exog * mean_deriv_lpr(lpr)[:, None]
return dmat
self.mean_deriv = mean_deriv
# mean_deriv_exog is the derivative of E[endog|exog] with
# respect to exog
try:
# This custom mean_deriv_exog is currently only used for
# the multinomial logit model
self.mean_deriv_exog = self.family.link.mean_deriv_exog
except AttributeError:
# Otherwise it can be obtained easily from inverse_deriv
mean_deriv_lpr = self.family.link.inverse_deriv
def mean_deriv_exog(exog, params):
lpr = np.dot(exog, params)
dmat = np.outer(mean_deriv_lpr(lpr), params)
return dmat
self.mean_deriv_exog = mean_deriv_exog
# Skip the covariance updates if all groups have a single
# observation (reduces to fitting a GLM).
maxgroup = max([len(x) for x in self.endog_li])
if maxgroup == 1:
self.update_dep = False
|
def __init__(
self,
endog,
exog,
groups,
time=None,
family=None,
cov_struct=None,
missing="none",
offset=None,
dep_data=None,
constraint=None,
update_dep=True,
):
self.missing = missing
self.dep_data = dep_data
self.constraint = constraint
self.update_dep = update_dep
groups = np.array(groups) # in case groups is pandas
# Pass groups, time, offset, and dep_data so they are
# processed for missing data along with endog and exog.
# Calling super creates self.exog, self.endog, etc. as
# ndarrays and the original exog, endog, etc. are
# self.data.endog, etc.
super(GEE, self).__init__(
endog,
exog,
groups=groups,
time=time,
offset=offset,
dep_data=dep_data,
missing=missing,
)
self._init_keys.extend(["update_dep", "constraint", "family", "cov_struct"])
# Handle the family argument
if family is None:
family = families.Gaussian()
else:
if not issubclass(family.__class__, families.Family):
raise ValueError("GEE: `family` must be a genmod family instance")
self.family = family
# Handle the cov_struct argument
if cov_struct is None:
cov_struct = dependence_structures.Independence()
else:
if not issubclass(cov_struct.__class__, CovStruct):
raise ValueError("GEE: `cov_struct` must be a genmod cov_struct instance")
self.cov_struct = cov_struct
if offset is None:
self.offset = np.zeros(self.exog.shape[0], dtype=np.float64)
else:
self.offset = offset
# Handle the constraint
self.constraint = None
if constraint is not None:
if len(constraint) != 2:
raise ValueError("GEE: `constraint` must be a 2-tuple.")
if constraint[0].shape[1] != self.exog.shape[1]:
raise ValueError(
"GEE: the left hand side of the "
"constraint must have the same number of columns "
"as the exog matrix."
)
self.constraint = ParameterConstraint(constraint[0], constraint[1], self.exog)
self.offset += self.constraint.offset_increment()
self.exog = self.constraint.reduced_exog()
# Convert the data to the internal representation, which is a
# list of arrays, corresponding to the clusters.
group_labels = sorted(set(groups))
group_indices = dict((s, []) for s in group_labels)
for i in range(len(self.endog)):
group_indices[groups[i]].append(i)
for k in iterkeys(group_indices):
group_indices[k] = np.asarray(group_indices[k])
self.group_indices = group_indices
self.group_labels = group_labels
self.endog_li = self.cluster_list(self.endog)
self.exog_li = self.cluster_list(self.exog)
self.num_group = len(self.endog_li)
# Time defaults to a 1d grid with equal spacing
if self.time is not None:
self.time = np.asarray(self.time, np.float64)
if self.time.ndim == 1:
self.time = self.time[:, None]
self.time_li = self.cluster_list(self.time)
else:
self.time_li = [
np.arange(len(y), dtype=np.float64)[:, None] for y in self.endog_li
]
self.time = np.concatenate(self.time_li)
self.offset_li = self.cluster_list(self.offset)
if constraint is not None:
self.constraint.exog_fulltrans_li = self.cluster_list(
self.constraint.exog_fulltrans
)
self.family = family
self.cov_struct.initialize(self)
# Total sample size
group_ns = [len(y) for y in self.endog_li]
self.nobs = sum(group_ns)
# mean_deriv is the derivative of E[endog|exog] with respect
# to params
try:
# This custom mean_deriv is currently only used for the
# multinomial logit model
self.mean_deriv = self.family.link.mean_deriv
except AttributeError:
# Otherwise it can be obtained easily from inverse_deriv
mean_deriv_lpr = self.family.link.inverse_deriv
def mean_deriv(exog, lpr):
dmat = exog * mean_deriv_lpr(lpr)[:, None]
return dmat
self.mean_deriv = mean_deriv
# mean_deriv_exog is the derivative of E[endog|exog] with
# respect to exog
try:
# This custom mean_deriv_exog is currently only used for
# the multinomial logit model
self.mean_deriv_exog = self.family.link.mean_deriv_exog
except AttributeError:
# Otherwise it can be obtained easily from inverse_deriv
mean_deriv_lpr = self.family.link.inverse_deriv
def mean_deriv_exog(exog, params):
lpr = np.dot(exog, params)
dmat = np.outer(mean_deriv_lpr(lpr), params)
return dmat
self.mean_deriv_exog = mean_deriv_exog
# Skip the covariance updates if all groups have a single
# observation (reduces to fitting a GLM).
maxgroup = max([len(x) for x in self.endog_li])
if maxgroup == 1:
self.update_dep = False
|
https://github.com/statsmodels/statsmodels/issues/1877
|
OK!!
Traceback (most recent call last):
File "t.py", line 59, in <module>
cov_struct=Independence(), family=sm.families.Binomial())
File "/usr/local/lib/python2.7/dist-packages/statsmodels-0.6.0-py2.7-linux-x86_64.egg/statsmodels/genmod/generalized_estimating_equations.py", line 261, in __init__
constraint=constraint)
File "/usr/local/lib/python2.7/dist-packages/statsmodels-0.6.0-py2.7-linux-x86_64.egg/statsmodels/genmod/generalized_estimating_equations.py", line 335, in _reset
self.endog_li = self.cluster_list(self.endog)
File "/usr/local/lib/python2.7/dist-packages/statsmodels-0.6.0-py2.7-linux-x86_64.egg/statsmodels/genmod/generalized_estimating_equations.py", line 411, in cluster_list
for k in self.group_labels]
IndexError: arrays used as indices must be of integer (or boolean) type
|
IndexError
|
def setup_nominal(self, endog, exog, groups, time, offset):
"""
Restructure nominal data as binary indicators so that they can
be analysed using Generalized Estimating Equations.
"""
self.endog_orig = endog.copy()
self.exog_orig = exog.copy()
self.groups_orig = groups.copy()
if offset is not None:
self.offset_orig = offset.copy()
else:
self.offset_orig = None
offset = np.zeros(len(endog))
if time is not None:
self.time_orig = time.copy()
else:
self.time_orig = None
time = np.zeros((len(endog), 1))
exog = np.asarray(exog)
endog = np.asarray(endog)
groups = np.asarray(groups)
time = np.asarray(time)
offset = np.asarray(offset)
# The unique outcomes, except the greatest one.
self.endog_values = np.unique(endog)
endog_cuts = self.endog_values[0:-1]
ncut = len(endog_cuts)
nrows = len(endog_cuts) * exog.shape[0]
ncols = len(endog_cuts) * exog.shape[1]
exog_out = np.zeros((nrows, ncols), dtype=np.float64)
endog_out = np.zeros(nrows, dtype=np.float64)
groups_out = np.zeros(nrows, dtype=np.float64)
time_out = np.zeros((nrows, time.shape[1]), dtype=np.float64)
offset_out = np.zeros(nrows, dtype=np.float64)
jrow = 0
zipper = zip(exog, endog, groups, time, offset)
for exog_row, endog_value, group_value, time_value, offset_value in zipper:
# Loop over thresholds for the indicators
for thresh_ix, thresh in enumerate(endog_cuts):
u = np.zeros(len(endog_cuts), dtype=np.float64)
u[thresh_ix] = 1
exog_out[jrow, :] = np.kron(u, exog_row)
endog_out[jrow] = int(endog_value == thresh)
groups_out[jrow] = group_value
time_out[jrow] = time_value
offset_out[jrow] = offset_value
jrow += 1
# exog names
if type(self.exog_orig) == pd.DataFrame:
xnames_in = self.exog_orig.columns
else:
xnames_in = ["x%d" % k for k in range(1, exog.shape[1] + 1)]
xnames = []
for tr in endog_cuts:
xnames.extend(["%s[%.1f]" % (v, tr) for v in xnames_in])
exog_out = pd.DataFrame(exog_out, columns=xnames)
exog_out = pd.DataFrame(exog_out, columns=xnames)
# Preserve endog name if there is one
if type(self.endog_orig) == pd.Series:
endog_out = pd.Series(endog_out, name=self.endog_orig.names)
return endog_out, exog_out, groups_out, time_out, offset_out
|
def setup_nominal(self, endog, exog, groups, time, offset):
"""
Restructure nominal data as binary indicators so that they can
be analysed using Generalized Estimating Equations.
"""
self.endog_orig = endog.copy()
self.exog_orig = exog.copy()
self.groups_orig = groups.copy()
if offset is not None:
self.offset_orig = offset.copy()
else:
self.offset_orig = None
offset = np.zeros(len(endog))
if time is not None:
self.time_orig = time.copy()
else:
self.time_orig = None
time = np.zeros((len(endog), 1))
# The unique outcomes, except the greatest one.
self.endog_values = np.unique(endog)
endog_cuts = self.endog_values[0:-1]
ncut = len(endog_cuts)
nrows = len(endog_cuts) * exog.shape[0]
ncols = len(endog_cuts) * exog.shape[1]
exog_out = np.zeros((nrows, ncols), dtype=np.float64)
endog_out = np.zeros(nrows, dtype=np.float64)
groups_out = np.zeros(nrows, dtype=np.float64)
time_out = np.zeros((nrows, time.shape[1]), dtype=np.float64)
offset_out = np.zeros(nrows, dtype=np.float64)
jrow = 0
zipper = zip(exog, endog, groups, time, offset)
for exog_row, endog_value, group_value, time_value, offset_value in zipper:
# Loop over thresholds for the indicators
for thresh_ix, thresh in enumerate(endog_cuts):
u = np.zeros(len(endog_cuts), dtype=np.float64)
u[thresh_ix] = 1
exog_out[jrow, :] = np.kron(u, exog_row)
endog_out[jrow] = int(endog_value == thresh)
groups_out[jrow] = group_value
time_out[jrow] = time_value
offset_out[jrow] = offset_value
jrow += 1
# exog names
if type(self.exog_orig) == pd.DataFrame:
xnames_in = self.exog_orig.columns
else:
xnames_in = ["x%d" % k for k in range(1, exog.shape[1] + 1)]
xnames = []
for tr in endog_cuts:
xnames.extend(["%s[%.1f]" % (v, tr) for v in xnames_in])
exog_out = pd.DataFrame(exog_out, columns=xnames)
exog_out = pd.DataFrame(exog_out, columns=xnames)
# Preserve endog name if there is one
if type(self.endog_orig) == pd.Series:
endog_out = pd.Series(endog_out, name=self.endog_orig.names)
return endog_out, exog_out, groups_out, time_out, offset_out
|
https://github.com/statsmodels/statsmodels/issues/1931
|
======================================================================
ERROR: statsmodels.genmod.tests.test_gee.TestGEEMultinomialCovType.test_wrapper
----------------------------------------------------------------------
Traceback (most recent call last):
File "c:\programs\python27\lib\site-packages\nose-1.0.0-py2.7.egg\nose\case.py", line 187, in runTest
self.test(*self.arg)
File "E:\Josef\eclipsegworkspace\statsmodels-git\statsmodels-all-new2_py27\statsmodels\statsmodels\genmod\tests\test_gee.py", line 968, in test_wrapper
@classmethod
File "E:\Josef\eclipsegworkspace\statsmodels-git\statsmodels-all-new2_py27\statsmodels\statsmodels\genmod\generalized_estimating_equations.py", line 1692, in __init__
exog, groups, time, offset)
File "E:\Josef\eclipsegworkspace\statsmodels-git\statsmodels-all-new2_py27\statsmodels\statsmodels\genmod\generalized_estimating_equations.py", line 1742, in setup_nominal
exog_out[jrow, :] = np.kron(u, exog_row)
ValueError: operands could not be broadcast together with shapes (4) (2)
|
ValueError
|
def __init__(
self, kls, func, funcinvplus, funcinvminus, derivplus, derivminus, *args, **kwargs
):
# print args
# print kwargs
self.func = func
self.funcinvplus = funcinvplus
self.funcinvminus = funcinvminus
self.derivplus = derivplus
self.derivminus = derivminus
# explicit for self.__dict__.update(kwargs)
# need to set numargs because inspection does not work
self.numargs = kwargs.pop("numargs", 0)
# print self.numargs
name = kwargs.pop("name", "transfdist")
longname = kwargs.pop("longname", "Non-linear transformed distribution")
extradoc = kwargs.pop("extradoc", None)
a = kwargs.pop("a", -np.inf) # attached to self in super
b = kwargs.pop("b", np.inf) # self.a, self.b would be overwritten
self.shape = kwargs.pop("shape", False)
# defines whether it is a `u` shaped or `hump' shaped
# transformation
self.u_args, self.u_kwargs = get_u_argskwargs(**kwargs)
self.kls = kls # (self.u_args, self.u_kwargs)
# possible to freeze the underlying distribution
super(TransfTwo_gen, self).__init__(
a=a, b=b, name=name, shapes=kls.shapes, longname=longname, extradoc=extradoc
)
# add enough info for self.freeze() to be able to reconstruct the instance
try:
self._ctor_param.update(
dict(
kls=kls,
func=func,
funcinvplus=funcinvplus,
funcinvminus=funcinvminus,
derivplus=derivplus,
derivminus=derivminus,
shape=self.shape,
)
)
except AttributeError:
# scipy < 0.14 does not have this, ignore and do nothing
pass
|
def __init__(
self, kls, func, funcinvplus, funcinvminus, derivplus, derivminus, *args, **kwargs
):
# print args
# print kwargs
self.func = func
self.funcinvplus = funcinvplus
self.funcinvminus = funcinvminus
self.derivplus = derivplus
self.derivminus = derivminus
# explicit for self.__dict__.update(kwargs)
# need to set numargs because inspection does not work
self.numargs = kwargs.pop("numargs", 0)
# print self.numargs
name = kwargs.pop("name", "transfdist")
longname = kwargs.pop("longname", "Non-linear transformed distribution")
extradoc = kwargs.pop("extradoc", None)
a = kwargs.pop("a", -np.inf) # attached to self in super
b = kwargs.pop("b", np.inf) # self.a, self.b would be overwritten
self.shape = kwargs.pop("shape", False)
# defines whether it is a `u` shaped or `hump' shaped
# transformation
self.u_args, self.u_kwargs = get_u_argskwargs(**kwargs)
self.kls = kls # (self.u_args, self.u_kwargs)
# possible to freeze the underlying distribution
super(TransfTwo_gen, self).__init__(
a=a, b=b, name=name, shapes=kls.shapes, longname=longname, extradoc=extradoc
)
|
https://github.com/statsmodels/statsmodels/issues/1864
|
======================================================================
ERROR: Failure: TypeError (__init__() takes at least 7 arguments (1 given))
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/miniconda/envs/statsmodels-test/lib/python2.7/site-packages/nose/loader.py", line 519, in makeTest
return self._makeTest(obj, parent)
File "/home/travis/miniconda/envs/statsmodels-test/lib/python2.7/site-packages/nose/loader.py", line 578, in _makeTest
return MethodTestCase(obj)
File "/home/travis/miniconda/envs/statsmodels-test/lib/python2.7/site-packages/nose/case.py", line 345, in __init__
self.inst = self.cls()
File "/home/travis/miniconda/envs/statsmodels-test/lib/python2.7/site-packages/statsmodels-0.6.0-py2.7-linux-x86_64.egg/statsmodels/sandbox/distributions/tests/testtransf.py", line 94, in __init__
(squaretg(10), stats.f(1, 10))] #try both frozen
File "/home/travis/miniconda/envs/statsmodels-test/lib/python2.7/site-packages/scipy/stats/_distn_infrastructure.py", line 739, in __call__
return self.freeze(*args, **kwds)
File "/home/travis/miniconda/envs/statsmodels-test/lib/python2.7/site-packages/scipy/stats/_distn_infrastructure.py", line 736, in freeze
return rv_frozen(self, *args, **kwds)
File "/home/travis/miniconda/envs/statsmodels-test/lib/python2.7/site-packages/scipy/stats/_distn_infrastructure.py", line 431, in __init__
self.dist = dist.__class__(**dist._ctor_param)
TypeError: __init__() takes at least 7 arguments (1 given)
======================================================================
ERROR: Failure: TypeError (__init__() takes at least 7 arguments (1 given))
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/miniconda/envs/statsmodels-test/lib/python2.7/site-packages/nose/loader.py", line 519, in makeTest
return self._makeTest(obj, parent)
File "/home/travis/miniconda/envs/statsmodels-test/lib/python2.7/site-packages/nose/loader.py", line 578, in _makeTest
return MethodTestCase(obj)
File "/home/travis/miniconda/envs/statsmodels-test/lib/python2.7/site-packages/nose/case.py", line 345, in __init__
self.inst = self.cls()
File "/home/travis/miniconda/envs/statsmodels-test/lib/python2.7/site-packages/statsmodels-0.6.0-py2.7-linux-x86_64.egg/statsmodels/sandbox/distributions/tests/testtransf.py", line 94, in __init__
(squaretg(10), stats.f(1, 10))] #try both frozen
File "/home/travis/miniconda/envs/statsmodels-test/lib/python2.7/site-packages/scipy/stats/_distn_infrastructure.py", line 739, in __call__
return self.freeze(*args, **kwds)
File "/home/travis/miniconda/envs/statsmodels-test/lib/python2.7/site-packages/scipy/stats/_distn_infrastructure.py", line 736, in freeze
return rv_frozen(self, *args, **kwds)
File "/home/travis/miniconda/envs/statsmodels-test/lib/python2.7/site-packages/scipy/stats/_distn_infrastructure.py", line 431, in __init__
self.dist = dist.__class__(**dist._ctor_param)
TypeError: __init__() takes at least 7 arguments (1 given)
----------------------------------------------------------------------
Ran 3116 tests in 601.138s
FAILED (SKIP=23, errors=2)
|
TypeError
|
def _infer_freq(dates):
maybe_freqstr = getattr(dates, "freqstr", None)
if maybe_freqstr is not None:
return maybe_freqstr
try:
from pandas.tseries.api import infer_freq
freq = infer_freq(dates)
return freq
except ImportError:
pass
timedelta = datetime.timedelta
nobs = min(len(dates), 6)
if nobs == 1:
raise ValueError("Cannot infer frequency from one date")
if hasattr(dates, "values"):
dates = dates.values # can't do a diff on a DateIndex
diff = np.diff(dates[:nobs])
delta = _add_datetimes(diff)
nobs -= 1 # after diff
if delta == timedelta(nobs): # greedily assume 'D'
return "D"
elif delta == timedelta(nobs + 2):
return "B"
elif delta == timedelta(7 * nobs):
return "W"
elif delta >= timedelta(28 * nobs) and delta <= timedelta(31 * nobs):
return "M"
elif delta >= timedelta(90 * nobs) and delta <= timedelta(92 * nobs):
return "Q"
elif delta >= timedelta(365 * nobs) and delta <= timedelta(366 * nobs):
return "A"
else:
return
|
def _infer_freq(dates):
if hasattr(dates, "freqstr"):
return dates.freqstr
try:
from pandas.tseries.api import infer_freq
freq = infer_freq(dates)
return freq
except ImportError:
pass
timedelta = datetime.timedelta
nobs = min(len(dates), 6)
if nobs == 1:
raise ValueError("Cannot infer frequency from one date")
if hasattr(dates, "values"):
dates = dates.values # can't do a diff on a DateIndex
diff = np.diff(dates[:nobs])
delta = _add_datetimes(diff)
nobs -= 1 # after diff
if delta == timedelta(nobs): # greedily assume 'D'
return "D"
elif delta == timedelta(nobs + 2):
return "B"
elif delta == timedelta(7 * nobs):
return "W"
elif delta >= timedelta(28 * nobs) and delta <= timedelta(31 * nobs):
return "M"
elif delta >= timedelta(90 * nobs) and delta <= timedelta(92 * nobs):
return "Q"
elif delta >= timedelta(365 * nobs) and delta <= timedelta(366 * nobs):
return "A"
else:
return
|
https://github.com/statsmodels/statsmodels/issues/1822
|
======================================================================
ERROR: statsmodels.graphics.tests.test_tsaplots.test_plot_month
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/usr/lib/python2.7/dist-packages/numpy/testing/decorators.py", line 146, in skipper_func
return f(*args, **kwargs)
File "/build/buildd/statsmodels-0.6.0~ppa18~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/graphics/tests/test_tsaplots.py", line 47, in test_plot_month
fig = month_plot(dta)
File "/build/buildd/statsmodels-0.6.0~ppa18~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/graphics/tsaplots.py", line 246, in month_plot
_check_period_index(x, freq="M")
File "/build/buildd/statsmodels-0.6.0~ppa18~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/tools/data.py", line 22, in _check_period_index
if not inferred_freq.startswith(freq):
AttributeError: 'NoneType' object has no attribute 'startswith'
======================================================================
ERROR: statsmodels.graphics.tests.test_tsaplots.test_plot_quarter
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/usr/lib/python2.7/dist-packages/numpy/testing/decorators.py", line 146, in skipper_func
return f(*args, **kwargs)
File "/build/buildd/statsmodels-0.6.0~ppa18~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/graphics/tests/test_tsaplots.py", line 72, in test_plot_quarter
quarter_plot(dta.unemp)
File "/build/buildd/statsmodels-0.6.0~ppa18~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/graphics/tsaplots.py", line 280, in quarter_plot
_check_period_index(x, freq="Q")
File "/build/buildd/statsmodels-0.6.0~ppa18~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/tools/data.py", line 22, in _check_period_index
if not inferred_freq.startswith(freq):
AttributeError: 'NoneType' object has no attribute 'startswith'
======================================================================
ERROR: statsmodels.tsa.tests.test_arima.test_arma_predict_indices
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/build/buildd/statsmodels-0.6.0~ppa18~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/tsa/tests/test_arima.py", line 971, in test_arma_predict_indices
_check_start(*((model,) + case))
File "/build/buildd/statsmodels-0.6.0~ppa18~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/tsa/tests/test_arima.py", line 917, in _check_start
start = model._get_predict_start(given, dynamic)
File "/build/buildd/statsmodels-0.6.0~ppa18~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/tsa/arima_model.py", line 581, in _get_predict_start
method)
File "/build/buildd/statsmodels-0.6.0~ppa18~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/tsa/arima_model.py", line 306, in _validate
start = _index_date(start, dates)
File "/build/buildd/statsmodels-0.6.0~ppa18~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/tsa/base/datetools.py", line 57, in _index_date
"an integer" % date)
ValueError: There is no frequency for these dates and date 2009-12-31 00:00:00 is not in dates index. Try giving a date that is in the dates index or use an integer
======================================================================
ERROR: statsmodels.tsa.tests.test_arima.test_arima_predict_indices
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/build/buildd/statsmodels-0.6.0~ppa18~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/tsa/tests/test_arima.py", line 1043, in test_arima_predict_indices
_check_start(*((model,) + case))
File "/build/buildd/statsmodels-0.6.0~ppa18~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/tsa/tests/test_arima.py", line 917, in _check_start
start = model._get_predict_start(given, dynamic)
File "/build/buildd/statsmodels-0.6.0~ppa18~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/tsa/arima_model.py", line 959, in _get_predict_start
method)
File "/build/buildd/statsmodels-0.6.0~ppa18~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/tsa/arima_model.py", line 306, in _validate
start = _index_date(start, dates)
File "/build/buildd/statsmodels-0.6.0~ppa18~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/tsa/base/datetools.py", line 57, in _index_date
"an integer" % date)
ValueError: There is no frequency for these dates and date 2009-12-31 00:00:00 is not in dates index. Try giving a date that is in the dates index or use an integer
|
AttributeError
|
def lowess(
endog,
exog,
frac=2.0 / 3.0,
it=3,
delta=0.0,
is_sorted=False,
missing="drop",
return_sorted=True,
):
"""LOWESS (Locally Weighted Scatterplot Smoothing)
A lowess function that outs smoothed estimates of endog
at the given exog values from points (exog, endog)
Parameters
----------
endog: 1-D numpy array
The y-values of the observed points
exog: 1-D numpy array
The x-values of the observed points
frac: float
Between 0 and 1. The fraction of the data used
when estimating each y-value.
it: int
The number of residual-based reweightings
to perform.
delta: float
Distance within which to use linear-interpolation
instead of weighted regression.
is_sorted : bool
If False (default), then the data will be sorted by exog before
calculating lowess. If True, then it is assumed that the data is
already sorted by exog.
missing : str
Available options are 'none', 'drop', and 'raise'. If 'none', no nan
checking is done. If 'drop', any observations with nans are dropped.
If 'raise', an error is raised. Default is 'drop'.
return_sorted : bool
If True (default), then the returned array is sorted by exog and has
missing (nan or infinite) observations removed.
If False, then the returned array is in the same length and the same
sequence of observations as the input array.
Returns
-------
out: ndarray, float
The returned array is two-dimensional if return_sorted is True, and
one dimensional if return_sorted is False.
If return_sorted is True, then a numpy array with two columns. The
first column contains the sorted x (exog) values and the second column
the associated estimated y (endog) values.
If return_sorted is False, then only the fitted values are returned,
and the observations will be in the same order as the input arrays.
Notes
-----
This lowess function implements the algorithm given in the
reference below using local linear estimates.
Suppose the input data has N points. The algorithm works by
estimating the `smooth` y_i by taking the frac*N closest points
to (x_i,y_i) based on their x values and estimating y_i
using a weighted linear regression. The weight for (x_j,y_j)
is tricube function applied to |x_i-x_j|.
If it > 1, then further weighted local linear regressions
are performed, where the weights are the same as above
times the _lowess_bisquare function of the residuals. Each iteration
takes approximately the same amount of time as the original fit,
so these iterations are expensive. They are most useful when
the noise has extremely heavy tails, such as Cauchy noise.
Noise with less heavy-tails, such as t-distributions with df>2,
are less problematic. The weights downgrade the influence of
points with large residuals. In the extreme case, points whose
residuals are larger than 6 times the median absolute residual
are given weight 0.
`delta` can be used to save computations. For each `x_i`, regressions
are skipped for points closer than `delta`. The next regression is
fit for the farthest point within delta of `x_i` and all points in
between are estimated by linearly interpolating between the two
regression fits.
Judicious choice of delta can cut computation time considerably
for large data (N > 5000). A good choice is ``delta = 0.01 * range(exog)``.
Some experimentation is likely required to find a good
choice of `frac` and `iter` for a particular dataset.
References
----------
Cleveland, W.S. (1979) "Robust Locally Weighted Regression
and Smoothing Scatterplots". Journal of the American Statistical
Association 74 (368): 829-836.
Examples
--------
The below allows a comparison between how different the fits from
lowess for different values of frac can be.
>>> import numpy as np
>>> import statsmodels.api as sm
>>> lowess = sm.nonparametric.lowess
>>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)
>>> y = np.sin(x) + np.random.normal(size=len(x))
>>> z = lowess(y, x)
>>> w = lowess(y, x, frac=1./3)
This gives a similar comparison for when it is 0 vs not.
>>> import numpy as np
>>> import scipy.stats as stats
>>> import statsmodels.api as sm
>>> lowess = sm.nonparametric.lowess
>>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)
>>> y = np.sin(x) + stats.cauchy.rvs(size=len(x))
>>> z = lowess(y, x, frac= 1./3, it=0)
>>> w = lowess(y, x, frac=1./3)
"""
endog = np.asarray(endog, float)
exog = np.asarray(exog, float)
# Inputs should be vectors (1-D arrays) of the
# same length.
if exog.ndim != 1:
raise ValueError("exog must be a vector")
if endog.ndim != 1:
raise ValueError("endog must be a vector")
if endog.shape[0] != exog.shape[0]:
raise ValueError("exog and endog must have same length")
if missing in ["drop", "raise"]:
# Cut out missing values
mask_valid = np.isfinite(exog) & np.isfinite(endog)
all_valid = np.all(mask_valid)
if all_valid:
y = endog
x = exog
else:
if missing == "drop":
x = exog[mask_valid]
y = endog[mask_valid]
else:
raise ValueError("nan or inf found in data")
elif missing == "none":
y = endog
x = exog
all_valid = True # we assume it's true if missing='none'
else:
raise ValueError("missing can only be 'none', 'drop' or 'raise'")
if not is_sorted:
# Sort both inputs according to the ascending order of x values
sort_index = np.argsort(x)
x = np.array(x[sort_index])
y = np.array(y[sort_index])
res = _lowess(y, x, frac=frac, it=it, delta=delta)
_, yfitted = res.T
if return_sorted or (all_valid and is_sorted):
return res
else:
# rebuild yfitted with original indices
# a bit messy: y might have been selected twice
if not is_sorted:
yfitted_ = np.empty_like(y)
yfitted_.fill(np.nan)
yfitted_[sort_index] = yfitted
yfitted = yfitted_
if not all_valid:
yfitted_ = np.empty_like(endog)
yfitted_.fill(np.nan)
yfitted_[mask_valid] = yfitted
yfitted = yfitted_
# we don't need to return exog anymore
return yfitted
|
def lowess(
endog,
exog,
frac=2.0 / 3.0,
it=3,
delta=0.0,
is_sorted=False,
missing="drop",
return_sorted=True,
):
"""LOWESS (Locally Weighted Scatterplot Smoothing)
A lowess function that outs smoothed estimates of endog
at the given exog values from points (exog, endog)
Parameters
----------
endog: 1-D numpy array
The y-values of the observed points
exog: 1-D numpy array
The x-values of the observed points
frac: float
Between 0 and 1. The fraction of the data used
when estimating each y-value.
it: int
The number of residual-based reweightings
to perform.
delta: float
Distance within which to use linear-interpolation
instead of weighted regression.
is_sorted : bool
If False (default), then the data will be sorted by exog before
calculating lowess. If True, then it is assumed that the data is
already sorted by exog.
missing : str
Available options are 'none', 'drop', and 'raise'. If 'none', no nan
checking is done. If 'drop', any observations with nans are dropped.
If 'raise', an error is raised. Default is 'drop'.
return_sorted : bool
If True (default), then the returned array is sorted by exog and has
missing (nan or infinite) observations removed.
If False, then the returned array is in the same length and the same
sequence of observations as the input array.
Returns
-------
out: ndarray, float
The returned array is two-dimensional if return_sorted is True, and
one dimensional if return_sorted is False.
If return_sorted is True, then a numpy array with two columns. The
first column contains the sorted x (exog) values and the second column
the associated estimated y (endog) values.
If return_sorted is False, then only the fitted values are returned,
and the observations will be in the same order as the input arrays.
Notes
-----
This lowess function implements the algorithm given in the
reference below using local linear estimates.
Suppose the input data has N points. The algorithm works by
estimating the `smooth` y_i by taking the frac*N closest points
to (x_i,y_i) based on their x values and estimating y_i
using a weighted linear regression. The weight for (x_j,y_j)
is tricube function applied to |x_i-x_j|.
If it > 1, then further weighted local linear regressions
are performed, where the weights are the same as above
times the _lowess_bisquare function of the residuals. Each iteration
takes approximately the same amount of time as the original fit,
so these iterations are expensive. They are most useful when
the noise has extremely heavy tails, such as Cauchy noise.
Noise with less heavy-tails, such as t-distributions with df>2,
are less problematic. The weights downgrade the influence of
points with large residuals. In the extreme case, points whose
residuals are larger than 6 times the median absolute residual
are given weight 0.
`delta` can be used to save computations. For each `x_i`, regressions
are skipped for points closer than `delta`. The next regression is
fit for the farthest point within delta of `x_i` and all points in
between are estimated by linearly interpolating between the two
regression fits.
Judicious choice of delta can cut computation time considerably
for large data (N > 5000). A good choice is ``delta = 0.01 * range(exog)``.
Some experimentation is likely required to find a good
choice of `frac` and `iter` for a particular dataset.
References
----------
Cleveland, W.S. (1979) "Robust Locally Weighted Regression
and Smoothing Scatterplots". Journal of the American Statistical
Association 74 (368): 829-836.
Examples
--------
The below allows a comparison between how different the fits from
lowess for different values of frac can be.
>>> import numpy as np
>>> import statsmodels.api as sm
>>> lowess = sm.nonparametric.lowess
>>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)
>>> y = np.sin(x) + np.random.normal(size=len(x))
>>> z = lowess(y, x)
>>> w = lowess(y, x, frac=1./3)
This gives a similar comparison for when it is 0 vs not.
>>> import numpy as np
>>> import scipy.stats as stats
>>> import statsmodels.api as sm
>>> lowess = sm.nonparametric.lowess
>>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)
>>> y = np.sin(x) + stats.cauchy.rvs(size=len(x))
>>> z = lowess(y, x, frac= 1./3, it=0)
>>> w = lowess(y, x, frac=1./3)
"""
endog = np.asarray(endog, float)
exog = np.asarray(exog, float)
# Inputs should be vectors (1-D arrays) of the
# same length.
if exog.ndim != 1:
raise ValueError("exog must be a vector")
if endog.ndim != 1:
raise ValueError("endog must be a vector")
if endog.shape[0] != exog.shape[0]:
raise ValueError("exog and endog must have same length")
if missing in ["drop", "raise"]:
# Cut out missing values
mask_valid = np.isfinite(exog) & np.isfinite(endog)
all_valid = np.all(mask_valid)
if all_valid:
y = endog
x = exog
else:
if missing == "drop":
x = exog[mask_valid]
y = endog[mask_valid]
else:
raise ValueError("nan or inf found in data")
elif missing == "none":
y = endog
x = exog
all_valid = True # we assume it's true if missing='none'
else:
raise ValueError("missing can only be 'none', 'drop' or 'raise'")
if not is_sorted:
# Sort both inputs according to the ascending order of x values
sort_index = np.argsort(x)
x = np.array(x[sort_index])
y = np.array(y[sort_index])
res = _lowess(y, x, frac=frac, it=it, delta=delta)
_, yfitted = res.T
if return_sorted or (all_valid and is_sorted):
return res
else:
# rebuild yfitted with original indices
# a bit messy: y might have been selected twice
if not is_sorted:
yfitted_ = np.empty_like(endog)
yfitted_.fill(np.nan)
yfitted_[sort_index] = yfitted
yfitted = yfitted_
if not all_valid:
yfitted_ = np.empty_like(endog)
yfitted_.fill(np.nan)
yfitted_[mask_valid] = yfitted
yfitted = yfitted_
# we don't need to return exog anymore
return yfitted
|
https://github.com/statsmodels/statsmodels/issues/967
|
======================================================================
ERROR: statsmodels.nonparametric.tests.test_lowess.TestLowess.test_options
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/build/buildd/statsmodels-0.5.0~ppa17~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/nonparametric/tests/test_lowess.py", line 144, in test_options
return_sorted=False)
File "/build/buildd/statsmodels-0.5.0~ppa17~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/nonparametric/smoothers_lowess.py", line 182, in lowess
yfitted_[mask_valid] = yfitted
ValueError: NumPy boolean array indexing assignment cannot assign 20 input values to the 17 output values where the mask is true
|
ValueError
|
def fit(
self,
maxlag=None,
method="cmle",
ic=None,
trend="c",
transparams=True,
start_params=None,
solver=None,
maxiter=35,
full_output=1,
disp=1,
callback=None,
**kwargs,
):
"""
Fit the unconditional maximum likelihood of an AR(p) process.
Parameters
----------
maxlag : int
If `ic` is None, then maxlag is the lag length used in fit. If
`ic` is specified then maxlag is the highest lag order used to
select the correct lag order. If maxlag is None, the default is
round(12*(nobs/100.)**(1/4.))
method : str {'cmle', 'mle'}, optional
cmle - Conditional maximum likelihood using OLS
mle - Unconditional (exact) maximum likelihood. See `solver`
and the Notes.
ic : str {'aic','bic','hic','t-stat'}
Criterion used for selecting the optimal lag length.
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
hqic - Hannan-Quinn Information Criterion
If any of the information criteria are selected, the lag length
which results in the lowest value is selected. If t-stat, the
model starts with maxlag and drops a lag until the highest lag
has a t-stat that is significant at the 95 % level.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' - include constant.
'nc' - no constant.
The below can be specified if method is 'mle'
transparams : bool, optional
Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980).
start_params : array-like, optional
A first guess on the parameters. Default is cmle estimates.
solver : str or None, optional
Solver to be used. The default is 'l_bfgs' (limited memory Broyden-
Fletcher-Goldfarb-Shanno). Other choices are 'bfgs', 'newton'
(Newton-Raphson), 'nm' (Nelder-Mead), 'cg' - (conjugate gradient),
'ncg' (non-conjugate gradient), and 'powell'.
The limited memory BFGS uses m=30 to approximate the Hessian,
projected gradient tolerance of 1e-7 and factr = 1e3. These
cannot currently be changed for l_bfgs. See notes for more
information.
maxiter : int, optional
The maximum number of function evaluations. Default is 35.
tol : float
The convergence tolerance. Default is 1e-08.
full_output : bool, optional
If True, all output from solver will be available in
the Results object's mle_retvals attribute. Output is dependent
on the solver. See Notes for more information.
disp : bool, optional
If True, convergence information is output.
callback : function, optional
Called after each iteration as callback(xk) where xk is the current
parameter vector.
kwargs
See Notes for keyword arguments that can be passed to fit.
References
----------
Jones, R.H. 1980 "Maximum likelihood fitting of ARMA models to time
series with missing observations." `Technometrics`. 22.3.
389-95.
See also
--------
statsmodels.base.model.LikelihoodModel.fit : for more information on using
the solvers.
"""
method = method.lower()
if method not in ["cmle", "yw", "mle"]:
raise ValueError("Method %s not recognized" % method)
self.method = method
self.trend = trend
self.transparams = transparams
nobs = len(self.endog) # overwritten if method is 'cmle'
endog = self.endog
if maxlag is None:
maxlag = int(round(12 * (nobs / 100.0) ** (1 / 4.0)))
k_ar = maxlag # stays this if ic is None
# select lag length
if ic is not None:
ic = ic.lower()
if ic not in ["aic", "bic", "hqic", "t-stat"]:
raise ValueError("ic option %s not understood" % ic)
k_ar = self.select_order(k_ar, ic, trend, method)
self.k_ar = k_ar # change to what was chosen by ic
# redo estimation for best lag
# make LHS
Y = endog[k_ar:, :]
# make lagged RHS
X = self._stackX(k_ar, trend) # sets self.k_trend
k_trend = self.k_trend
k = k_trend
self.exog_names = util.make_lag_names(self.endog_names, k_ar, k_trend)
self.Y = Y
self.X = X
if solver:
solver = solver.lower()
if method == "cmle": # do OLS
arfit = OLS(Y, X).fit()
params = arfit.params
self.nobs = nobs - k_ar
self.sigma2 = arfit.ssr / arfit.nobs # needed for predict fcasterr
if method == "mle":
self.nobs = nobs
if start_params is None:
start_params = OLS(Y, X).fit().params
else:
if len(start_params) != k_trend + k_ar:
raise ValueError(
"Length of start params is %d. There"
" are %d parameters." % (len(start_params), k_trend + k_ar)
)
start_params = self._invtransparams(start_params)
loglike = lambda params: -self.loglike(params)
if solver == None: # use limited memory bfgs
bounds = [(None,) * 2] * (k_ar + k)
mlefit = optimize.fmin_l_bfgs_b(
loglike,
start_params,
approx_grad=True,
m=12,
pgtol=1e-8,
factr=1e2,
bounds=bounds,
iprint=disp,
)
self.mlefit = mlefit
params = mlefit[0]
else:
mlefit = super(AR, self).fit(
start_params=start_params,
method=solver,
maxiter=maxiter,
full_output=full_output,
disp=disp,
callback=callback,
**kwargs,
)
self.mlefit = mlefit
params = mlefit.params
if self.transparams:
params = self._transparams(params)
self.transparams = False # turn off now for other results
# don't use yw, because we can't estimate the constant
# elif method == "yw":
# params, omega = yule_walker(endog, order=maxlag,
# method="mle", demean=False)
# how to handle inference after Yule-Walker?
# self.params = params #TODO: don't attach here
# self.omega = omega
pinv_exog = np.linalg.pinv(X)
normalized_cov_params = np.dot(pinv_exog, pinv_exog.T)
arfit = ARResults(self, params, normalized_cov_params)
return ARResultsWrapper(arfit)
|
def fit(
self,
maxlag=None,
method="cmle",
ic=None,
trend="c",
transparams=True,
start_params=None,
solver=None,
maxiter=35,
full_output=1,
disp=1,
callback=None,
**kwargs,
):
"""
Fit the unconditional maximum likelihood of an AR(p) process.
Parameters
----------
maxlag : int
If `ic` is None, then maxlag is the lag length used in fit. If
`ic` is specified then maxlag is the highest lag order used to
select the correct lag order. If maxlag is None, the default is
round(12*(nobs/100.)**(1/4.))
method : str {'cmle', 'mle'}, optional
cmle - Conditional maximum likelihood using OLS
mle - Unconditional (exact) maximum likelihood. See `solver`
and the Notes.
ic : str {'aic','bic','hic','t-stat'}
Criterion used for selecting the optimal lag length.
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
hqic - Hannan-Quinn Information Criterion
If any of the information criteria are selected, the lag length
which results in the lowest value is selected. If t-stat, the
model starts with maxlag and drops a lag until the highest lag
has a t-stat that is significant at the 95 % level.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' - include constant.
'nc' - no constant.
The below can be specified if method is 'mle'
transparams : bool, optional
Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980).
start_params : array-like, optional
A first guess on the parameters. Default is cmle estimates.
solver : str or None, optional
Solver to be used. The default is 'l_bfgs' (limited memory Broyden-
Fletcher-Goldfarb-Shanno). Other choices are 'bfgs', 'newton'
(Newton-Raphson), 'nm' (Nelder-Mead), 'cg' - (conjugate gradient),
'ncg' (non-conjugate gradient), and 'powell'.
The limited memory BFGS uses m=30 to approximate the Hessian,
projected gradient tolerance of 1e-7 and factr = 1e3. These
cannot currently be changed for l_bfgs. See notes for more
information.
maxiter : int, optional
The maximum number of function evaluations. Default is 35.
tol : float
The convergence tolerance. Default is 1e-08.
full_output : bool, optional
If True, all output from solver will be available in
the Results object's mle_retvals attribute. Output is dependent
on the solver. See Notes for more information.
disp : bool, optional
If True, convergence information is output.
callback : function, optional
Called after each iteration as callback(xk) where xk is the current
parameter vector.
kwargs
See Notes for keyword arguments that can be passed to fit.
References
----------
Jones, R.H. 1980 "Maximum likelihood fitting of ARMA models to time
series with missing observations." `Technometrics`. 22.3.
389-95.
See also
--------
statsmodels.base.model.LikelihoodModel.fit : for more information on using
the solvers.
"""
method = method.lower()
if method not in ["cmle", "yw", "mle"]:
raise ValueError("Method %s not recognized" % method)
self.method = method
self.trend = trend
self.transparams = transparams
nobs = len(self.endog) # overwritten if method is 'cmle'
endog = self.endog
if maxlag is None:
maxlag = int(round(12 * (nobs / 100.0) ** (1 / 4.0)))
k_ar = maxlag # stays this if ic is None
# select lag length
if ic is not None:
ic = ic.lower()
if ic not in ["aic", "bic", "hqic", "t-stat"]:
raise ValueError("ic option %s not understood" % ic)
k_ar = self.select_order(k_ar, ic, trend, method)
self.k_ar = k_ar # change to what was chosen by ic
# redo estimation for best lag
# make LHS
Y = endog[k_ar:, :]
# make lagged RHS
X = self._stackX(k_ar, trend) # sets self.k_trend
k_trend = self.k_trend
k = k_trend
self.exog_names = util.make_lag_names(self.endog_names, k_ar, k_trend)
self.Y = Y
self.X = X
if solver:
solver = solver.lower()
if method == "cmle": # do OLS
arfit = OLS(Y, X).fit()
params = arfit.params
self.nobs = nobs - k_ar
self.sigma2 = arfit.ssr / arfit.nobs # needed for predict fcasterr
if method == "mle":
self.nobs = nobs
if not start_params:
start_params = OLS(Y, X).fit().params
start_params = self._invtransparams(start_params)
loglike = lambda params: -self.loglike(params)
if solver == None: # use limited memory bfgs
bounds = [(None,) * 2] * (k_ar + k)
mlefit = optimize.fmin_l_bfgs_b(
loglike,
start_params,
approx_grad=True,
m=12,
pgtol=1e-8,
factr=1e2,
bounds=bounds,
iprint=disp,
)
self.mlefit = mlefit
params = mlefit[0]
else:
mlefit = super(AR, self).fit(
start_params=start_params,
method=solver,
maxiter=maxiter,
full_output=full_output,
disp=disp,
callback=callback,
**kwargs,
)
self.mlefit = mlefit
params = mlefit.params
if self.transparams:
params = self._transparams(params)
self.transparams = False # turn off now for other results
# don't use yw, because we can't estimate the constant
# elif method == "yw":
# params, omega = yule_walker(endog, order=maxlag,
# method="mle", demean=False)
# how to handle inference after Yule-Walker?
# self.params = params #TODO: don't attach here
# self.omega = omega
pinv_exog = np.linalg.pinv(X)
normalized_cov_params = np.dot(pinv_exog, pinv_exog.T)
arfit = ARResults(self, params, normalized_cov_params)
return ARResultsWrapper(arfit)
|
https://github.com/statsmodels/statsmodels/issues/236
|
res1a = AR(data.endog).fit(maxlag=9, start_params=0.1*np.ones(9.),method="mle", disp=-1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "e:\josef\eclipsegworkspace\statsmodels-git\statsmodels-josef_new\statsmodels\tsa\ar_model.py", line 550, in fit
if not start_params:
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def _compute_efficient(self, bw):
"""
Computes the bandwidth by estimating the scaling factor (c)
in n_res resamples of size ``n_sub`` (in `randomize` case), or by
dividing ``nobs`` into as many ``n_sub`` blocks as needed (if
`randomize` is False).
References
----------
See p.9 in socserv.mcmaster.ca/racine/np_faq.pdf
"""
nobs = self.nobs
n_sub = self.n_sub
data = copy.deepcopy(self.data)
n_cvars = self.data_type.count("c")
co = 4 # 2*order of continuous kernel
do = 4 # 2*order of discrete kernel
_, ix_ord, ix_unord = _get_type_pos(self.data_type)
# Define bounds for slicing the data
if self.randomize:
# randomize chooses blocks of size n_sub, independent of nobs
bounds = [None] * self.n_res
else:
bounds = [(i * n_sub, (i + 1) * n_sub) for i in range(nobs // n_sub)]
if nobs % n_sub > 0:
bounds.append((nobs - nobs % n_sub, nobs))
n_blocks = self.n_res if self.randomize else len(bounds)
sample_scale = np.empty((n_blocks, self.k_vars))
only_bw = np.empty((n_blocks, self.k_vars))
class_type, class_vars = self._get_class_vars_type()
if has_joblib:
# `res` is a list of tuples (sample_scale_sub, bw_sub)
res = joblib.Parallel(n_jobs=self.n_jobs)(
joblib.delayed(_compute_subset)(
class_type,
data,
bw,
co,
do,
n_cvars,
ix_ord,
ix_unord,
n_sub,
class_vars,
self.randomize,
bounds[i],
)
for i in range(n_blocks)
)
else:
res = []
for i in xrange(n_blocks):
res.append(
_compute_subset(
class_type,
data,
bw,
co,
do,
n_cvars,
ix_ord,
ix_unord,
n_sub,
class_vars,
self.randomize,
bounds[i],
)
)
for i in xrange(n_blocks):
sample_scale[i, :] = res[i][0]
only_bw[i, :] = res[i][1]
s = self._compute_dispersion(data)
order_func = np.median if self.return_median else np.mean
m_scale = order_func(sample_scale, axis=0)
# TODO: Check if 1/5 is correct in line below!
bw = m_scale * s * nobs ** (-1.0 / (n_cvars + co))
bw[ix_ord] = m_scale[ix_ord] * nobs ** (-2.0 / (n_cvars + do))
bw[ix_unord] = m_scale[ix_unord] * nobs ** (-2.0 / (n_cvars + do))
if self.return_only_bw:
bw = np.median(only_bw, axis=0)
return bw
|
def _compute_efficient(self, bw):
"""
Computes the bandwidth by estimating the scaling factor (c)
in n_res resamples of size ``n_sub`` (in `randomize` case), or by
dividing ``nobs`` into as many ``n_sub`` blocks as needed (if
`randomize` is False).
References
----------
See p.9 in socserv.mcmaster.ca/racine/np_faq.pdf
"""
nobs = self.nobs
n_sub = self.n_sub
data = copy.deepcopy(self.data)
n_cvars = self.data_type.count("c")
co = 4 # 2*order of continuous kernel
do = 4 # 2*order of discrete kernel
_, ix_ord, ix_unord = _get_type_pos(self.data_type)
# Define bounds for slicing the data
if self.randomize:
# randomize chooses blocks of size n_sub, independent of nobs
bounds = [None] * self.n_res
else:
bounds = [(i * n_sub, (i + 1) * n_sub) for i in range(nobs // n_sub)]
if nobs % n_sub > 0:
bounds.append((nobs - nobs % n_sub, nobs))
n_blocks = self.n_res if self.randomize else len(bounds)
sample_scale = np.empty((n_blocks, self.k_vars))
only_bw = np.empty((n_blocks, self.k_vars))
class_type, class_vars = self._get_class_vars_type()
if has_joblib:
# `res` is a list of tuples (sample_scale_sub, bw_sub)
res = joblib.Parallel(n_jobs=self.n_jobs)(
joblib.delayed(_compute_subset)(
class_type,
data,
bw,
co,
do,
n_cvars,
ix_ord,
ix_unord,
n_sub,
class_vars,
self.randomize,
bounds[i],
)
for i in range(n_blocks)
)
else:
res = []
for i in xrange(n_blocks):
res.append(
_compute_subset(
class_type,
data,
bw,
co,
do,
n_cvars,
ix_ord,
ix_unord,
n_sub,
class_vars,
self.randomize,
)
)
for i in xrange(n_blocks):
sample_scale[i, :] = res[i][0]
only_bw[i, :] = res[i][1]
s = self._compute_dispersion(data)
order_func = np.median if self.return_median else np.mean
m_scale = order_func(sample_scale, axis=0)
# TODO: Check if 1/5 is correct in line below!
bw = m_scale * s * nobs ** (-1.0 / (n_cvars + co))
bw[ix_ord] = m_scale[ix_ord] * nobs ** (-2.0 / (n_cvars + do))
bw[ix_unord] = m_scale[ix_unord] * nobs ** (-2.0 / (n_cvars + do))
if self.return_only_bw:
bw = np.median(only_bw, axis=0)
return bw
|
https://github.com/statsmodels/statsmodels/issues/673
|
======================================================================
ERROR: statsmodels.nonparametric.tests.test_kernel_density.TestKDEMultivariate.test_continuous_cvls_efficient
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/build/buildd/statsmodels-0.5.0~ppa15~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/nonparametric/tests/test_kernel_density.py", line 155, in test_continuous_cvls_efficient
defaults=nparam.EstimatorSettings(efficient=True, n_sub=100))
File "/build/buildd/statsmodels-0.5.0~ppa15~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/nonparametric/kernel_density.py", line 118, in __init__
self.bw = self._compute_efficient(bw)
File "/build/buildd/statsmodels-0.5.0~ppa15~revno/debian/python-statsmodels/usr/lib/python2.7/dist-packages/statsmodels/nonparametric/_kernel_base.py", line 210, in _compute_efficient
class_vars, self.randomize))
TypeError: _compute_subset() takes exactly 12 arguments (11 given)
|
TypeError
|
def attach_columns(self, result):
if result.squeeze().ndim <= 1 and len(result) > 1:
return Series(result, index=self.xnames)
else: # for e.g., confidence intervals
return DataFrame(result, index=self.xnames)
|
def attach_columns(self, result):
if result.squeeze().ndim <= 1:
return Series(result, index=self.xnames)
else: # for e.g., confidence intervals
return DataFrame(result, index=self.xnames)
|
https://github.com/statsmodels/statsmodels/issues/706
|
model = ols('LREO_recovery ~ 0 + HREO_recovery', df_product)
results = model.fit()
print results.summary()
OLS Regression Results
==============================================================================
Dep. Variable: LREO_recovery R-squared: 0.999
Model: OLS Adj. R-squared: 0.999
Method: Least Squares F-statistic: inf
Date: Sat, 16 Mar 2013 Prob (F-statistic): nan
Time: 20:35:35 Log-Likelihood: 165.81
No. Observations: 54 AIC: -329.6
Df Residuals: 53 BIC: -327.6
Df Model: 0
=================================================================================
coef std err t P>|t| [95.0% Conf. Int.]
---------------------------------------------------------------------------------
HREO_recovery 1.0029 0.003 373.607 0.000 0.998 1.008
==============================================================================
Omnibus: 21.160 Durbin-Watson: 1.978
Prob(Omnibus): 0.000 Jarque-Bera (JB): 67.387
Skew: 0.891 Prob(JB): 2.33e-15
Kurtosis: 8.175 Cond. No. 1.00
==============================================================================
results.conf_int()
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-45-6d86a730d4c3> in <module>()
----> 1 results.conf_int()
/usr/local/lib/python2.7/dist-packages/statsmodels/base/wrapper.pyc in wrapper(self, *args, **kwargs)
87 results = object.__getattribute__(self, '_results')
88 data = results.model._data
---> 89 return data.wrap_output(func(results, *args, **kwargs), how)
90
91 argspec = inspect.getargspec(func)
/usr/local/lib/python2.7/dist-packages/statsmodels/base/data.pyc in wrap_output(self, obj, how)
104 def wrap_output(self, obj, how='columns'):
105 if how == 'columns':
--> 106 return self.attach_columns(obj)
107 elif how == 'rows':
108 return self.attach_rows(obj)
/usr/local/lib/python2.7/dist-packages/statsmodels/base/data.pyc in attach_columns(self, result)
166 def attach_columns(self, result):
167 if result.squeeze().ndim <= 1:
--> 168 return Series(result, index=self.xnames)
169 else: # for e.g., confidence intervals
170 return DataFrame(result, index=self.xnames)
/usr/local/lib/python2.7/dist-packages/pandas/core/series.pyc in __new__(cls, data, index, dtype, name, copy)
370
371 subarr = _sanitize_array(data, index, dtype, copy,
--> 372 raise_cast_failure=True)
373
374 if not isinstance(subarr, np.ndarray):
/usr/local/lib/python2.7/dist-packages/pandas/core/series.pyc in _sanitize_array(data, index, dtype, copy, raise_cast_failure)
3127 elif subarr.ndim > 1:
3128 if isinstance(data, np.ndarray):
-> 3129 raise Exception('Data must be 1-dimensional')
3130 else:
3131 subarr = _asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
model = ols('LREO_recovery ~ HREO_recovery', df_product)
results = model.fit()
print results.summary()
OLS Regression Results
==============================================================================
Dep. Variable: LREO_recovery R-squared: 0.999
Model: OLS Adj. R-squared: 0.999
Method: Least Squares F-statistic: 3.925e+04
Date: Sat, 16 Mar 2013 Prob (F-statistic): 1.60e-76
Time: 20:35:51 Log-Likelihood: 166.13
No. Observations: 54 AIC: -328.3
Df Residuals: 52 BIC: -324.3
Df Model: 1
=================================================================================
coef std err t P>|t| [95.0% Conf. Int.]
---------------------------------------------------------------------------------
Intercept 0.0023 0.003 0.789 0.434 -0.004 0.008
HREO_recovery 0.9995 0.005 198.115 0.000 0.989 1.010
==============================================================================
Omnibus: 26.677 Durbin-Watson: 1.990
Prob(Omnibus): 0.000 Jarque-Bera (JB): 93.924
Skew: 1.169 Prob(JB): 4.02e-21
Kurtosis: 9.023 Cond. No. 4.09
==============================================================================
results.conf_int()
Out[47]:
0 1
Intercept -0.003528 0.008103
HREO_recovery 0.989421 1.009669
|
Exception
|
def _isdummy(X):
"""
Given an array X, returns the column indices for the dummy variables.
Parameters
----------
X : array-like
A 1d or 2d array of numbers
Examples
--------
>>> X = np.random.randint(0, 2, size=(15,5)).astype(float)
>>> X[:,1:3] = np.random.randn(15,2)
>>> ind = _isdummy(X)
>>> ind
array([ True, False, False, True, True], dtype=bool)
"""
X = np.asarray(X)
if X.ndim > 1:
ind = np.zeros(X.shape[1]).astype(bool)
max = np.max(X, axis=0) == 1
min = np.min(X, axis=0) == 0
remainder = np.all(X % 1.0 == 0, axis=0)
ind = min & max & remainder
if X.ndim == 1:
ind = np.asarray([ind])
return np.where(ind)[0]
|
def _isdummy(X):
"""
Given an array X, returns the column indices for the dummy variables.
Parameters
----------
X : array-like
A 1d or 2d array of numbers
Examples
--------
>>> X = np.random.randint(0, 2, size=(15,5)).astype(float)
>>> X[:,1:3] = np.random.randn(15,2)
>>> ind = _isdummy(X)
>>> ind
array([ True, False, False, True, True], dtype=bool)
"""
X = np.asarray(X)
if X.ndim > 1:
ind = np.zeros(X.shape[1]).astype(bool)
max = np.max(X, axis=0) == 1
min = np.min(X, axis=0) == 0
remainder = np.all(X % 1.0 == 0, axis=0)
ind = min & max & remainder
if X.ndim == 1:
ind = np.asarray([ind])
return ind
|
https://github.com/statsmodels/statsmodels/issues/399
|
======================================================================
ERROR: statsmodels.discrete.tests.test_discrete.TestLogitNewton.test_dummy_dydxmean
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/nose-1.1.2-py2.6.egg/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/Users/rgommers/Code/statsmodels/statsmodels/discrete/tests/test_discrete.py", line 183, in test_dummy_dydxmean
assert_almost_equal(self.res1.margeff(at='mean', dummy=True),
File "/Users/rgommers/Code/statsmodels/statsmodels/discrete/discrete_model.py", line 1664, in margeff
dummy_ind[const_idx:] -= 1
TypeError: Cannot cast ufunc subtract output from dtype('int32') to dtype('bool') with casting rule 'same_kind'
|
TypeError
|
def _get_dummy_effects(effects, exog, dummy_ind, method, model, params):
for i in dummy_ind:
exog0 = exog.copy() # only copy once, can we avoid a copy?
exog0[:, i] = 0
effect0 = model.predict(params, exog0)
# fittedvalues0 = np.dot(exog0,params)
exog0[:, i] = 1
effect1 = model.predict(params, exog0)
if "ey" in method:
effect0 = np.log(effect0)
effect1 = np.log(effect1)
effects[i] = (effect1 - effect0).mean() # mean for overall
return effects
|
def _get_dummy_effects(effects, exog, dummy_ind, method, model, params):
for i, tf in enumerate(dummy_ind):
if tf == True:
exog0 = exog.copy() # only copy once, can we avoid a copy?
exog0[:, i] = 0
effect0 = model.predict(params, exog0)
# fittedvalues0 = np.dot(exog0,params)
exog0[:, i] = 1
effect1 = model.predict(params, exog0)
if "ey" in method:
effect0 = np.log(effect0)
effect1 = np.log(effect1)
effects[i] = (effect1 - effect0).mean() # mean for overall
return effects
|
https://github.com/statsmodels/statsmodels/issues/399
|
======================================================================
ERROR: statsmodels.discrete.tests.test_discrete.TestLogitNewton.test_dummy_dydxmean
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/nose-1.1.2-py2.6.egg/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/Users/rgommers/Code/statsmodels/statsmodels/discrete/tests/test_discrete.py", line 183, in test_dummy_dydxmean
assert_almost_equal(self.res1.margeff(at='mean', dummy=True),
File "/Users/rgommers/Code/statsmodels/statsmodels/discrete/discrete_model.py", line 1664, in margeff
dummy_ind[const_idx:] -= 1
TypeError: Cannot cast ufunc subtract output from dtype('int32') to dtype('bool') with casting rule 'same_kind'
|
TypeError
|
def summary_params_2dflat(
result,
endog_names=None,
exog_names=None,
alpha=0.95,
use_t=True,
keep_headers=True,
endog_cols=False,
):
# skip_headers2=True):
"""summary table for parameters that are 2d, e.g. multi-equation models
Parameter
---------
result : result instance
the result instance with params, bse, tvalues and conf_int
endog_names : None or list of strings
names for rows of the parameter array (multivariate endog)
exog_names : None or list of strings
names for columns of the parameter array (exog)
alpha : float
level for confidence intervals, default 0.95
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
keep_headers : bool
If true (default), then sub-tables keep their headers. If false, then
only the first headers are kept, the other headerse are blanked out
endog_cols : bool
If false (default) then params and other result statistics have
equations by rows. If true, then equations are assumed to be in columns.
Not implemented yet.
Returns
-------
tables : list of SimpleTable
this contains a list of all seperate Subtables
table_all : SimpleTable
the merged table with results concatenated for each row of the parameter
array
"""
res = result
params = res.params
if params.ndim == 2: # we've got multiple equations
n_equ = params.shape[1]
if not len(endog_names) == params.shape[1]:
raise ValueError("endog_names has wrong length")
else:
if not len(endog_names) == len(params):
raise ValueError("endog_names has wrong length")
n_equ = 1
# VAR doesn't have conf_int
# params = res.params.T # this is a convention for multi-eq models
if not isinstance(endog_names, list):
# this might be specific to multinomial logit type, move?
if endog_names is None:
endog_basename = "endog"
else:
endog_basename = endog_names
# TODO: note, the [1:] is specific to current MNLogit
endog_names = res.model.endog_names[1:]
# check if we have the right length of names
tables = []
for eq in range(n_equ):
restup = (
res,
res.params[:, eq],
res.bse[:, eq],
res.tvalues[:, eq],
res.pvalues[:, eq],
res.conf_int(alpha)[eq],
)
# not used anymore in current version
# if skip_headers2:
# skiph = (row != 0)
# else:
# skiph = False
skiph = False
tble = summary_params(
restup,
yname=endog_names[eq],
xname=exog_names,
alpha=0.05,
use_t=use_t,
skip_header=skiph,
)
tables.append(tble)
# add titles, they will be moved to header lines in table_extend
for i in range(len(endog_names)):
tables[i].title = endog_names[i]
table_all = table_extend(tables, keep_headers=keep_headers)
return tables, table_all
|
def summary_params_2dflat(
result,
endog_names=None,
exog_names=None,
alpha=0.95,
use_t=True,
keep_headers=True,
endog_cols=False,
):
# skip_headers2=True):
"""summary table for parameters that are 2d, e.g. multi-equation models
Parameter
---------
result : result instance
the result instance with params, bse, tvalues and conf_int
endog_names : None or list of strings
names for rows of the parameter array (multivariate endog)
exog_names : None or list of strings
names for columns of the parameter array (exog)
alpha : float
level for confidence intervals, default 0.95
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
keep_headers : bool
If true (default), then sub-tables keep their headers. If false, then
only the first headers are kept, the other headerse are blanked out
endog_cols : bool
If false (default) then params and other result statistics have
equations by rows. If true, then equations are assumed to be in columns.
Not implemented yet.
Returns
-------
tables : list of SimpleTable
this contains a list of all seperate Subtables
table_all : SimpleTable
the merged table with results concatenated for each row of the parameter
array
"""
res = result
# VAR doesn't have conf_int
# params = res.params.T # this is a convention for multi-eq models
if not isinstance(endog_names, list):
# this might be specific to multinomial logit type, move?
if endog_names is None:
endog_basename = "endog"
else:
endog_basename = endog_names
# TODO: note, the [1:] is specific to current MNLogit
endog_names = res.model.endog_names[1:]
# check if we have the right length of names
if not len(endog_names) == res.params.shape[0]:
raise ValueError("endog_names has wrong length")
n_equ = res.params.shape[1]
tables = []
for eq in range(n_equ):
restup = (
res,
res.params[:, eq],
res.bse[:, eq],
res.tvalues[:, eq],
res.pvalues[:, eq],
res.conf_int(alpha)[:, eq],
)
# not used anymore in current version
# if skip_headers2:
# skiph = (row != 0)
# else:
# skiph = False
skiph = False
tble = summary_params(
restup,
yname=endog_names[eq],
xname=exog_names,
alpha=0.05,
use_t=use_t,
skip_header=skiph,
)
tables.append(tble)
# add titles, they will be moved to header lines in table_extend
for i in range(len(endog_names)):
tables[i].title = endog_names[i]
table_all = table_extend(tables, keep_headers=keep_headers)
return tables, table_all
|
https://github.com/statsmodels/statsmodels/issues/339
|
Traceback (most recent call last):
File "fitting.py", line 83, in <module>
print mlogit_res.summary()
File "/usr/local/lib/python2.7/dist-packages/statsmodels-0.4.1-py2.7-linux-x86_64.egg/statsmodels/discrete/discrete_model.py", line 1728, in summary
use_t=False)
File "/usr/local/lib/python2.7/dist-packages/statsmodels-0.4.1-py2.7-linux-x86_64.egg/statsmodels/iolib/summary.py", line 823, in add_table_params
alpha=alpha, use_t=use_t)
File "/usr/local/lib/python2.7/dist-packages/statsmodels-0.4.1-py2.7-linux-x86_64.egg/statsmodels/iolib/summary.py", line 629, in summary_params_2dflat
raise ValueError('endog_names has wrong length')
ValueError: endog_names has wrong length
|
ValueError
|
def add_constant(data, prepend=False):
"""
This appends a column of ones to an array if prepend==False.
For ndarrays and pandas.DataFrames, checks to make sure a constant is not
already included. If there is at least one column of ones then the
original object is returned. Does not check for a constant if a structured
or recarray is
given.
Parameters
----------
data : array-like
`data` is the column-ordered design matrix
prepend : bool
True and the constant is prepended rather than appended.
Returns
-------
data : array
The original array with a constant (column of ones) as the first or
last column.
Notes
-----
.. WARNING::
The default of prepend will be changed to True in the next release of
statsmodels. We recommend to use an explicit prepend in any permanent
code.
"""
if not prepend:
import inspect
frame = inspect.currentframe().f_back
info = inspect.getframeinfo(frame)
try: # info.code_context is None on python 2.6? Why?
to_warn = info.code_context is not None and "prepend" not in "\n".join(
info.code_context
)
except: # python 2.5 compatibility
to_warn = "prepend" not in "\n".join(info[3])
if to_warn:
import warnings
warnings.warn(
"The default of `prepend` will be changed to True "
"in 0.5.0, use explicit prepend",
FutureWarning,
)
if _is_using_pandas(data, None):
# work on a copy
return _pandas_add_constant(data.copy(), prepend)
else:
data = np.asarray(data)
if not data.dtype.names:
var0 = data.var(0) == 0
if np.any(var0):
return data
data = np.column_stack((data, np.ones((data.shape[0], 1))))
if prepend:
return np.roll(data, 1, 1)
else:
return_rec = data.__class__ is np.recarray
if prepend:
ones = np.ones((data.shape[0], 1), dtype=[("const", float)])
data = nprf.append_fields(
ones,
data.dtype.names,
[data[i] for i in data.dtype.names],
usemask=False,
asrecarray=return_rec,
)
else:
data = nprf.append_fields(
data,
"const",
np.ones(data.shape[0]),
usemask=False,
asrecarray=return_rec,
)
return data
|
def add_constant(data, prepend=False):
"""
This appends a column of ones to an array if prepend==False.
For ndarrays and pandas.DataFrames, checks to make sure a constant is not
already included. If there is at least one column of ones then the
original object is returned. Does not check for a constant if a structured
or recarray is
given.
Parameters
----------
data : array-like
`data` is the column-ordered design matrix
prepend : bool
True and the constant is prepended rather than appended.
Returns
-------
data : array
The original array with a constant (column of ones) as the first or
last column.
Notes
-----
.. WARNING::
The default of prepend will be changed to True in the next release of
statsmodels. We recommend to use an explicit prepend in any permanent
code.
"""
if not prepend:
import inspect
frame = inspect.currentframe().f_back
info = inspect.getframeinfo(frame)
try:
to_warn = "prepend" not in "\n".join(info.code_context)
except: # python 2.5 compatibility
to_warn = "prepend" not in "\n".join(info[3])
if to_warn:
import warnings
warnings.warn(
"The default of `prepend` will be changed to True "
"in 0.5.0, use explicit prepend",
FutureWarning,
)
if _is_using_pandas(data, None):
# work on a copy
return _pandas_add_constant(data.copy(), prepend)
else:
data = np.asarray(data)
if not data.dtype.names:
var0 = data.var(0) == 0
if np.any(var0):
return data
data = np.column_stack((data, np.ones((data.shape[0], 1))))
if prepend:
return np.roll(data, 1, 1)
else:
return_rec = data.__class__ is np.recarray
if prepend:
ones = np.ones((data.shape[0], 1), dtype=[("const", float)])
data = nprf.append_fields(
ones,
data.dtype.names,
[data[i] for i in data.dtype.names],
usemask=False,
asrecarray=return_rec,
)
else:
data = nprf.append_fields(
data,
"const",
np.ones(data.shape[0]),
usemask=False,
asrecarray=return_rec,
)
return data
|
https://github.com/statsmodels/statsmodels/issues/260
|
$ dmesg | grep -e"Linux version"
[ 0.000000] Linux version 2.6.32-308-ec2 (buildd@crested) (gcc version 4.4.3 (Ubuntu 4.4.3-4ubuntu5) ) #15-Ubuntu SMP Thu Aug 19 04:03:34 UTC 2010 (Ubuntu 2.6.32-308.15-ec2 2.6.32.15+drm33.5)
$ python
Python 2.6.5 (r265:79063, Apr 16 2010, 13:57:41)
[GCC 4.4.3] on linux2
Type "help", "copyright", "credits" or "license" for more information.
import numpy as np
import statsmodels.api as sm
# get data
... nsample = 100
x = np.linspace(0,10, 100)
X = sm.add_constant(np.column_stack((x, x**2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.6/dist-packages/statsmodels-0.4.0-py2.6-linux-x86_64.egg/statsmodels/tools/tools.py", line 295, in add_constant
to_warn = 'prepend' not in '\n'.join(info[3])
TypeError
|
TypeError
|
def predict(self, params, exog=None, exposure=None, offset=None, linear=False):
"""
Predict response variable of a count model given exogenous variables.
Notes
-----
If exposure is specified, then it will be logged by the method.
The user does not need to log it first.
"""
# TODO: add offset tp
if exog is None:
exog = self.exog
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
else:
if exposure is None:
exposure = 0
else:
exposure = np.log(exposure)
if offset is None:
offset = 0
if not linear:
return np.exp(np.dot(exog, params) + exposure + offset) # not cdf
else:
return np.dot(exog, params) + exposure + offset
return super(CountModel, self).predict(params, exog, linear)
|
def predict(self, params, exog=None, exposure=None, offset=None, linear=False):
"""
Predict response variable of a count model given exogenous variables.
Notes
-----
If exposure is specified, then it will be logged by the method.
The user does not need to log it first.
"""
# TODO: add offset tp
if exog is None:
exog = self.exog
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
else:
if exposure is None:
exposure = 0
else:
exposure = np.log(exposure)
if not linear:
return np.exp(np.dot(exog, params) + exposure + offset) # not cdf
else:
return np.dot(exog, params) + exposure + offset
return super(CountModel, self).predict(params, exog, linear)
|
https://github.com/statsmodels/statsmodels/issues/175
|
results3 = model.fit(start_value=-np.ones(4), method='bfgs')
Warning: Desired error not necessarily achieveddue to precision loss
Current function value: 17470.629507
Iterations: 17
Function evaluations: 32
Gradient evaluations: 31
results3.predict(xf)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "e:\josef\eclipsegworkspace\statsmodels-git\statsmodels-all-new\statsmodels\statsmodels\base\model.py", line 694, in predict
return self.model.predict(self.params, exog, *args, **kwargs)
File "e:\josef\eclipsegworkspace\statsmodels-git\statsmodels-all-new\statsmodels\statsmodels\discrete\discrete_model.py", line 402, in predict
return np.exp(np.dot(exog, params) + exposure + offset) # not cdf
TypeError: unsupported operand type(s) for +: 'float' and 'NoneType'
results3.predict(xf, exposure=1, offset=0)
array([ 2.55643042, 2.55643042])
|
TypeError
|
def search_novel(self, query):
response = self.submit_form(search_url, {"searchword": query})
data = response.json()
results = []
for novel in data:
titleSoup = BeautifulSoup(novel["name"], "lxml")
results.append(
{
"title": titleSoup.body.text.title(),
"url": novel_page_url % novel["nameunsigned"],
"info": "Latest: %s" % novel["lastchapter"],
}
)
# end for
return results
|
def search_novel(self, query):
response = self.submit_form(search_url, {"searchword": query})
data = response.json()
results = []
for novel in data:
titleSoup = BeautifulSoup(novel["name"], "lxml")
results.append(
{
"title": titleSoup.body.text.title(),
"url": novel_page_url % novel["id_encode"],
"info": "Latest: %s" % novel["lastchapter"],
}
)
# end for
return results
|
https://github.com/dipu-bd/lightnovel-crawler/issues/476
|
2020-06-08 18:50:45,291 [DEBUG] (urllib3.connectionpool)
https://www.mtlnovel.com:443 "GET /wp-admin/admin-ajax.php?action=autosuggest&q=strongest%20sword%20god HTTP/1.1" 200 None
2020-06-08 18:50:45,297 [DEBUG] (SEARCH_NOVEL)
Traceback (most recent call last):
File "./src/core/novel_search.py", line 22, in get_search_result
results = instance.search_novel(user_input)
File "./src/sources/mtlnovel.py", line 23, in search_novel
url = self.absolute_url("https://www.mtlnovel.com/?p=%s" % item['id'])
KeyError: 'id'
|
KeyError
|
def search_novel(self, query):
query = query.lower().replace(" ", "%20")
# soup = self.get_soup(search_url % query)
list_url = search_url % query
data = self.get_json(list_url)["items"][0]["results"]
results = []
for item in data:
url = item["permalink"]
results.append(
{
"url": url,
"title": item["title"],
"info": self.search_novel_info(url),
}
)
# end for
return results
|
def search_novel(self, query):
query = query.lower().replace(" ", "%20")
# soup = self.get_soup(search_url % query)
list_url = search_url % query
data = self.get_json(list_url)["items"][0]["results"]
results = []
for item in data:
url = self.absolute_url("https://es.mtlnovel.com/?p=%s" % item["id"])
results.append(
{
"url": url,
"title": item["title"],
"info": self.search_novel_info(url),
}
)
# end for
return results
|
https://github.com/dipu-bd/lightnovel-crawler/issues/476
|
2020-06-08 18:50:45,291 [DEBUG] (urllib3.connectionpool)
https://www.mtlnovel.com:443 "GET /wp-admin/admin-ajax.php?action=autosuggest&q=strongest%20sword%20god HTTP/1.1" 200 None
2020-06-08 18:50:45,297 [DEBUG] (SEARCH_NOVEL)
Traceback (most recent call last):
File "./src/core/novel_search.py", line 22, in get_search_result
results = instance.search_novel(user_input)
File "./src/sources/mtlnovel.py", line 23, in search_novel
url = self.absolute_url("https://www.mtlnovel.com/?p=%s" % item['id'])
KeyError: 'id'
|
KeyError
|
def search_novel(self, query):
query = query.lower().replace(" ", "%20")
# soup = self.get_soup(search_url % query)
list_url = search_url % query
data = self.get_json(list_url)["items"][0]["results"]
results = []
for item in data:
url = item["permalink"]
results.append(
{
"url": url,
"title": item["title"],
"info": self.search_novel_info(url),
}
)
# end for
return results
|
def search_novel(self, query):
query = query.lower().replace(" ", "%20")
# soup = self.get_soup(search_url % query)
list_url = search_url % query
data = self.get_json(list_url)["items"][0]["results"]
results = []
for item in data:
url = self.absolute_url("https://fr.mtlnovel.com/?p=%s" % item["id"])
results.append(
{
"url": url,
"title": item["title"],
"info": self.search_novel_info(url),
}
)
# end for
return results
|
https://github.com/dipu-bd/lightnovel-crawler/issues/476
|
2020-06-08 18:50:45,291 [DEBUG] (urllib3.connectionpool)
https://www.mtlnovel.com:443 "GET /wp-admin/admin-ajax.php?action=autosuggest&q=strongest%20sword%20god HTTP/1.1" 200 None
2020-06-08 18:50:45,297 [DEBUG] (SEARCH_NOVEL)
Traceback (most recent call last):
File "./src/core/novel_search.py", line 22, in get_search_result
results = instance.search_novel(user_input)
File "./src/sources/mtlnovel.py", line 23, in search_novel
url = self.absolute_url("https://www.mtlnovel.com/?p=%s" % item['id'])
KeyError: 'id'
|
KeyError
|
def search_novel(self, query):
query = query.lower().replace(" ", "%20")
# soup = self.get_soup(search_url % query)
list_url = search_url % query
data = self.get_json(list_url)["items"][0]["results"]
results = []
for item in data:
url = item["permalink"]
results.append(
{
"url": url,
"title": item["title"],
"info": self.search_novel_info(url),
}
)
# end for
return results
|
def search_novel(self, query):
query = query.lower().replace(" ", "%20")
# soup = self.get_soup(search_url % query)
list_url = search_url % query
data = self.get_json(list_url)["items"][0]["results"]
results = []
for item in data:
url = self.absolute_url("https://id.mtlnovel.com/?p=%s" % item["id"])
results.append(
{
"url": url,
"title": item["title"],
"info": self.search_novel_info(url),
}
)
# end for
return results
|
https://github.com/dipu-bd/lightnovel-crawler/issues/476
|
2020-06-08 18:50:45,291 [DEBUG] (urllib3.connectionpool)
https://www.mtlnovel.com:443 "GET /wp-admin/admin-ajax.php?action=autosuggest&q=strongest%20sword%20god HTTP/1.1" 200 None
2020-06-08 18:50:45,297 [DEBUG] (SEARCH_NOVEL)
Traceback (most recent call last):
File "./src/core/novel_search.py", line 22, in get_search_result
results = instance.search_novel(user_input)
File "./src/sources/mtlnovel.py", line 23, in search_novel
url = self.absolute_url("https://www.mtlnovel.com/?p=%s" % item['id'])
KeyError: 'id'
|
KeyError
|
def search_novel(self, query):
query = query.lower().replace(" ", "+")
soup = self.get_soup(search_url % query)
results = []
if soup.get_text(strip=True) == "Sorry! No novel founded!":
return results
# end if
for tr in soup.select("tr"):
a = tr.select("td a")
results.append(
{
"title": a[0].text.strip(),
"url": self.absolute_url(a[0]["href"]),
"info": a[1].text.strip(),
}
)
# end for
return results
|
def search_novel(self, query):
query = query.lower().replace(" ", "+")
soup = self.get_soup(search_url % query)
results = []
for tr in soup.select("tr"):
a = tr.select("td a")
results.append(
{
"title": a[0].text.strip(),
"url": self.absolute_url(a[0]["href"]),
"info": a[1].text.strip(),
}
)
# end for
return results
|
https://github.com/dipu-bd/lightnovel-crawler/issues/476
|
2020-06-08 18:50:45,291 [DEBUG] (urllib3.connectionpool)
https://www.mtlnovel.com:443 "GET /wp-admin/admin-ajax.php?action=autosuggest&q=strongest%20sword%20god HTTP/1.1" 200 None
2020-06-08 18:50:45,297 [DEBUG] (SEARCH_NOVEL)
Traceback (most recent call last):
File "./src/core/novel_search.py", line 22, in get_search_result
results = instance.search_novel(user_input)
File "./src/sources/mtlnovel.py", line 23, in search_novel
url = self.absolute_url("https://www.mtlnovel.com/?p=%s" % item['id'])
KeyError: 'id'
|
KeyError
|
def read_novel_info(self):
"""Get novel title, autor, cover etc"""
logger.debug("Visiting %s", self.novel_url)
soup = self.get_soup(self.novel_url)
self.novel_id = urlparse(self.novel_url).path.split("/")[1]
logger.info("Novel Id: %s", self.novel_id)
self.novel_title = soup.select_one(".series-details .series-name a").text.strip()
logger.info("Novel title: %s", self.novel_title)
self.novel_cover = self.absolute_url(
soup.select_one(".series-cover .content")["data-src"]
)
logger.info("Novel cover: %s", self.novel_cover)
# self.novel_author
# logger.info('Novel author: %s', self.novel_author)
page_count = 1
try:
last_page = soup.select("ul.pagingnation li a")[-1]["title"]
page_count = int(last_page.split(" ")[-1])
except Exception as _:
logger.exception("Failed to get page-count: %s", self.novel_url)
# end try
logger.info("Total pages: %d", page_count)
logger.info("Getting chapters...")
futures_to_check = {
self.executor.submit(
self.extract_chapter_list,
i + 1,
): str(i)
for i in range(page_count)
}
temp_chapters = dict()
for future in futures.as_completed(futures_to_check):
page = int(futures_to_check[future])
temp_chapters[page] = future.result()
# end for
logger.info("Building sorted chapter list...")
for page in reversed(sorted(temp_chapters.keys())):
for chap in reversed(temp_chapters[page]):
chap["id"] = len(self.chapters) + 1
chap["volume"] = len(self.chapters) // 100 + 1
self.chapters.append(chap)
# end for
# end for
self.volumes = [{"id": i + 1} for i in range(1 + len(self.chapters) // 100)]
|
def read_novel_info(self):
"""Get novel title, autor, cover etc"""
logger.debug("Visiting %s", self.novel_url)
soup = self.get_soup(self.novel_url)
self.novel_id = urlparse(self.novel_url).path.split("/")[1]
logger.info("Novel Id: %s", self.novel_id)
self.novel_title = soup.select_one(".series-details .series-name a").text.strip()
logger.info("Novel title: %s", self.novel_title)
self.novel_cover = self.absolute_url(
soup.select_one(".series-cover .content")["data-src"]
)
logger.info("Novel cover: %s", self.novel_cover)
# self.novel_author
# logger.info('Novel author: %s', self.novel_author)
page_count = 1
try:
last_page = soup.select("ul.pagingnation li a")[-1]["title"]
page_count = int(last_page.split(" ")[-1])
except Exception as err:
logger.exception("Failed to get page-count: %s", self.novel_url)
# end try
logger.info("Total pages: %d", page_count)
logger.info("Getting chapters...")
futures_to_check = {
self.executor.submit(
self.extract_chapter_list,
i + 1,
): str(i)
for i in range(page_count)
}
temp_chapters = dict()
for future in futures.as_completed(futures_to_check):
page = int(futures_to_check[future])
temp_chapters[page] = future.result()
# end for
logger.info("Building sorted chapter list...")
for page in reversed(sorted(temp_chapters.keys())):
for chap in reversed(temp_chapters[page]):
chap["id"] = len(self.chapters) + 1
chap["volume"] = len(self.chapters) // 100 + 1
self.chapters.append(chap)
# end for
# end for
self.volumes = [{"id": i + 1} for i in range(1 + len(self.chapters) // 100)]
|
https://github.com/dipu-bd/lightnovel-crawler/issues/476
|
2020-06-08 18:50:45,291 [DEBUG] (urllib3.connectionpool)
https://www.mtlnovel.com:443 "GET /wp-admin/admin-ajax.php?action=autosuggest&q=strongest%20sword%20god HTTP/1.1" 200 None
2020-06-08 18:50:45,297 [DEBUG] (SEARCH_NOVEL)
Traceback (most recent call last):
File "./src/core/novel_search.py", line 22, in get_search_result
results = instance.search_novel(user_input)
File "./src/sources/mtlnovel.py", line 23, in search_novel
url = self.absolute_url("https://www.mtlnovel.com/?p=%s" % item['id'])
KeyError: 'id'
|
KeyError
|
def search_novel(self, query):
url = search_url % quote(query.lower())
logger.debug("Visiting: %s", url)
soup = self.get_soup(url)
results = []
for li in soup.select(".book-list-info > ul > li"):
results.append(
{
"title": li.select_one("a h4 b").text.strip(),
"url": self.absolute_url(li.select_one(".book-img a")["href"]),
"info": li.select_one(".update-info").text.strip(),
}
)
# end for
return results
|
def search_novel(self, query):
url = search_url % quote(query.lower())
logger.debug("Visiting: %s", url)
soup = self.get_soup(url)
results = []
for li in soup.select(".book-list-info li"):
results.append(
{
"title": li.select_one("a h4 b").text.strip(),
"url": self.absolute_url(li.select_one(".book-img a")["href"]),
"info": li.select_one(".update-info").text.strip(),
}
)
# end for
return results
|
https://github.com/dipu-bd/lightnovel-crawler/issues/476
|
2020-06-08 18:50:45,291 [DEBUG] (urllib3.connectionpool)
https://www.mtlnovel.com:443 "GET /wp-admin/admin-ajax.php?action=autosuggest&q=strongest%20sword%20god HTTP/1.1" 200 None
2020-06-08 18:50:45,297 [DEBUG] (SEARCH_NOVEL)
Traceback (most recent call last):
File "./src/core/novel_search.py", line 22, in get_search_result
results = instance.search_novel(user_input)
File "./src/sources/mtlnovel.py", line 23, in search_novel
url = self.absolute_url("https://www.mtlnovel.com/?p=%s" % item['id'])
KeyError: 'id'
|
KeyError
|
def search_novel(self, query):
query = query.lower().replace(" ", "%20")
# soup = self.get_soup(search_url % query)
list_url = search_url % query
data = self.get_json(list_url)["items"][0]["results"]
results = []
for item in data:
url = item["permalink"]
results.append(
{
"url": url,
"title": item["title"],
"info": self.search_novel_info(url),
}
)
# end for
return results
|
def search_novel(self, query):
query = query.lower().replace(" ", "%20")
# soup = self.get_soup(search_url % query)
list_url = search_url % query
data = self.get_json(list_url)["items"][0]["results"]
results = []
for item in data:
url = self.absolute_url("https://www.mtlnovel.com/?p=%s" % item["id"])
results.append(
{
"url": url,
"title": item["title"],
"info": self.search_novel_info(url),
}
)
# end for
return results
|
https://github.com/dipu-bd/lightnovel-crawler/issues/476
|
2020-06-08 18:50:45,291 [DEBUG] (urllib3.connectionpool)
https://www.mtlnovel.com:443 "GET /wp-admin/admin-ajax.php?action=autosuggest&q=strongest%20sword%20god HTTP/1.1" 200 None
2020-06-08 18:50:45,297 [DEBUG] (SEARCH_NOVEL)
Traceback (most recent call last):
File "./src/core/novel_search.py", line 22, in get_search_result
results = instance.search_novel(user_input)
File "./src/sources/mtlnovel.py", line 23, in search_novel
url = self.absolute_url("https://www.mtlnovel.com/?p=%s" % item['id'])
KeyError: 'id'
|
KeyError
|
def read_novel_info(self):
# to get cookies and session info
self.parse_content_css(self.home_url)
# Determine cannonical novel name
path_fragments = urlparse(self.novel_url).path.split("/")
if path_fragments[1] == "books":
self.novel_hash = path_fragments[2]
else:
self.novel_hash = path_fragments[-1]
# end if
self.novel_url = novel_page_url % self.novel_hash
logger.info("Canonical name: %s", self.novel_hash)
logger.debug("Visiting %s", self.novel_url)
data = self.get_json(self.novel_url)
self.novel_id = data["data"]["id"]
logger.info("Novel ID: %s", self.novel_id)
self.novel_title = data["data"]["name"]
logger.info("Novel title: %s", self.novel_title)
self.novel_cover = data["data"]["cover"]
logger.info("Novel cover: %s", self.novel_cover)
chapter_count = int(data["data"]["releasedChapterCount"])
self.get_list_of_chapters(chapter_count)
|
def read_novel_info(self):
# to get cookies and session info
self.parse_content_css(self.home_url)
# Determine cannonical novel name
path_fragments = urlparse(self.novel_url).path.split("/")
if path_fragments[1] == "books":
self.novel_hash = path_fragments[2]
else:
self.novel_hash = path_fragments[-1]
# end if
self.novel_url = novel_page_url % self.novel_hash
logger.info("Canonical name: %s", self.novel_hash)
logger.debug("Visiting %s", self.novel_url)
data = self.get_json(self.novel_url)
self.novel_id = data["data"]["id"]
logger.info("Novel ID: %s", self.novel_id)
self.novel_title = data["data"]["name"]
logger.info("Novel title: %s", self.novel_title)
self.novel_cover = data["data"]["cover"]
logger.info("Novel cover: %s", self.novel_cover)
chapter_count = int(data["data"]["chapterCount"])
self.get_list_of_chapters(chapter_count)
|
https://github.com/dipu-bd/lightnovel-crawler/issues/465
|
Fail to get bad selectors
Traceback (most recent call last):
File "c:\python38\lib\site-packages\lncrawl\sources\babelnovel.py", line 125, in parse_content_css
data = json.loads(unquote(content[0]))
IndexError: list index out of range
! Error: 'chapterCount'
|
IndexError
|
def predict(
self, x: np.ndarray, batch_size: int = 128, **kwargs
) -> Union[Tuple[np.ndarray, np.ndarray], np.ndarray]:
"""
Perform prediction for a batch of inputs.
:param x: Samples of shape (nb_samples, seq_length). Note that, it is allowable that sequences in the batch
could have different lengths. A possible example of `x` could be:
`x = np.array([np.array([0.1, 0.2, 0.1, 0.4]), np.array([0.3, 0.1])])`.
:param batch_size: Batch size.
:param transcription_output: Indicate whether the function will produce probability or transcription as
prediction output. If transcription_output is not available, then probability
output is returned.
:type transcription_output: `bool`
:return: Predicted probability (if transcription_output False) or transcription (default, if
transcription_output is True or None):
- Probability return is a tuple of (probs, sizes), where `probs` is the probability of characters of
shape (nb_samples, seq_length, nb_classes) and `sizes` is the real sequence length of shape
(nb_samples,).
- Transcription return is a numpy array of characters. A possible example of a transcription return
is `np.array(['SIXTY ONE', 'HELLO'])`.
"""
import torch # lgtm [py/repeated-import]
x_ = np.array([x_i for x_i in x] + [np.array([0.1]), np.array([0.1, 0.2])])[:-2]
# Put the model in the eval mode
self._model.eval()
# Apply preprocessing
x_preprocessed, _ = self._apply_preprocessing(x_, y=None, fit=False)
# Transform x into the model input space
inputs, targets, input_rates, target_sizes, batch_idx = self.transform_model_input(
x=x_preprocessed
)
# Compute real input sizes
input_sizes = input_rates.mul_(inputs.size()[-1]).int()
# Run prediction with batch processing
results = []
result_output_sizes = np.zeros(x_preprocessed.shape[0], dtype=np.int)
num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size)))
for m in range(num_batch):
# Batch indexes
begin, end = (
m * batch_size,
min((m + 1) * batch_size, x_preprocessed.shape[0]),
)
# Call to DeepSpeech model for prediction
with torch.no_grad():
outputs, output_sizes = self._model(
inputs[begin:end].to(self._device),
input_sizes[begin:end].to(self._device),
)
results.append(outputs)
result_output_sizes[begin:end] = output_sizes.detach().cpu().numpy()
# Aggregate results
result_outputs = np.zeros(
(x_preprocessed.shape[0], result_output_sizes.max(), results[0].shape[-1]),
dtype=np.float32,
)
for m in range(num_batch):
# Batch indexes
begin, end = (
m * batch_size,
min((m + 1) * batch_size, x_preprocessed.shape[0]),
)
# Overwrite results
result_outputs[begin:end, : results[m].shape[1], : results[m].shape[-1]] = (
results[m].cpu().numpy()
)
# Rearrange to the original order
result_output_sizes_ = result_output_sizes.copy()
result_outputs_ = result_outputs.copy()
result_output_sizes[batch_idx] = result_output_sizes_
result_outputs[batch_idx] = result_outputs_
# Check if users want transcription outputs
transcription_output = kwargs.get("transcription_output")
if transcription_output is False:
return result_outputs, result_output_sizes
# Now users want transcription outputs
# Compute transcription
decoded_output, _ = self.decoder.decode(
torch.tensor(result_outputs, device=self._device),
torch.tensor(result_output_sizes, device=self._device),
)
decoded_output = [do[0] for do in decoded_output]
decoded_output = np.array(decoded_output)
return decoded_output
|
def predict(
self, x: np.ndarray, batch_size: int = 128, **kwargs
) -> Union[Tuple[np.ndarray, np.ndarray], np.ndarray]:
"""
Perform prediction for a batch of inputs.
:param x: Samples of shape (nb_samples, seq_length). Note that, it is allowable that sequences in the batch
could have different lengths. A possible example of `x` could be:
`x = np.array([np.array([0.1, 0.2, 0.1, 0.4]), np.array([0.3, 0.1])])`.
:param batch_size: Batch size.
:param transcription_output: Indicate whether the function will produce probability or transcription as
prediction output. If transcription_output is not available, then probability
output is returned.
:type transcription_output: `bool`
:return: Probability (if transcription_output is None or False) or transcription (if transcription_output is
True) predictions:
- Probability return is a tuple of (probs, sizes), where `probs` is the probability of characters of
shape (nb_samples, seq_length, nb_classes) and `sizes` is the real sequence length of shape
(nb_samples,).
- Transcription return is a numpy array of characters. A possible example of a transcription return
is `np.array(['SIXTY ONE', 'HELLO'])`.
"""
import torch # lgtm [py/repeated-import]
x_ = np.array([x_i for x_i in x] + [np.array([0.1]), np.array([0.1, 0.2])])[:-2]
# Put the model in the eval mode
self._model.eval()
# Apply preprocessing
x_preprocessed, _ = self._apply_preprocessing(x_, y=None, fit=False)
# Transform x into the model input space
inputs, targets, input_rates, target_sizes, batch_idx = self.transform_model_input(
x=x_preprocessed
)
# Compute real input sizes
input_sizes = input_rates.mul_(inputs.size()[-1]).int()
# Run prediction with batch processing
results = []
result_output_sizes = np.zeros(x_preprocessed.shape[0], dtype=np.int)
num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size)))
for m in range(num_batch):
# Batch indexes
begin, end = (
m * batch_size,
min((m + 1) * batch_size, x_preprocessed.shape[0]),
)
# Call to DeepSpeech model for prediction
with torch.no_grad():
outputs, output_sizes = self._model(
inputs[begin:end].to(self._device),
input_sizes[begin:end].to(self._device),
)
results.append(outputs)
result_output_sizes[begin:end] = output_sizes.detach().cpu().numpy()
# Aggregate results
result_outputs = np.zeros(
(x_preprocessed.shape[0], result_output_sizes.max(), results[0].shape[-1]),
dtype=np.float32,
)
for m in range(num_batch):
# Batch indexes
begin, end = (
m * batch_size,
min((m + 1) * batch_size, x_preprocessed.shape[0]),
)
# Overwrite results
result_outputs[begin:end, : results[m].shape[1], : results[m].shape[-1]] = (
results[m].cpu().numpy()
)
# Rearrange to the original order
result_output_sizes_ = result_output_sizes.copy()
result_outputs_ = result_outputs.copy()
result_output_sizes[batch_idx] = result_output_sizes_
result_outputs[batch_idx] = result_outputs_
# Check if users want transcription outputs
transcription_output = kwargs.get("transcription_output")
if transcription_output is None or transcription_output is False:
return result_outputs, result_output_sizes
# Now users want transcription outputs
# Compute transcription
decoded_output, _ = self.decoder.decode(
torch.tensor(result_outputs, device=self._device),
torch.tensor(result_output_sizes, device=self._device),
)
decoded_output = [do[0] for do in decoded_output]
decoded_output = np.array(decoded_output)
return decoded_output
|
https://github.com/Trusted-AI/adversarial-robustness-toolbox/issues/688
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-30-c849f56466d3> in <module>
3
4 # Generate attack
----> 5 x_adv = attack_pgd.generate(np.array([x2, x3]), y=None, batch_size=2)
/opt/conda/lib/python3.7/site-packages/art/attacks/attack.py in replacement_function(self, *args, **kwargs)
72 if len(args) > 0:
73 args = tuple(lst)
---> 74 return fdict[func_name](self, *args, **kwargs)
75
76 replacement_function.__doc__ = fdict[func_name].__doc__
/opt/conda/lib/python3.7/site-packages/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py in generate(self, x, y, **kwargs)
181 """
182 logger.info("Creating adversarial samples.")
--> 183 return self._attack.generate(x=x, y=y, **kwargs)
184
185 def set_params(self, **kwargs) -> None:
/opt/conda/lib/python3.7/site-packages/art/attacks/attack.py in replacement_function(self, *args, **kwargs)
72 if len(args) > 0:
73 args = tuple(lst)
---> 74 return fdict[func_name](self, *args, **kwargs)
75
76 replacement_function.__doc__ = fdict[func_name].__doc__
/opt/conda/lib/python3.7/site-packages/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py in generate(self, x, y, **kwargs)
330 self.eps_step,
331 self._project,
--> 332 self.num_random_init > 0 and i_max_iter == 0,
333 )
334
/opt/conda/lib/python3.7/site-packages/art/attacks/evasion/fast_gradient.py in _compute(self, x, x_init, y, mask, eps, eps_step, project, random_init)
366
367 # Get perturbation
--> 368 perturbation = self._compute_perturbation(batch, batch_labels, mask_batch)
369
370 # Apply perturbation and clip
/opt/conda/lib/python3.7/site-packages/art/attacks/evasion/fast_gradient.py in _compute_perturbation(self, batch, batch_labels, mask)
280
281 # Get gradient wrt loss; invert it if attack is targeted
--> 282 grad = self.estimator.loss_gradient(batch, batch_labels) * (1 - 2 * int(self.targeted))
283
284 # Apply norm bound
/opt/conda/lib/python3.7/site-packages/art/estimators/speech_recognition/pytorch_deep_speech.py in loss_gradient(self, x, y, **kwargs)
372 # Transform data into the model input space
373 inputs, targets, input_rates, target_sizes, batch_idx = self.transform_model_input(
--> 374 x=x_preprocessed, y=y_preprocessed, compute_gradient=True
375 )
376
/opt/conda/lib/python3.7/site-packages/art/estimators/speech_recognition/pytorch_deep_speech.py in transform_model_input(self, x, y, compute_gradient, tensor_input, real_lengths)
569 target = []
570 else:
--> 571 target = list(filter(None, [label_map.get(letter) for letter in list(y[i])]))
572
573 # Push the sequence to device
/opt/conda/lib/python3.7/site-packages/art/estimators/speech_recognition/pytorch_deep_speech.py in <listcomp>(.0)
569 target = []
570 else:
--> 571 target = list(filter(None, [label_map.get(letter) for letter in list(y[i])]))
572
573 # Push the sequence to device
|
TypeError
|
def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
"""
Compute the gradient of the loss function w.r.t. `x`.
:param x: Samples of shape (nb_samples, seq_length). Note that, it is allowable that sequences in the batch
could have different lengths. A possible example of `x` could be:
`x = np.array([np.array([0.1, 0.2, 0.1, 0.4]), np.array([0.3, 0.1])])`.
:param y: Target values of shape (nb_samples). Each sample in `y` is a string and it may possess different
lengths. A possible example of `y` could be: `y = np.array(['SIXTY ONE', 'HELLO'])`.
:return: Loss gradients of the same shape as `x`.
"""
from warpctc_pytorch import CTCLoss
x_ = np.array([x_i for x_i in x] + [np.array([0.1]), np.array([0.1, 0.2])])[:-2]
# Put the model in the training mode
self._model.train()
# Apply preprocessing
x_preprocessed, y_preprocessed = self._apply_preprocessing(x_, y, fit=False)
# Transform data into the model input space
inputs, targets, input_rates, target_sizes, batch_idx = self.transform_model_input(
x=x_preprocessed, y=y_preprocessed, compute_gradient=True
)
# Compute real input sizes
input_sizes = input_rates.mul_(inputs.size()[-1]).int()
# Call to DeepSpeech model for prediction
outputs, output_sizes = self._model(
inputs.to(self._device), input_sizes.to(self._device)
)
outputs = outputs.transpose(0, 1)
float_outputs = outputs.float()
# Loss function
criterion = CTCLoss()
loss = criterion(float_outputs, targets, output_sizes, target_sizes).to(
self._device
)
loss = loss / inputs.size(0)
# Compute gradients
if self._use_amp:
from apex import amp
with amp.scale_loss(loss, self._optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# Get results
results = []
for i in range(len(x_preprocessed)):
results.append(x_preprocessed[i].grad.cpu().numpy().copy())
results = np.array(results)
if results.shape[0] == 1:
results = np.array(
[results_i for results_i in results]
+ [np.array([0.1]), np.array([0.1, 0.2])],
dtype="object",
)[:-2]
results = self._apply_preprocessing_gradient(x_, results)
if x.dtype != np.object:
results = np.array([i for i in results], dtype=x.dtype)
assert results.shape == x.shape and results.dtype == x.dtype
return results
|
def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
"""
Compute the gradient of the loss function w.r.t. `x`.
:param x: Samples of shape (nb_samples, seq_length). Note that, it is allowable that sequences in the batch
could have different lengths. A possible example of `x` could be:
`x = np.array([np.array([0.1, 0.2, 0.1, 0.4]), np.array([0.3, 0.1])])`.
:param y: Target values of shape (nb_samples). Each sample in `y` is a string and it may possess different
lengths. A possible example of `y` could be: `y = np.array(['SIXTY ONE', 'HELLO'])`.
:return: Loss gradients of the same shape as `x`.
"""
from warpctc_pytorch import CTCLoss
x_ = np.array([x_i for x_i in x] + [np.array([0.1]), np.array([0.1, 0.2])])[:-2]
# Put the model in the training mode
self._model.train()
# Apply preprocessing
x_preprocessed, y_preprocessed = self._apply_preprocessing(x_, y, fit=False)
# Transform data into the model input space
inputs, targets, input_rates, target_sizes, batch_idx = self.transform_model_input(
x=x_preprocessed, y=y_preprocessed, compute_gradient=True
)
# Compute real input sizes
input_sizes = input_rates.mul_(inputs.size()[-1]).int()
# Call to DeepSpeech model for prediction
outputs, output_sizes = self._model(
inputs.to(self._device), input_sizes.to(self._device)
)
outputs = outputs.transpose(0, 1)
float_outputs = outputs.float()
# Loss function
criterion = CTCLoss()
loss = criterion(float_outputs, targets, output_sizes, target_sizes).to(
self._device
)
loss = loss / inputs.size(0)
# Compute gradients
if self._use_amp:
from apex import amp
with amp.scale_loss(loss, self._optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# Get results
results = []
for i in range(len(x_preprocessed)):
results.append(x_preprocessed[i].grad.cpu().numpy().copy())
results = np.array(results)
if results.shape[0] == 1:
results = np.array(
[results_i for results_i in results]
+ [np.array([0.1]), np.array([0.1, 0.2])],
dtype="object",
)[:-2]
results = self._apply_preprocessing_gradient(x_, results)
return results
|
https://github.com/Trusted-AI/adversarial-robustness-toolbox/issues/688
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-30-c849f56466d3> in <module>
3
4 # Generate attack
----> 5 x_adv = attack_pgd.generate(np.array([x2, x3]), y=None, batch_size=2)
/opt/conda/lib/python3.7/site-packages/art/attacks/attack.py in replacement_function(self, *args, **kwargs)
72 if len(args) > 0:
73 args = tuple(lst)
---> 74 return fdict[func_name](self, *args, **kwargs)
75
76 replacement_function.__doc__ = fdict[func_name].__doc__
/opt/conda/lib/python3.7/site-packages/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent.py in generate(self, x, y, **kwargs)
181 """
182 logger.info("Creating adversarial samples.")
--> 183 return self._attack.generate(x=x, y=y, **kwargs)
184
185 def set_params(self, **kwargs) -> None:
/opt/conda/lib/python3.7/site-packages/art/attacks/attack.py in replacement_function(self, *args, **kwargs)
72 if len(args) > 0:
73 args = tuple(lst)
---> 74 return fdict[func_name](self, *args, **kwargs)
75
76 replacement_function.__doc__ = fdict[func_name].__doc__
/opt/conda/lib/python3.7/site-packages/art/attacks/evasion/projected_gradient_descent/projected_gradient_descent_numpy.py in generate(self, x, y, **kwargs)
330 self.eps_step,
331 self._project,
--> 332 self.num_random_init > 0 and i_max_iter == 0,
333 )
334
/opt/conda/lib/python3.7/site-packages/art/attacks/evasion/fast_gradient.py in _compute(self, x, x_init, y, mask, eps, eps_step, project, random_init)
366
367 # Get perturbation
--> 368 perturbation = self._compute_perturbation(batch, batch_labels, mask_batch)
369
370 # Apply perturbation and clip
/opt/conda/lib/python3.7/site-packages/art/attacks/evasion/fast_gradient.py in _compute_perturbation(self, batch, batch_labels, mask)
280
281 # Get gradient wrt loss; invert it if attack is targeted
--> 282 grad = self.estimator.loss_gradient(batch, batch_labels) * (1 - 2 * int(self.targeted))
283
284 # Apply norm bound
/opt/conda/lib/python3.7/site-packages/art/estimators/speech_recognition/pytorch_deep_speech.py in loss_gradient(self, x, y, **kwargs)
372 # Transform data into the model input space
373 inputs, targets, input_rates, target_sizes, batch_idx = self.transform_model_input(
--> 374 x=x_preprocessed, y=y_preprocessed, compute_gradient=True
375 )
376
/opt/conda/lib/python3.7/site-packages/art/estimators/speech_recognition/pytorch_deep_speech.py in transform_model_input(self, x, y, compute_gradient, tensor_input, real_lengths)
569 target = []
570 else:
--> 571 target = list(filter(None, [label_map.get(letter) for letter in list(y[i])]))
572
573 # Push the sequence to device
/opt/conda/lib/python3.7/site-packages/art/estimators/speech_recognition/pytorch_deep_speech.py in <listcomp>(.0)
569 target = []
570 else:
--> 571 target = list(filter(None, [label_map.get(letter) for letter in list(y[i])]))
572
573 # Push the sequence to device
|
TypeError
|
def loss_gradient(self, x, y, **kwargs):
"""
Compute the gradient of the loss function w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:type x: `np.ndarray`
:param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape
(nb_samples,).
:type y: `np.ndarray`
:return: Array of gradients of the same shape as `x`.
:rtype: `np.ndarray`
"""
import mxnet as mx
train_mode = self._learning_phase if hasattr(self, "_learning_phase") else False
# Apply preprocessing
x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=False)
y_preprocessed = mx.nd.array([np.argmax(y_preprocessed, axis=1)], ctx=self._ctx).T
x_preprocessed = mx.nd.array(x_preprocessed.astype(ART_NUMPY_DTYPE), ctx=self._ctx)
x_preprocessed.attach_grad()
with mx.autograd.record(train_mode=train_mode):
preds = self._model(x_preprocessed)
loss = self._loss(preds, y_preprocessed)
loss.backward()
# Compute gradients
grads = x_preprocessed.grad.asnumpy()
grads = self._apply_preprocessing_gradient(x, grads)
assert grads.shape == x.shape
return grads
|
def loss_gradient(self, x, y, **kwargs):
"""
Compute the gradient of the loss function w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:type x: `np.ndarray`
:param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape
(nb_samples,).
:type y: `np.ndarray`
:return: Array of gradients of the same shape as `x`.
:rtype: `np.ndarray`
"""
import mxnet as mx
train_mode = self._learning_phase if hasattr(self, "_learning_phase") else False
# Apply preprocessing
x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=False)
y_preprocessed = mx.nd.array([np.argmax(y_preprocessed, axis=1)]).T
x_preprocessed = mx.nd.array(x_preprocessed.astype(ART_NUMPY_DTYPE), ctx=self._ctx)
x_preprocessed.attach_grad()
with mx.autograd.record(train_mode=train_mode):
preds = self._model(x_preprocessed)
loss = self._loss(preds, y_preprocessed)
loss.backward()
# Compute gradients
grads = x_preprocessed.grad.asnumpy()
grads = self._apply_preprocessing_gradient(x, grads)
assert grads.shape == x.shape
return grads
|
https://github.com/Trusted-AI/adversarial-robustness-toolbox/issues/355
|
MXNetError Traceback (most recent call last)
<timed exec> in <module>
~/.pyenv/versions/3.7.6/envs/adversarialvideo/lib/python3.7/site-packages/art/attacks/attack.py in replacement_function(self, *args, **kwargs)
68 if len(args) > 0:
69 args = tuple(lst)
---> 70 return fdict[func_name](self, *args, **kwargs)
71
72 replacement_function.__doc__ = fdict[func_name].__doc__
~/.pyenv/versions/3.7.6/envs/adversarialvideo/lib/python3.7/site-packages/art/attacks/evasion/projected_gradient_descent.py in generate(self, x, y, **kwargs)
158 self.eps_step,
159 self._project,
--> 160 self.num_random_init > 0 and i_max_iter == 0,
161 )
162
~/.pyenv/versions/3.7.6/envs/adversarialvideo/lib/python3.7/site-packages/art/attacks/evasion/fast_gradient.py in _compute(self, x, x_init, y, eps, eps_step, project, random_init)
318
319 # Get perturbation
--> 320 perturbation = self._compute_perturbation(batch, batch_labels)
321
322 # Apply perturbation and clip
~/.pyenv/versions/3.7.6/envs/adversarialvideo/lib/python3.7/site-packages/art/attacks/evasion/fast_gradient.py in _compute_perturbation(self, batch, batch_labels)
273
274 # Get gradient wrt loss; invert it if attack is targeted
--> 275 grad = self.classifier.loss_gradient(batch, batch_labels) * (1 - 2 * int(self.targeted))
276
277 # Apply norm bound
~/.pyenv/versions/3.7.6/envs/adversarialvideo/lib/python3.7/site-packages/art/classifiers/classifier.py in replacement_function(self, *args, **kwargs)
65 if len(args) > 0:
66 args = tuple(lst)
---> 67 return fdict[func_name](self, *args, **kwargs)
68
69 replacement_function.__doc__ = fdict[func_name].__doc__
~/.pyenv/versions/3.7.6/envs/adversarialvideo/lib/python3.7/site-packages/art/classifiers/mxnet.py in loss_gradient(self, x, y, **kwargs)
355 with mx.autograd.record(train_mode=train_mode):
356 preds = self._model(x_preprocessed)
--> 357 loss = self._loss(preds, y_preprocessed)
358
359 loss.backward()
~/.pyenv/versions/3.7.6/envs/adversarialvideo/lib/python3.7/site-packages/mxnet/gluon/block.py in __call__(self, *args)
691 hook(self, args)
692
--> 693 out = self.forward(*args)
694
695 for hook in self._forward_hooks.values():
~/.pyenv/versions/3.7.6/envs/adversarialvideo/lib/python3.7/site-packages/mxnet/gluon/block.py in forward(self, x, *args)
1156 params = {k: v.data(ctx) for k, v in self._reg_params.items()}
1157
-> 1158 return self.hybrid_forward(ndarray, x, *args, **params)
1159
1160 params = {i: j.var() for i, j in self._reg_params.items()}
~/.pyenv/versions/3.7.6/envs/adversarialvideo/lib/python3.7/site-packages/mxnet/gluon/loss.py in hybrid_forward(self, F, pred, label, sample_weight)
389 pred = log_softmax(pred, self._axis)
390 if self._sparse_label:
--> 391 loss = -pick(pred, label, axis=self._axis, keepdims=True)
392 else:
393 label = _reshape_like(F, label, pred)
~/.pyenv/versions/3.7.6/envs/adversarialvideo/lib/python3.7/site-packages/mxnet/ndarray/register.py in pick(data, index, axis, keepdims, mode, out, name, **kwargs)
~/.pyenv/versions/3.7.6/envs/adversarialvideo/lib/python3.7/site-packages/mxnet/_ctypes/ndarray.py in _imperative_invoke(handle, ndargs, keys, vals, out, is_np_op)
105 c_str_array(keys),
106 c_str_array([str(s) for s in vals]),
--> 107 ctypes.byref(out_stypes)))
108
109 create_ndarray_fn = _np_ndarray_cls if is_np_op else _ndarray_cls
~/.pyenv/versions/3.7.6/envs/adversarialvideo/lib/python3.7/site-packages/mxnet/base.py in check_call(ret)
253 """
254 if ret != 0:
--> 255 raise MXNetError(py_str(_LIB.MXGetLastError()))
256
257
MXNetError: [14:56:14] src/imperative/./imperative_utils.h:72: Check failed: inputs[i]->ctx().dev_mask() == ctx.dev_mask() (1 vs. 2) : Operator pick require all inputs live on the same context. But the first argument is on gpu(0) while the 2-th argument is on cpu(0)
Stack trace:
[bt] (0) /u/hesseltu/.pyenv/versions/3.7.6/envs/adversarialvideo/lib/python3.7/site-packages/mxnet/libmxnet.so(+0x6b8b5b) [0x2b4c5395bb5b]
[bt] (1) /u/hesseltu/.pyenv/versions/3.7.6/envs/adversarialvideo/lib/python3.7/site-packages/mxnet/libmxnet.so(mxnet::imperative::GetContext(nnvm::NodeAttrs const&, std::vector<mxnet::NDArray*, std::allocator<mxnet::NDArray*> > const&, std::vector<mxnet::NDArray*, std::allocator<mxnet::NDArray*> > const&, mxnet::Context const&)+0x4fc) [0x2b4c56b3651c]
[bt] (2) /u/hesseltu/.pyenv/versions/3.7.6/envs/adversarialvideo/lib/python3.7/site-packages/mxnet/libmxnet.so(mxnet::Imperative::Invoke(mxnet::Context const&, nnvm::NodeAttrs const&, std::vector<mxnet::NDArray*, std::allocator<mxnet::NDArray*> > const&, std::vector<mxnet::NDArray*, std::allocator<mxnet::NDArray*> > const&)+0x1c0) [0x2b4c56b409a0]
[bt] (3) /u/hesseltu/.pyenv/versions/3.7.6/envs/adversarialvideo/lib/python3.7/site-packages/mxnet/libmxnet.so(+0x3759cef) [0x2b4c569fccef]
[bt] (4) /u/hesseltu/.pyenv/versions/3.7.6/envs/adversarialvideo/lib/python3.7/site-packages/mxnet/libmxnet.so(MXImperativeInvokeEx+0x62) [0x2b4c569fd2b2]
[bt] (5) /lib64/libffi.so.6(ffi_call_unix64+0x4c) [0x2b4c182a3dcc]
[bt] (6) /lib64/libffi.so.6(ffi_call+0x1f5) [0x2b4c182a36f5]
[bt] (7) /u/hesseltu/.pyenv/versions/3.7.6/lib/python3.7/lib-dynload/_ctypes.cpython-37m-x86_64-linux-gnu.so(_ctypes_callproc+0x283) [0x2b4c1808fa93]
[bt] (8) /u/hesseltu/.pyenv/versions/3.7.6/lib/python3.7/lib-dynload/_ctypes.cpython-37m-x86_64-linux-gnu.so(+0x8e3f) [0x2b4c18086e3f]
|
MXNetError
|
def predict(self, x, batch_size=128, raw=False, **kwargs):
"""
Perform prediction for a batch of inputs. Predictions from classifiers should only be aggregated if they all
have the same type of output (e.g., probabilities). Otherwise, use `raw=True` to get predictions from all
models without aggregation. The same option should be used for logits output, as logits are not comparable
between models and should not be aggregated.
:param x: Test set.
:type x: `np.ndarray`
:param raw: Return the individual classifier raw outputs (not aggregated).
:type raw: `bool`
:return: Array of predictions of shape `(nb_inputs, nb_classes)`, or of shape
`(nb_classifiers, nb_inputs, nb_classes)` if `raw=True`.
:rtype: `np.ndarray`
"""
preds = np.array(
[
self._classifier_weights[i] * self._classifiers[i].predict(x)
for i in range(self._nb_classifiers)
]
)
if raw:
return preds
# Aggregate predictions only at probabilities level, as logits are not comparable between models
var_z = np.sum(preds, axis=0)
return var_z
|
def predict(self, x, batch_size=128, **kwargs):
"""
Perform prediction for a batch of inputs. Predictions from classifiers should only be aggregated if they all
have the same type of output (e.g., probabilities). Otherwise, use `raw=True` to get predictions from all
models without aggregation. The same option should be used for logits output, as logits are not comparable
between models and should not be aggregated.
:param x: Test set.
:type x: `np.ndarray`
:param raw: Return the individual classifier raw outputs (not aggregated).
:type raw: `bool`
:return: Array of predictions of shape `(nb_inputs, nb_classes)`, or of shape
`(nb_classifiers, nb_inputs, nb_classes)` if `raw=True`.
:rtype: `np.ndarray`
"""
if "raw" in kwargs:
raw = kwargs["raw"]
else:
raise ValueError("Missing argument `raw`.")
preds = np.array(
[
self._classifier_weights[i] * self._classifiers[i].predict(x)
for i in range(self._nb_classifiers)
]
)
if raw:
return preds
# Aggregate predictions only at probabilities level, as logits are not comparable between models
var_z = np.sum(preds, axis=0)
return var_z
|
https://github.com/Trusted-AI/adversarial-robustness-toolbox/issues/214
|
import torch
from torchvision import models
from art.classifiers import PyTorchClassifier, EnsembleClassifier
from art.attacks import ProjectedGradientDescent, HopSkipJump
# load and preprocess imagenet images
images = load_preprocess_images(...)
resnet18 = models.resnet18(pretrained=True)
alexnet = models.alexnet(pretrained=True)
wrapped_models = []
for model in [resnet18, alexnet]:
loss = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), .1, momentum=0.9, weight_decay=1e-4)
wrapped_model = PyTorchClassifier(model=model, loss=loss, optimizer=optimizer, input_shape=(3,224,224), nb_classes=1000)
wrapped_models.append(wrapped_model)
ensemble = EnsembleClassifier(wrapped_models)
attack = ProjectedGradientDescent(ensemble)
adv_images = attack.generate(x = images)
"""
yields
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-117-766010f0b586> in <module>
1 attack = ProjectedGradientDescent(ensemble, eps=10000)
----> 2 adv_image = attack.generate(x = images)
~/anaconda3/envs/brainscore/lib/python3.6/site-packages/art/attacks/projected_gradient_descent.py in generate(self, x, y, **kwargs)
108
109 # Use model predictions as correct outputs
--> 110 targets = get_labels_np_array(self.classifier.predict(x, batch_size=self.batch_size))
111 else:
112 targets = y
~/anaconda3/envs/brainscore/lib/python3.6/site-packages/art/classifiers/ensemble.py in predict(self, x, batch_size, **kwargs)
114 raw = kwargs['raw']
115 else:
--> 116 raise ValueError('Missing argument `raw`.')
117
118 preds = np.array([self._classifier_weights[i] * self._classifiers[i].predict(x)
ValueError: Missing argument `raw`.
"""
|
ValueError
|
def class_gradient(self, x, label=None, raw=False, **kwargs):
"""
Compute per-class derivatives w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:type x: `np.ndarray`
:param label: Index of a specific per-class derivative. If `None`, then gradients for all
classes will be computed.
:type label: `int`
:param raw: Return the individual classifier raw outputs (not aggregated).
:type raw: `bool`
:return: Array of gradients of input features w.r.t. each class in the form
`(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes
`(batch_size, 1, input_shape)` when `label` parameter is specified. If `raw=True`, an additional
dimension is added at the beginning of the array, indexing the different classifiers.
:rtype: `np.ndarray`
"""
grads = np.array(
[
self._classifier_weights[i] * self._classifiers[i].class_gradient(x, label)
for i in range(self._nb_classifiers)
]
)
if raw:
return grads
return np.sum(grads, axis=0)
|
def class_gradient(self, x, label=None, **kwargs):
"""
Compute per-class derivatives w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:type x: `np.ndarray`
:param label: Index of a specific per-class derivative. If `None`, then gradients for all
classes will be computed.
:type label: `int`
:param raw: Return the individual classifier raw outputs (not aggregated).
:type raw: `bool`
:return: Array of gradients of input features w.r.t. each class in the form
`(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes
`(batch_size, 1, input_shape)` when `label` parameter is specified. If `raw=True`, an additional
dimension is added at the beginning of the array, indexing the different classifiers.
:rtype: `np.ndarray`
"""
if "raw" in kwargs:
raw = kwargs["raw"]
else:
raise ValueError("Missing argument `raw`.")
grads = np.array(
[
self._classifier_weights[i] * self._classifiers[i].class_gradient(x, label)
for i in range(self._nb_classifiers)
]
)
if raw:
return grads
return np.sum(grads, axis=0)
|
https://github.com/Trusted-AI/adversarial-robustness-toolbox/issues/214
|
import torch
from torchvision import models
from art.classifiers import PyTorchClassifier, EnsembleClassifier
from art.attacks import ProjectedGradientDescent, HopSkipJump
# load and preprocess imagenet images
images = load_preprocess_images(...)
resnet18 = models.resnet18(pretrained=True)
alexnet = models.alexnet(pretrained=True)
wrapped_models = []
for model in [resnet18, alexnet]:
loss = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), .1, momentum=0.9, weight_decay=1e-4)
wrapped_model = PyTorchClassifier(model=model, loss=loss, optimizer=optimizer, input_shape=(3,224,224), nb_classes=1000)
wrapped_models.append(wrapped_model)
ensemble = EnsembleClassifier(wrapped_models)
attack = ProjectedGradientDescent(ensemble)
adv_images = attack.generate(x = images)
"""
yields
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-117-766010f0b586> in <module>
1 attack = ProjectedGradientDescent(ensemble, eps=10000)
----> 2 adv_image = attack.generate(x = images)
~/anaconda3/envs/brainscore/lib/python3.6/site-packages/art/attacks/projected_gradient_descent.py in generate(self, x, y, **kwargs)
108
109 # Use model predictions as correct outputs
--> 110 targets = get_labels_np_array(self.classifier.predict(x, batch_size=self.batch_size))
111 else:
112 targets = y
~/anaconda3/envs/brainscore/lib/python3.6/site-packages/art/classifiers/ensemble.py in predict(self, x, batch_size, **kwargs)
114 raw = kwargs['raw']
115 else:
--> 116 raise ValueError('Missing argument `raw`.')
117
118 preds = np.array([self._classifier_weights[i] * self._classifiers[i].predict(x)
ValueError: Missing argument `raw`.
"""
|
ValueError
|
def loss_gradient(self, x, y, raw=False, **kwargs):
"""
Compute the gradient of the loss function w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:type x: `np.ndarray`
:param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape
(nb_samples,).
:type y: `np.ndarray`
:param raw: Return the individual classifier raw outputs (not aggregated).
:type raw: `bool`
:return: Array of gradients of the same shape as `x`. If `raw=True`, shape becomes `[nb_classifiers, x.shape]`.
:rtype: `np.ndarray`
"""
grads = np.array(
[
self._classifier_weights[i] * self._classifiers[i].loss_gradient(x, y)
for i in range(self._nb_classifiers)
]
)
if raw:
return grads
return np.sum(grads, axis=0)
|
def loss_gradient(self, x, y, **kwargs):
"""
Compute the gradient of the loss function w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:type x: `np.ndarray`
:param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape
(nb_samples,).
:type y: `np.ndarray`
:param raw: Return the individual classifier raw outputs (not aggregated).
:type raw: `bool`
:return: Array of gradients of the same shape as `x`. If `raw=True`, shape becomes `[nb_classifiers, x.shape]`.
:rtype: `np.ndarray`
"""
if "raw" in kwargs:
raw = kwargs["raw"]
else:
raise ValueError("Missing argument `raw`.")
grads = np.array(
[
self._classifier_weights[i] * self._classifiers[i].loss_gradient(x, y)
for i in range(self._nb_classifiers)
]
)
if raw:
return grads
return np.sum(grads, axis=0)
|
https://github.com/Trusted-AI/adversarial-robustness-toolbox/issues/214
|
import torch
from torchvision import models
from art.classifiers import PyTorchClassifier, EnsembleClassifier
from art.attacks import ProjectedGradientDescent, HopSkipJump
# load and preprocess imagenet images
images = load_preprocess_images(...)
resnet18 = models.resnet18(pretrained=True)
alexnet = models.alexnet(pretrained=True)
wrapped_models = []
for model in [resnet18, alexnet]:
loss = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), .1, momentum=0.9, weight_decay=1e-4)
wrapped_model = PyTorchClassifier(model=model, loss=loss, optimizer=optimizer, input_shape=(3,224,224), nb_classes=1000)
wrapped_models.append(wrapped_model)
ensemble = EnsembleClassifier(wrapped_models)
attack = ProjectedGradientDescent(ensemble)
adv_images = attack.generate(x = images)
"""
yields
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-117-766010f0b586> in <module>
1 attack = ProjectedGradientDescent(ensemble, eps=10000)
----> 2 adv_image = attack.generate(x = images)
~/anaconda3/envs/brainscore/lib/python3.6/site-packages/art/attacks/projected_gradient_descent.py in generate(self, x, y, **kwargs)
108
109 # Use model predictions as correct outputs
--> 110 targets = get_labels_np_array(self.classifier.predict(x, batch_size=self.batch_size))
111 else:
112 targets = y
~/anaconda3/envs/brainscore/lib/python3.6/site-packages/art/classifiers/ensemble.py in predict(self, x, batch_size, **kwargs)
114 raw = kwargs['raw']
115 else:
--> 116 raise ValueError('Missing argument `raw`.')
117
118 preds = np.array([self._classifier_weights[i] * self._classifiers[i].predict(x)
ValueError: Missing argument `raw`.
"""
|
ValueError
|
def generate(self, x, **kwargs):
"""
Generate adversarial samples and return them in an array.
:param x: An array with the original inputs to be attacked.
:type x: `np.ndarray`
:param y: If `self.targeted` is true, then `y_val` represents the target labels. Otherwise, the targets are
the original class labels.
:type y: `np.ndarray`
:return: An array holding the adversarial examples.
:rtype: `np.ndarray`
"""
x_adv = x.astype(NUMPY_DTYPE)
(clip_min, clip_max) = self.classifier.clip_values
# Parse and save attack-specific parameters
params_cpy = dict(kwargs)
y = params_cpy.pop(str("y"), None)
self.set_params(**params_cpy)
# Assert that, if attack is targeted, y_val is provided:
if self.targeted and y is None:
raise ValueError("Target labels `y` need to be provided for a targeted attack.")
# No labels provided, use model prediction as correct class
if y is None:
y = get_labels_np_array(self.classifier.predict(x, logits=False))
# Compute perturbation with implicit batching
nb_batches = int(np.ceil(x_adv.shape[0] / float(self.batch_size)))
for batch_id in range(nb_batches):
logger.debug("Processing batch %i out of %i", batch_id, nb_batches)
batch_index_1, batch_index_2 = (
batch_id * self.batch_size,
(batch_id + 1) * self.batch_size,
)
x_batch = x_adv[batch_index_1:batch_index_2]
y_batch = y[batch_index_1:batch_index_2]
# The optimization is performed in tanh space to keep the
# adversarial images bounded from clip_min and clip_max.
x_batch_tanh = self._original_to_tanh(x_batch, clip_min, clip_max)
# Initialize binary search:
c = self.initial_const * np.ones(x_batch.shape[0])
c_lower_bound = np.zeros(x_batch.shape[0])
c_double = np.ones(x_batch.shape[0]) > 0
# Initialize placeholders for best l2 distance and attack found so far
best_l2dist = np.inf * np.ones(x_batch.shape[0])
best_x_adv_batch = x_batch.copy()
for bss in range(self.binary_search_steps):
logger.debug(
"Binary search step %i out of %i (c_mean==%f)",
bss,
self.binary_search_steps,
np.mean(c),
)
nb_active = int(np.sum(c < self._c_upper_bound))
logger.debug(
"Number of samples with c < _c_upper_bound: %i out of %i",
nb_active,
x_batch.shape[0],
)
if nb_active == 0:
break
lr = self.learning_rate * np.ones(x_batch.shape[0])
# Initialize perturbation in tanh space:
x_adv_batch = x_batch.copy()
x_adv_batch_tanh = x_batch_tanh.copy()
z, l2dist, loss = self._loss(x_batch, x_adv_batch, y_batch, c)
attack_success = loss - l2dist <= 0
overall_attack_success = attack_success
for it in range(self.max_iter):
logger.debug("Iteration step %i out of %i", it, self.max_iter)
logger.debug("Average Loss: %f", np.mean(loss))
logger.debug("Average L2Dist: %f", np.mean(l2dist))
logger.debug("Average Margin Loss: %f", np.mean(loss - l2dist))
logger.debug(
"Current number of succeeded attacks: %i out of %i",
int(np.sum(attack_success)),
len(attack_success),
)
improved_adv = attack_success & (l2dist < best_l2dist)
logger.debug(
"Number of improved L2 distances: %i", int(np.sum(improved_adv))
)
if np.sum(improved_adv) > 0:
best_l2dist[improved_adv] = l2dist[improved_adv]
best_x_adv_batch[improved_adv] = x_adv_batch[improved_adv]
active = (c < self._c_upper_bound) & (lr > 0)
nb_active = int(np.sum(active))
logger.debug(
"Number of samples with c < _c_upper_bound and lr > 0: %i out of %i",
nb_active,
x_batch.shape[0],
)
if nb_active == 0:
break
# compute gradient:
logger.debug("Compute loss gradient")
perturbation_tanh = -self._loss_gradient(
z[active],
y_batch[active],
x_batch[active],
x_adv_batch[active],
x_adv_batch_tanh[active],
c[active],
clip_min,
clip_max,
)
# perform line search to optimize perturbation
# first, halve the learning rate until perturbation actually decreases the loss:
prev_loss = loss.copy()
best_loss = loss.copy()
best_lr = np.zeros(x_batch.shape[0])
halving = np.zeros(x_batch.shape[0])
for h in range(self.max_halving):
logger.debug(
"Perform halving iteration %i out of %i", h, self.max_halving
)
do_halving = loss[active] >= prev_loss[active]
logger.debug(
"Halving to be performed on %i samples", int(np.sum(do_halving))
)
if np.sum(do_halving) == 0:
break
active_and_do_halving = active.copy()
active_and_do_halving[active] = do_halving
lr_mult = lr[active_and_do_halving]
for _ in range(len(x.shape) - 1):
lr_mult = lr_mult[:, np.newaxis]
new_x_adv_batch_tanh = (
x_adv_batch_tanh[active_and_do_halving]
+ lr_mult * perturbation_tanh[do_halving]
)
new_x_adv_batch = self._tanh_to_original(
new_x_adv_batch_tanh, clip_min, clip_max
)
_, l2dist[active_and_do_halving], loss[active_and_do_halving] = (
self._loss(
x_batch[active_and_do_halving],
new_x_adv_batch,
y_batch[active_and_do_halving],
c[active_and_do_halving],
)
)
logger.debug("New Average Loss: %f", np.mean(loss))
logger.debug("New Average L2Dist: %f", np.mean(l2dist))
logger.debug("New Average Margin Loss: %f", np.mean(loss - l2dist))
best_lr[loss < best_loss] = lr[loss < best_loss]
best_loss[loss < best_loss] = loss[loss < best_loss]
lr[active_and_do_halving] /= 2
halving[active_and_do_halving] += 1
lr[active] *= 2
# if no halving was actually required, double the learning rate as long as this
# decreases the loss:
for d in range(self.max_doubling):
logger.debug(
"Perform doubling iteration %i out of %i", d, self.max_doubling
)
do_doubling = (halving[active] == 1) & (
loss[active] <= best_loss[active]
)
logger.debug(
"Doubling to be performed on %i samples",
int(np.sum(do_doubling)),
)
if np.sum(do_doubling) == 0:
break
active_and_do_doubling = active.copy()
active_and_do_doubling[active] = do_doubling
lr[active_and_do_doubling] *= 2
lr_mult = lr[active_and_do_doubling]
for _ in range(len(x.shape) - 1):
lr_mult = lr_mult[:, np.newaxis]
new_x_adv_batch_tanh = (
x_adv_batch_tanh[active_and_do_doubling]
+ lr_mult * perturbation_tanh[do_doubling]
)
new_x_adv_batch = self._tanh_to_original(
new_x_adv_batch_tanh, clip_min, clip_max
)
_, l2dist[active_and_do_doubling], loss[active_and_do_doubling] = (
self._loss(
x_batch[active_and_do_doubling],
new_x_adv_batch,
y_batch[active_and_do_doubling],
c[active_and_do_doubling],
)
)
logger.debug("New Average Loss: %f", np.mean(loss))
logger.debug("New Average L2Dist: %f", np.mean(l2dist))
logger.debug("New Average Margin Loss: %f", np.mean(loss - l2dist))
best_lr[loss < best_loss] = lr[loss < best_loss]
best_loss[loss < best_loss] = loss[loss < best_loss]
lr[halving == 1] /= 2
update_adv = best_lr[active] > 0
logger.debug(
"Number of adversarial samples to be finally updated: %i",
int(np.sum(update_adv)),
)
if np.sum(update_adv) > 0:
active_and_update_adv = active.copy()
active_and_update_adv[active] = update_adv
best_lr_mult = best_lr[active_and_update_adv]
for _ in range(len(x.shape) - 1):
best_lr_mult = best_lr_mult[:, np.newaxis]
x_adv_batch_tanh[active_and_update_adv] = (
x_adv_batch_tanh[active_and_update_adv]
+ best_lr_mult * perturbation_tanh[update_adv]
)
x_adv_batch[active_and_update_adv] = self._tanh_to_original(
x_adv_batch_tanh[active_and_update_adv], clip_min, clip_max
)
(
z[active_and_update_adv],
l2dist[active_and_update_adv],
loss[active_and_update_adv],
) = self._loss(
x_batch[active_and_update_adv],
x_adv_batch[active_and_update_adv],
y_batch[active_and_update_adv],
c[active_and_update_adv],
)
attack_success = loss - l2dist <= 0
overall_attack_success = overall_attack_success | attack_success
# Update depending on attack success:
improved_adv = attack_success & (l2dist < best_l2dist)
logger.debug(
"Number of improved L2 distances: %i", int(np.sum(improved_adv))
)
if np.sum(improved_adv) > 0:
best_l2dist[improved_adv] = l2dist[improved_adv]
best_x_adv_batch[improved_adv] = x_adv_batch[improved_adv]
c_double[overall_attack_success] = False
c[overall_attack_success] = (c_lower_bound + c)[overall_attack_success] / 2
c_old = c
c[~overall_attack_success & c_double] *= 2
c[~overall_attack_success & ~c_double] += (c - c_lower_bound)[
~overall_attack_success & ~c_double
] / 2
c_lower_bound[~overall_attack_success] = c_old[~overall_attack_success]
x_adv[batch_index_1:batch_index_2] = best_x_adv_batch
adv_preds = np.argmax(self.classifier.predict(x_adv), axis=1)
if self.targeted:
rate = np.sum(adv_preds == np.argmax(y, axis=1)) / x_adv.shape[0]
else:
preds = np.argmax(self.classifier.predict(x), axis=1)
rate = np.sum(adv_preds != preds) / x_adv.shape[0]
logger.info("Success rate of C&W attack: %.2f%%", 100 * rate)
return x_adv
|
def generate(self, x, **kwargs):
"""
Generate adversarial samples and return them in an array.
:param x: An array with the original inputs to be attacked.
:type x: `np.ndarray`
:param y: If `self.targeted` is true, then `y_val` represents the target labels. Otherwise, the targets are
the original class labels.
:type y: `np.ndarray`
:return: An array holding the adversarial examples.
:rtype: `np.ndarray`
"""
x_adv = x.astype(NUMPY_DTYPE)
(clip_min, clip_max) = self.classifier.clip_values
# Parse and save attack-specific parameters
params_cpy = dict(kwargs)
y = params_cpy.pop(str("y"), None)
self.set_params(**params_cpy)
# Assert that, if attack is targeted, y_val is provided:
if self.targeted and y is None:
raise ValueError("Target labels `y` need to be provided for a targeted attack.")
# No labels provided, use model prediction as correct class
if y is None:
y = get_labels_np_array(self.classifier.predict(x, logits=False))
# Compute perturbation with implicit batching
nb_batches = int(np.ceil(x_adv.shape[0] / float(self.batch_size)))
for batch_id in range(nb_batches):
logger.debug("Processing batch %i out of %i", batch_id, nb_batches)
batch_index_1, batch_index_2 = (
batch_id * self.batch_size,
(batch_id + 1) * self.batch_size,
)
x_batch = x_adv[batch_index_1:batch_index_2]
y_batch = y[batch_index_1:batch_index_2]
# The optimization is performed in tanh space to keep the
# adversarial images bounded from clip_min and clip_max.
x_batch_tanh = self._original_to_tanh(x_batch, clip_min, clip_max)
# Initialize binary search:
c = self.initial_const * np.ones(x_batch.shape[0])
c_lower_bound = np.zeros(x_batch.shape[0])
c_double = np.ones(x_batch.shape[0]) > 0
# Initialize placeholders for best l2 distance and attack found so far
best_l2dist = np.inf * np.ones(x_batch.shape[0])
best_x_adv_batch = x_batch.copy()
for bss in range(self.binary_search_steps):
logger.debug(
"Binary search step %i out of %i (c_mean==%f)",
bss,
self.binary_search_steps,
np.mean(c),
)
nb_active = int(np.sum(c < self._c_upper_bound))
logger.debug(
"Number of samples with c < _c_upper_bound: %i out of %i",
nb_active,
x_batch.shape[0],
)
if nb_active == 0:
break
lr = self.learning_rate * np.ones(x_batch.shape[0])
# Initialize perturbation in tanh space:
x_adv_batch = x_batch.copy()
x_adv_batch_tanh = x_batch_tanh.copy()
z, l2dist, loss = self._loss(x_batch, x_adv_batch, y_batch, c)
attack_success = loss - l2dist <= 0
overall_attack_success = attack_success
for it in range(self.max_iter):
logger.debug("Iteration step %i out of %i", it, self.max_iter)
logger.debug("Average Loss: %f", np.mean(loss))
logger.debug("Average L2Dist: %f", np.mean(l2dist))
logger.debug("Average Margin Loss: %f", np.mean(loss - l2dist))
logger.debug(
"Current number of succeeded attacks: %i out of %i",
int(np.sum(attack_success)),
len(attack_success),
)
improved_adv = attack_success & (l2dist < best_l2dist)
logger.debug(
"Number of improved L2 distances: %i", int(np.sum(improved_adv))
)
if np.sum(improved_adv) > 0:
best_l2dist[improved_adv] = l2dist[improved_adv]
best_x_adv_batch[improved_adv] = x_adv_batch[improved_adv]
active = (c < self._c_upper_bound) & (lr > 0)
nb_active = int(np.sum(active))
logger.debug(
"Number of samples with c < _c_upper_bound and lr > 0: %i out of %i",
nb_active,
x_batch.shape[0],
)
if nb_active == 0:
break
# compute gradient:
logger.debug("Compute loss gradient")
perturbation_tanh = -self._loss_gradient(
z[active],
y_batch[active],
x_batch[active],
x_adv_batch[active],
x_adv_batch_tanh[active],
c[active],
clip_min,
clip_max,
)
# perform line search to optimize perturbation
# first, halve the learning rate until perturbation actually decreases the loss:
prev_loss = loss.copy()
best_loss = loss.copy()
best_lr = np.zeros(x_batch.shape[0])
halving = np.zeros(x_batch.shape[0])
for h in range(self.max_halving):
logger.debug(
"Perform halving iteration %i out of %i", h, self.max_halving
)
do_halving = loss[active] >= prev_loss[active]
logger.debug(
"Halving to be performed on %i samples", int(np.sum(do_halving))
)
if np.sum(do_halving) == 0:
break
active_and_do_halving = active.copy()
active_and_do_halving[active] = do_halving
lr_mult = lr[active_and_do_halving]
for _ in range(len(x.shape) - 1):
lr_mult = lr_mult[:, np.newaxis]
new_x_adv_batch_tanh = (
x_adv_batch_tanh[active_and_do_halving]
+ lr_mult * perturbation_tanh[do_halving]
)
new_x_adv_batch = self._tanh_to_original(
new_x_adv_batch_tanh, clip_min, clip_max
)
_, l2dist[active_and_do_halving], loss[active_and_do_halving] = (
self._loss(
x_batch[active_and_do_halving],
new_x_adv_batch,
y_batch[active_and_do_halving],
c[active_and_do_halving],
)
)
logger.debug("New Average Loss: %f", np.mean(loss))
logger.debug("New Average L2Dist: %f", np.mean(l2dist))
logger.debug("New Average Margin Loss: %f", np.mean(loss - l2dist))
best_lr[loss < best_loss] = lr[loss < best_loss]
best_loss[loss < best_loss] = loss[loss < best_loss]
lr[active_and_do_halving] /= 2
halving[active_and_do_halving] += 1
lr[active] *= 2
# if no halving was actually required, double the learning rate as long as this
# decreases the loss:
for d in range(self.max_doubling):
logger.debug(
"Perform doubling iteration %i out of %i", d, self.max_doubling
)
do_doubling = (halving[active] == 1) & (
loss[active] <= best_loss[active]
)
logger.debug(
"Doubling to be performed on %i samples",
int(np.sum(do_doubling)),
)
if np.sum(do_doubling) == 0:
break
active_and_do_doubling = active.copy()
active_and_do_doubling[active] = do_doubling
lr[active_and_do_doubling] *= 2
lr_mult = lr[active_and_do_doubling]
for _ in range(len(x.shape) - 1):
lr_mult = lr_mult[:, np.newaxis]
new_x_adv_batch_tanh = (
x_adv_batch_tanh[active_and_do_doubling]
+ lr_mult * perturbation_tanh[do_doubling]
)
new_x_adv_batch = self._tanh_to_original(
new_x_adv_batch_tanh, clip_min, clip_max
)
_, l2dist[active_and_do_doubling], loss[active_and_do_doubling] = (
self._loss(
x_batch[active_and_do_doubling],
new_x_adv_batch,
y_batch[active_and_do_doubling],
c[active_and_do_doubling],
)
)
logger.debug("New Average Loss: %f", np.mean(loss))
logger.debug("New Average L2Dist: %f", np.mean(l2dist))
logger.debug("New Average Margin Loss: %f", np.mean(loss - l2dist))
best_lr[loss < best_loss] = lr[loss < best_loss]
best_loss[loss < best_loss] = loss[loss < best_loss]
lr[halving == 1] /= 2
update_adv = best_lr[active] > 0
logger.debug(
"Number of adversarial samples to be finally updated: %i",
int(np.sum(update_adv)),
)
if np.sum(update_adv) > 0:
active_and_update_adv = active.copy()
active_and_update_adv[active] = update_adv
best_lr_mult = best_lr[active_and_update_adv]
for _ in range(len(x.shape) - 1):
best_lr_mult = best_lr_mult[:, np.newaxis]
x_adv_batch_tanh[active_and_update_adv] = (
x_adv_batch_tanh[update_adv]
+ best_lr_mult * perturbation_tanh[update_adv]
)
x_adv_batch[active_and_update_adv] = self._tanh_to_original(
x_adv_batch_tanh[active_and_update_adv], clip_min, clip_max
)
(
z[active_and_update_adv],
l2dist[active_and_update_adv],
loss[active_and_update_adv],
) = self._loss(
x_batch[active_and_update_adv],
x_adv_batch[active_and_update_adv],
y_batch[active_and_update_adv],
c[active_and_update_adv],
)
attack_success = loss - l2dist <= 0
overall_attack_success = overall_attack_success | attack_success
# Update depending on attack success:
improved_adv = attack_success & (l2dist < best_l2dist)
logger.debug(
"Number of improved L2 distances: %i", int(np.sum(improved_adv))
)
if np.sum(improved_adv) > 0:
best_l2dist[improved_adv] = l2dist[improved_adv]
best_x_adv_batch[improved_adv] = x_adv_batch[improved_adv]
c_double[overall_attack_success] = False
c[overall_attack_success] = (c_lower_bound + c)[overall_attack_success] / 2
c_old = c
c[~overall_attack_success & c_double] *= 2
c[~overall_attack_success & ~c_double] += (c - c_lower_bound)[
~overall_attack_success & ~c_double
] / 2
c_lower_bound[~overall_attack_success] = c_old[~overall_attack_success]
x_adv[batch_index_1:batch_index_2] = best_x_adv_batch
adv_preds = np.argmax(self.classifier.predict(x_adv), axis=1)
if self.targeted:
rate = np.sum(adv_preds == np.argmax(y, axis=1)) / x_adv.shape[0]
else:
preds = np.argmax(self.classifier.predict(x), axis=1)
rate = np.sum(adv_preds != preds) / x_adv.shape[0]
logger.info("Success rate of C&W attack: %.2f%%", 100 * rate)
return x_adv
|
https://github.com/Trusted-AI/adversarial-robustness-toolbox/issues/29
|
Traceback (most recent call last):
File "cw_pytorch.py", line 172, in <module>
x_test_adv = cl2m.generate(inputs, **params)
File "/home/weitian/anaconda3/envs/xnor/lib/python3.6/site-packages/art/attacks/carlini.py", line 380, in generate
x_adv_batch_tanh[active_and_update_adv] = x_adv_batch_tanh[update_adv] + \
IndexError: boolean index did not match indexed array along dimension 0; dimension is 18 but corresponding boolean dimension is 17
|
IndexError
|
def __init__(self, classifier, expectation=None):
"""
:param classifier: A trained model.
:type classifier: :class:`Classifier`
:param expectation: An expectation over transformations to be applied when computing
classifier gradients and predictions.
:type expectation: :class:`ExpectationOverTransformations`
"""
self.classifier = classifier
self.expectation = expectation
|
def __init__(self, classifier):
"""
:param classifier: A trained model.
:type classifier: :class:`Classifier`
"""
self.classifier = classifier
|
https://github.com/Trusted-AI/adversarial-robustness-toolbox/issues/29
|
Traceback (most recent call last):
File "cw_pytorch.py", line 172, in <module>
x_test_adv = cl2m.generate(inputs, **params)
File "/home/weitian/anaconda3/envs/xnor/lib/python3.6/site-packages/art/attacks/carlini.py", line 380, in generate
x_adv_batch_tanh[active_and_update_adv] = x_adv_batch_tanh[update_adv] + \
IndexError: boolean index did not match indexed array along dimension 0; dimension is 18 but corresponding boolean dimension is 17
|
IndexError
|
def __init__(
self,
classifier,
confidence=0.0,
targeted=True,
learning_rate=0.01,
binary_search_steps=10,
max_iter=10,
initial_const=0.01,
max_halving=5,
max_doubling=5,
batch_size=128,
expectation=None,
):
"""
Create a Carlini L_2 attack instance.
:param classifier: A trained model.
:type classifier: :class:`Classifier`
:param confidence: Confidence of adversarial examples: a higher value produces examples that are farther away,
from the original input, but classified with higher confidence as the target class.
:type confidence: `float`
:param targeted: Should the attack target one specific class.
:type targeted: `bool`
:param learning_rate: The initial learning rate for the attack algorithm. Smaller values produce better results
but are slower to converge.
:type learning_rate: `float`
:param binary_search_steps: number of times to adjust constant with binary search (positive value).
:type binary_search_steps: `int`
:param max_iter: The maximum number of iterations.
:type max_iter: `int`
:param initial_const: The initial trade-off constant `c` to use to tune the relative importance of distance and
confidence. If `binary_search_steps` is large, the initial constant is not important, as discussed in
Carlini and Wagner (2016).
:type initial_const: `float`
:param max_halving: Maximum number of halving steps in the line search optimization.
:type max_halving: `int`
:param max_doubling: Maximum number of doubling steps in the line search optimization.
:type max_doubling: `int`
:param batch_size: Internal size of batches on which adversarial samples are generated.
:type batch_size: `int`
:param expectation: An expectation over transformations to be applied when computing
classifier gradients and predictions.
:type expectation: :class:`ExpectationOverTransformations`
"""
super(CarliniL2Method, self).__init__(classifier)
kwargs = {
"confidence": confidence,
"targeted": targeted,
"learning_rate": learning_rate,
"binary_search_steps": binary_search_steps,
"max_iter": max_iter,
"initial_const": initial_const,
"max_halving": max_halving,
"max_doubling": max_doubling,
"batch_size": batch_size,
"expectation": expectation,
}
assert self.set_params(**kwargs)
# There are internal hyperparameters:
# Abort binary search for c if it exceeds this threshold (suggested in Carlini and Wagner (2016)):
self._c_upper_bound = 10e10
# Smooth arguments of arctanh by multiplying with this constant to avoid division by zero:
self._tanh_smoother = 0.999999
|
def __init__(
self,
classifier,
confidence=0.0,
targeted=True,
learning_rate=0.01,
binary_search_steps=10,
max_iter=10,
initial_const=0.01,
max_halving=5,
max_doubling=5,
batch_size=128,
):
"""
Create a Carlini L_2 attack instance.
:param classifier: A trained model.
:type classifier: :class:`Classifier`
:param confidence: Confidence of adversarial examples: a higher value produces examples that are farther away,
from the original input, but classified with higher confidence as the target class.
:type confidence: `float`
:param targeted: Should the attack target one specific class.
:type targeted: `bool`
:param learning_rate: The initial learning rate for the attack algorithm. Smaller values produce better results
but are slower to converge.
:type learning_rate: `float`
:param binary_search_steps: number of times to adjust constant with binary search (positive value).
:type binary_search_steps: `int`
:param max_iter: The maximum number of iterations.
:type max_iter: `int`
:param initial_const: The initial trade-off constant `c` to use to tune the relative importance of distance and
confidence. If `binary_search_steps` is large, the initial constant is not important, as discussed in
Carlini and Wagner (2016).
:type initial_const: `float`
:param max_halving: Maximum number of halving steps in the line search optimization.
:type max_halving: `int`
:param max_doubling: Maximum number of doubling steps in the line search optimization.
:type max_doubling: `int`
:param batch_size: Internal size of batches on which adversarial samples are generated.
:type batch_size: `int`
"""
super(CarliniL2Method, self).__init__(classifier)
kwargs = {
"confidence": confidence,
"targeted": targeted,
"learning_rate": learning_rate,
"binary_search_steps": binary_search_steps,
"max_iter": max_iter,
"initial_const": initial_const,
"max_halving": max_halving,
"max_doubling": max_doubling,
"batch_size": batch_size,
}
assert self.set_params(**kwargs)
# There are internal hyperparameters:
# Abort binary search for c if it exceeds this threshold (suggested in Carlini and Wagner (2016)):
self._c_upper_bound = 10e10
# Smooth arguments of arctanh by multiplying with this constant to avoid division by zero:
self._tanh_smoother = 0.999999
|
https://github.com/Trusted-AI/adversarial-robustness-toolbox/issues/29
|
Traceback (most recent call last):
File "cw_pytorch.py", line 172, in <module>
x_test_adv = cl2m.generate(inputs, **params)
File "/home/weitian/anaconda3/envs/xnor/lib/python3.6/site-packages/art/attacks/carlini.py", line 380, in generate
x_adv_batch_tanh[active_and_update_adv] = x_adv_batch_tanh[update_adv] + \
IndexError: boolean index did not match indexed array along dimension 0; dimension is 18 but corresponding boolean dimension is 17
|
IndexError
|
def _loss(self, x, x_adv, target, c):
"""
Compute the objective function value.
:param x: An array with the original input.
:type x: `np.ndarray`
:param x_adv: An array with the adversarial input.
:type x_adv: `np.ndarray`
:param target: An array with the target class (one-hot encoded).
:type target: `np.ndarray`
:param c: Weight of the loss term aiming for classification as target.
:type c: `float`
:return: A tuple holding the current logits, l2 distance and overall loss.
:rtype: `(float, float, float)`
"""
l2dist = np.sum(np.square(x - x_adv).reshape(x.shape[0], -1), axis=1)
z = self._predict(np.array(x_adv, dtype=NUMPY_DTYPE), logits=True)
z_target = np.sum(z * target, axis=1)
z_other = np.max(
z * (1 - target) + (np.min(z, axis=1) - 1)[:, np.newaxis] * target, axis=1
)
# The following differs from the exact definition given in Carlini and Wagner (2016). There (page 9, left
# column, last equation), the maximum is taken over Z_other - Z_target (or Z_target - Z_other respectively)
# and -confidence. However, it doesn't seem that that would have the desired effect (loss term is <= 0 if and
# only if the difference between the logit of the target and any other class differs by at least confidence).
# Hence the rearrangement here.
if self.targeted:
# if targeted, optimize for making the target class most likely
loss = np.maximum(z_other - z_target + self.confidence, np.zeros(x.shape[0]))
else:
# if untargeted, optimize for making any other class most likely
loss = np.maximum(z_target - z_other + self.confidence, np.zeros(x.shape[0]))
return z, l2dist, c * loss + l2dist
|
def _loss(self, x, x_adv, target, c):
"""
Compute the objective function value.
:param x: An array with the original input.
:type x: `np.ndarray`
:param x_adv: An array with the adversarial input.
:type x_adv: `np.ndarray`
:param target: An array with the target class (one-hot encoded).
:type target: `np.ndarray`
:param c: Weight of the loss term aiming for classification as target.
:type c: `float`
:return: A tuple holding the current logits, l2 distance and overall loss.
:rtype: `(float, float, float)`
"""
l2dist = np.sum(np.square(x - x_adv).reshape(x.shape[0], -1), axis=1)
z = self.classifier.predict(np.array(x_adv, dtype=NUMPY_DTYPE), logits=True)
z_target = np.sum(z * target, axis=1)
z_other = np.max(
z * (1 - target) + (np.min(z, axis=1) - 1)[:, np.newaxis] * target, axis=1
)
# The following differs from the exact definition given in Carlini and Wagner (2016). There (page 9, left
# column, last equation), the maximum is taken over Z_other - Z_target (or Z_target - Z_other respectively)
# and -confidence. However, it doesn't seem that that would have the desired effect (loss term is <= 0 if and
# only if the difference between the logit of the target and any other class differs by at least confidence).
# Hence the rearrangement here.
if self.targeted:
# if targeted, optimize for making the target class most likely
loss = np.maximum(z_other - z_target + self.confidence, np.zeros(x.shape[0]))
else:
# if untargeted, optimize for making any other class most likely
loss = np.maximum(z_target - z_other + self.confidence, np.zeros(x.shape[0]))
return z, l2dist, c * loss + l2dist
|
https://github.com/Trusted-AI/adversarial-robustness-toolbox/issues/29
|
Traceback (most recent call last):
File "cw_pytorch.py", line 172, in <module>
x_test_adv = cl2m.generate(inputs, **params)
File "/home/weitian/anaconda3/envs/xnor/lib/python3.6/site-packages/art/attacks/carlini.py", line 380, in generate
x_adv_batch_tanh[active_and_update_adv] = x_adv_batch_tanh[update_adv] + \
IndexError: boolean index did not match indexed array along dimension 0; dimension is 18 but corresponding boolean dimension is 17
|
IndexError
|
def _loss_gradient(self, z, target, x, x_adv, x_adv_tanh, c, clip_min, clip_max):
"""
Compute the gradient of the loss function.
:param z: An array with the current logits.
:type z: `np.ndarray`
:param target: An array with the target class (one-hot encoded).
:type target: `np.ndarray`
:param x: An array with the original input.
:type x: `np.ndarray`
:param x_adv: An array with the adversarial input.
:type x_adv: `np.ndarray`
:param x_adv_tanh: An array with the adversarial input in tanh space.
:type x_adv_tanh: `np.ndarray`
:param c: Weight of the loss term aiming for classification as target.
:type c: `float`
:param clip_min: Minimum clipping value.
:type clip_min: `float`
:param clip_max: Maximum clipping value.
:type clip_max: `float`
:return: An array with the gradient of the loss function.
:type target: `np.ndarray`
"""
if self.targeted:
i_sub = np.argmax(target, axis=1)
i_add = np.argmax(
z * (1 - target) + (np.min(z, axis=1) - 1)[:, np.newaxis] * target, axis=1
)
else:
i_add = np.argmax(target, axis=1)
i_sub = np.argmax(
z * (1 - target) + (np.min(z, axis=1) - 1)[:, np.newaxis] * target, axis=1
)
loss_gradient = self._class_gradient(x_adv, label=i_add, logits=True)
loss_gradient -= self._class_gradient(x_adv, label=i_sub, logits=True)
loss_gradient = loss_gradient.reshape(x.shape)
c_mult = c
for _ in range(len(x.shape) - 1):
c_mult = c_mult[:, np.newaxis]
loss_gradient *= c_mult
loss_gradient += 2 * (x_adv - x)
loss_gradient *= clip_max - clip_min
loss_gradient *= (1 - np.square(np.tanh(x_adv_tanh))) / (2 * self._tanh_smoother)
return loss_gradient
|
def _loss_gradient(self, z, target, x, x_adv, x_adv_tanh, c, clip_min, clip_max):
"""
Compute the gradient of the loss function.
:param z: An array with the current logits.
:type z: `np.ndarray`
:param target: An array with the target class (one-hot encoded).
:type target: `np.ndarray`
:param x: An array with the original input.
:type x: `np.ndarray`
:param x_adv: An array with the adversarial input.
:type x_adv: `np.ndarray`
:param x_adv_tanh: An array with the adversarial input in tanh space.
:type x_adv_tanh: `np.ndarray`
:param c: Weight of the loss term aiming for classification as target.
:type c: `float`
:param clip_min: Minimum clipping value.
:type clip_min: `float`
:param clip_max: Maximum clipping value.
:type clip_max: `float`
:return: An array with the gradient of the loss function.
:type target: `np.ndarray`
"""
if self.targeted:
i_sub = np.argmax(target, axis=1)
i_add = np.argmax(
z * (1 - target) + (np.min(z, axis=1) - 1)[:, np.newaxis] * target, axis=1
)
else:
i_add = np.argmax(target, axis=1)
i_sub = np.argmax(
z * (1 - target) + (np.min(z, axis=1) - 1)[:, np.newaxis] * target, axis=1
)
loss_gradient = self.classifier.class_gradient(x_adv, label=i_add, logits=True)
loss_gradient -= self.classifier.class_gradient(x_adv, label=i_sub, logits=True)
loss_gradient = loss_gradient.reshape(x.shape)
c_mult = c
for _ in range(len(x.shape) - 1):
c_mult = c_mult[:, np.newaxis]
loss_gradient *= c_mult
loss_gradient += 2 * (x_adv - x)
loss_gradient *= clip_max - clip_min
loss_gradient *= (1 - np.square(np.tanh(x_adv_tanh))) / (2 * self._tanh_smoother)
return loss_gradient
|
https://github.com/Trusted-AI/adversarial-robustness-toolbox/issues/29
|
Traceback (most recent call last):
File "cw_pytorch.py", line 172, in <module>
x_test_adv = cl2m.generate(inputs, **params)
File "/home/weitian/anaconda3/envs/xnor/lib/python3.6/site-packages/art/attacks/carlini.py", line 380, in generate
x_adv_batch_tanh[active_and_update_adv] = x_adv_batch_tanh[update_adv] + \
IndexError: boolean index did not match indexed array along dimension 0; dimension is 18 but corresponding boolean dimension is 17
|
IndexError
|
def generate(self, x, **kwargs):
"""
Generate adversarial samples and return them in an array.
:param x: An array with the original inputs to be attacked.
:type x: `np.ndarray`
:param y: If `self.targeted` is true, then `y_val` represents the target labels. Otherwise, the targets are
the original class labels.
:type y: `np.ndarray`
:return: An array holding the adversarial examples.
:rtype: `np.ndarray`
"""
x_adv = x.astype(NUMPY_DTYPE)
(clip_min, clip_max) = self.classifier.clip_values
# Parse and save attack-specific parameters
params_cpy = dict(kwargs)
y = params_cpy.pop(str("y"), None)
self.set_params(**params_cpy)
# Assert that, if attack is targeted, y_val is provided:
if self.targeted and y is None:
raise ValueError("Target labels `y` need to be provided for a targeted attack.")
# No labels provided, use model prediction as correct class
if y is None:
y = get_labels_np_array(self._predict(x, logits=False))
# Compute perturbation with implicit batching
nb_batches = int(np.ceil(x_adv.shape[0] / float(self.batch_size)))
for batch_id in range(nb_batches):
logger.debug("Processing batch %i out of %i", batch_id, nb_batches)
batch_index_1, batch_index_2 = (
batch_id * self.batch_size,
(batch_id + 1) * self.batch_size,
)
x_batch = x_adv[batch_index_1:batch_index_2]
y_batch = y[batch_index_1:batch_index_2]
# The optimization is performed in tanh space to keep the
# adversarial images bounded from clip_min and clip_max.
x_batch_tanh = self._original_to_tanh(x_batch, clip_min, clip_max)
# Initialize binary search:
c = self.initial_const * np.ones(x_batch.shape[0])
c_lower_bound = np.zeros(x_batch.shape[0])
c_double = np.ones(x_batch.shape[0]) > 0
# Initialize placeholders for best l2 distance and attack found so far
best_l2dist = np.inf * np.ones(x_batch.shape[0])
best_x_adv_batch = x_batch.copy()
for bss in range(self.binary_search_steps):
logger.debug(
"Binary search step %i out of %i (c_mean==%f)",
bss,
self.binary_search_steps,
np.mean(c),
)
nb_active = int(np.sum(c < self._c_upper_bound))
logger.debug(
"Number of samples with c < _c_upper_bound: %i out of %i",
nb_active,
x_batch.shape[0],
)
if nb_active == 0:
break
lr = self.learning_rate * np.ones(x_batch.shape[0])
# Initialize perturbation in tanh space:
x_adv_batch = x_batch.copy()
x_adv_batch_tanh = x_batch_tanh.copy()
z, l2dist, loss = self._loss(x_batch, x_adv_batch, y_batch, c)
attack_success = loss - l2dist <= 0
overall_attack_success = attack_success
for it in range(self.max_iter):
logger.debug("Iteration step %i out of %i", it, self.max_iter)
logger.debug("Average Loss: %f", np.mean(loss))
logger.debug("Average L2Dist: %f", np.mean(l2dist))
logger.debug("Average Margin Loss: %f", np.mean(loss - l2dist))
logger.debug(
"Current number of succeeded attacks: %i out of %i",
int(np.sum(attack_success)),
len(attack_success),
)
improved_adv = attack_success & (l2dist < best_l2dist)
logger.debug(
"Number of improved L2 distances: %i", int(np.sum(improved_adv))
)
if np.sum(improved_adv) > 0:
best_l2dist[improved_adv] = l2dist[improved_adv]
best_x_adv_batch[improved_adv] = x_adv_batch[improved_adv]
active = (c < self._c_upper_bound) & (lr > 0)
nb_active = int(np.sum(active))
logger.debug(
"Number of samples with c < _c_upper_bound and lr > 0: %i out of %i",
nb_active,
x_batch.shape[0],
)
if nb_active == 0:
break
# compute gradient:
logger.debug("Compute loss gradient")
perturbation_tanh = -self._loss_gradient(
z[active],
y_batch[active],
x_batch[active],
x_adv_batch[active],
x_adv_batch_tanh[active],
c[active],
clip_min,
clip_max,
)
# perform line search to optimize perturbation
# first, halve the learning rate until perturbation actually decreases the loss:
prev_loss = loss.copy()
best_loss = loss.copy()
best_lr = np.zeros(x_batch.shape[0])
halving = np.zeros(x_batch.shape[0])
for h in range(self.max_halving):
logger.debug(
"Perform halving iteration %i out of %i", h, self.max_halving
)
do_halving = loss[active] >= prev_loss[active]
logger.debug(
"Halving to be performed on %i samples", int(np.sum(do_halving))
)
if np.sum(do_halving) == 0:
break
active_and_do_halving = active.copy()
active_and_do_halving[active] = do_halving
lr_mult = lr[active_and_do_halving]
for _ in range(len(x.shape) - 1):
lr_mult = lr_mult[:, np.newaxis]
new_x_adv_batch_tanh = (
x_adv_batch_tanh[active_and_do_halving]
+ lr_mult * perturbation_tanh[do_halving]
)
new_x_adv_batch = self._tanh_to_original(
new_x_adv_batch_tanh, clip_min, clip_max
)
_, l2dist[active_and_do_halving], loss[active_and_do_halving] = (
self._loss(
x_batch[active_and_do_halving],
new_x_adv_batch,
y_batch[active_and_do_halving],
c[active_and_do_halving],
)
)
logger.debug("New Average Loss: %f", np.mean(loss))
logger.debug("New Average L2Dist: %f", np.mean(l2dist))
logger.debug("New Average Margin Loss: %f", np.mean(loss - l2dist))
best_lr[loss < best_loss] = lr[loss < best_loss]
best_loss[loss < best_loss] = loss[loss < best_loss]
lr[active_and_do_halving] /= 2
halving[active_and_do_halving] += 1
lr[active] *= 2
# if no halving was actually required, double the learning rate as long as this
# decreases the loss:
for d in range(self.max_doubling):
logger.debug(
"Perform doubling iteration %i out of %i", d, self.max_doubling
)
do_doubling = (halving[active] == 1) & (
loss[active] <= best_loss[active]
)
logger.debug(
"Doubling to be performed on %i samples",
int(np.sum(do_doubling)),
)
if np.sum(do_doubling) == 0:
break
active_and_do_doubling = active.copy()
active_and_do_doubling[active] = do_doubling
lr[active_and_do_doubling] *= 2
lr_mult = lr[active_and_do_doubling]
for _ in range(len(x.shape) - 1):
lr_mult = lr_mult[:, np.newaxis]
new_x_adv_batch_tanh = (
x_adv_batch_tanh[active_and_do_doubling]
+ lr_mult * perturbation_tanh[do_doubling]
)
new_x_adv_batch = self._tanh_to_original(
new_x_adv_batch_tanh, clip_min, clip_max
)
_, l2dist[active_and_do_doubling], loss[active_and_do_doubling] = (
self._loss(
x_batch[active_and_do_doubling],
new_x_adv_batch,
y_batch[active_and_do_doubling],
c[active_and_do_doubling],
)
)
logger.debug("New Average Loss: %f", np.mean(loss))
logger.debug("New Average L2Dist: %f", np.mean(l2dist))
logger.debug("New Average Margin Loss: %f", np.mean(loss - l2dist))
best_lr[loss < best_loss] = lr[loss < best_loss]
best_loss[loss < best_loss] = loss[loss < best_loss]
lr[halving == 1] /= 2
update_adv = best_lr[active] > 0
logger.debug(
"Number of adversarial samples to be finally updated: %i",
int(np.sum(update_adv)),
)
if np.sum(update_adv) > 0:
active_and_update_adv = active.copy()
active_and_update_adv[active] = update_adv
best_lr_mult = best_lr[active_and_update_adv]
for _ in range(len(x.shape) - 1):
best_lr_mult = best_lr_mult[:, np.newaxis]
x_adv_batch_tanh[active_and_update_adv] = (
x_adv_batch_tanh[active_and_update_adv]
+ best_lr_mult * perturbation_tanh[update_adv]
)
x_adv_batch[active_and_update_adv] = self._tanh_to_original(
x_adv_batch_tanh[active_and_update_adv], clip_min, clip_max
)
(
z[active_and_update_adv],
l2dist[active_and_update_adv],
loss[active_and_update_adv],
) = self._loss(
x_batch[active_and_update_adv],
x_adv_batch[active_and_update_adv],
y_batch[active_and_update_adv],
c[active_and_update_adv],
)
attack_success = loss - l2dist <= 0
overall_attack_success = overall_attack_success | attack_success
# Update depending on attack success:
improved_adv = attack_success & (l2dist < best_l2dist)
logger.debug(
"Number of improved L2 distances: %i", int(np.sum(improved_adv))
)
if np.sum(improved_adv) > 0:
best_l2dist[improved_adv] = l2dist[improved_adv]
best_x_adv_batch[improved_adv] = x_adv_batch[improved_adv]
c_double[overall_attack_success] = False
c[overall_attack_success] = (c_lower_bound + c)[overall_attack_success] / 2
c_old = c
c[~overall_attack_success & c_double] *= 2
c[~overall_attack_success & ~c_double] += (c - c_lower_bound)[
~overall_attack_success & ~c_double
] / 2
c_lower_bound[~overall_attack_success] = c_old[~overall_attack_success]
x_adv[batch_index_1:batch_index_2] = best_x_adv_batch
adv_preds = np.argmax(self._predict(x_adv), axis=1)
if self.targeted:
rate = np.sum(adv_preds == np.argmax(y, axis=1)) / x_adv.shape[0]
else:
preds = np.argmax(self._predict(x), axis=1)
rate = np.sum(adv_preds != preds) / x_adv.shape[0]
logger.info("Success rate of C&W attack: %.2f%%", 100 * rate)
return x_adv
|
def generate(self, x, **kwargs):
"""
Generate adversarial samples and return them in an array.
:param x: An array with the original inputs to be attacked.
:type x: `np.ndarray`
:param y: If `self.targeted` is true, then `y_val` represents the target labels. Otherwise, the targets are
the original class labels.
:type y: `np.ndarray`
:return: An array holding the adversarial examples.
:rtype: `np.ndarray`
"""
x_adv = x.astype(NUMPY_DTYPE)
(clip_min, clip_max) = self.classifier.clip_values
# Parse and save attack-specific parameters
params_cpy = dict(kwargs)
y = params_cpy.pop(str("y"), None)
self.set_params(**params_cpy)
# Assert that, if attack is targeted, y_val is provided:
if self.targeted and y is None:
raise ValueError("Target labels `y` need to be provided for a targeted attack.")
# No labels provided, use model prediction as correct class
if y is None:
y = get_labels_np_array(self.classifier.predict(x, logits=False))
# Compute perturbation with implicit batching
nb_batches = int(np.ceil(x_adv.shape[0] / float(self.batch_size)))
for batch_id in range(nb_batches):
logger.debug("Processing batch %i out of %i", batch_id, nb_batches)
batch_index_1, batch_index_2 = (
batch_id * self.batch_size,
(batch_id + 1) * self.batch_size,
)
x_batch = x_adv[batch_index_1:batch_index_2]
y_batch = y[batch_index_1:batch_index_2]
# The optimization is performed in tanh space to keep the
# adversarial images bounded from clip_min and clip_max.
x_batch_tanh = self._original_to_tanh(x_batch, clip_min, clip_max)
# Initialize binary search:
c = self.initial_const * np.ones(x_batch.shape[0])
c_lower_bound = np.zeros(x_batch.shape[0])
c_double = np.ones(x_batch.shape[0]) > 0
# Initialize placeholders for best l2 distance and attack found so far
best_l2dist = np.inf * np.ones(x_batch.shape[0])
best_x_adv_batch = x_batch.copy()
for bss in range(self.binary_search_steps):
logger.debug(
"Binary search step %i out of %i (c_mean==%f)",
bss,
self.binary_search_steps,
np.mean(c),
)
nb_active = int(np.sum(c < self._c_upper_bound))
logger.debug(
"Number of samples with c < _c_upper_bound: %i out of %i",
nb_active,
x_batch.shape[0],
)
if nb_active == 0:
break
lr = self.learning_rate * np.ones(x_batch.shape[0])
# Initialize perturbation in tanh space:
x_adv_batch = x_batch.copy()
x_adv_batch_tanh = x_batch_tanh.copy()
z, l2dist, loss = self._loss(x_batch, x_adv_batch, y_batch, c)
attack_success = loss - l2dist <= 0
overall_attack_success = attack_success
for it in range(self.max_iter):
logger.debug("Iteration step %i out of %i", it, self.max_iter)
logger.debug("Average Loss: %f", np.mean(loss))
logger.debug("Average L2Dist: %f", np.mean(l2dist))
logger.debug("Average Margin Loss: %f", np.mean(loss - l2dist))
logger.debug(
"Current number of succeeded attacks: %i out of %i",
int(np.sum(attack_success)),
len(attack_success),
)
improved_adv = attack_success & (l2dist < best_l2dist)
logger.debug(
"Number of improved L2 distances: %i", int(np.sum(improved_adv))
)
if np.sum(improved_adv) > 0:
best_l2dist[improved_adv] = l2dist[improved_adv]
best_x_adv_batch[improved_adv] = x_adv_batch[improved_adv]
active = (c < self._c_upper_bound) & (lr > 0)
nb_active = int(np.sum(active))
logger.debug(
"Number of samples with c < _c_upper_bound and lr > 0: %i out of %i",
nb_active,
x_batch.shape[0],
)
if nb_active == 0:
break
# compute gradient:
logger.debug("Compute loss gradient")
perturbation_tanh = -self._loss_gradient(
z[active],
y_batch[active],
x_batch[active],
x_adv_batch[active],
x_adv_batch_tanh[active],
c[active],
clip_min,
clip_max,
)
# perform line search to optimize perturbation
# first, halve the learning rate until perturbation actually decreases the loss:
prev_loss = loss.copy()
best_loss = loss.copy()
best_lr = np.zeros(x_batch.shape[0])
halving = np.zeros(x_batch.shape[0])
for h in range(self.max_halving):
logger.debug(
"Perform halving iteration %i out of %i", h, self.max_halving
)
do_halving = loss[active] >= prev_loss[active]
logger.debug(
"Halving to be performed on %i samples", int(np.sum(do_halving))
)
if np.sum(do_halving) == 0:
break
active_and_do_halving = active.copy()
active_and_do_halving[active] = do_halving
lr_mult = lr[active_and_do_halving]
for _ in range(len(x.shape) - 1):
lr_mult = lr_mult[:, np.newaxis]
new_x_adv_batch_tanh = (
x_adv_batch_tanh[active_and_do_halving]
+ lr_mult * perturbation_tanh[do_halving]
)
new_x_adv_batch = self._tanh_to_original(
new_x_adv_batch_tanh, clip_min, clip_max
)
_, l2dist[active_and_do_halving], loss[active_and_do_halving] = (
self._loss(
x_batch[active_and_do_halving],
new_x_adv_batch,
y_batch[active_and_do_halving],
c[active_and_do_halving],
)
)
logger.debug("New Average Loss: %f", np.mean(loss))
logger.debug("New Average L2Dist: %f", np.mean(l2dist))
logger.debug("New Average Margin Loss: %f", np.mean(loss - l2dist))
best_lr[loss < best_loss] = lr[loss < best_loss]
best_loss[loss < best_loss] = loss[loss < best_loss]
lr[active_and_do_halving] /= 2
halving[active_and_do_halving] += 1
lr[active] *= 2
# if no halving was actually required, double the learning rate as long as this
# decreases the loss:
for d in range(self.max_doubling):
logger.debug(
"Perform doubling iteration %i out of %i", d, self.max_doubling
)
do_doubling = (halving[active] == 1) & (
loss[active] <= best_loss[active]
)
logger.debug(
"Doubling to be performed on %i samples",
int(np.sum(do_doubling)),
)
if np.sum(do_doubling) == 0:
break
active_and_do_doubling = active.copy()
active_and_do_doubling[active] = do_doubling
lr[active_and_do_doubling] *= 2
lr_mult = lr[active_and_do_doubling]
for _ in range(len(x.shape) - 1):
lr_mult = lr_mult[:, np.newaxis]
new_x_adv_batch_tanh = (
x_adv_batch_tanh[active_and_do_doubling]
+ lr_mult * perturbation_tanh[do_doubling]
)
new_x_adv_batch = self._tanh_to_original(
new_x_adv_batch_tanh, clip_min, clip_max
)
_, l2dist[active_and_do_doubling], loss[active_and_do_doubling] = (
self._loss(
x_batch[active_and_do_doubling],
new_x_adv_batch,
y_batch[active_and_do_doubling],
c[active_and_do_doubling],
)
)
logger.debug("New Average Loss: %f", np.mean(loss))
logger.debug("New Average L2Dist: %f", np.mean(l2dist))
logger.debug("New Average Margin Loss: %f", np.mean(loss - l2dist))
best_lr[loss < best_loss] = lr[loss < best_loss]
best_loss[loss < best_loss] = loss[loss < best_loss]
lr[halving == 1] /= 2
update_adv = best_lr[active] > 0
logger.debug(
"Number of adversarial samples to be finally updated: %i",
int(np.sum(update_adv)),
)
if np.sum(update_adv) > 0:
active_and_update_adv = active.copy()
active_and_update_adv[active] = update_adv
best_lr_mult = best_lr[active_and_update_adv]
for _ in range(len(x.shape) - 1):
best_lr_mult = best_lr_mult[:, np.newaxis]
x_adv_batch_tanh[active_and_update_adv] = (
x_adv_batch_tanh[active_and_update_adv]
+ best_lr_mult * perturbation_tanh[update_adv]
)
x_adv_batch[active_and_update_adv] = self._tanh_to_original(
x_adv_batch_tanh[active_and_update_adv], clip_min, clip_max
)
(
z[active_and_update_adv],
l2dist[active_and_update_adv],
loss[active_and_update_adv],
) = self._loss(
x_batch[active_and_update_adv],
x_adv_batch[active_and_update_adv],
y_batch[active_and_update_adv],
c[active_and_update_adv],
)
attack_success = loss - l2dist <= 0
overall_attack_success = overall_attack_success | attack_success
# Update depending on attack success:
improved_adv = attack_success & (l2dist < best_l2dist)
logger.debug(
"Number of improved L2 distances: %i", int(np.sum(improved_adv))
)
if np.sum(improved_adv) > 0:
best_l2dist[improved_adv] = l2dist[improved_adv]
best_x_adv_batch[improved_adv] = x_adv_batch[improved_adv]
c_double[overall_attack_success] = False
c[overall_attack_success] = (c_lower_bound + c)[overall_attack_success] / 2
c_old = c
c[~overall_attack_success & c_double] *= 2
c[~overall_attack_success & ~c_double] += (c - c_lower_bound)[
~overall_attack_success & ~c_double
] / 2
c_lower_bound[~overall_attack_success] = c_old[~overall_attack_success]
x_adv[batch_index_1:batch_index_2] = best_x_adv_batch
adv_preds = np.argmax(self.classifier.predict(x_adv), axis=1)
if self.targeted:
rate = np.sum(adv_preds == np.argmax(y, axis=1)) / x_adv.shape[0]
else:
preds = np.argmax(self.classifier.predict(x), axis=1)
rate = np.sum(adv_preds != preds) / x_adv.shape[0]
logger.info("Success rate of C&W attack: %.2f%%", 100 * rate)
return x_adv
|
https://github.com/Trusted-AI/adversarial-robustness-toolbox/issues/29
|
Traceback (most recent call last):
File "cw_pytorch.py", line 172, in <module>
x_test_adv = cl2m.generate(inputs, **params)
File "/home/weitian/anaconda3/envs/xnor/lib/python3.6/site-packages/art/attacks/carlini.py", line 380, in generate
x_adv_batch_tanh[active_and_update_adv] = x_adv_batch_tanh[update_adv] + \
IndexError: boolean index did not match indexed array along dimension 0; dimension is 18 but corresponding boolean dimension is 17
|
IndexError
|
def __init__(
self,
classifier,
confidence=0.0,
targeted=True,
learning_rate=0.01,
max_iter=10,
max_halving=5,
max_doubling=5,
eps=0.3,
batch_size=128,
expectation=None,
):
"""
Create a Carlini L_Inf attack instance.
:param classifier: A trained model.
:type classifier: :class:`Classifier`
:param confidence: Confidence of adversarial examples: a higher value produces examples that are farther away,
from the original input, but classified with higher confidence as the target class.
:type confidence: `float`
:param targeted: Should the attack target one specific class.
:type targeted: `bool`
:param learning_rate: The initial learning rate for the attack algorithm. Smaller values produce better
results but are slower to converge.
:type learning_rate: `float`
:param max_iter: The maximum number of iterations.
:type max_iter: `int`
:param max_halving: Maximum number of halving steps in the line search optimization.
:type max_halving: `int`
:param max_doubling: Maximum number of doubling steps in the line search optimization.
:type max_doubling: `int`
:param eps: An upper bound for the L_0 norm of the adversarial perturbation.
:type eps: `float`
:param batch_size: Internal size of batches on which adversarial samples are generated.
:type batch_size: `int`
:param expectation: An expectation over transformations to be applied when computing
classifier gradients and predictions.
:type expectation: :class:`ExpectationOverTransformations`
"""
super(CarliniLInfMethod, self).__init__(classifier)
kwargs = {
"confidence": confidence,
"targeted": targeted,
"learning_rate": learning_rate,
"max_iter": max_iter,
"max_halving": max_halving,
"max_doubling": max_doubling,
"eps": eps,
"batch_size": batch_size,
"expectation": expectation,
}
assert self.set_params(**kwargs)
# There is one internal hyperparameter:
# Smooth arguments of arctanh by multiplying with this constant to avoid division by zero:
self._tanh_smoother = 0.999999
|
def __init__(
self,
classifier,
confidence=0.0,
targeted=True,
learning_rate=0.01,
max_iter=10,
max_halving=5,
max_doubling=5,
eps=0.3,
batch_size=128,
):
"""
Create a Carlini L_Inf attack instance.
:param classifier: A trained model.
:type classifier: :class:`Classifier`
:param confidence: Confidence of adversarial examples: a higher value produces examples that are farther away,
from the original input, but classified with higher confidence as the target class.
:type confidence: `float`
:param targeted: Should the attack target one specific class.
:type targeted: `bool`
:param learning_rate: The initial learning rate for the attack algorithm. Smaller values produce better
results but are slower to converge.
:type learning_rate: `float`
:param max_iter: The maximum number of iterations.
:type max_iter: `int`
:param max_halving: Maximum number of halving steps in the line search optimization.
:type max_halving: `int`
:param max_doubling: Maximum number of doubling steps in the line search optimization.
:type max_doubling: `int`
:param eps: An upper bound for the L_0 norm of the adversarial perturbation.
:type eps: `float`
:param batch_size: Internal size of batches on which adversarial samples are generated.
:type batch_size: `int`
"""
super(CarliniLInfMethod, self).__init__(classifier)
kwargs = {
"confidence": confidence,
"targeted": targeted,
"learning_rate": learning_rate,
"max_iter": max_iter,
"max_halving": max_halving,
"max_doubling": max_doubling,
"eps": eps,
"batch_size": batch_size,
}
assert self.set_params(**kwargs)
# There is one internal hyperparameter:
# Smooth arguments of arctanh by multiplying with this constant to avoid division by zero:
self._tanh_smoother = 0.999999
|
https://github.com/Trusted-AI/adversarial-robustness-toolbox/issues/29
|
Traceback (most recent call last):
File "cw_pytorch.py", line 172, in <module>
x_test_adv = cl2m.generate(inputs, **params)
File "/home/weitian/anaconda3/envs/xnor/lib/python3.6/site-packages/art/attacks/carlini.py", line 380, in generate
x_adv_batch_tanh[active_and_update_adv] = x_adv_batch_tanh[update_adv] + \
IndexError: boolean index did not match indexed array along dimension 0; dimension is 18 but corresponding boolean dimension is 17
|
IndexError
|
def _loss(self, x_adv, target):
"""
Compute the objective function value.
:param x_adv: An array with the adversarial input.
:type x_adv: `np.ndarray`
:param target: An array with the target class (one-hot encoded).
:type target: `np.ndarray`
:return: A tuple holding the current logits and overall loss.
:rtype: `(float, float)`
"""
z = self._predict(np.array(x_adv, dtype=NUMPY_DTYPE), logits=True)
z_target = np.sum(z * target, axis=1)
z_other = np.max(
z * (1 - target) + (np.min(z, axis=1) - 1)[:, np.newaxis] * target, axis=1
)
if self.targeted:
# if targeted, optimize for making the target class most likely
loss = np.maximum(
z_other - z_target + self.confidence, np.zeros(x_adv.shape[0])
)
else:
# if untargeted, optimize for making any other class most likely
loss = np.maximum(
z_target - z_other + self.confidence, np.zeros(x_adv.shape[0])
)
return z, loss
|
def _loss(self, x_adv, target):
"""
Compute the objective function value.
:param x_adv: An array with the adversarial input.
:type x_adv: `np.ndarray`
:param target: An array with the target class (one-hot encoded).
:type target: `np.ndarray`
:return: A tuple holding the current logits and overall loss.
:rtype: `(float, float)`
"""
z = self.classifier.predict(np.array(x_adv, dtype=NUMPY_DTYPE), logits=True)
z_target = np.sum(z * target, axis=1)
z_other = np.max(
z * (1 - target) + (np.min(z, axis=1) - 1)[:, np.newaxis] * target, axis=1
)
if self.targeted:
# if targeted, optimize for making the target class most likely
loss = np.maximum(
z_other - z_target + self.confidence, np.zeros(x_adv.shape[0])
)
else:
# if untargeted, optimize for making any other class most likely
loss = np.maximum(
z_target - z_other + self.confidence, np.zeros(x_adv.shape[0])
)
return z, loss
|
https://github.com/Trusted-AI/adversarial-robustness-toolbox/issues/29
|
Traceback (most recent call last):
File "cw_pytorch.py", line 172, in <module>
x_test_adv = cl2m.generate(inputs, **params)
File "/home/weitian/anaconda3/envs/xnor/lib/python3.6/site-packages/art/attacks/carlini.py", line 380, in generate
x_adv_batch_tanh[active_and_update_adv] = x_adv_batch_tanh[update_adv] + \
IndexError: boolean index did not match indexed array along dimension 0; dimension is 18 but corresponding boolean dimension is 17
|
IndexError
|
def _loss_gradient(self, z, target, x_adv, x_adv_tanh, clip_min, clip_max):
"""
Compute the gradient of the loss function.
:param z: An array with the current logits.
:type z: `np.ndarray`
:param target: An array with the target class (one-hot encoded).
:type target: `np.ndarray`
:param x_adv: An array with the adversarial input.
:type x_adv: `np.ndarray`
:param x_adv_tanh: An array with the adversarial input in tanh space.
:type x_adv_tanh: `np.ndarray`
:param clip_min: Minimum clipping values.
:type clip_min: `np.ndarray`
:param clip_max: Maximum clipping values.
:type clip_max: `np.ndarray`
:return: An array with the gradient of the loss function.
:type target: `np.ndarray`
"""
if self.targeted:
i_sub = np.argmax(target, axis=1)
i_add = np.argmax(
z * (1 - target) + (np.min(z, axis=1) - 1)[:, np.newaxis] * target, axis=1
)
else:
i_add = np.argmax(target, axis=1)
i_sub = np.argmax(
z * (1 - target) + (np.min(z, axis=1) - 1)[:, np.newaxis] * target, axis=1
)
loss_gradient = self._class_gradient(x_adv, label=i_add, logits=True)
loss_gradient -= self._class_gradient(x_adv, label=i_sub, logits=True)
loss_gradient = loss_gradient.reshape(x_adv.shape)
loss_gradient *= clip_max - clip_min
loss_gradient *= (1 - np.square(np.tanh(x_adv_tanh))) / (2 * self._tanh_smoother)
return loss_gradient
|
def _loss_gradient(self, z, target, x_adv, x_adv_tanh, clip_min, clip_max):
"""
Compute the gradient of the loss function.
:param z: An array with the current logits.
:type z: `np.ndarray`
:param target: An array with the target class (one-hot encoded).
:type target: `np.ndarray`
:param x_adv: An array with the adversarial input.
:type x_adv: `np.ndarray`
:param x_adv_tanh: An array with the adversarial input in tanh space.
:type x_adv_tanh: `np.ndarray`
:param clip_min: Minimum clipping values.
:type clip_min: `np.ndarray`
:param clip_max: Maximum clipping values.
:type clip_max: `np.ndarray`
:return: An array with the gradient of the loss function.
:type target: `np.ndarray`
"""
if self.targeted:
i_sub = np.argmax(target, axis=1)
i_add = np.argmax(
z * (1 - target) + (np.min(z, axis=1) - 1)[:, np.newaxis] * target, axis=1
)
else:
i_add = np.argmax(target, axis=1)
i_sub = np.argmax(
z * (1 - target) + (np.min(z, axis=1) - 1)[:, np.newaxis] * target, axis=1
)
loss_gradient = self.classifier.class_gradient(x_adv, label=i_add, logits=True)
loss_gradient -= self.classifier.class_gradient(x_adv, label=i_sub, logits=True)
loss_gradient = loss_gradient.reshape(x_adv.shape)
loss_gradient *= clip_max - clip_min
loss_gradient *= (1 - np.square(np.tanh(x_adv_tanh))) / (2 * self._tanh_smoother)
return loss_gradient
|
https://github.com/Trusted-AI/adversarial-robustness-toolbox/issues/29
|
Traceback (most recent call last):
File "cw_pytorch.py", line 172, in <module>
x_test_adv = cl2m.generate(inputs, **params)
File "/home/weitian/anaconda3/envs/xnor/lib/python3.6/site-packages/art/attacks/carlini.py", line 380, in generate
x_adv_batch_tanh[active_and_update_adv] = x_adv_batch_tanh[update_adv] + \
IndexError: boolean index did not match indexed array along dimension 0; dimension is 18 but corresponding boolean dimension is 17
|
IndexError
|
def generate(self, x, **kwargs):
"""
Generate adversarial samples and return them in an array.
:param x: An array with the original inputs to be attacked.
:type x: `np.ndarray`
:param y: If `self.targeted` is true, then `y_val` represents the target labels. Otherwise, the targets are
the original class labels.
:type y: `np.ndarray`
:return: An array holding the adversarial examples.
:rtype: `np.ndarray`
"""
x_adv = x.astype(NUMPY_DTYPE)
# Parse and save attack-specific parameters
params_cpy = dict(kwargs)
y = params_cpy.pop(str("y"), None)
self.set_params(**params_cpy)
# Assert that, if attack is targeted, y_val is provided:
if self.targeted and y is None:
raise ValueError("Target labels `y` need to be provided for a targeted attack.")
# No labels provided, use model prediction as correct class
if y is None:
y = get_labels_np_array(self._predict(x, logits=False))
# Compute perturbation with implicit batching
nb_batches = int(np.ceil(x_adv.shape[0] / float(self.batch_size)))
for batch_id in range(nb_batches):
logger.debug("Processing batch %i out of %i", batch_id, nb_batches)
batch_index_1, batch_index_2 = (
batch_id * self.batch_size,
(batch_id + 1) * self.batch_size,
)
x_batch = x_adv[batch_index_1:batch_index_2]
y_batch = y[batch_index_1:batch_index_2]
(clip_min_per_pixel, clip_max_per_pixel) = self.classifier.clip_values
clip_min = np.clip(x_batch - self.eps, clip_min_per_pixel, clip_max_per_pixel)
clip_max = np.clip(x_batch + self.eps, clip_min_per_pixel, clip_max_per_pixel)
# The optimization is performed in tanh space to keep the
# adversarial images bounded from clip_min and clip_max.
x_batch_tanh = self._original_to_tanh(x_batch, clip_min, clip_max)
# Initialize perturbation in tanh space:
x_adv_batch = x_batch.copy()
x_adv_batch_tanh = x_batch_tanh.copy()
# Initialize optimization:
z, loss = self._loss(x_adv_batch, y_batch)
attack_success = loss <= 0
lr = self.learning_rate * np.ones(x_batch.shape[0])
for it in range(self.max_iter):
logger.debug("Iteration step %i out of %i", it, self.max_iter)
logger.debug("Average Loss: %f", np.mean(loss))
logger.debug(
"Successful attack samples: %i out of %i",
int(np.sum(attack_success)),
x_batch.shape[0],
)
# only continue optimization for those samples where attack hasn't succeeded yet:
active = ~attack_success
if np.sum(active) == 0:
break
# compute gradient:
logger.debug("Compute loss gradient")
perturbation_tanh = -self._loss_gradient(
z[active],
y_batch[active],
x_adv_batch[active],
x_adv_batch_tanh[active],
clip_min[active],
clip_max[active],
)
# perform line search to optimize perturbation
# first, halve the learning rate until perturbation actually decreases the loss:
prev_loss = loss.copy()
best_loss = loss.copy()
best_lr = np.zeros(x_batch.shape[0])
halving = np.zeros(x_batch.shape[0])
for h in range(self.max_halving):
logger.debug(
"Perform halving iteration %i out of %i", h, self.max_halving
)
do_halving = loss[active] >= prev_loss[active]
logger.debug(
"Halving to be performed on %i samples", int(np.sum(do_halving))
)
if np.sum(do_halving) == 0:
break
active_and_do_halving = active.copy()
active_and_do_halving[active] = do_halving
lr_mult = lr[active_and_do_halving]
for _ in range(len(x.shape) - 1):
lr_mult = lr_mult[:, np.newaxis]
new_x_adv_batch_tanh = (
x_adv_batch_tanh[active_and_do_halving]
+ lr_mult * perturbation_tanh[do_halving]
)
new_x_adv_batch = self._tanh_to_original(
new_x_adv_batch_tanh,
clip_min[active_and_do_halving],
clip_max[active_and_do_halving],
)
_, loss[active_and_do_halving] = self._loss(
new_x_adv_batch, y_batch[active_and_do_halving]
)
logger.debug("New Average Loss: %f", np.mean(loss))
logger.debug("Loss: %s", str(loss))
logger.debug("Prev_loss: %s", str(prev_loss))
logger.debug("Best_loss: %s", str(best_loss))
best_lr[loss < best_loss] = lr[loss < best_loss]
best_loss[loss < best_loss] = loss[loss < best_loss]
lr[active_and_do_halving] /= 2
halving[active_and_do_halving] += 1
lr[active] *= 2
# if no halving was actually required, double the learning rate as long as this
# decreases the loss:
for d in range(self.max_doubling):
logger.debug(
"Perform doubling iteration %i out of %i", d, self.max_doubling
)
do_doubling = (halving[active] == 1) & (
loss[active] <= best_loss[active]
)
logger.debug(
"Doubling to be performed on %i samples", int(np.sum(do_doubling))
)
if np.sum(do_doubling) == 0:
break
active_and_do_doubling = active.copy()
active_and_do_doubling[active] = do_doubling
lr[active_and_do_doubling] *= 2
lr_mult = lr[active_and_do_doubling]
for _ in range(len(x.shape) - 1):
lr_mult = lr_mult[:, np.newaxis]
new_x_adv_batch_tanh = (
x_adv_batch_tanh[active_and_do_doubling]
+ lr_mult * perturbation_tanh[do_doubling]
)
new_x_adv_batch = self._tanh_to_original(
new_x_adv_batch_tanh,
clip_min[active_and_do_doubling],
clip_max[active_and_do_doubling],
)
_, loss[active_and_do_doubling] = self._loss(
new_x_adv_batch, y_batch[active_and_do_doubling]
)
logger.debug("New Average Loss: %f", np.mean(loss))
best_lr[loss < best_loss] = lr[loss < best_loss]
best_loss[loss < best_loss] = loss[loss < best_loss]
lr[halving == 1] /= 2
update_adv = best_lr[active] > 0
logger.debug(
"Number of adversarial samples to be finally updated: %i",
int(np.sum(update_adv)),
)
if np.sum(update_adv) > 0:
active_and_update_adv = active.copy()
active_and_update_adv[active] = update_adv
best_lr_mult = best_lr[active_and_update_adv]
for _ in range(len(x.shape) - 1):
best_lr_mult = best_lr_mult[:, np.newaxis]
x_adv_batch_tanh[active_and_update_adv] = (
x_adv_batch_tanh[active_and_update_adv]
+ best_lr_mult * perturbation_tanh[update_adv]
)
x_adv_batch[active_and_update_adv] = self._tanh_to_original(
x_adv_batch_tanh[active_and_update_adv],
clip_min[active_and_update_adv],
clip_max[active_and_update_adv],
)
z[active_and_update_adv], loss[active_and_update_adv] = self._loss(
x_adv_batch[active_and_update_adv], y_batch[active_and_update_adv]
)
attack_success = loss <= 0
# Update depending on attack success:
x_adv_batch[~attack_success] = x_batch[~attack_success]
x_adv[batch_index_1:batch_index_2] = x_adv_batch
adv_preds = np.argmax(self._predict(x_adv), axis=1)
if self.targeted:
rate = np.sum(adv_preds == np.argmax(y, axis=1)) / x_adv.shape[0]
else:
preds = np.argmax(self._predict(x), axis=1)
rate = np.sum(adv_preds != preds) / x_adv.shape[0]
logger.info("Success rate of C&W attack: %.2f%%", 100 * rate)
return x_adv
|
def generate(self, x, **kwargs):
"""
Generate adversarial samples and return them in an array.
:param x: An array with the original inputs to be attacked.
:type x: `np.ndarray`
:param y: If `self.targeted` is true, then `y_val` represents the target labels. Otherwise, the targets are
the original class labels.
:type y: `np.ndarray`
:return: An array holding the adversarial examples.
:rtype: `np.ndarray`
"""
x_adv = x.astype(NUMPY_DTYPE)
# Parse and save attack-specific parameters
params_cpy = dict(kwargs)
y = params_cpy.pop(str("y"), None)
self.set_params(**params_cpy)
# Assert that, if attack is targeted, y_val is provided:
if self.targeted and y is None:
raise ValueError("Target labels `y` need to be provided for a targeted attack.")
# No labels provided, use model prediction as correct class
if y is None:
y = get_labels_np_array(self.classifier.predict(x, logits=False))
# Compute perturbation with implicit batching
nb_batches = int(np.ceil(x_adv.shape[0] / float(self.batch_size)))
for batch_id in range(nb_batches):
logger.debug("Processing batch %i out of %i", batch_id, nb_batches)
batch_index_1, batch_index_2 = (
batch_id * self.batch_size,
(batch_id + 1) * self.batch_size,
)
x_batch = x_adv[batch_index_1:batch_index_2]
y_batch = y[batch_index_1:batch_index_2]
(clip_min_per_pixel, clip_max_per_pixel) = self.classifier.clip_values
clip_min = np.clip(x_batch - self.eps, clip_min_per_pixel, clip_max_per_pixel)
clip_max = np.clip(x_batch + self.eps, clip_min_per_pixel, clip_max_per_pixel)
# The optimization is performed in tanh space to keep the
# adversarial images bounded from clip_min and clip_max.
x_batch_tanh = self._original_to_tanh(x_batch, clip_min, clip_max)
# Initialize perturbation in tanh space:
x_adv_batch = x_batch.copy()
x_adv_batch_tanh = x_batch_tanh.copy()
# Initialize optimization:
z, loss = self._loss(x_adv_batch, y_batch)
attack_success = loss <= 0
lr = self.learning_rate * np.ones(x_batch.shape[0])
for it in range(self.max_iter):
logger.debug("Iteration step %i out of %i", it, self.max_iter)
logger.debug("Average Loss: %f", np.mean(loss))
logger.debug(
"Successful attack samples: %i out of %i",
int(np.sum(attack_success)),
x_batch.shape[0],
)
# only continue optimization for those samples where attack hasn't succeeded yet:
active = ~attack_success
if np.sum(active) == 0:
break
# compute gradient:
logger.debug("Compute loss gradient")
perturbation_tanh = -self._loss_gradient(
z[active],
y_batch[active],
x_adv_batch[active],
x_adv_batch_tanh[active],
clip_min[active],
clip_max[active],
)
# perform line search to optimize perturbation
# first, halve the learning rate until perturbation actually decreases the loss:
prev_loss = loss.copy()
best_loss = loss.copy()
best_lr = np.zeros(x_batch.shape[0])
halving = np.zeros(x_batch.shape[0])
for h in range(self.max_halving):
logger.debug(
"Perform halving iteration %i out of %i", h, self.max_halving
)
do_halving = loss[active] >= prev_loss[active]
logger.debug(
"Halving to be performed on %i samples", int(np.sum(do_halving))
)
if np.sum(do_halving) == 0:
break
active_and_do_halving = active.copy()
active_and_do_halving[active] = do_halving
lr_mult = lr[active_and_do_halving]
for _ in range(len(x.shape) - 1):
lr_mult = lr_mult[:, np.newaxis]
new_x_adv_batch_tanh = (
x_adv_batch_tanh[active_and_do_halving]
+ lr_mult * perturbation_tanh[do_halving]
)
new_x_adv_batch = self._tanh_to_original(
new_x_adv_batch_tanh,
clip_min[active_and_do_halving],
clip_max[active_and_do_halving],
)
_, loss[active_and_do_halving] = self._loss(
new_x_adv_batch, y_batch[active_and_do_halving]
)
logger.debug("New Average Loss: %f", np.mean(loss))
logger.debug("Loss: %s", str(loss))
logger.debug("Prev_loss: %s", str(prev_loss))
logger.debug("Best_loss: %s", str(best_loss))
best_lr[loss < best_loss] = lr[loss < best_loss]
best_loss[loss < best_loss] = loss[loss < best_loss]
lr[active_and_do_halving] /= 2
halving[active_and_do_halving] += 1
lr[active] *= 2
# if no halving was actually required, double the learning rate as long as this
# decreases the loss:
for d in range(self.max_doubling):
logger.debug(
"Perform doubling iteration %i out of %i", d, self.max_doubling
)
do_doubling = (halving[active] == 1) & (
loss[active] <= best_loss[active]
)
logger.debug(
"Doubling to be performed on %i samples", int(np.sum(do_doubling))
)
if np.sum(do_doubling) == 0:
break
active_and_do_doubling = active.copy()
active_and_do_doubling[active] = do_doubling
lr[active_and_do_doubling] *= 2
lr_mult = lr[active_and_do_doubling]
for _ in range(len(x.shape) - 1):
lr_mult = lr_mult[:, np.newaxis]
new_x_adv_batch_tanh = (
x_adv_batch_tanh[active_and_do_doubling]
+ lr_mult * perturbation_tanh[do_doubling]
)
new_x_adv_batch = self._tanh_to_original(
new_x_adv_batch_tanh,
clip_min[active_and_do_doubling],
clip_max[active_and_do_doubling],
)
_, loss[active_and_do_doubling] = self._loss(
new_x_adv_batch, y_batch[active_and_do_doubling]
)
logger.debug("New Average Loss: %f", np.mean(loss))
best_lr[loss < best_loss] = lr[loss < best_loss]
best_loss[loss < best_loss] = loss[loss < best_loss]
lr[halving == 1] /= 2
update_adv = best_lr[active] > 0
logger.debug(
"Number of adversarial samples to be finally updated: %i",
int(np.sum(update_adv)),
)
if np.sum(update_adv) > 0:
active_and_update_adv = active.copy()
active_and_update_adv[active] = update_adv
best_lr_mult = best_lr[active_and_update_adv]
for _ in range(len(x.shape) - 1):
best_lr_mult = best_lr_mult[:, np.newaxis]
x_adv_batch_tanh[active_and_update_adv] = (
x_adv_batch_tanh[active_and_update_adv]
+ best_lr_mult * perturbation_tanh[update_adv]
)
x_adv_batch[active_and_update_adv] = self._tanh_to_original(
x_adv_batch_tanh[active_and_update_adv],
clip_min[active_and_update_adv],
clip_max[active_and_update_adv],
)
z[active_and_update_adv], loss[active_and_update_adv] = self._loss(
x_adv_batch[active_and_update_adv], y_batch[active_and_update_adv]
)
attack_success = loss <= 0
# Update depending on attack success:
x_adv_batch[~attack_success] = x_batch[~attack_success]
x_adv[batch_index_1:batch_index_2] = x_adv_batch
adv_preds = np.argmax(self.classifier.predict(x_adv), axis=1)
if self.targeted:
rate = np.sum(adv_preds == np.argmax(y, axis=1)) / x_adv.shape[0]
else:
preds = np.argmax(self.classifier.predict(x), axis=1)
rate = np.sum(adv_preds != preds) / x_adv.shape[0]
logger.info("Success rate of C&W attack: %.2f%%", 100 * rate)
return x_adv
|
https://github.com/Trusted-AI/adversarial-robustness-toolbox/issues/29
|
Traceback (most recent call last):
File "cw_pytorch.py", line 172, in <module>
x_test_adv = cl2m.generate(inputs, **params)
File "/home/weitian/anaconda3/envs/xnor/lib/python3.6/site-packages/art/attacks/carlini.py", line 380, in generate
x_adv_batch_tanh[active_and_update_adv] = x_adv_batch_tanh[update_adv] + \
IndexError: boolean index did not match indexed array along dimension 0; dimension is 18 but corresponding boolean dimension is 17
|
IndexError
|
def __init__(
self,
classifier,
max_iter=100,
epsilon=1e-6,
nb_grads=10,
batch_size=128,
expectation=None,
):
"""
Create a DeepFool attack instance.
:param classifier: A trained model.
:type classifier: :class:`Classifier`
:param max_iter: The maximum number of iterations.
:type max_iter: `int`
:param epsilon: Overshoot parameter.
:type epsilon: `float`
:param nb_grads: The number of class gradients (top nb_grads w.r.t. prediction) to compute. This way only the
most likely classes are considered, speeding up the computation.
:type nb_grads: `int`
:param batch_size: Batch size
:type batch_size: `int`
:param expectation: An expectation over transformations to be applied when computing
classifier gradients and predictions.
:type expectation: :class:`ExpectationOverTransformations`
"""
super(DeepFool, self).__init__(classifier=classifier, expectation=expectation)
params = {
"max_iter": max_iter,
"epsilon": epsilon,
"nb_grads": nb_grads,
"batch_size": batch_size,
}
self.set_params(**params)
|
def __init__(self, classifier, max_iter=100, epsilon=1e-6):
"""
Create a DeepFool attack instance.
:param classifier: A trained model.
:type classifier: :class:`Classifier`
:param max_iter: The maximum number of iterations.
:type max_iter: `int`
:param epsilon: Overshoot parameter.
:type epsilon: `float`
"""
super(DeepFool, self).__init__(classifier)
params = {"max_iter": max_iter, "epsilon": epsilon}
self.set_params(**params)
|
https://github.com/Trusted-AI/adversarial-robustness-toolbox/issues/29
|
Traceback (most recent call last):
File "cw_pytorch.py", line 172, in <module>
x_test_adv = cl2m.generate(inputs, **params)
File "/home/weitian/anaconda3/envs/xnor/lib/python3.6/site-packages/art/attacks/carlini.py", line 380, in generate
x_adv_batch_tanh[active_and_update_adv] = x_adv_batch_tanh[update_adv] + \
IndexError: boolean index did not match indexed array along dimension 0; dimension is 18 but corresponding boolean dimension is 17
|
IndexError
|
def generate(self, x, **kwargs):
"""
Generate adversarial samples and return them in an array.
:param x: An array with the original inputs to be attacked.
:type x: `np.ndarray`
:param max_iter: The maximum number of iterations.
:type max_iter: `int`
:param epsilon: Overshoot parameter.
:type epsilon: `float`
:param nb_grads: The number of class gradients (top nb_grads w.r.t. prediction) to compute. This way only the
most likely classes are considered, speeding up the computation.
:type nb_grads: `int`
:param batch_size: Batch size
:type batch_size: `int`
:param expectation: An expectation over transformations to be applied when computing
classifier gradients and predictions.
:type expectation: :class:`ExpectationOverTransformations`
:return: An array holding the adversarial examples.
:rtype: `np.ndarray`
"""
self.set_params(**kwargs)
clip_min, clip_max = self.classifier.clip_values
x_adv = x.copy()
preds = self._predict(x, logits=True)
# Determine the class labels for which to compute the gradients
use_grads_subset = self.nb_grads < self.classifier.nb_classes
if use_grads_subset:
# TODO compute set of unique labels per batch
grad_labels = np.argsort(-preds, axis=1)[:, : self.nb_grads]
labels_set = np.unique(grad_labels)
else:
labels_set = np.arange(self.classifier.nb_classes)
sorter = np.arange(len(labels_set))
# Pick a small scalar to avoid division by 0
tol = 10e-8
# Compute perturbation with implicit batching
for batch_id in range(int(np.ceil(x_adv.shape[0] / float(self.batch_size)))):
batch_index_1, batch_index_2 = (
batch_id * self.batch_size,
(batch_id + 1) * self.batch_size,
)
batch = x_adv[batch_index_1:batch_index_2]
# Get predictions and gradients for batch
f = preds[batch_index_1:batch_index_2]
fk_hat = np.argmax(f, axis=1)
if use_grads_subset:
# Compute gradients only for top predicted classes
grd = np.array(
[self._class_gradient(batch, logits=True, label=_) for _ in labels_set]
)
grd = np.squeeze(np.swapaxes(grd, 0, 2), axis=0)
else:
# Compute gradients for all classes
grd = self._class_gradient(batch, logits=True)
# Get current predictions
active_indices = np.arange(len(batch))
current_step = 0
while len(active_indices) != 0 and current_step < self.max_iter:
# Compute difference in predictions and gradients only for selected top predictions
labels_indices = sorter[np.searchsorted(labels_set, fk_hat, sorter=sorter)]
grad_diff = grd - grd[np.arange(len(grd)), labels_indices][:, None]
f_diff = f[:, labels_set] - f[np.arange(len(f)), labels_indices][:, None]
# Choose coordinate and compute perturbation
norm = (
np.linalg.norm(
grad_diff.reshape(len(grad_diff), len(labels_set), -1), axis=2
)
+ tol
)
value = np.abs(f_diff) / norm
value[np.arange(len(value)), labels_indices] = np.inf
l = np.argmin(value, axis=1)
r = (
abs(f_diff[np.arange(len(f_diff)), l])
/ (
pow(
np.linalg.norm(
grad_diff[np.arange(len(grad_diff)), l].reshape(
len(grad_diff), -1
),
axis=1,
),
2,
)
+ tol
)
)[:, None, None, None] * grad_diff[np.arange(len(grad_diff)), l]
# Add perturbation and clip result
batch[active_indices] = np.clip(
batch[active_indices] + r[active_indices], clip_min, clip_max
)
# Recompute prediction for new x
f = self._predict(batch, logits=True)
fk_i_hat = np.argmax(f, axis=1)
# Recompute gradients for new x
if use_grads_subset:
# Compute gradients only for (originally) top predicted classes
grd = np.array(
[
self._class_gradient(batch, logits=True, label=_)
for _ in labels_set
]
)
grd = np.squeeze(np.swapaxes(grd, 0, 2), axis=0)
else:
# Compute gradients for all classes
grd = self._class_gradient(batch, logits=True)
# Stop if misclassification has been achieved
active_indices = np.where(fk_i_hat != fk_hat)[0]
current_step += 1
current_step += 1
# Apply overshoot parameter
x_adv[batch_index_1:batch_index_2] = np.clip(
x_adv[batch_index_1:batch_index_2]
+ (1 + self.epsilon) * (batch - x_adv[batch_index_1:batch_index_2]),
clip_min,
clip_max,
)
preds = np.argmax(preds, axis=1)
preds_adv = np.argmax(self._predict(x_adv), axis=1)
logger.info(
"Success rate of DeepFool attack: %.2f%%",
(np.sum(preds != preds_adv) / x.shape[0]),
)
return x_adv
|
def generate(self, x, **kwargs):
"""
Generate adversarial samples and return them in an array.
:param x: An array with the original inputs to be attacked.
:type x: `np.ndarray`
:param max_iter: The maximum number of iterations.
:type max_iter: `int`
:param epsilon: Overshoot parameter.
:type epsilon: `float`
:return: An array holding the adversarial examples.
:rtype: `np.ndarray`
"""
assert self.set_params(**kwargs)
clip_min, clip_max = self.classifier.clip_values
x_adv = x.copy()
preds = self.classifier.predict(x, logits=True)
# Pick a small scalar to avoid division by 0
tol = 10e-8
for j, val in enumerate(x_adv):
xj = val[None, ...]
f = preds[j]
grd = self.classifier.class_gradient(xj, logits=True)[0]
fk_hat = np.argmax(f)
for _ in range(self.max_iter):
grad_diff = grd - grd[fk_hat]
f_diff = f - f[fk_hat]
# Choose coordinate and compute perturbation
norm = (
np.linalg.norm(
grad_diff.reshape(self.classifier.nb_classes, -1), axis=1
)
+ tol
)
value = np.abs(f_diff) / norm
value[fk_hat] = np.inf
l = np.argmin(value)
r = (
abs(f_diff[l]) / (pow(np.linalg.norm(grad_diff[l]), 2) + tol)
) * grad_diff[l]
# Add perturbation and clip result
xj = np.clip(xj + r, clip_min, clip_max)
# Recompute prediction for new xj
f = self.classifier.predict(xj, logits=True)[0]
grd = self.classifier.class_gradient(xj, logits=True)[0]
fk_i_hat = np.argmax(f)
# Stop if misclassification has been achieved
if fk_i_hat != fk_hat:
break
# Apply overshoot parameter
x_adv[j] = np.clip(
x[j] + (1 + self.epsilon) * (xj[0] - x[j]), clip_min, clip_max
)
preds = np.argmax(preds, axis=1)
preds_adv = np.argmax(self.classifier.predict(x_adv), axis=1)
logger.info(
"Success rate of DeepFool attack: %.2f%%",
(np.sum(preds != preds_adv) / x.shape[0]),
)
return x_adv
|
https://github.com/Trusted-AI/adversarial-robustness-toolbox/issues/29
|
Traceback (most recent call last):
File "cw_pytorch.py", line 172, in <module>
x_test_adv = cl2m.generate(inputs, **params)
File "/home/weitian/anaconda3/envs/xnor/lib/python3.6/site-packages/art/attacks/carlini.py", line 380, in generate
x_adv_batch_tanh[active_and_update_adv] = x_adv_batch_tanh[update_adv] + \
IndexError: boolean index did not match indexed array along dimension 0; dimension is 18 but corresponding boolean dimension is 17
|
IndexError
|
def set_params(self, **kwargs):
"""
Take in a dictionary of parameters and applies attack-specific checks before saving them as attributes.
:param max_iter: The maximum number of iterations.
:type max_iter: `int`
:param epsilon: Overshoot parameter.
:type epsilon: `float`
:param nb_grads: The number of class gradients (top nb_grads w.r.t. prediction) to compute. This way only the
most likely classes are considered, speeding up the computation.
:type nb_grads: `int`
:param batch_size: Internal size of batches on which adversarial samples are generated.
:type batch_size: `int`
:param expectation: An expectation over transformations to be applied when computing
classifier gradients and predictions.
:type expectation: :class:`ExpectationOverTransformations`
"""
# Save attack-specific parameters
super(DeepFool, self).set_params(**kwargs)
if not isinstance(self.max_iter, (int, np.int)) or self.max_iter <= 0:
raise ValueError("The number of iterations must be a positive integer.")
if not isinstance(self.nb_grads, (int, np.int)) or self.nb_grads <= 0:
raise ValueError(
"The number of class gradients to compute must be a positive integer."
)
if self.epsilon < 0:
raise ValueError("The overshoot parameter must not be negative.")
if self.batch_size <= 0:
raise ValueError("The batch size `batch_size` has to be positive.")
return True
|
def set_params(self, **kwargs):
"""Take in a dictionary of parameters and applies attack-specific checks before saving them as attributes.
:param max_iter: The maximum number of iterations.
:type max_iter: `int`
"""
# Save attack-specific parameters
super(DeepFool, self).set_params(**kwargs)
if not isinstance(self.max_iter, (int, np.int)) or self.max_iter <= 0:
raise ValueError("The number of iterations must be a positive integer.")
if self.epsilon < 0:
raise ValueError("The overshoot parameter must not be negative.")
return True
|
https://github.com/Trusted-AI/adversarial-robustness-toolbox/issues/29
|
Traceback (most recent call last):
File "cw_pytorch.py", line 172, in <module>
x_test_adv = cl2m.generate(inputs, **params)
File "/home/weitian/anaconda3/envs/xnor/lib/python3.6/site-packages/art/attacks/carlini.py", line 380, in generate
x_adv_batch_tanh[active_and_update_adv] = x_adv_batch_tanh[update_adv] + \
IndexError: boolean index did not match indexed array along dimension 0; dimension is 18 but corresponding boolean dimension is 17
|
IndexError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.