repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
rs2/pandas | pandas/tests/extension/test_integer.py | 2 | 7327 | """
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import numpy as np
import pytest
from pandas.core.dtypes.common import is_extension_array_dtype
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import integer_array
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from pandas.tests.extension import base
def make_data():
    """Return 100 sample integers with two missing values (pd.NA) mixed in."""
    values = list(range(1, 9))
    values.append(pd.NA)
    values.extend(range(10, 98))
    values.append(pd.NA)
    values.extend([99, 100])
    return values
@pytest.fixture(
    params=[
        Int8Dtype,
        Int16Dtype,
        Int32Dtype,
        Int64Dtype,
        UInt8Dtype,
        UInt16Dtype,
        UInt32Dtype,
        UInt64Dtype,
    ]
)
def dtype(request):
    """Instantiate each of the eight masked-integer dtypes in turn."""
    return request.param()
@pytest.fixture
def data(dtype):
    """Length-100 array containing a couple of missing values."""
    return integer_array(make_data(), dtype=dtype)


@pytest.fixture
def data_for_twos(dtype):
    """Array of all twos, used by the arithmetic base tests."""
    return integer_array(np.ones(100) * 2, dtype=dtype)


@pytest.fixture
def data_missing(dtype):
    """Length-2 array: [missing, valid]."""
    return integer_array([pd.NA, 1], dtype=dtype)


@pytest.fixture
def data_for_sorting(dtype):
    """Three values B, C, A with A < B < C."""
    return integer_array([1, 2, 0], dtype=dtype)


@pytest.fixture
def data_missing_for_sorting(dtype):
    """Three values B, NA, A with A < B."""
    return integer_array([1, pd.NA, 0], dtype=dtype)


@pytest.fixture
def na_cmp():
    # the missing-value marker for these arrays is pd.NA
    return lambda x, y: x is pd.NA and y is pd.NA


@pytest.fixture
def na_value():
    """Scalar missing-value marker for masked-integer dtypes."""
    return pd.NA


@pytest.fixture
def data_for_grouping(dtype):
    """Layout expected by the base grouping tests: [B, B, NA, NA, A, A, B, C]."""
    na = pd.NA
    a, b, c = 0, 1, 2
    return integer_array([b, b, na, na, a, a, b, c], dtype=dtype)
class TestDtype(base.BaseDtypeTests):
    @pytest.mark.skip(reason="using multiple dtypes")
    def test_is_dtype_unboxes_dtype(self):
        # This suite is parametrized over multiple dtypes, so the base
        # single-dtype check does not apply.
        pass
class TestArithmeticOps(base.BaseArithmeticOpsTests):
    """Arithmetic-op compliance tests; overrides signal that ops never raise."""

    def check_opname(self, s, op_name, other, exc=None):
        # overwriting to indicate ops don't raise an error
        super().check_opname(s, op_name, other, exc=None)

    def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
        if exc is not None:
            with pytest.raises(exc):
                op(s, other)
            return

        if s.dtype.is_unsigned_integer and (op_name == "__rsub__"):
            # TODO see https://github.com/pandas-dev/pandas/issues/22023
            pytest.skip("unsigned subtraction gives negative values")

        if (
            hasattr(other, "dtype")
            and not is_extension_array_dtype(other.dtype)
            and pd.api.types.is_integer_dtype(other.dtype)
        ):
            # other is np.int64 and would therefore always result in
            # upcasting, so keeping other as same numpy_dtype
            other = other.astype(s.dtype.numpy_dtype)

        result = op(s, other)
        expected = s.combine(other, op)

        if op_name in ("__rtruediv__", "__truediv__", "__div__"):
            expected = expected.fillna(np.nan).astype(float)
            if op_name == "__rtruediv__":
                # TODO reverse operators result in object dtype
                result = result.astype(float)
        elif op_name.startswith("__r"):
            # TODO reverse operators result in object dtype
            # see https://github.com/pandas-dev/pandas/issues/22024
            expected = expected.astype(s.dtype)
            result = result.astype(s.dtype)
        else:
            # combine method result in 'biggest' (int64) dtype
            expected = expected.astype(s.dtype)

        if (op_name == "__rpow__") and isinstance(other, pd.Series):
            # TODO pow on Int arrays gives different result with NA
            # see https://github.com/pandas-dev/pandas/issues/22022
            result = result.fillna(1)

        self.assert_series_equal(result, expected)

    def _check_divmod_op(self, s, op, other, exc=None):
        super()._check_divmod_op(s, op, other, None)

    @pytest.mark.skip(reason="intNA does not error on ops")
    def test_error(self, data, all_arithmetic_operators):
        # other specific errors tested in the integer array specific tests
        pass
class TestComparisonOps(base.BaseComparisonOpsTests):
    """Comparison-op compliance tests; results are cast to nullable boolean."""

    def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
        if exc is None:
            result = op(s, other)
            # Override to do the astype to boolean
            expected = s.combine(other, op).astype("boolean")
            self.assert_series_equal(result, expected)
        else:
            with pytest.raises(exc):
                op(s, other)

    def check_opname(self, s, op_name, other, exc=None):
        # comparisons on masked integers never raise
        super().check_opname(s, op_name, other, exc=None)

    def _compare_other(self, s, data, op_name, other):
        self.check_opname(s, op_name, other)
class TestInterface(base.BaseInterfaceTests):
    pass


class TestConstructors(base.BaseConstructorsTests):
    pass


class TestReshaping(base.BaseReshapingTests):
    pass
    # for test_concat_mixed_dtypes test
    # concat of an Integer and Int coerces to object dtype
    # TODO(jreback) once integrated this would


class TestGetitem(base.BaseGetitemTests):
    pass


class TestSetitem(base.BaseSetitemTests):
    pass


class TestMissing(base.BaseMissingTests):
    pass
class TestMethods(base.BaseMethodsTests):
    @pytest.mark.skip(reason="uses nullable integer")
    def test_value_counts(self, all_data, dropna):
        """Skipped override: value_counts on nullable integers."""
        all_data = all_data[:10]
        other = np.array(all_data[~all_data.isna()]) if dropna else all_data
        result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()
        expected = pd.Series(other).value_counts(dropna=dropna).sort_index()
        expected.index = expected.index.astype(all_data.dtype)
        self.assert_series_equal(result, expected)
class TestCasting(base.BaseCastingTests):
    pass


class TestGroupby(base.BaseGroupbyTests):
    pass


class TestNumericReduce(base.BaseNumericReduceTests):
    def check_reduce(self, s, op_name, skipna):
        # overwrite to ensure pd.NA is tested instead of np.nan
        # https://github.com/pandas-dev/pandas/issues/30958
        result = getattr(s, op_name)(skipna=skipna)
        if not skipna and s.isna().any():
            expected = pd.NA
        else:
            expected = getattr(s.dropna().astype("int64"), op_name)(skipna=skipna)
        tm.assert_almost_equal(result, expected)


class TestBooleanReduce(base.BaseBooleanReduceTests):
    pass


class TestPrinting(base.BasePrintingTests):
    pass


class TestParsing(base.BaseParsingTests):
    pass
| bsd-3-clause |
scienceopen/transcarread | plasma_state.py | 1 | 1292 | #!/usr/bin/env python
"""
Reads output of Transcar sim, yielding Incoherent Scatter Radar plasma parameters.
python transcar2isr.py tests/data/beam52
"""
from pathlib import Path
from matplotlib.pyplot import show
from argparse import ArgumentParser
from datetime import datetime
#
import transcarread.plots as plots
import transcarread as tr
def compute(path: Path, tReq: datetime, plot_params: list, verbose: bool):
    """Load a Transcar simulation and plot the derived ISR parameters.

    Returns the (iono, tctime) pair: the loaded output and the sim timing.
    """
    sim_dir = Path(path).expanduser().resolve()
    # %% get sim parameters
    tctime = tr.readTranscarInput(sim_dir / "dir.input/DATCAR")
    # %% load transcar output
    iono = tr.read_tra(sim_dir, tReq)
    # %% do plot
    plots.plot_isr(iono, sim_dir, tctime, plot_params, verbose)
    return iono, tctime
def main():
    """Command-line entry point: parse arguments, run compute, show plots."""
    p = ArgumentParser(description="reads dir.output/transcar_output")
    p.add_argument("path", help="path containing dir.output/transcar_output file")
    p.add_argument("--tReq", help="time to extract data at")
    p.add_argument("-v", "--verbose", help="more plots", action="store_true")
    p.add_argument("-p", "--params", help="only plot these params",
                   choices=["ne", "vi", "Ti", "Te"], nargs="+")
    args = p.parse_args()

    compute(args.path, args.tReq, args.params, args.verbose)
    show()


if __name__ == "__main__":
    main()
| gpl-3.0 |
dingocuster/scikit-learn | sklearn/tests/test_learning_curve.py | 225 | 10791 | # Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
    """Dummy classifier to test the learning curve."""

    def __init__(self, n_max_train_sizes):
        self.n_max_train_sizes = n_max_train_sizes
        self.train_sizes = 0
        self.X_subset = None

    def fit(self, X_subset, y_subset=None):
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self

    def predict(self, X):
        raise NotImplementedError

    def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test error better (0 -> 1)
        ratio = float(self.train_sizes) / self.n_max_train_sizes
        return 2. - ratio if self._is_training_data(X) else ratio

    def _is_training_data(self, X):
        return X is self.X_subset


class MockIncrementalImprovingEstimator(MockImprovingEstimator):
    """Dummy classifier that provides partial_fit."""

    def __init__(self, n_max_train_sizes):
        super(MockIncrementalImprovingEstimator,
              self).__init__(n_max_train_sizes)
        self.x = None

    def _is_training_data(self, X):
        return self.x in X

    def partial_fit(self, X, y=None, **params):
        self.train_sizes += X.shape[0]
        self.x = X[0]


class MockEstimatorWithParameter(BaseEstimator):
    """Dummy classifier to test the validation curve."""

    def __init__(self, param=0.5):
        self.X_subset = None
        self.param = param

    def fit(self, X_subset, y_subset):
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self

    def predict(self, X):
        raise NotImplementedError

    def score(self, X=None, y=None):
        return self.param if self._is_training_data(X) else 1 - self.param

    def _is_training_data(self, X):
        return X is self.X_subset
def test_learning_curve():
    """Basic supervised learning curve: shapes, sizes and mean scores."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    with warnings.catch_warnings(record=True) as w:
        train_sizes, train_scores, test_scores = learning_curve(
            estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
    if len(w) > 0:
        raise RuntimeError("Unexpected warning: %r" % w[0].message)
    assert_equal(train_scores.shape, (10, 3))
    assert_equal(test_scores.shape, (10, 3))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))


def test_learning_curve_unsupervised():
    """Learning curve with y=None works the same way."""
    X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
    """verbose=1 should emit a [learning_curve] progress message."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)

    # capture stdout while the curve is computed
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        train_sizes, train_scores, test_scores = \
            learning_curve(estimator, X, y, cv=3, verbose=1)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout

    assert("[learning_curve]" in out)


def test_learning_curve_incremental_learning_not_possible():
    """exploit_incremental_learning requires partial_fit on the estimator."""
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    # The mockup does not have partial_fit()
    estimator = MockImprovingEstimator(1)
    assert_raises(ValueError, learning_curve, estimator, X, y,
                  exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
    """Incremental (partial_fit based) learning curve, supervised."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockIncrementalImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=3, exploit_incremental_learning=True,
        train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))


def test_learning_curve_incremental_learning_unsupervised():
    """Incremental learning curve also works with y=None."""
    X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockIncrementalImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y=None, cv=3, exploit_incremental_learning=True,
        train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
    """Batch and incremental modes must agree for a suitable estimator."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    train_sizes = np.linspace(0.2, 1.0, 5)
    estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)

    train_sizes_inc, train_scores_inc, test_scores_inc = \
        learning_curve(
            estimator, X, y, train_sizes=train_sizes,
            cv=3, exploit_incremental_learning=True)
    train_sizes_batch, train_scores_batch, test_scores_batch = \
        learning_curve(
            estimator, X, y, cv=3, train_sizes=train_sizes,
            exploit_incremental_learning=False)

    assert_array_equal(train_sizes_inc, train_sizes_batch)
    assert_array_almost_equal(train_scores_inc.mean(axis=1),
                              train_scores_batch.mean(axis=1))
    assert_array_almost_equal(test_scores_inc.mean(axis=1),
                              test_scores_batch.mean(axis=1))


def test_learning_curve_n_sample_range_out_of_bounds():
    """train_sizes outside the valid absolute/relative range raise."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    for bad_sizes in ([0, 1], [0.0, 1.0], [0.1, 1.1], [0, 20], [1, 21]):
        assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                      train_sizes=bad_sizes)
def test_learning_curve_remove_duplicate_sample_sizes():
    """Duplicate integer train sizes are dropped, with a RuntimeWarning."""
    X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(2)
    train_sizes, _, _ = assert_warns(
        RuntimeWarning, learning_curve, estimator, X, y, cv=3,
        train_sizes=np.linspace(0.33, 1.0, 3))
    assert_array_equal(train_sizes, [1, 2])


def test_learning_curve_with_boolean_indices():
    """An explicit CV object (boolean-mask folds) behaves like cv=3."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    cv = KFold(n=30, n_folds=3)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_validation_curve():
    """Validation curve over MockEstimatorWithParameter's 'param'."""
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    param_range = np.linspace(0, 1, 10)
    with warnings.catch_warnings(record=True) as w:
        train_scores, test_scores = validation_curve(
            MockEstimatorWithParameter(), X, y, param_name="param",
            param_range=param_range, cv=2
        )
    if len(w) > 0:
        raise RuntimeError("Unexpected warning: %r" % w[0].message)

    # train score equals param; test score equals 1 - param
    assert_array_almost_equal(train_scores.mean(axis=1), param_range)
    assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
aolindahl/polarization-monitor | offline_handler.py | 1 | 9272 | # -*- coding: utf-8 -*-
"""
Created on Wed May 27 12:59:34 2015
@author: antlin
"""
import h5py
import numpy as np
import sys
import matplotlib.pyplot as plt
from aolPyModules import cookie_box
import lmfit
# Per-detector time-of-flight ROI windows (in the same units as the time
# scales, see DataSet.time_scales) for the photoelectron line, one
# [low, high] pair for each of the 16 detectors. Detectors 2 and 3 use a
# shifted window.
photo_roi = [[236.5, 250],
[236.5, 250],
[242.0, 260],
[242.0, 260],
[236.5, 250],
[236.5, 250],
[236.5, 250],
[236.5, 250],
[236.5, 250],
[236.5, 250],
[236.5, 250],
[236.5, 250],
[236.5, 250],
[236.5, 250],
[236.5, 250],
[236.5, 250]]
# Single Auger-line ROI shared by all detectors (broadcast to 16 in the
# DataSet.auger_slices setter).
auger_roi = [226, 234]
def list_hdf5_content(group, indent=' '):
for k, v in group.iteritems():
print '{}"{}"'.format(indent, k),
if isinstance(v, h5py.Group):
print 'group with members:'
list_hdf5_content(v, indent=indent + ' ')
elif isinstance(v, h5py.Dataset):
print '\t{} {}'.format(v.shape, v.dtype)
def get_average(obj, name=None, selection=slice(None)):
if name is None:
dset = obj
else:
try:
dset = obj[name]
except KeyError as e:
print e.message
sys.exit(1)
return dset[selection].mean(axis=0)
def plot_traces(fig_num, dataset):
    """Plot the averaged time trace of all 16 detectors in a 4x4 grid,
    highlighting the photoline (red) and Auger (green) ROIs."""
    fig = plt.figure(fig_num)
    fig.clf()
    ax = None
    for i in range(16):
        # share axes across all subplots
        ax = fig.add_subplot(4, 4, i + 1, sharex=ax, sharey=ax)
        t_axis = dataset.time_scales[i]
        ax.plot(t_axis, dataset.time_amplitudes_averaged[i, :],
                label='{} deg'.format(i * 22.5))
        photo_sl = dataset.photo_slices[i]
        auger_sl = dataset.auger_slices[i]
        ax.plot(t_axis[photo_sl],
                dataset.time_amplitudes_averaged[i, photo_sl],
                'r', label='photo')
        ax.plot(t_axis[auger_sl],
                dataset.time_amplitudes_averaged[i, auger_sl],
                'g', label='auger')
        ax.legend(loc='best', fontsize='small')
        ax.grid(True)
    # axes are shared, so bounding one x-axis bounds them all
    ax.set_xbound(upper=280)
    plt.tight_layout()
    return fig
def polar_plot(dataset, fig_name=None, polar=True, ax=None,
               reset_scaling=False, fit_mask=None):
    """Polar plot of photo and Auger line amplitudes vs detector angle,
    with a polarization model fit to the photoline.

    dataset       -- a DataSet instance
    fig_name      -- figure name used when no axes are given
    polar         -- use a polar projection for the new axes
    ax            -- existing axes to draw into (a new figure otherwise)
    reset_scaling -- temporarily replace the detector factors with a
                     calibration derived from the (isotropic) Auger line
    fit_mask      -- boolean mask (length 16) of detectors used in the fit;
                     defaults to all detectors
    Returns the figure drawn into.
    """
    # BUG FIX: the original used a mutable module-level np.ones(16) array as
    # the default argument, shared between calls; use a None sentinel instead.
    if fit_mask is None:
        fit_mask = np.ones(16, dtype=bool)
    if ax is None:
        fig = plt.figure(fig_name)
        fig.clf()
        ax = fig.add_subplot(111, polar=polar)
    else:
        fig = ax.figure

    phi = np.linspace(0, 2 * np.pi, 16, endpoint=False)
    phi_line = np.linspace(0, 2 * np.pi, 2**10)

    if reset_scaling:
        # remember the factors so they can be restored at the end
        det_factors = dataset.det_factors.copy()
        dataset.det_factors = np.ones_like(det_factors)
    auger = dataset.auger_amplitudes.mean(axis=0)
    photo = dataset.photo_amplitudes.mean(axis=0)
    if reset_scaling:
        # the Auger line should be isotropic: use it to calibrate detectors
        det_calib = auger.max() / auger
        ax.plot(phi, auger, 'gx')
        auger *= det_calib
        ax.plot(phi, photo, 'rx')
        photo *= det_calib
    ax.plot(phi, auger, 'gs', label='auger')
    ax.plot(phi, photo, 'ro', label='photo')

    # fit the polarization model to the photoline (beta fixed at 2)
    params = cookie_box.initial_params(photo)
    params['beta'].vary = False
    params['beta'].value = 2
    lmfit.minimize(cookie_box.model_function, params,
                   args=(phi[fit_mask], photo[fit_mask]))
    lmfit.report_fit(params)
    ax.plot(phi_line, cookie_box.model_function(params, phi_line),
            '-m', label='{:.1f} % lin {:.1f} deg'.format(
                params['linear'].value*100,
                np.rad2deg(params['tilt'].value)))
    ax.grid(True)
    ax.legend(loc='center', bbox_to_anchor=(0, 0), fontsize='medium')
    plt.tight_layout()

    if reset_scaling:
        dataset.det_factors = det_factors
    return fig
def get_bg_index_list(sl, nr_points):
    """Return the background sample indices flanking slice *sl*:
    *nr_points* indices immediately before sl.start and *nr_points*
    immediately after sl.stop."""
    # Use explicit list() so this works on both Python 2 (where range()
    # already returned a list) and Python 3 (where range + range raises).
    return (list(range(sl.start - nr_points, sl.start)) +
            list(range(sl.stop, sl.stop + nr_points)))
class DataSet(object):
"""Class to handle the a dataset contnained in an hdf5 file."""
@property
def h5_file(self):
return self._h5_file
@h5_file.setter
def h5_file(self, x):
print 'WARNING: The "h5_file" property can only be set at creation.'
def __init__(self, file_name, name=None):
"""Initialize the instance based on a file name."""
self._h5_name = file_name
self._h5_file = h5py.File(file_name, 'r')
self._name = file_name if name is None else name
self._det_factors = np.ones(16, dtype=float)
self._time_scales = np.array(())
self._time_amplitudes_averaged = np.array(())
self._time_amplitudes_averaged_selection = slice(None)
self._photo_amplitudes = np.array(())
self._auger_amplitudes = np.array(())
self.event_selection = slice(None)
def __del__(self):
self._h5_file.close()
@property
def name(self):
return self._name
def print_content(self, indent=''):
list_hdf5_content(self.h5_file, indent=indent)
def get_average(self, path, selection=slice(None)):
return get_average(self.h5_file, path, selection=selection)
@property
def time_amplitudes_averaged(self):
if (len(self._time_amplitudes_averaged) == 0 or
self._time_amplitudes_averaged_selection != self.event_selection):
group = self.h5_file['time_amplitudes']
n_detectors = len(group.keys())
self._time_amplitudes_averaged = np.array([
get_average(group, 'det_{}'.format(i),
selection=self.event_selection) for
i in range(n_detectors)])
self._time_amplitudes_averaged_selection = self.event_selection
return self._time_amplitudes_averaged
@property
def time_scales(self):
if len(self._time_scales) == 0:
group = self.h5_file['time_scales']
n_detectors = len(group.keys())
self._time_scales = np.array(
[group['det_{}'.format(i)].value for i in range(n_detectors)]
) * 1e3
return self._time_scales
@property
def photo_slices(self):
try:
return self._photo_slices
except:
print 'ERROR: ROI for photoline not set yet.'
sys.exit(1)
@photo_slices.setter
def photo_slices(self, x):
try:
iter(x[0])
except:
x = [x]*16
self._photo_slices = []
self._photo_bg_index_list = []
self._photo_bg_factors = []
for limits, t_axis in zip(x, self.time_scales):
sl = slice(t_axis.searchsorted(limits[0]),
t_axis.searchsorted(limits[1], side='right'))
bg_I = get_bg_index_list(sl, 5)
self._photo_slices.append(sl)
self._photo_bg_index_list.append(bg_I)
self._photo_bg_factors.append(float(sl.stop-sl.start) / len(bg_I))
self._photo_bg_factors = np.array(self._photo_bg_factors)
@property
def auger_slices(self):
try:
return self._auger_slices
except:
print 'ERROR: ROI for auger line not set yet.'
sys.exit(1)
@auger_slices.setter
def auger_slices(self, x):
try:
iter(x[0])
except:
x = [x]*16
self._auger_slices = []
for limits, t_axis in zip(x, self.time_scales):
self._auger_slices.append(
slice(t_axis.searchsorted(limits[0]),
t_axis.searchsorted(limits[1], side='right')))
def get_time_amplitudes(self, selections=[slice(None)]*16):
names = map(lambda x: 'time_amplitudes/det_{}'.format(x), range(16))
return [self.h5_file[name][:, selections[i]] for i, name in
enumerate(names)]
@property
def photo_amplitudes(self):
if len(self._photo_amplitudes) == 0:
raw = np.array([det.sum(1) for det in
self.get_time_amplitudes(self._photo_slices)]).T
bg = np.array(
[det.sum(1) for det in
self.get_time_amplitudes(self._photo_bg_index_list)]).T
# print 'raw:', raw.shape
# print 'gb:', bg.shape
# print 'factors:', self._photo_bg_factors.shape
self._photo_amplitudes = ((raw - bg * self._photo_bg_factors) *
self._det_factors)
return self._photo_amplitudes
@property
def auger_amplitudes(self):
if len(self._auger_amplitudes) == 0:
self._auger_amplitudes = (np.array(
[det.sum(1) for det in
self.get_time_amplitudes(self._auger_slices)]).T *
self._det_factors)
return self._auger_amplitudes
@property
def fee(self):
return self.h5_file['fee'].value
@property
def det_factors(self):
return self._det_factors
@det_factors.setter
def det_factors(self, new_val):
self._det_factors = new_val
if __name__ == '__main__':
    # quick-look analysis of one example run
    dataset = DataSet('data/amom0115_35_1.h5')
    dataset.photo_slices = photo_roi
    dataset.auger_slices = auger_roi
    # dataset.print_content()
    plot_traces('Average traces', dataset)
    polar_plot(dataset, 'Polar', reset_scaling=True)
| gpl-2.0 |
jenfly/python-practice | maps/maps.py | 1 | 2641 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from datetime import datetime
# Basemap projection demo script: draws several figures, one per projection.
# NOTE(review): no plt.show() call here -- presumably run in interactive
# mode; confirm.

# Globe with Orthographic projection
# ----------------------------------
# lon_0, lat_0 are the center point of the projection.
# resolution = 'l' means use low resolution coastlines.
lon_0, lat_0 = -105, 40
#lon_0, lat_0 = -105, 90
plt.figure()
m = Basemap(projection='ortho',lon_0=lon_0,lat_0=lat_0,resolution='l')
m.drawcoastlines()
m.fillcontinents(color='coral',lake_color='aqua')
# draw parallels and meridians.
m.drawparallels(np.arange(-90.,120.,30.))
m.drawmeridians(np.arange(0.,420.,60.))
m.drawmapboundary(fill_color='aqua')
plt.title("Full Disk Orthographic Projection")
# Hammer Projection
# -----------------
# lon_0 is central longitude of projection.
# resolution = 'c' means use crude resolution coastlines.
plt.figure()
m = Basemap(projection='hammer',lon_0=0,resolution='c')
m.drawcoastlines()
m.fillcontinents(color='coral',lake_color='aqua')
# draw parallels and meridians.
m.drawparallels(np.arange(-90.,120.,30.))
m.drawmeridians(np.arange(0.,420.,60.))
m.drawmapboundary(fill_color='aqua')
plt.title("Hammer Projection")
# Robinson Projection
# -------------------
# lon_0 is central longitude of projection.
# resolution = 'c' means use crude resolution coastlines.
plt.figure()
m = Basemap(projection='robin',lon_0=0,resolution='c')
m.drawcoastlines()
m.fillcontinents(color='coral',lake_color='aqua')
m.drawparallels(np.arange(-90.,120.,30.))
m.drawmeridians(np.arange(0.,360.,60.))
m.drawmapboundary(fill_color='aqua')
plt.title("Robinson Projection")
# Shaded Relief Map and Day/Night Shading
# --------------------------------------
lon_0, lat_0 = -60, 0
# date chooses the day/night terminator position
date = datetime(2014, 12, 22, 9, 55)
#date = datetime(2014, 7, 22, 9, 55)
scale = 0.2
plt.figure()
m = Basemap(projection='ortho', lat_0=lat_0, lon_0=lon_0)
m.shadedrelief(scale=scale)
m.nightshade(date)
plt.title('Shaded Relief with Day/Night')
# North and South Pole
# -----------------------------------------
lon_0 = -50
scale = 0.2
plt.figure(figsize=(7,10))
plt.subplot(211)
m = Basemap(projection='ortho', lat_0=90, lon_0=lon_0)
m.shadedrelief(scale=scale)
m.drawparallels(np.arange(-90.,120.,30.))
m.drawmeridians(np.arange(0.,360.,60.))
plt.title('North Pole View')
plt.subplot(212)
m = Basemap(projection='ortho', lat_0=-90, lon_0=lon_0)
m.shadedrelief(scale=scale)
m.drawparallels(np.arange(-90.,120.,30.))
m.drawmeridians(np.arange(0.,360.,60.))
plt.title('South Pole View')
# NASA's Blue Marble
# ------------------
plt.figure()
m = Basemap()
m.bluemarble()
# Etopo
# -----
plt.figure()
m = Basemap()
m.etopo()
| mit |
agoose77/hivesystem | manual/movingpanda/panda-13.py | 1 | 9853 | import dragonfly
import dragonfly.pandahive
import bee
from bee import connect
import dragonfly.scene.unbound, dragonfly.scene.bound
import dragonfly.std
import dragonfly.io
import dragonfly.canvas
import dragonfly.convert.pull
import dragonfly.logic
import dragonfly.bind
import Spyder
# ## random matrix generator
def random_matrix_generator():
    """Yield an endless stream of random placement matrices.

    Each matrix has a random Z rotation and a random origin inside a
    15x15 square centered on (0, 0).
    """
    while True:
        axes = Spyder.AxisSystem()
        axes.rotateZ(360 * random())
        axes.origin = Spyder.Coordinate(15 * random() - 7.5,
                                        15 * random() - 7.5,
                                        0)
        yield dragonfly.scene.matrix(axes, "AxisSystem")
def id_generator():
    """Yield unique entity names: spawnedpanda1, spawnedpanda2, ..."""
    counter = 0
    while True:
        counter += 1
        yield "spawnedpanda" + str(counter)
from dragonfly.canvas import box2d
from bee.mstr import mstr


# Empty attribute container, used as an ad-hoc namespace.
class parameters: pass
class myscene(dragonfly.pandahive.spyderframe):
    """Scene declaration: environment model, panda actor class, HUD icon,
    camera pivot and a ground marker."""

    # environment model, scaled down and shifted
    a = Spyder.AxisSystem()
    a *= 0.25
    a.origin += (-8, 42, 0)
    env = Spyder.Model3D("models/environment", "egg", a)

    # panda actor class with its walk animation
    a = Spyder.AxisSystem()
    a *= 0.005
    pandaclass = Spyder.ActorClass3D("models/panda-model", "egg",
                                     [("walk", "models/panda-walk4", "egg")],
                                     a, actorclassname="pandaclass")

    # clickable HUD icon
    box = Spyder.Box2D(50, 470, 96, 96)
    icon = Spyder.Icon("pandaicon.png", "pandaicon", box, transparency=True)

    # small white block acting as the camera pivot
    camcenter = Spyder.Entity3D(
        "camcenter",
        (
            Spyder.NewMaterial("white", color=(255, 255, 255)),
            Spyder.Block3D((1, 1, 1), material="white"),
        )
    )
    # blue circle marking a ground position
    marker = Spyder.Entity3D(
        "marker",
        (
            Spyder.NewMaterial("blue", color=(0, 0, 255)),
            Spyder.Circle(2, origin=(0, 0, 0.1), material="blue")
        )
    )
    del a, box
class pandawalkhive(bee.inithive):
    """Hive driving a panda on a rectangular walk loop.

    W starts/loops the walk animation and the movement interval; S stops
    both. One 18-second interval is split 8/1/8/1 into forward, turn,
    back, turn segments.
    """
    animation = dragonfly.scene.bound.animation()
    walk = dragonfly.std.variable("str")("walk")
    connect(walk, animation.animation_name)

    # keyboard control: W = walk, S = stop
    key_w = dragonfly.io.keyboardsensor_trigger("W")
    connect(key_w, animation.loop)
    key_s = dragonfly.io.keyboardsensor_trigger("S")
    connect(key_s, animation.stop)

    setPos = dragonfly.scene.bound.setPos()
    setHpr = dragonfly.scene.bound.setHpr()

    interval = dragonfly.time.interval_time(18)
    connect(key_w, interval.start)
    connect(key_s, interval.pause)
    sequence = dragonfly.time.sequence(4)(8, 1, 8, 1)
    connect(interval.value, sequence.inp)

    # segment 1: walk forward
    ip1 = dragonfly.time.interpolation("Coordinate")((0, 0, 0), (0, -10, 0))
    connect(sequence.outp1, ip1)
    connect(ip1, setPos)
    connect(key_w, ip1.start)
    connect(key_s, ip1.stop)

    # segment 2: turn around
    ip2 = dragonfly.time.interpolation("Coordinate")((0, 0, 0), (180, 0, 0))
    connect(sequence.outp2, ip2)
    connect(ip2, setHpr)
    connect(key_w, ip2.start)
    connect(key_s, ip2.stop)

    # segment 3: walk back
    ip3 = dragonfly.time.interpolation("Coordinate")((0, -10, 0), (0, 0, 0))
    connect(sequence.outp3, ip3)
    connect(ip3, setPos)
    connect(key_w, ip3.start)
    connect(key_s, ip3.stop)

    # segment 4: turn to the original heading, then restart the loop
    ip4 = dragonfly.time.interpolation("Coordinate")((180, 0, 0), (0, 0, 0))
    connect(sequence.outp4, ip4)
    connect(ip4, setHpr)
    connect(key_w, ip4.start)
    connect(key_s, ip4.stop)
    connect(ip4.reach_end, interval.start)
from bee.staticbind import staticbind_baseclass


class pandabind(dragonfly.event.bind,
                dragonfly.io.bind,
                dragonfly.sys.bind,
                dragonfly.scene.bind,
                dragonfly.time.bind,
                dragonfly.bind.bind):
    """Bind class for spawned pandas: entity-relative, indirect keyboard."""
    bind_entity = "relative"
    bind_keyboard = "indirect"
class camerabindhive(bee.inithive):
    """Hive rotating the camera pivot: two 180-degree half-turns chained
    into an endless 30-second revolution, started at startup."""
    interval = dragonfly.time.interval_time(30)
    sequence = dragonfly.time.sequence(2)(1, 1)
    connect(interval.value, sequence.inp)
    startsensor = dragonfly.sys.startsensor()

    # two half-revolutions of the heading
    ip1 = dragonfly.time.interpolation("Coordinate")((180, -20, 0),
                                                     (360, -20, 0))
    ip2 = dragonfly.time.interpolation("Coordinate")((0, -20, 0),
                                                     (180, -20, 0))
    connect(sequence.outp1, ip1.inp)
    connect(sequence.outp2, ip2.inp)
    connect(startsensor, interval.start)
    connect(startsensor, ip1.start)

    # chain the two halves into an endless loop
    connect(ip1.reach_end, ip1.stop)
    connect(ip1.reach_end, ip2.start)
    connect(ip2.reach_end, ip2.stop)
    connect(ip2.reach_end, ip1.start)
    connect(ip2.reach_end, interval.start)

    sethpr = dragonfly.scene.bound.setHpr()
    connect(ip1, sethpr)
    connect(ip2, sethpr)


class camerabind(staticbind_baseclass,
                 dragonfly.event.bind,
                 dragonfly.io.bind,
                 dragonfly.sys.bind,
                 dragonfly.scene.bind,
                 dragonfly.time.bind):
    """Static bind wrapper running camerabindhive on the camera pivot."""
    hive = camerabindhive
class myhive(dragonfly.pandahive.pandahive):
    # Name of the Panda3D model/actor class to spawn; exposed as a hive
    # parameter through the bee.attribute below so instantiators can override it.
    pandaclassname = "pandaclass"
    pandaclassname_ = bee.attribute("pandaclassname")

    canvas = dragonfly.pandahive.pandacanvas()
    mousearea = dragonfly.canvas.mousearea()
    # Re-raise exceptions from the event system instead of swallowing them.
    raiser = bee.raiser()
    connect("evexc", raiser)

    # --- camera: bind the orbiting-camera hive to the "camcenter" entity ---
    camerabind = camerabind().worker()
    camcenter = dragonfly.std.variable("id")("camcenter")
    connect(camcenter, camerabind.bindname)
    startsensor = dragonfly.sys.startsensor()
    cam = dragonfly.scene.get_camera()
    camparent = dragonfly.scene.unbound.parent()
    connect(cam, camparent.entityname)
    connect(camcenter, camparent.entityparentname)
    connect(startsensor, camparent)
    # Hide the camcenter helper entity itself at startup.
    cphide = dragonfly.scene.unbound.hide()
    connect(camcenter, cphide)
    connect(startsensor, cphide)

    # --- selection marker: shown over the currently selected panda ---
    v_marker = dragonfly.std.variable("id")("marker")
    hide_marker = dragonfly.scene.unbound.hide()
    connect(v_marker, hide_marker)
    show_marker = dragonfly.scene.unbound.show()
    connect(v_marker, show_marker)
    parent_marker = dragonfly.scene.unbound.parent()
    connect(v_marker, parent_marker.entityname)
    connect(startsensor, hide_marker)

    # --- spawning: generate a fresh id and a random placement matrix ---
    pandaspawn = dragonfly.scene.spawn_actor()
    v_panda = dragonfly.std.variable("id")(pandaclassname_)
    connect(v_panda, pandaspawn)
    panda_id_gen = dragonfly.std.generator("id", id_generator)()
    panda_id = dragonfly.std.variable("id")("")
    t_panda_id_gen = dragonfly.std.transistor("id")()
    connect(panda_id_gen, t_panda_id_gen)
    connect(t_panda_id_gen, panda_id)
    random_matrix = dragonfly.std.generator(("object", "matrix"), random_matrix_generator)()
    # Weave (id, matrix) pairs for the spawner.
    w_spawn = dragonfly.std.weaver(("id", ("object", "matrix")))()
    connect(panda_id, w_spawn.inp1)
    connect(random_matrix, w_spawn.inp2)

    # --- binding: each spawned panda gets a "pandawalk" behavior hive ---
    hivereg = dragonfly.bind.hiveregister()
    c_hivereg = bee.configure("hivereg")
    c_hivereg.register_hive("pandawalk", pandawalkhive)
    pandabinder = pandabind().worker()
    v_hivename = dragonfly.std.variable("id")("pandawalk")
    w_bind = dragonfly.std.weaver(("id", "id"))()
    connect(panda_id, w_bind.inp1)
    connect(v_hivename, w_bind.inp2)
    t_bind = dragonfly.std.transistor("id")()
    connect(panda_id, t_bind)
    t_bind2 = dragonfly.std.transistor(("id", "id"))()
    connect(w_bind, t_bind2)
    connect(t_bind2, pandabinder.bind)

    # --- selection: TAB/BACKSPACE cycle through registered pandas ---
    sel = dragonfly.logic.selector()
    connect(t_bind, sel.register_and_select)
    selected = dragonfly.std.variable("id")("")
    connect(t_bind, selected)
    # Only propagate a selection when the selector is non-empty.
    t_get_selected = dragonfly.logic.filter("trigger")()
    connect(sel.empty, t_get_selected)
    tt_get_selected = dragonfly.std.transistor("id")()
    do_select = dragonfly.std.pushconnector("trigger")()
    connect(t_get_selected.false, do_select)
    connect(do_select, tt_get_selected)
    connect(sel.selected, tt_get_selected)
    connect(tt_get_selected, selected)
    disp_sel = dragonfly.io.display("id")("Selected: ")
    connect(tt_get_selected, disp_sel)
    # Re-parent and show the marker over the newly selected panda.
    connect(selected, parent_marker.entityparentname)
    connect(do_select, show_marker)
    connect(do_select, parent_marker)
    key_tab = dragonfly.io.keyboardsensor_trigger("TAB")
    connect(key_tab, sel.select_next)
    connect(key_tab, t_get_selected)
    key_bsp = dragonfly.io.keyboardsensor_trigger("BACKSPACE")
    connect(key_bsp, sel.select_prev)
    connect(key_bsp, t_get_selected)

    # --- killing: K removes the selected panda and its behavior hive ---
    kill = dragonfly.std.pushconnector("trigger")()
    t_kill = dragonfly.std.transistor("id")()
    connect(selected, t_kill)
    connect(t_kill, pandabinder.stop)
    remove = dragonfly.scene.unbound.remove_actor_or_entity()
    connect(t_kill, remove)
    disp_kill = dragonfly.io.display("id")("Killed: ")
    connect(t_kill, disp_kill)
    connect(kill, t_kill)
    connect(kill, sel.unregister)
    connect(kill, hide_marker)
    # After a kill, advance the selection (if anything is left).
    connect(kill, t_get_selected)
    # Guard: only kill when the selector is non-empty.
    testkill = dragonfly.logic.filter("trigger")()
    connect(sel.empty, testkill)
    connect(testkill.false, kill)
    key_k = dragonfly.io.keyboardsensor_trigger("K")
    connect(key_k, testkill)

    # --- spawn trigger: Z key or clicking the panda icon ---
    do_spawn = dragonfly.std.transistor(("id", ("object", "matrix")))()
    connect(w_spawn, do_spawn)
    connect(do_spawn, pandaspawn.spawn_matrix)
    trig_spawn = dragonfly.std.pushconnector("trigger")()
    # Order matters: generate the id, spawn, bind the behavior, then select.
    connect(trig_spawn, t_panda_id_gen)
    connect(trig_spawn, do_spawn)
    connect(trig_spawn, t_bind)
    connect(trig_spawn, t_bind2)
    connect(trig_spawn, do_select)
    key_z = dragonfly.io.keyboardsensor_trigger("Z")
    connect(key_z, trig_spawn)
    pandaicon_click = dragonfly.io.mouseareasensor("pandaicon")
    connect(pandaicon_click, trig_spawn)

    # --- scene and window setup ---
    myscene = myscene(
        scene="scene",
        canvas=canvas,
        mousearea=mousearea,
    )
    wininit = bee.init("window")
    wininit.camera.setPos(0, 45, 25)
    wininit.camera.setHpr(180, -20, 0)

    # --- event routing: prefix keyboard events with the selected panda's id
    # so the binder can dispatch them to that panda's behavior hive ---
    keyboardevents = dragonfly.event.sensor_match_leader("keyboard")
    add_head = dragonfly.event.add_head()
    head = dragonfly.convert.pull.duck("id", "event")()
    connect(selected, head)
    connect(keyboardevents, add_head)
    connect(head, add_head)
    connect(add_head, pandabinder.event)
# Standard bee hive life cycle: instantiate, build/place/close/init, then run
# the Panda3D main loop (run() blocks until the window is closed).
main = myhive().getinstance()
main.build("main")
main.place()
main.close()
main.init()
main.run()
| bsd-2-clause |
TPeterW/Bitcoin-Price-Prediction | data_collection/flip_sheets.py | 1 | 1705 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import pandas as pd
def main():
    """Flip every CSV file named on the command line.

    Each argument is forwarded to ``flipfile``, which writes a
    ``<name>_flipped.csv`` copy with the row order reversed. With no
    arguments this is a no-op.
    """
    # A large commented-out hard-coded list of coin CSV filenames used to
    # live here as a fallback; it was dead code and has been removed —
    # pass filenames explicitly instead.
    if len(sys.argv) >= 2:
        for filename in sys.argv[1:]:
            flipfile(filename)
def flipfile(filename):
    """Write a row-reversed copy of ``filename`` as ``<name>_flipped.csv``.

    The header row is preserved; only the data rows are reversed. The input
    is expected to end in a four-character extension (``.csv``).
    """
    frame = pd.read_csv(filename, index_col=None, header=0)
    reversed_frame = frame.iloc[::-1]
    out_name = filename[:-4] + '_flipped.csv'
    reversed_frame.to_csv(out_name, index=False, header=True)
# Allow use both as a script and as an importable module.
if __name__ == '__main__':
    main()
cwu2011/scikit-learn | sklearn/utils/validation.py | 66 | 23629 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import numbers
import warnings
from inspect import getargspec, signature

import numpy as np
import scipy.sparse as sp

from ..externals import six
# Floating point dtypes recognized by the validation helpers, in decreasing
# order of precision.
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
    """A warning on implicit data conversions happening in the code"""
    # Emitted e.g. by check_array (warn_on_dtype) and column_or_1d (warn=True)
    # when input data is silently copied/reshaped to satisfy an estimator.
    pass
# Always display data-conversion warnings (they would otherwise only be
# shown once per location by Python's default filter).
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
    """A warning on implicit dispatch to numpy.dot"""
    # Silenced by default (see the simplefilter call below); enable at
    # runtime when profiling BLAS usage.
class NotFittedError(ValueError, AttributeError):
    """Exception class to raise if estimator is used before fitting

    This class inherits from both ValueError and AttributeError to help with
    exception handling and backward compatibility.
    """
    # Raised by check_is_fitted below when required fitted attributes are
    # missing from an estimator.
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
    """Throw a ValueError if X contains NaN or infinity.

    Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
    # For sparse input only the explicitly stored entries are checked
    # (implicit zeros are finite by definition).
    _assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
    """Converts an array-like to an array of floats

    The new dtype will be np.float32 or np.float64, depending on the original
    type. The function can create a copy or modify the argument depending
    on the argument copy.

    Parameters
    ----------
    X : {array-like, sparse matrix}

    copy : bool, optional
        If True, a copy of X will be created. If False, a copy may still be
        returned if X's dtype is not a floating point type.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.

    Returns
    -------
    XT : {array, sparse matrix}
        An array of type np.float
    """
    if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
                                    and not sp.issparse(X)):
        # Generic array-likes (and np.matrix) go through full validation,
        # which also handles the float conversion.
        return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
                           copy=copy, force_all_finite=force_all_finite,
                           ensure_2d=False)
    elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
        # Already-float sparse input: at most a copy is needed.
        return X.copy() if copy else X
    elif X.dtype in [np.float32, np.float64]:  # is numpy array
        # Preserve memory layout (Fortran vs C order) when copying.
        return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
    else:
        # int32 is promoted only to float32 to limit memory growth;
        # everything else becomes float64.
        return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
    """Check that all arrays have consistent first dimensions.

    Checks whether all objects in arrays have the same shape or length.

    Parameters
    ----------
    *arrays : list or tuple of input objects.
        Objects that will be checked for consistent length.
    """
    # None entries are allowed and simply skipped.
    uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
    if len(uniques) > 1:
        raise ValueError("Found arrays with inconsistent numbers of samples: "
                         "%s" % str(uniques))
def indexable(*iterables):
    """Make arrays indexable for cross-validation.

    Checks consistent length, passes through None, and ensures that everything
    can be indexed by converting sparse matrices to csr and converting
    non-interable objects to arrays.

    Parameters
    ----------
    *iterables : lists, dataframes, arrays, sparse matrices
        List of objects to ensure sliceability.
    """
    result = []
    for X in iterables:
        if sp.issparse(X):
            # CSR supports efficient row indexing, which CV splitting needs.
            result.append(X.tocsr())
        elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
            # Already indexable (sequences, ndarrays, pandas objects).
            result.append(X)
        elif X is None:
            result.append(X)
        else:
            result.append(np.array(X))
    check_consistent_length(*result)
    return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
                          force_all_finite):
    """Convert a sparse matrix to a given format.

    Checks the sparse format of spmatrix and converts if necessary.

    Parameters
    ----------
    spmatrix : scipy sparse matrix
        Input to validate and convert.

    accept_sparse : string, list of string or None (default=None)
        String[s] representing allowed sparse matrix formats ('csc',
        'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
        matrix input will raise an error. If the input is sparse but not in
        the allowed format, it will be converted to the first listed format.

    dtype : string, type or None (default=none)
        Data type of result. If None, the dtype of the input is preserved.

    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.

    Returns
    -------
    spmatrix_converted : scipy sparse matrix.
        Matrix that is ensured to have an allowed type.
    """
    if accept_sparse in [None, False]:
        raise TypeError('A sparse matrix was passed, but dense '
                        'data is required. Use X.toarray() to '
                        'convert to a dense numpy array.')
    if dtype is None:
        dtype = spmatrix.dtype

    changed_format = False
    if (isinstance(accept_sparse, (list, tuple))
            and spmatrix.format not in accept_sparse):
        # create new with correct sparse
        spmatrix = spmatrix.asformat(accept_sparse[0])
        changed_format = True

    if dtype != spmatrix.dtype:
        # convert dtype (astype always copies, so no explicit copy needed)
        spmatrix = spmatrix.astype(dtype)
    elif copy and not changed_format:
        # force copy only if neither the format change nor the dtype
        # conversion already produced a new object
        spmatrix = spmatrix.copy()

    if force_all_finite:
        if not hasattr(spmatrix, "data"):
            # e.g. DOK/LIL formats expose no flat data array to check
            warnings.warn("Can't check %s sparse matrix for nan or inf."
                          % spmatrix.format)
        else:
            _assert_all_finite(spmatrix.data)
    return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
                copy=False, force_all_finite=True, ensure_2d=True,
                allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
                warn_on_dtype=False, estimator=None):
    """Input validation on an array, list, sparse matrix or similar.

    By default, the input is converted to an at least 2nd numpy array.
    If the dtype of the array is object, attempt converting to float,
    raising on failure.

    Parameters
    ----------
    array : object
        Input object to check / convert.

    accept_sparse : string, list of string or None (default=None)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. None means that sparse matrix input will raise an error.
        If the input is sparse but not in the allowed format, it will be
        converted to the first listed format.

    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.

    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.

    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.

    ensure_2d : boolean (default=True)
        Whether to make X at least 2d.

    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.

    ensure_min_samples : int (default=1)
        Make sure that the array has a minimum number of samples in its first
        axis (rows for a 2D array). Setting to 0 disables this check.

    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when the input data has effectively 2
        dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
        disables this check.

    warn_on_dtype : boolean (default=False)
        Raise DataConversionWarning if the dtype of the input data structure
        does not match the requested dtype, causing a memory copy.

    estimator : str or estimator instance (default=None)
        If passed, include the name of the estimator in warning messages.

    Returns
    -------
    X_converted : object
        The converted and validated X.
    """
    if isinstance(accept_sparse, str):
        accept_sparse = [accept_sparse]

    # store whether originally we wanted numeric dtype
    dtype_numeric = dtype == "numeric"

    dtype_orig = getattr(array, "dtype", None)
    if not hasattr(dtype_orig, 'kind'):
        # not a data type (e.g. a column named dtype in a pandas DataFrame)
        dtype_orig = None

    if dtype_numeric:
        if dtype_orig is not None and dtype_orig.kind == "O":
            # if input is object, convert to float.
            dtype = np.float64
        else:
            # numeric input: preserve the dtype as-is
            dtype = None

    if isinstance(dtype, (list, tuple)):
        if dtype_orig is not None and dtype_orig in dtype:
            # no dtype conversion required
            dtype = None
        else:
            # dtype conversion required. Let's select the first element of the
            # list of accepted types.
            dtype = dtype[0]

    if sp.issparse(array):
        array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
                                      force_all_finite)
    else:
        if ensure_2d:
            array = np.atleast_2d(array)
        array = np.array(array, dtype=dtype, order=order, copy=copy)
        # make sure we actually converted to numeric:
        if dtype_numeric and array.dtype.kind == "O":
            array = array.astype(np.float64)
        if not allow_nd and array.ndim >= 3:
            raise ValueError("Found array with dim %d. Expected <= 2" %
                             array.ndim)
        if force_all_finite:
            _assert_all_finite(array)

    shape_repr = _shape_repr(array.shape)
    if ensure_min_samples > 0:
        n_samples = _num_samples(array)
        if n_samples < ensure_min_samples:
            raise ValueError("Found array with %d sample(s) (shape=%s) while a"
                             " minimum of %d is required."
                             % (n_samples, shape_repr, ensure_min_samples))

    if ensure_min_features > 0 and array.ndim == 2:
        n_features = array.shape[1]
        if n_features < ensure_min_features:
            raise ValueError("Found array with %d feature(s) (shape=%s) while"
                             " a minimum of %d is required."
                             % (n_features, shape_repr, ensure_min_features))

    if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
        msg = ("Data with input dtype %s was converted to %s"
               % (dtype_orig, array.dtype))
        if estimator is not None:
            if not isinstance(estimator, six.string_types):
                estimator = estimator.__class__.__name__
            msg += " by %s" % estimator
        warnings.warn(msg, DataConversionWarning)
    return array
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
              force_all_finite=True, ensure_2d=True, allow_nd=False,
              multi_output=False, ensure_min_samples=1,
              ensure_min_features=1, y_numeric=False,
              warn_on_dtype=False, estimator=None):
    """Input validation for standard estimators.

    Checks X and y for consistent length, enforces X 2d and y 1d.
    Standard input checks are only applied to y. For multi-label y,
    set multi_output=True to allow 2d and sparse y.
    If the dtype of X is object, attempt converting to float,
    raising on failure.

    Parameters
    ----------
    X : nd-array, list or sparse matrix
        Input data.

    y : nd-array, list or sparse matrix
        Labels.

    accept_sparse : string, list of string or None (default=None)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. None means that sparse matrix input will raise an error.
        If the input is sparse but not in the allowed format, it will be
        converted to the first listed format.

    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.

    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.

    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.

    ensure_2d : boolean (default=True)
        Whether to make X at least 2d.

    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.

    multi_output : boolean (default=False)
        Whether to allow 2-d y (array or sparse matrix). If false, y will be
        validated as a vector.

    ensure_min_samples : int (default=1)
        Make sure that X has a minimum number of samples in its first
        axis (rows for a 2D array).

    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when X has effectively 2 dimensions or
        is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
        this check.

    y_numeric : boolean (default=False)
        Whether to ensure that y has a numeric type. If dtype of y is object,
        it is converted to float64. Should only be used for regression
        algorithms.

    warn_on_dtype : boolean (default=False)
        Raise DataConversionWarning if the dtype of the input data structure
        does not match the requested dtype, causing a memory copy.

    estimator : str or estimator instance (default=None)
        If passed, include the name of the estimator in warning messages.

    Returns
    -------
    X_converted : object
        The converted and validated X.
    """
    X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
                    ensure_2d, allow_nd, ensure_min_samples,
                    ensure_min_features, warn_on_dtype, estimator)
    if multi_output:
        # 2-d / sparse y is allowed; keep its original dtype.
        y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
                        dtype=None)
    else:
        # Standard case: y must be a vector (column vectors are raveled
        # with a DataConversionWarning).
        y = column_or_1d(y, warn=True)
        _assert_all_finite(y)
    if y_numeric and y.dtype.kind == 'O':
        y = y.astype(np.float64)

    check_consistent_length(X, y)

    return X, y
def column_or_1d(y, warn=False):
    """ Ravel column or 1d numpy array, else raises an error

    Parameters
    ----------
    y : array-like

    warn : boolean, default False
       To control display of warnings.

    Returns
    -------
    y : array
    """
    shape = np.shape(y)
    ndim = len(shape)
    # Flat vectors pass straight through.
    if ndim == 1:
        return np.ravel(y)
    # Column vectors are raveled, optionally warning the caller to reshape.
    if ndim == 2 and shape[1] == 1:
        if warn:
            warnings.warn("A column-vector y was passed when a 1d array was"
                          " expected. Please change the shape of y to "
                          "(n_samples, ), for example using ravel().",
                          DataConversionWarning, stacklevel=2)
        return np.ravel(y)
    raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
    """Turn seed into a np.random.RandomState instance

    If seed is None, return the RandomState singleton used by np.random.
    If seed is an int, return a new RandomState instance seeded with seed.
    If seed is already a RandomState instance, return it.
    Otherwise raise ValueError.
    """
    if seed is None or seed is np.random:
        # Share the global singleton so np.random.* calls stay in sync.
        return np.random.mtrand._rand
    if isinstance(seed, np.random.RandomState):
        return seed
    if isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.RandomState(seed)
    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
                     ' instance' % seed)
def has_fit_parameter(estimator, parameter):
    """Checks whether the estimator's fit method supports the given parameter.

    Examples
    --------
    >>> from sklearn.svm import SVC
    >>> has_fit_parameter(SVC(), "sample_weight")
    True

    """
    # inspect.signature replaces inspect.getargspec, which was deprecated in
    # Python 3.0 and removed in 3.11, and which raised ValueError for fit
    # methods using keyword-only arguments. signature() also omits the bound
    # ``self``, so only real fit parameters are reported.
    return parameter in signature(estimator.fit).parameters
def check_symmetric(array, tol=1E-10, raise_warning=True,
                    raise_exception=False):
    """Make sure that array is 2D, square and symmetric.

    If the array is not symmetric, then a symmetrized version is returned.
    Optionally, a warning or exception is raised if the matrix is not
    symmetric.

    Parameters
    ----------
    array : nd-array or sparse matrix
        Input object to check / convert. Must be two-dimensional and square,
        otherwise a ValueError will be raised.
    tol : float
        Absolute tolerance for equivalence of arrays. Default = 1E-10.
    raise_warning : boolean (default=True)
        If True then raise a warning if conversion is required.
    raise_exception : boolean (default=False)
        If True then raise an exception if array is not symmetric.

    Returns
    -------
    array_sym : ndarray or sparse matrix
        Symmetrized version of the input array, i.e. the average of array
        and array.transpose(). If sparse, then duplicate entries are first
        summed and zeros are eliminated.
    """
    if array.ndim != 2 or array.shape[0] != array.shape[1]:
        raise ValueError("array must be 2-dimensional and square. "
                         "shape = {0}".format(array.shape))

    if sp.issparse(array):
        delta = array - array.T
        # only csr, csc, and coo have `data` attribute
        if delta.format not in ('csr', 'csc', 'coo'):
            delta = delta.tocsr()
        is_symmetric = np.all(abs(delta.data) < tol)
    else:
        is_symmetric = np.allclose(array, array.T, atol=tol)

    if is_symmetric:
        return array

    if raise_exception:
        raise ValueError("Array must be symmetric")
    if raise_warning:
        warnings.warn("Array is not symmetric, and will be converted "
                      "to symmetric by average with its transpose.")
    if sp.issparse(array):
        # stay in the caller's sparse format after symmetrizing
        conversion = 'to' + array.format
        array = getattr(0.5 * (array + array.T), conversion)()
    else:
        array = 0.5 * (array + array.T)
    return array
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
    """Perform is_fitted validation for estimator.

    Checks if the estimator is fitted by verifying the presence of
    "all_or_any" of the passed attributes and raises a NotFittedError with the
    given message.

    Parameters
    ----------
    estimator : estimator instance.
        estimator instance for which the check is performed.

    attributes : attribute name(s) given as string or a list/tuple of strings
        Eg. : ["coef_", "estimator_", ...], "coef_"

    msg : string
        The default error message is, "This %(name)s instance is not fitted
        yet. Call 'fit' with appropriate arguments before using this method."

        For custom messages if "%(name)s" is present in the message string,
        it is substituted for the estimator name.

        Eg. : "Estimator, %(name)s, must be fitted before sparsifying".

    all_or_any : callable, {all, any}, default all
        Specify whether all or any of the given attributes must exist.
    """
    if msg is None:
        msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
               "appropriate arguments before using this method.")

    # Anything without a fit method cannot be an estimator at all.
    if not hasattr(estimator, 'fit'):
        raise TypeError("%s is not an estimator instance." % (estimator))

    if not isinstance(attributes, (list, tuple)):
        attributes = [attributes]

    present = [hasattr(estimator, attr) for attr in attributes]
    if not all_or_any(present):
        raise NotFittedError(msg % {'name': type(estimator).__name__})
| bsd-3-clause |
dingocuster/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
# (estimator class, constructor kwargs) pairs exercised by every test below,
# covering both label-propagation variants with rbf and knn kernels.
ESTIMATORS = [
    (label_propagation.LabelPropagation, {'kernel': 'rbf'}),
    (label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
    (label_propagation.LabelSpreading, {'kernel': 'rbf'}),
    (label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
    # The unlabeled point (label -1, at [1., 3.]) lies closest to the
    # class-1 sample, so every estimator should transduce it to label 1.
    samples = [[1., 0.], [0., 2.], [1., 3.]]
    labels = [0, 1, -1]
    for estimator, parameters in ESTIMATORS:
        clf = estimator(**parameters).fit(samples, labels)
        nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
    # The unlabeled point is equidistant from the two labeled samples, so
    # its learned label distribution should be uniform.
    samples = [[1., 0.], [0., 1.], [1., 1.]]
    labels = [0, 1, -1]
    for estimator, parameters in ESTIMATORS:
        clf = estimator(**parameters).fit(samples, labels)
        if parameters['kernel'] == 'knn':
            # unstable test; changes in k-NN ordering break it
            # (an unreachable predict_proba assertion that used to sit after
            # this `continue` has been removed as dead code)
            continue
        assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
                                  np.array([.5, .5]), 2)
def test_predict():
    # Inductive prediction on a new point: [0.5, 2.5] is far closer to the
    # class-1 sample at [0., 2.] than to the class-0 sample at [1., 0.],
    # so all estimators should predict label 1.
    samples = [[1., 0.], [0., 2.], [1., 3.]]
    labels = [0, 1, -1]
    for estimator, parameters in ESTIMATORS:
        clf = estimator(**parameters).fit(samples, labels)
        assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
    # The query point [1., 1.] is equidistant from both labeled samples,
    # so the predicted class distribution should be uniform.
    samples = [[1., 0.], [0., 1.], [1., 2.5]]
    labels = [0, 1, -1]
    for estimator, parameters in ESTIMATORS:
        clf = estimator(**parameters).fit(samples, labels)
        assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
                                  np.array([[0.5, 0.5]]))
| bsd-3-clause |
jreback/pandas | pandas/tests/indexing/common.py | 2 | 5245 | """ common utilities """
import itertools
import numpy as np
from pandas import DataFrame, Float64Index, MultiIndex, Series, UInt64Index, date_range
import pandas._testing as tm
def _mklbl(prefix, n):
return [f"{prefix}{i}" for i in range(n)]
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class Base:
    """ indexing comprehensive base class """

    # Object kinds and index flavors used to build the test fixtures; each
    # (kind, typ) pair corresponds to an attribute named ``{kind}_{typ}``.
    _kinds = {"series", "frame"}
    _typs = {
        "ints",
        "uints",
        "labels",
        "mixed",
        "ts",
        "floats",
        "empty",
        "ts_rev",
        "multi",
    }

    def setup_method(self, method):
        # Build one Series and one DataFrame per index flavor, all 4 rows
        # of random data, so subclasses can exercise indexers uniformly.

        self.series_ints = Series(np.random.rand(4), index=np.arange(0, 8, 2))
        self.frame_ints = DataFrame(
            np.random.randn(4, 4), index=np.arange(0, 8, 2), columns=np.arange(0, 12, 3)
        )

        self.series_uints = Series(
            np.random.rand(4), index=UInt64Index(np.arange(0, 8, 2))
        )
        self.frame_uints = DataFrame(
            np.random.randn(4, 4),
            index=UInt64Index(range(0, 8, 2)),
            columns=UInt64Index(range(0, 12, 3)),
        )

        self.series_floats = Series(
            np.random.rand(4), index=Float64Index(range(0, 8, 2))
        )
        self.frame_floats = DataFrame(
            np.random.randn(4, 4),
            index=Float64Index(range(0, 8, 2)),
            columns=Float64Index(range(0, 12, 3)),
        )

        m_idces = [
            MultiIndex.from_product([[1, 2], [3, 4]]),
            MultiIndex.from_product([[5, 6], [7, 8]]),
            MultiIndex.from_product([[9, 10], [11, 12]]),
        ]

        self.series_multi = Series(np.random.rand(4), index=m_idces[0])
        self.frame_multi = DataFrame(
            np.random.randn(4, 4), index=m_idces[0], columns=m_idces[1]
        )

        self.series_labels = Series(np.random.randn(4), index=list("abcd"))
        self.frame_labels = DataFrame(
            np.random.randn(4, 4), index=list("abcd"), columns=list("ABCD")
        )

        self.series_mixed = Series(np.random.randn(4), index=[2, 4, "null", 8])
        self.frame_mixed = DataFrame(np.random.randn(4, 4), index=[2, 4, "null", 8])

        self.series_ts = Series(
            np.random.randn(4), index=date_range("20130101", periods=4)
        )
        self.frame_ts = DataFrame(
            np.random.randn(4, 4), index=date_range("20130101", periods=4)
        )

        dates_rev = date_range("20130101", periods=4).sort_values(ascending=False)
        self.series_ts_rev = Series(np.random.randn(4), index=dates_rev)
        self.frame_ts_rev = DataFrame(np.random.randn(4, 4), index=dates_rev)

        self.frame_empty = DataFrame()
        self.series_empty = Series(dtype=object)

        # form agglomerates: self.series / self.frame become dicts mapping
        # each typ name to the corresponding fixture object
        for kind in self._kinds:
            d = {}
            for typ in self._typs:
                d[typ] = getattr(self, f"{kind}_{typ}")

            setattr(self, kind, d)

    def generate_indices(self, f, values=False):
        """
        generate the indices
        if values is True , use the axis values
        is False, use the range
        """
        axes = f.axes
        if values:
            # positional coordinates instead of the actual axis labels
            axes = (list(range(len(ax))) for ax in axes)

        # cartesian product over all axes → every cell coordinate
        return itertools.product(*axes)

    def get_value(self, name, f, i, values=False):
        """ return the value for the location i """
        # check against values
        if values:
            return f.values[i]

        elif name == "iat":
            return f.iloc[i]
        else:
            assert name == "at"
            return f.loc[i]

    def check_values(self, f, func, values=False):
        # Compare the ``func`` indexer (e.g. "iloc") against either raw
        # .values access or repeated label __getitem__, for every cell.

        if f is None:
            return
        axes = f.axes
        indicies = itertools.product(*axes)

        for i in indicies:
            result = getattr(f, func)[i]

            # check against values
            if values:
                expected = f.values[i]
            else:
                expected = f
                # drill down one axis at a time, innermost key first
                for a in reversed(i):
                    expected = expected.__getitem__(a)

            tm.assert_almost_equal(result, expected)

    def check_result(self, method, key, typs=None, axes=None, fails=None):
        # Apply ``key`` through indexer ``method`` on every fixture of the
        # requested typs/axes; exceptions listed in ``fails`` are expected.
        def _eq(axis, obj, key):
            """ compare equal for these 2 keys """
            axified = _axify(obj, key, axis)
            try:
                getattr(obj, method).__getitem__(axified)

            except (IndexError, TypeError, KeyError) as detail:

                # if we are in fails, the ok, otherwise raise it
                if fails is not None:
                    if isinstance(detail, fails):
                        return
                raise

        if typs is None:
            typs = self._typs

        if axes is None:
            axes = [0, 1]
        else:
            assert axes in [0, 1]
            axes = [axes]

        # check
        for kind in self._kinds:
            d = getattr(self, kind)
            for ax in axes:
                for typ in typs:
                    assert typ in self._typs

                    obj = d[typ]
                    if ax < obj.ndim:
                        _eq(axis=ax, obj=obj, key=key)
| bsd-3-clause |
mwaskom/seaborn | seaborn/regression.py | 2 | 39418 | """Plotting functions for linear models (broadly construed)."""
import copy
from textwrap import dedent
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
try:
import statsmodels
assert statsmodels
_has_statsmodels = True
except ImportError:
_has_statsmodels = False
from . import utils
from . import algorithms as algo
from .axisgrid import FacetGrid, _facet_docs
from ._decorators import _deprecate_positional_args
__all__ = ["lmplot", "regplot", "residplot"]
class _LinearPlotter(object):
"""Base class for plotting relational data in tidy format.
To get anything useful done you'll have to inherit from this, but setup
code that can be abstracted out should be put here.
"""
def establish_variables(self, data, **kws):
"""Extract variables from data or use directly."""
self.data = data
# Validate the inputs
any_strings = any([isinstance(v, str) for v in kws.values()])
if any_strings and data is None:
raise ValueError("Must pass `data` if using named variables.")
# Set the variables
for var, val in kws.items():
if isinstance(val, str):
vector = data[val]
elif isinstance(val, list):
vector = np.asarray(val)
else:
vector = val
if vector is not None and vector.shape != (1,):
vector = np.squeeze(vector)
if np.ndim(vector) > 1:
err = "regplot inputs must be 1d"
raise ValueError(err)
setattr(self, var, vector)
def dropna(self, *vars):
"""Remove observations with missing data."""
vals = [getattr(self, var) for var in vars]
vals = [v for v in vals if v is not None]
not_na = np.all(np.column_stack([pd.notnull(v) for v in vals]), axis=1)
for var in vars:
val = getattr(self, var)
if val is not None:
setattr(self, var, val[not_na])
def plot(self, ax):
raise NotImplementedError
class _RegressionPlotter(_LinearPlotter):
    """Plotter for numeric independent variables with regression model.

    This does the computations and drawing for the `regplot` function, and
    is thus also used indirectly by `lmplot`.
    """
    def __init__(self, x, y, data=None, x_estimator=None, x_bins=None,
                 x_ci="ci", scatter=True, fit_reg=True, ci=95, n_boot=1000,
                 units=None, seed=None, order=1, logistic=False, lowess=False,
                 robust=False, logx=False, x_partial=None, y_partial=None,
                 truncate=False, dropna=True, x_jitter=None, y_jitter=None,
                 color=None, label=None):
        # Set member attributes
        self.x_estimator = x_estimator
        self.ci = ci
        # x_ci == "ci" means "reuse the main CI level for point estimates"
        self.x_ci = ci if x_ci == "ci" else x_ci
        self.n_boot = n_boot
        self.seed = seed
        self.scatter = scatter
        self.fit_reg = fit_reg
        self.order = order
        self.logistic = logistic
        self.lowess = lowess
        self.robust = robust
        self.logx = logx
        self.truncate = truncate
        self.x_jitter = x_jitter
        self.y_jitter = y_jitter
        self.color = color
        self.label = label
        # Validate the regression options:
        if sum((order > 1, logistic, robust, lowess, logx)) > 1:
            raise ValueError("Mutually exclusive regression options.")
        # Extract the data vals from the arguments or passed dataframe
        self.establish_variables(data, x=x, y=y, units=units,
                                 x_partial=x_partial, y_partial=y_partial)
        # Drop null observations
        if dropna:
            self.dropna("x", "y", "units", "x_partial", "y_partial")
        # Regress nuisance variables out of the data
        if self.x_partial is not None:
            self.x = self.regress_out(self.x, self.x_partial)
        if self.y_partial is not None:
            self.y = self.regress_out(self.y, self.y_partial)
        # Possibly bin the predictor variable, which implies a point estimate
        if x_bins is not None:
            self.x_estimator = np.mean if x_estimator is None else x_estimator
            x_discrete, x_bins = self.bin_predictor(x_bins)
            self.x_discrete = x_discrete
        else:
            self.x_discrete = self.x
        # Disable regression in case of singleton inputs
        if len(self.x) <= 1:
            self.fit_reg = False
        # Save the range of the x variable for the grid later
        if self.fit_reg:
            self.x_range = self.x.min(), self.x.max()
    @property
    def scatter_data(self):
        """Data where each observation is a point.

        Jitter (if requested) only affects the displayed points, never the
        data the regression is fit to.
        """
        x_j = self.x_jitter
        if x_j is None:
            x = self.x
        else:
            x = self.x + np.random.uniform(-x_j, x_j, len(self.x))
        y_j = self.y_jitter
        if y_j is None:
            y = self.y
        else:
            y = self.y + np.random.uniform(-y_j, y_j, len(self.y))
        return x, y
    @property
    def estimate_data(self):
        """Data with a point estimate and CI for each discrete x value."""
        x, y = self.x_discrete, self.y
        vals = sorted(np.unique(x))
        points, cis = [], []
        for val in vals:
            # Get the point estimate of the y variable
            _y = y[x == val]
            est = self.x_estimator(_y)
            points.append(est)
            # Compute the confidence interval for this estimate
            if self.x_ci is None:
                cis.append(None)
            else:
                units = None
                if self.x_ci == "sd":
                    # +/- one standard deviation instead of a bootstrap CI
                    sd = np.std(_y)
                    _ci = est - sd, est + sd
                else:
                    if self.units is not None:
                        units = self.units[x == val]
                    boots = algo.bootstrap(_y,
                                           func=self.x_estimator,
                                           n_boot=self.n_boot,
                                           units=units,
                                           seed=self.seed)
                    _ci = utils.ci(boots, self.x_ci)
                cis.append(_ci)
        return vals, points, cis
    def fit_regression(self, ax=None, x_range=None, grid=None):
        """Fit the regression model.

        Returns (grid, yhat, err_bands); err_bands is None when no CI was
        requested (or for lowess, which cannot produce one).
        """
        # Create the grid for the regression
        if grid is None:
            if self.truncate:
                x_min, x_max = self.x_range
            else:
                if ax is None:
                    # NOTE(review): x_range must be provided when neither ax
                    # nor grid is passed and truncate is False
                    x_min, x_max = x_range
                else:
                    x_min, x_max = ax.get_xlim()
            grid = np.linspace(x_min, x_max, 100)
        ci = self.ci
        # Fit the regression
        if self.order > 1:
            yhat, yhat_boots = self.fit_poly(grid, self.order)
        elif self.logistic:
            from statsmodels.genmod.generalized_linear_model import GLM
            from statsmodels.genmod.families import Binomial
            yhat, yhat_boots = self.fit_statsmodels(grid, GLM,
                                                    family=Binomial())
        elif self.lowess:
            # lowess cannot produce confidence bands
            ci = None
            grid, yhat = self.fit_lowess()
        elif self.robust:
            from statsmodels.robust.robust_linear_model import RLM
            yhat, yhat_boots = self.fit_statsmodels(grid, RLM)
        elif self.logx:
            yhat, yhat_boots = self.fit_logx(grid)
        else:
            yhat, yhat_boots = self.fit_fast(grid)
        # Compute the confidence interval at each grid point
        if ci is None:
            err_bands = None
        else:
            err_bands = utils.ci(yhat_boots, ci, axis=0)
        return grid, yhat, err_bands
    def fit_fast(self, grid):
        """Low-level regression and prediction using linear algebra."""
        def reg_func(_x, _y):
            # pseudo-inverse gives the least-squares solution
            return np.linalg.pinv(_x).dot(_y)
        X, y = np.c_[np.ones(len(self.x)), self.x], self.y
        grid = np.c_[np.ones(len(grid)), grid]
        yhat = grid.dot(reg_func(X, y))
        if self.ci is None:
            return yhat, None
        beta_boots = algo.bootstrap(X, y,
                                    func=reg_func,
                                    n_boot=self.n_boot,
                                    units=self.units,
                                    seed=self.seed).T
        yhat_boots = grid.dot(beta_boots).T
        return yhat, yhat_boots
    def fit_poly(self, grid, order):
        """Regression using numpy polyfit for higher-order trends."""
        def reg_func(_x, _y):
            return np.polyval(np.polyfit(_x, _y, order), grid)
        x, y = self.x, self.y
        yhat = reg_func(x, y)
        if self.ci is None:
            return yhat, None
        yhat_boots = algo.bootstrap(x, y,
                                    func=reg_func,
                                    n_boot=self.n_boot,
                                    units=self.units,
                                    seed=self.seed)
        return yhat, yhat_boots
    def fit_statsmodels(self, grid, model, **kwargs):
        """More general regression function using statsmodels objects."""
        import statsmodels.genmod.generalized_linear_model as glm
        X, y = np.c_[np.ones(len(self.x)), self.x], self.y
        grid = np.c_[np.ones(len(grid)), grid]
        def reg_func(_x, _y):
            try:
                yhat = model(_y, _x, **kwargs).fit().predict(grid)
            except glm.PerfectSeparationError:
                # perfectly separated data: return NaNs rather than raising
                yhat = np.empty(len(grid))
                yhat.fill(np.nan)
            return yhat
        yhat = reg_func(X, y)
        if self.ci is None:
            return yhat, None
        yhat_boots = algo.bootstrap(X, y,
                                    func=reg_func,
                                    n_boot=self.n_boot,
                                    units=self.units,
                                    seed=self.seed)
        return yhat, yhat_boots
    def fit_lowess(self):
        """Fit a locally-weighted regression, which returns its own grid."""
        from statsmodels.nonparametric.smoothers_lowess import lowess
        grid, yhat = lowess(self.y, self.x).T
        return grid, yhat
    def fit_logx(self, grid):
        """Fit the model in log-space."""
        X, y = np.c_[np.ones(len(self.x)), self.x], self.y
        grid = np.c_[np.ones(len(grid)), np.log(grid)]
        def reg_func(_x, _y):
            # replace the raw predictor column with its log before solving
            _x = np.c_[_x[:, 0], np.log(_x[:, 1])]
            return np.linalg.pinv(_x).dot(_y)
        yhat = grid.dot(reg_func(X, y))
        if self.ci is None:
            return yhat, None
        beta_boots = algo.bootstrap(X, y,
                                    func=reg_func,
                                    n_boot=self.n_boot,
                                    units=self.units,
                                    seed=self.seed).T
        yhat_boots = grid.dot(beta_boots).T
        return yhat, yhat_boots
    def bin_predictor(self, bins):
        """Discretize a predictor by assigning value to closest bin."""
        x = np.asarray(self.x)
        if np.isscalar(bins):
            # a scalar requests evenly-populated bins at data percentiles
            percentiles = np.linspace(0, 100, bins + 2)[1:-1]
            bins = np.percentile(x, percentiles)
        else:
            bins = np.ravel(bins)
        # snap each observation to the nearest bin center
        dist = np.abs(np.subtract.outer(x, bins))
        x_binned = bins[np.argmin(dist, axis=1)].ravel()
        return x_binned, bins
    def regress_out(self, a, b):
        """Regress b from a keeping a's original mean."""
        a_mean = a.mean()
        a = a - a_mean
        b = b - b.mean()
        b = np.c_[b]
        # subtract the projection of a onto b, then restore the mean
        a_prime = a - b.dot(np.linalg.pinv(b).dot(a))
        return np.asarray(a_prime + a_mean).reshape(a.shape)
    def plot(self, ax, scatter_kws, line_kws):
        """Draw the full plot."""
        # Insert the plot label into the correct set of keyword arguments
        if self.scatter:
            scatter_kws["label"] = self.label
        else:
            line_kws["label"] = self.label
        # Use the current color cycle state as a default
        if self.color is None:
            lines, = ax.plot([], [])
            color = lines.get_color()
            lines.remove()
        else:
            color = self.color
        # Ensure that color is hex to avoid matplotlib weirdness
        color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))
        # Let color in keyword arguments override overall plot color
        scatter_kws.setdefault("color", color)
        line_kws.setdefault("color", color)
        # Draw the constituent plots
        if self.scatter:
            self.scatterplot(ax, scatter_kws)
        if self.fit_reg:
            self.lineplot(ax, line_kws)
        # Label the axes
        if hasattr(self.x, "name"):
            ax.set_xlabel(self.x.name)
        if hasattr(self.y, "name"):
            ax.set_ylabel(self.y.name)
    def scatterplot(self, ax, kws):
        """Draw the data."""
        # Treat the line-based markers specially, explicitly setting larger
        # linewidth than is provided by the seaborn style defaults.
        # This would ideally be handled better in matplotlib (i.e., distinguish
        # between edgewidth for solid glyphs and linewidth for line glyphs
        # but this should do for now.
        line_markers = ["1", "2", "3", "4", "+", "x", "|", "_"]
        if self.x_estimator is None:
            if "marker" in kws and kws["marker"] in line_markers:
                lw = mpl.rcParams["lines.linewidth"]
            else:
                lw = mpl.rcParams["lines.markeredgewidth"]
            kws.setdefault("linewidths", lw)
            # skip the default alpha when an RGBA color array was passed
            if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:
                kws.setdefault("alpha", .8)
            x, y = self.scatter_data
            ax.scatter(x, y, **kws)
        else:
            # TODO abstraction
            ci_kws = {"color": kws["color"]}
            ci_kws["linewidth"] = mpl.rcParams["lines.linewidth"] * 1.75
            kws.setdefault("s", 50)
            xs, ys, cis = self.estimate_data
            if [ci for ci in cis if ci is not None]:
                # vertical CI bars, one per discrete x value
                for x, ci in zip(xs, cis):
                    ax.plot([x, x], ci, **ci_kws)
            ax.scatter(xs, ys, **kws)
    def lineplot(self, ax, kws):
        """Draw the model."""
        # Fit the regression model
        grid, yhat, err_bands = self.fit_regression(ax)
        edges = grid[0], grid[-1]
        # Get set default aesthetics
        fill_color = kws["color"]
        lw = kws.pop("lw", mpl.rcParams["lines.linewidth"] * 1.5)
        kws.setdefault("linewidth", lw)
        # Draw the regression line and confidence interval
        line, = ax.plot(grid, yhat, **kws)
        if not self.truncate:
            line.sticky_edges.x[:] = edges  # Prevent mpl from adding margin
        if err_bands is not None:
            ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)
# Shared docstring fragments interpolated (via str.format) into the
# lmplot/regplot/residplot docstrings below.  Fix: "not necessary spaced"
# -> "not necessarily spaced" in the x_bins entry.
_regression_docs = dict(
    model_api=dedent("""\
    There are a number of mutually exclusive options for estimating the
    regression model. See the :ref:`tutorial <regression_tutorial>` for more
    information.\
    """),
    regplot_vs_lmplot=dedent("""\
    The :func:`regplot` and :func:`lmplot` functions are closely related, but
    the former is an axes-level function while the latter is a figure-level
    function that combines :func:`regplot` and :class:`FacetGrid`.\
    """),
    x_estimator=dedent("""\
    x_estimator : callable that maps vector -> scalar, optional
        Apply this function to each unique value of ``x`` and plot the
        resulting estimate. This is useful when ``x`` is a discrete variable.
        If ``x_ci`` is given, this estimate will be bootstrapped and a
        confidence interval will be drawn.\
    """),
    x_bins=dedent("""\
    x_bins : int or vector, optional
        Bin the ``x`` variable into discrete bins and then estimate the central
        tendency and a confidence interval. This binning only influences how
        the scatterplot is drawn; the regression is still fit to the original
        data. This parameter is interpreted either as the number of
        evenly-sized (not necessarily spaced) bins or the positions of the bin
        centers. When this parameter is used, it implies that the default of
        ``x_estimator`` is ``numpy.mean``.\
    """),
    x_ci=dedent("""\
    x_ci : "ci", "sd", int in [0, 100] or None, optional
        Size of the confidence interval used when plotting a central tendency
        for discrete values of ``x``. If ``"ci"``, defer to the value of the
        ``ci`` parameter. If ``"sd"``, skip bootstrapping and show the
        standard deviation of the observations in each bin.\
    """),
    scatter=dedent("""\
    scatter : bool, optional
        If ``True``, draw a scatterplot with the underlying observations (or
        the ``x_estimator`` values).\
    """),
    fit_reg=dedent("""\
    fit_reg : bool, optional
        If ``True``, estimate and plot a regression model relating the ``x``
        and ``y`` variables.\
    """),
    ci=dedent("""\
    ci : int in [0, 100] or None, optional
        Size of the confidence interval for the regression estimate. This will
        be drawn using translucent bands around the regression line. The
        confidence interval is estimated using a bootstrap; for large
        datasets, it may be advisable to avoid that computation by setting
        this parameter to None.\
    """),
    n_boot=dedent("""\
    n_boot : int, optional
        Number of bootstrap resamples used to estimate the ``ci``. The default
        value attempts to balance time and stability; you may want to increase
        this value for "final" versions of plots.\
    """),
    units=dedent("""\
    units : variable name in ``data``, optional
        If the ``x`` and ``y`` observations are nested within sampling units,
        those can be specified here. This will be taken into account when
        computing the confidence intervals by performing a multilevel bootstrap
        that resamples both units and observations (within unit). This does not
        otherwise influence how the regression is estimated or drawn.\
    """),
    seed=dedent("""\
    seed : int, numpy.random.Generator, or numpy.random.RandomState, optional
        Seed or random number generator for reproducible bootstrapping.\
    """),
    order=dedent("""\
    order : int, optional
        If ``order`` is greater than 1, use ``numpy.polyfit`` to estimate a
        polynomial regression.\
    """),
    logistic=dedent("""\
    logistic : bool, optional
        If ``True``, assume that ``y`` is a binary variable and use
        ``statsmodels`` to estimate a logistic regression model. Note that this
        is substantially more computationally intensive than linear regression,
        so you may wish to decrease the number of bootstrap resamples
        (``n_boot``) or set ``ci`` to None.\
    """),
    lowess=dedent("""\
    lowess : bool, optional
        If ``True``, use ``statsmodels`` to estimate a nonparametric lowess
        model (locally weighted linear regression). Note that confidence
        intervals cannot currently be drawn for this kind of model.\
    """),
    robust=dedent("""\
    robust : bool, optional
        If ``True``, use ``statsmodels`` to estimate a robust regression. This
        will de-weight outliers. Note that this is substantially more
        computationally intensive than standard linear regression, so you may
        wish to decrease the number of bootstrap resamples (``n_boot``) or set
        ``ci`` to None.\
    """),
    logx=dedent("""\
    logx : bool, optional
        If ``True``, estimate a linear regression of the form y ~ log(x), but
        plot the scatterplot and regression model in the input space. Note that
        ``x`` must be positive for this to work.\
    """),
    xy_partial=dedent("""\
    {x,y}_partial : strings in ``data`` or matrices
        Confounding variables to regress out of the ``x`` or ``y`` variables
        before plotting.\
    """),
    truncate=dedent("""\
    truncate : bool, optional
        If ``True``, the regression line is bounded by the data limits. If
        ``False``, it extends to the ``x`` axis limits.
    """),
    xy_jitter=dedent("""\
    {x,y}_jitter : floats, optional
        Add uniform random noise of this size to either the ``x`` or ``y``
        variables. The noise is added to a copy of the data after fitting the
        regression, and only influences the look of the scatterplot. This can
        be helpful when plotting variables that take discrete values.\
    """),
    scatter_line_kws=dedent("""\
    {scatter,line}_kws : dictionaries
        Additional keyword arguments to pass to ``plt.scatter`` and
        ``plt.plot``.\
    """),
    )
_regression_docs.update(_facet_docs)
@_deprecate_positional_args
def lmplot(
    *,
    x=None, y=None,
    data=None,
    hue=None, col=None, row=None,  # TODO move before data once * is enforced
    palette=None, col_wrap=None, height=5, aspect=1, markers="o",
    sharex=None, sharey=None, hue_order=None, col_order=None, row_order=None,
    legend=True, legend_out=None, x_estimator=None, x_bins=None,
    x_ci="ci", scatter=True, fit_reg=True, ci=95, n_boot=1000,
    units=None, seed=None, order=1, logistic=False, lowess=False,
    robust=False, logx=False, x_partial=None, y_partial=None,
    truncate=True, x_jitter=None, y_jitter=None, scatter_kws=None,
    line_kws=None, facet_kws=None, size=None,
):
    # Figure-level interface: combines regplot with a FacetGrid.  The public
    # docstring is attached below via ``lmplot.__doc__``.
    # Handle deprecations
    if size is not None:
        # `size` was renamed to `height`; honor the old name with a warning
        height = size
        msg = ("The `size` parameter has been renamed to `height`; "
               "please update your code.")
        warnings.warn(msg, UserWarning)
    if facet_kws is None:
        facet_kws = {}
    def facet_kw_deprecation(key, val):
        # Forward a deprecated top-level kwarg into facet_kws, warning once
        msg = (
            f"{key} is deprecated from the `lmplot` function signature. "
            "Please update your code to pass it using `facet_kws`."
        )
        if val is not None:
            warnings.warn(msg, UserWarning)
            facet_kws[key] = val
    facet_kw_deprecation("sharex", sharex)
    facet_kw_deprecation("sharey", sharey)
    facet_kw_deprecation("legend_out", legend_out)
    if data is None:
        raise TypeError("Missing required keyword argument `data`.")
    # Reduce the dataframe to only needed columns
    need_cols = [x, y, hue, col, row, units, x_partial, y_partial]
    cols = np.unique([a for a in need_cols if a is not None]).tolist()
    data = data[cols]
    # Initialize the grid
    facets = FacetGrid(
        data, row=row, col=col, hue=hue,
        palette=palette,
        row_order=row_order, col_order=col_order, hue_order=hue_order,
        height=height, aspect=aspect, col_wrap=col_wrap,
        **facet_kws,
    )
    # Add the markers here as FacetGrid has figured out how many levels of the
    # hue variable are needed and we don't want to duplicate that process
    if facets.hue_names is None:
        n_markers = 1
    else:
        n_markers = len(facets.hue_names)
    if not isinstance(markers, list):
        markers = [markers] * n_markers
    if len(markers) != n_markers:
        raise ValueError(("markers must be a singleton or a list of markers "
                          "for each level of the hue variable"))
    facets.hue_kws = {"marker": markers}
    def update_datalim(data, x, y, ax, **kws):
        # Seed each Axes' x data limits from the full facet data so the
        # regression grid spans the data even before artists are drawn
        xys = data[[x, y]].to_numpy().astype(float)
        ax.update_datalim(xys, updatey=False)
        ax.autoscale_view(scaley=False)
    facets.map_dataframe(update_datalim, x=x, y=y)
    # Draw the regression plot on each facet
    regplot_kws = dict(
        x_estimator=x_estimator, x_bins=x_bins, x_ci=x_ci,
        scatter=scatter, fit_reg=fit_reg, ci=ci, n_boot=n_boot, units=units,
        seed=seed, order=order, logistic=logistic, lowess=lowess,
        robust=robust, logx=logx, x_partial=x_partial, y_partial=y_partial,
        truncate=truncate, x_jitter=x_jitter, y_jitter=y_jitter,
        scatter_kws=scatter_kws, line_kws=line_kws,
    )
    facets.map_dataframe(regplot, x=x, y=y, **regplot_kws)
    facets.set_axis_labels(x, y)
    # Add a legend
    if legend and (hue is not None) and (hue not in [col, row]):
        facets.add_legend()
    return facets
lmplot.__doc__ = dedent("""\
Plot data and regression model fits across a FacetGrid.
This function combines :func:`regplot` and :class:`FacetGrid`. It is
intended as a convenient interface to fit regression models across
conditional subsets of a dataset.
When thinking about how to assign variables to different facets, a general
rule is that it makes sense to use ``hue`` for the most important
comparison, followed by ``col`` and ``row``. However, always think about
your particular dataset and the goals of the visualization you are
creating.
{model_api}
The parameters to this function span most of the options in
:class:`FacetGrid`, although there may be occasional cases where you will
want to use that class and :func:`regplot` directly.
Parameters
----------
x, y : strings, optional
Input variables; these should be column names in ``data``.
{data}
hue, col, row : strings
Variables that define subsets of the data, which will be drawn on
separate facets in the grid. See the ``*_order`` parameters to control
the order of levels of this variable.
{palette}
{col_wrap}
{height}
{aspect}
markers : matplotlib marker code or list of marker codes, optional
Markers for the scatterplot. If a list, each marker in the list will be
used for each level of the ``hue`` variable.
{share_xy}
.. deprecated:: 0.12.0
Pass using the `facet_kws` dictionary.
{{hue,col,row}}_order : lists, optional
Order for the levels of the faceting variables. By default, this will
be the order that the levels appear in ``data`` or, if the variables
are pandas categoricals, the category order.
legend : bool, optional
If ``True`` and there is a ``hue`` variable, add a legend.
{legend_out}
.. deprecated:: 0.12.0
Pass using the `facet_kws` dictionary.
{x_estimator}
{x_bins}
{x_ci}
{scatter}
{fit_reg}
{ci}
{n_boot}
{units}
{seed}
{order}
{logistic}
{lowess}
{robust}
{logx}
{xy_partial}
{truncate}
{xy_jitter}
{scatter_line_kws}
facet_kws : dict
Dictionary of keyword arguments for :class:`FacetGrid`.
See Also
--------
regplot : Plot data and a conditional model fit.
FacetGrid : Subplot grid for plotting conditional relationships.
pairplot : Combine :func:`regplot` and :class:`PairGrid` (when used with
``kind="reg"``).
Notes
-----
{regplot_vs_lmplot}
Examples
--------
These examples focus on basic regression model plots to exhibit the
various faceting options; see the :func:`regplot` docs for demonstrations
of the other options for plotting the data and models. There are also
other examples for how to manipulate plot using the returned object on
the :class:`FacetGrid` docs.
Plot a simple linear relationship between two variables:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set_theme(color_codes=True)
>>> tips = sns.load_dataset("tips")
>>> g = sns.lmplot(x="total_bill", y="tip", data=tips)
Condition on a third variable and plot the levels in different colors:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips)
Use different markers as well as colors so the plot will reproduce to
black-and-white more easily:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips,
... markers=["o", "x"])
Use a different color palette:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips,
... palette="Set1")
Map ``hue`` levels to colors with a dictionary:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips,
... palette=dict(Yes="g", No="m"))
Plot the levels of the third variable across different columns:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", col="smoker", data=tips)
Change the height and aspect ratio of the facets:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="size", y="total_bill", hue="day", col="day",
... data=tips, height=6, aspect=.4, x_jitter=.1)
Wrap the levels of the column variable into multiple rows:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", col="day", hue="day",
... data=tips, col_wrap=2, height=3)
Condition on two variables to make a full grid:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", row="sex", col="time",
... data=tips, height=3)
Use methods on the returned :class:`FacetGrid` instance to further tweak
the plot:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", row="sex", col="time",
... data=tips, height=3)
>>> g = (g.set_axis_labels("Total bill (US Dollars)", "Tip")
... .set(xlim=(0, 60), ylim=(0, 12),
... xticks=[10, 30, 50], yticks=[2, 6, 10])
... .fig.subplots_adjust(wspace=.02))
""").format(**_regression_docs)
@_deprecate_positional_args
def regplot(
    *,
    x=None, y=None,
    data=None,
    x_estimator=None, x_bins=None, x_ci="ci",
    scatter=True, fit_reg=True, ci=95, n_boot=1000, units=None,
    seed=None, order=1, logistic=False, lowess=False, robust=False,
    logx=False, x_partial=None, y_partial=None,
    truncate=True, dropna=True, x_jitter=None, y_jitter=None,
    label=None, color=None, marker="o",
    scatter_kws=None, line_kws=None, ax=None
):
    # Axes-level entry point; the public docstring is attached below via
    # ``regplot.__doc__``.
    plotter = _RegressionPlotter(
        x, y, data,
        x_estimator=x_estimator, x_bins=x_bins, x_ci=x_ci,
        scatter=scatter, fit_reg=fit_reg, ci=ci, n_boot=n_boot,
        units=units, seed=seed, order=order, logistic=logistic,
        lowess=lowess, robust=robust, logx=logx,
        x_partial=x_partial, y_partial=y_partial,
        truncate=truncate, dropna=dropna,
        x_jitter=x_jitter, y_jitter=y_jitter,
        color=color, label=label,
    )

    # Default to the currently-active Axes
    if ax is None:
        ax = plt.gca()

    # Copy the kwarg dicts so the caller's inputs are never mutated
    scatter_kws = {} if scatter_kws is None else copy.copy(scatter_kws)
    scatter_kws["marker"] = marker
    line_kws = {} if line_kws is None else copy.copy(line_kws)

    plotter.plot(ax, scatter_kws, line_kws)
    return ax
# Attach the public docstring, interpolating the shared fragments.
# Fix: duplicated word "combine combine" in the Notes section.
regplot.__doc__ = dedent("""\
    Plot data and a linear regression model fit.
    {model_api}
    Parameters
    ----------
    x, y: string, series, or vector array
        Input variables. If strings, these should correspond with column names
        in ``data``. When pandas objects are used, axes will be labeled with
        the series name.
    {data}
    {x_estimator}
    {x_bins}
    {x_ci}
    {scatter}
    {fit_reg}
    {ci}
    {n_boot}
    {units}
    {seed}
    {order}
    {logistic}
    {lowess}
    {robust}
    {logx}
    {xy_partial}
    {truncate}
    {xy_jitter}
    label : string
        Label to apply to either the scatterplot or regression line (if
        ``scatter`` is ``False``) for use in a legend.
    color : matplotlib color
        Color to apply to all plot elements; will be superseded by colors
        passed in ``scatter_kws`` or ``line_kws``.
    marker : matplotlib marker code
        Marker to use for the scatterplot glyphs.
    {scatter_line_kws}
    ax : matplotlib Axes, optional
        Axes object to draw the plot onto, otherwise uses the current Axes.
    Returns
    -------
    ax : matplotlib Axes
        The Axes object containing the plot.
    See Also
    --------
    lmplot : Combine :func:`regplot` and :class:`FacetGrid` to plot multiple
             linear relationships in a dataset.
    jointplot : Combine :func:`regplot` and :class:`JointGrid` (when used with
                ``kind="reg"``).
    pairplot : Combine :func:`regplot` and :class:`PairGrid` (when used with
               ``kind="reg"``).
    residplot : Plot the residuals of a linear regression model.
    Notes
    -----
    {regplot_vs_lmplot}
    It's also easy to combine :func:`regplot` and :class:`JointGrid` or
    :class:`PairGrid` through the :func:`jointplot` and :func:`pairplot`
    functions, although these do not directly accept all of :func:`regplot`'s
    parameters.
    Examples
    --------
    Plot the relationship between two variables in a DataFrame:
    .. plot::
        :context: close-figs
        >>> import seaborn as sns; sns.set_theme(color_codes=True)
        >>> tips = sns.load_dataset("tips")
        >>> ax = sns.regplot(x="total_bill", y="tip", data=tips)
    Plot with two variables defined as numpy arrays; use a different color:
    .. plot::
        :context: close-figs
        >>> import numpy as np; np.random.seed(8)
        >>> mean, cov = [4, 6], [(1.5, .7), (.7, 1)]
        >>> x, y = np.random.multivariate_normal(mean, cov, 80).T
        >>> ax = sns.regplot(x=x, y=y, color="g")
    Plot with two variables defined as pandas Series; use a different marker:
    .. plot::
        :context: close-figs
        >>> import pandas as pd
        >>> x, y = pd.Series(x, name="x_var"), pd.Series(y, name="y_var")
        >>> ax = sns.regplot(x=x, y=y, marker="+")
    Use a 68% confidence interval, which corresponds with the standard error
    of the estimate, and extend the regression line to the axis limits:
    .. plot::
        :context: close-figs
        >>> ax = sns.regplot(x=x, y=y, ci=68, truncate=False)
    Plot with a discrete ``x`` variable and add some jitter:
    .. plot::
        :context: close-figs
        >>> ax = sns.regplot(x="size", y="total_bill", data=tips, x_jitter=.1)
    Plot with a discrete ``x`` variable showing means and confidence intervals
    for unique values:
    .. plot::
        :context: close-figs
        >>> ax = sns.regplot(x="size", y="total_bill", data=tips,
        ...                  x_estimator=np.mean)
    Plot with a continuous variable divided into discrete bins:
    .. plot::
        :context: close-figs
        >>> ax = sns.regplot(x=x, y=y, x_bins=4)
    Fit a higher-order polynomial regression:
    .. plot::
        :context: close-figs
        >>> ans = sns.load_dataset("anscombe")
        >>> ax = sns.regplot(x="x", y="y", data=ans.loc[ans.dataset == "II"],
        ...                  scatter_kws={{"s": 80}},
        ...                  order=2, ci=None)
    Fit a robust regression and don't plot a confidence interval:
    .. plot::
        :context: close-figs
        >>> ax = sns.regplot(x="x", y="y", data=ans.loc[ans.dataset == "III"],
        ...                  scatter_kws={{"s": 80}},
        ...                  robust=True, ci=None)
    Fit a logistic regression; jitter the y variable and use fewer bootstrap
    iterations:
    .. plot::
        :context: close-figs
        >>> tips["big_tip"] = (tips.tip / tips.total_bill) > .175
        >>> ax = sns.regplot(x="total_bill", y="big_tip", data=tips,
        ...                  logistic=True, n_boot=500, y_jitter=.03)
    Fit the regression model using log(x):
    .. plot::
        :context: close-figs
        >>> ax = sns.regplot(x="size", y="total_bill", data=tips,
        ...                  x_estimator=np.mean, logx=True)
    """).format(**_regression_docs)
@_deprecate_positional_args
def residplot(
    *,
    x=None, y=None,
    data=None,
    lowess=False, x_partial=None, y_partial=None,
    order=1, robust=False, dropna=True, label=None, color=None,
    scatter_kws=None, line_kws=None, ax=None
):
    """Plot the residuals of a linear regression.

    This function will regress y on x (possibly as a robust or polynomial
    regression) and then draw a scatterplot of the residuals. You can
    optionally fit a lowess smoother to the residual plot, which can
    help in determining if there is structure to the residuals.

    Parameters
    ----------
    x : vector or string
        Data or column name in `data` for the predictor variable.
    y : vector or string
        Data or column name in `data` for the response variable.
    data : DataFrame, optional
        DataFrame to use if `x` and `y` are column names.
    lowess : boolean, optional
        Fit a lowess smoother to the residual scatterplot.
    {x, y}_partial : matrix or string(s) , optional
        Matrix with same first dimension as `x`, or column name(s) in `data`.
        These variables are treated as confounding and are removed from
        the `x` or `y` variables before plotting.
    order : int, optional
        Order of the polynomial to fit when calculating the residuals.
    robust : boolean, optional
        Fit a robust linear regression when calculating the residuals.
    dropna : boolean, optional
        If True, ignore observations with missing data when fitting and
        plotting.
    label : string, optional
        Label that will be used in any plot legends.
    color : matplotlib color, optional
        Color to use for all elements of the plot.
    {scatter, line}_kws : dictionaries, optional
        Additional keyword arguments passed to scatter() and plot() for drawing
        the components of the plot.
    ax : matplotlib axis, optional
        Plot into this axis, otherwise grab the current axis or make a new
        one if not existing.

    Returns
    -------
    ax: matplotlib axes
        Axes with the regression plot.

    See Also
    --------
    regplot : Plot a simple linear regression model.
    jointplot : Draw a :func:`residplot` with univariate marginal distributions
                (when used with ``kind="resid"``).
    """
    plotter = _RegressionPlotter(
        x, y, data,
        ci=None,
        order=order, robust=robust,
        x_partial=x_partial, y_partial=y_partial,
        dropna=dropna, color=color, label=label,
    )

    if ax is None:
        ax = plt.gca()

    # Draw the reference line for zero residual first
    ax.axhline(0, ls=":", c=".2")

    # Replace y with the residuals of the regression evaluated at x
    _, fitted, _ = plotter.fit_regression(grid=plotter.x)
    plotter.y = plotter.y - fitted

    # Either smooth the residuals with lowess or draw no fit line at all
    if lowess:
        plotter.lowess = True
    else:
        plotter.fit_reg = False

    # Draw the scatterplot without mutating the caller's kwarg dicts
    if scatter_kws is None:
        scatter_kws = {}
    else:
        scatter_kws = scatter_kws.copy()
    if line_kws is None:
        line_kws = {}
    else:
        line_kws = line_kws.copy()
    plotter.plot(ax, scatter_kws, line_kws)
    return ax
| bsd-3-clause |
JeanKossaifi/scikit-learn | sklearn/datasets/species_distributions.py | 198 | 7923 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = header[b'NODATA_value']
if nodata != -9999:
print(nodata)
M[nodata] = -9999
return M
def _load_csv(F):
    """Load a csv file into a numpy record array.

    Parameters
    ----------
    F : file object
        CSV file open in byte mode.

    Returns
    -------
    rec : np.ndarray
        record array representing the data
    """
    header_line = F.readline()
    if PY2:
        # Numpy recarray wants Python 2 str but not unicode
        names = header_line.strip().split(',')
    else:
        # Numpy recarray wants Python 3 str but not bytes...
        names = header_line.decode('ascii').strip().split(',')
    rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
    rec.dtype.names = names
    return rec
def construct_grids(batch):
    """Construct the map grid from the batch object.

    Parameters
    ----------
    batch : Batch object
        The object returned by :func:`fetch_species_distributions`.

    Returns
    -------
    (xgrid, ygrid) : 1-D arrays
        The grid corresponding to the values in batch.coverages.
    """
    cell = batch.grid_size
    # Coordinates of the corner cells: one cell in from the lower-left corner.
    x_start = batch.x_left_lower_corner + cell
    y_start = batch.y_left_lower_corner + cell
    # One coordinate per grid cell along each axis.
    xgrid = np.arange(x_start, x_start + batch.Nx * cell, cell)
    ygrid = np.arange(y_start, y_start + batch.Ny * cell, cell)
    return (xgrid, ygrid)
def fetch_species_distributions(data_home=None,
                                download_if_missing=True):
    """Loader for species distribution dataset from Phillips et. al. (2006)

    Read more in the :ref:`User Guide <datasets>`.

    Parameters
    ----------
    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.

    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    The data is returned as a Bunch object with the following attributes:

    coverages : array, shape = [14, 1592, 1212]
        These represent the 14 features measured at each point of the map grid.
        The latitude/longitude values for the grid are discussed below.
        Missing data is represented by the value -9999.

    train : record array, shape = (1623,)
        The training points for the data. Each point has three fields:

        - train['species'] is the species name
        - train['dd long'] is the longitude, in degrees
        - train['dd lat'] is the latitude, in degrees

    test : record array, shape = (619,)
        The test points for the data. Same format as the training data.

    Nx, Ny : integers
        The number of longitudes (x) and latitudes (y) in the grid

    x_left_lower_corner, y_left_lower_corner : floats
        The (x,y) position of the lower-left corner, in degrees

    grid_size : float
        The spacing between points of the grid, in degrees

    Notes
    -----
    This dataset represents the geographic distribution of species.
    The dataset is provided by Phillips et. al. (2006).

    The two species are:

    - `"Bradypus variegatus"
      <http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
      the Brown-throated Sloth.

    - `"Microryzomys minutus"
      <http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
      also known as the Forest Small Rice Rat, a rodent that lives in
      Colombia, Ecuador, Peru, and Venezuela.

    References
    ----------
    * `"Maximum entropy modeling of species geographic distributions"
      <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
      S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
      190:231-259, 2006.

    * See examples/applications/plot_species_distribution_modeling.py
      for an example of using this dataset with scikit-learn
    """
    data_home = get_data_home(data_home)
    if not exists(data_home):
        makedirs(data_home)

    # Define parameters for the data files.  These should not be changed
    # unless the data model changes.  They will be saved in the npz file
    # with the downloaded data.
    extra_params = dict(x_left_lower_corner=-94.8,
                        Nx=1212,
                        y_left_lower_corner=-56.05,
                        Ny=1592,
                        grid_size=0.05)
    dtype = np.int16

    if not exists(join(data_home, DATA_ARCHIVE_NAME)):
        # BUG FIX: download_if_missing was previously accepted but ignored;
        # honor it as documented instead of always downloading.
        if not download_if_missing:
            raise IOError("Data not found and `download_if_missing` is False")
        print('Downloading species data from %s to %s' % (SAMPLES_URL,
                                                          data_home))
        X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
        for f in X.files:
            fhandle = BytesIO(X[f])
            if 'train' in f:
                train = _load_csv(fhandle)
            if 'test' in f:
                test = _load_csv(fhandle)

        print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
                                                           data_home))
        X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
        coverages = []
        for f in X.files:
            fhandle = BytesIO(X[f])
            print(' - converting', f)
            coverages.append(_load_coverage(fhandle))
        coverages = np.asarray(coverages, dtype=dtype)

        bunch = Bunch(coverages=coverages,
                      test=test,
                      train=train,
                      **extra_params)
        # compress=9 trades packing time for a much smaller cache file
        joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
    else:
        bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))

    return bunch
| bsd-3-clause |
ronekko/spatial_transformer_network | main.py | 1 | 8637 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 14 21:17:12 2015
@author: sakurai
"""
import argparse
import time
import copy
import numpy as np
import matplotlib.pyplot as plt
import chainer.functions as F
from chainer import optimizers
from chainer import Variable, FunctionSet
from chainer import cuda
import spatial_transformer_network as stm
np.random.seed(0)
def forward(model, x_batch, train=False):
    """Run one forward pass of ``model`` on a raw minibatch.

    Stand-alone equivalent of ``SpatialTransformerNetwork.forward``:
    returns ``(y, x_st, theta, points)`` where ``y`` are the classifier
    outputs and ``x_st``/``theta``/``points`` come from the spatial
    transformer stage.
    """
    inp = Variable(x_batch, volatile=not train)
    warped, theta, points = model.st(inp, True)
    hidden = F.dropout(F.relu(model.fc1(warped)), train=train)
    hidden = F.dropout(F.relu(model.fc2(hidden)), train=train)
    logits = model.fc3(hidden)
    return logits, warped, theta, points
class SpatialTransformerNetwork(FunctionSet):
    """MLP classifier preceded by a spatial transformer module.

    The spatial transformer (``st``) warps the input image from ``in_shape``
    down to ``out_shape``; the result is classified by a 3-layer fully
    connected network (ReLU activations, dropout, 10 output classes).

    Parameters
    ----------
    in_shape : tuple
        Shape of the input images.
    out_shape : tuple
        Shape of the transformer output fed to the classifier.
    trans_type : str
        Transformation class, either "translation" or "affine".
    """
    def __init__(self, in_shape, out_shape, trans_type="translation"):
        assert trans_type in ["translation", "affine"]
        sqrt2 = np.sqrt(2)
        out_size = np.prod(out_shape)
        super(SpatialTransformerNetwork, self).__init__(
            # BUG FIX: "affine" was hard-coded here before, so the
            # trans_type argument was silently ignored.
            st=stm.SpatialTransformer(in_shape, out_shape, trans_type),
            fc1=F.Linear(out_size, 256, wscale=sqrt2),
            fc2=F.Linear(256, 256, wscale=sqrt2),
            fc3=F.Linear(256, 10, wscale=sqrt2)
        )

    def forward(self, x_batch, train=False):
        """Forward pass; returns (y, x_st, theta, points)."""
        x = Variable(x_batch, volatile=not train)
        x_st, theta, points = self.st(x, True)
        h = F.relu(self.fc1(x_st))
        h = F.dropout(h, train=train)
        h = F.relu(self.fc2(h))
        h = F.dropout(h, train=train)
        y = self.fc3(h)
        return y, x_st, theta, points

    def compute_loss(self, x_batch, t_batch, train=False,
                     return_variables=False):
        """Softmax cross-entropy loss and accuracy for one minibatch.

        Returns ``(loss, accuracy)`` or, when ``return_variables`` is True,
        ``(loss, accuracy, (y, x_st, theta, points))``.
        """
        y, x_st, theta, points = self.forward(x_batch, train=train)
        t = Variable(t_batch, volatile=not train)
        loss = F.softmax_cross_entropy(y, t)
        accuracy = F.accuracy(y, t)
        if return_variables:
            return (loss, accuracy, (y, x_st, theta, points))
        else:
            return (loss, accuracy)
# Script entry point: trains the spatial transformer network on the
# cluttered-MNIST dataset and, after every epoch, prints scores and plots
# loss/accuracy histories plus the transformer's sampling region.
# NOTE(review): this file is Python 2 (print statements, xrange, integer
# division for num_batches).
if __name__ == '__main__':
    # Load the dataset only once, even when re-running the script inside the
    # same interactive session (first run raises NameError).
    try:
        x_train_data
    except NameError:
        (x_train_data, t_train_data,
         x_valid_data, t_valid_data,
         x_test_data, t_test_data) = stm.load_cluttered_mnist()

    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', '-g', default=-1, type=int,
                        help='GPU ID (negative value indicates CPU)')
    args = parser.parse_args()
    if args.gpu >= 0:
        cuda.check_cuda_available()
    xp = cuda.cupy if args.gpu >= 0 else np
    x_valid_data = cuda.to_cpu(x_valid_data)
    t_valid_data = cuda.to_cpu(t_valid_data)
    x_test_data = cuda.to_cpu(x_test_data)
    t_test_data = cuda.to_cpu(t_test_data)

    num_train = len(x_train_data)
    num_valid = len(x_valid_data)
    num_test = len(x_test_data)
    in_shape = x_train_data.shape[1:]
    out_shape = (28, 28)

    model = SpatialTransformerNetwork(in_shape, out_shape, "affine")
    if args.gpu >= 0:
        model.to_gpu()
        x_valid_data = cuda.to_gpu(x_valid_data)
        t_valid_data = cuda.to_gpu(t_valid_data)
        x_test_data = cuda.to_gpu(x_test_data)
        t_test_data = cuda.to_gpu(t_test_data)
    initial_model = copy.deepcopy(model)
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    # Training hyper-parameters and per-epoch history buffers
    batch_size = 250
    num_batches = num_train / batch_size
    max_epochs = 1000
    l2_reg = 0.000001
    train_loss_history = []
    train_accuracy_history = []
    valid_loss_history = []
    valid_accuracy_history = []
    valid_loss_best = 100
    valid_accuracy_best = 0
    epoch_best = 0
    # Ctrl-C stops training gracefully (state stays available in a session)
    try:
        for epoch in xrange(max_epochs):
            print "epoch", epoch,
            time_begin = time.time()
            losses = []
            accuracies = []
            gWs = 0
            perm = np.random.permutation(num_train)
            for indices in np.array_split(perm, num_batches):
                x_batch = xp.asarray(x_train_data[indices])
                t_batch = xp.asarray(t_train_data[indices])
                loss, accuracy, variables = model.compute_loss(
                    x_batch, t_batch, train=True, return_variables=True)
                y, x_st, theta, points = variables
                optimizer.zero_grads()
                loss.backward()
                # optimizer.weight_decay(l2_reg)
                # optimizer.clip_grads(500)
                optimizer.update()
                losses.append(cuda.to_cpu(loss.data))
                accuracies.append(cuda.to_cpu(accuracy.data))
                # accumulate gradient norms of the weight matrices only
                # (every second entry of model.gradients)
                gWs += np.array([np.linalg.norm(cuda.to_cpu(w)) for w in
                                 model.gradients[::2]])
            train_loss = np.mean(losses)
            train_accuracy = np.mean(accuracies)

            valid_loss, valid_accuracy, valid_variables = model.compute_loss(
                x_valid_data, t_valid_data, train=False, return_variables=True)
            # NOTE(review): re-unpacks `variables` from the LAST training
            # batch (redundant); those values are used for the plots below.
            y, x_st, theta, points = variables
            y_valid, x_st_valid, theta_valid, points_valid = valid_variables
            valid_loss = cuda.to_cpu(valid_loss.data)
            valid_accuracy = cuda.to_cpu(valid_accuracy.data)
            if valid_loss < valid_loss_best:
                model_best = copy.deepcopy(model)
                valid_loss_best = valid_loss
                valid_accuracy_best = valid_accuracy
                epoch_best = epoch
                print "(Best score!)",
            print "(time: %f)" % (time.time() - time_begin)

            # print norms of the weights
            print " |W|", [np.linalg.norm(cuda.to_cpu(w)) for w in
                           model.parameters[::2]]
            print " |gW|", gWs.astype(np.float32).tolist()

            # print scores
            train_loss_history.append(train_loss)
            train_accuracy_history.append(train_accuracy)
            valid_loss_history.append(valid_loss)
            valid_accuracy_history.append(valid_accuracy)
            print " [train] loss: %f" % train_loss
            print " [valid] loss: %f" % valid_loss
            print " [valid] best loss: %f (at #%d)" % (valid_loss_best,
                                                       epoch_best)
            print " [train] accuracy: %f" % train_accuracy
            print " [valid] accuracy: %f" % valid_accuracy
            print " [valid] best accuracy: %f (at #%d)" % (
                valid_accuracy_best, epoch_best)

            # plot loss histories
            fig = plt.figure()
            plt.plot(np.arange(epoch+1), np.array(train_loss_history))
            plt.plot(np.arange(epoch+1), np.array(valid_loss_history), '-g')
            plt.plot([0, epoch+1], [valid_loss_best]*2, '-g')
            plt.ylabel('loss')
            plt.ylim([0, 2])
            plt.legend(['tloss', 'vloss'],
                       loc='best')

            # plot accuracy histories (second y-axis of the same figure)
            plt.twinx()
            plt.plot(np.arange(epoch+1), np.array(train_accuracy_history))
            plt.plot(np.arange(epoch+1), np.array(valid_accuracy_history),
                     'r-')
            plt.plot([0, epoch+1], [valid_accuracy_best]*2, 'r-')
            plt.ylabel('accuracy')
            plt.ylim([0.6, 1])
            plt.legend(['tacc', 'vacc'],
                       loc='best')
            plt.plot([epoch_best]*2, [0, 1], '-k')
            plt.grid()
            plt.show()
            plt.draw()

            # left panel: last training image with the sampling-region
            # outline; right panel: the transformed patch
            fig = plt.figure()
            ax = fig.add_subplot(1, 2, 1)
            print "model.theta.bias:", model.st.parameters[-1]
            print "theta:", theta.data[0]
            ax.matshow(cuda.to_cpu(x_batch[0]).reshape(in_shape),
                       cmap=plt.cm.gray)
            corners_x, corners_y = cuda.to_cpu(points.data[0])[
                :, [0, out_shape[1] - 1, -1, - out_shape[1]]]
            # print "theta:", theta_valid.data[0]
            # ax.matshow(x_valid_data[0].reshape(in_shape), cmap=plt.cm.gray)
            # corners_x, corners_y = points_valid.data[0][:, [0, 27, -1, -28]]
            ax.plot(corners_x[[0, 1]], corners_y[[0, 1]])
            ax.plot(corners_x[[1, 2]], corners_y[[1, 2]])
            ax.plot(corners_x[[2, 3]], corners_y[[2, 3]])
            ax.plot(corners_x[[0, 3]], corners_y[[0, 3]])
            ax.set_xlim([0, 60])
            ax.set_ylim([60, 0])
            ax = fig.add_subplot(1, 2, 2)
            ax.matshow(cuda.to_cpu(x_st.data[0]).reshape(out_shape),
                       cmap=plt.cm.gray)
            # ax.matshow(x_st_valid.data[0].reshape(out_shape), cmap=plt.cm.gray)
            plt.show()
            plt.draw()
    except KeyboardInterrupt:
        pass
| mit |
theonaun/surgeo | tests/app/test_cli.py | 1 | 5819 | import os
import pathlib
import subprocess
import sys
import tempfile
import unittest
import numpy as np
import pandas as pd
import surgeo.app.surgeo_cli
class TestSurgeoCLI(unittest.TestCase):
    """End-to-end tests for the surgeo command line interface.

    Each test invokes the CLI script in a subprocess, lets it write an
    output file to a temporary path, and compares the float columns
    (rounded to 4 decimal places) against reference fixtures stored in
    the test data folder.
    """

    # Path of the CLI entry-point script under test
    _CLI_SCRIPT = surgeo.app.surgeo_cli.__file__
    # Folder containing the input/expected-output fixtures
    _DATA_FOLDER = pathlib.Path(__file__).resolve().parents[1] / 'data'
    # Temporary output files (removed again in tearDown)
    _CSV_OUTPUT_PATH = str(
        pathlib.Path(tempfile.gettempdir())
        .joinpath('temp_surgeo.csv')
        .resolve()
    )
    _EXCEL_OUTPUT_PATH = str(
        pathlib.Path(tempfile.gettempdir())
        .joinpath('temp_surgeo.xlsx')
        .resolve()
    )

    def tearDown(self):
        # Clean up any output file the test run left behind
        if pathlib.Path(self._CSV_OUTPUT_PATH).exists():
            os.unlink(self._CSV_OUTPUT_PATH)
        if pathlib.Path(self._EXCEL_OUTPUT_PATH).exists():
            os.unlink(self._EXCEL_OUTPUT_PATH)

    def _compare(self, input_name, model_type, true_output_name, **kwargs):
        """Helper function that runs the comparison

        Parameters
        ----------
        input_name : str
            The file name of the data to be tested
        model_type : str
            The model type being tested: ['bifsg', 'surgeo', 'first', 'sur', 'geo']
        true_output_name: str
            The correct data for comparison
        kwargs : **kwargs
            A kwarg dict with the keys being the --optional arguments and the values
            being the values associated with those arguments.
        """
        # Generate input name based on input file
        input_path = str(self._DATA_FOLDER / input_name)
        # Run a process that writes to CSV output
        subprocess_commands = [
            sys.executable,
            self._CLI_SCRIPT,
            input_path,
            self._CSV_OUTPUT_PATH,
            model_type,
        ]
        if kwargs:
            # Initial clever code:
            # subprocess_commands.extend([command for kvp in [[f'--{key}', kwargs[key]] for key in kwargs.keys()] for command in kvp])
            #
            # Subsequent dumbed down code:
            # Convert kwarg key, value pairs to optional arguments e.g. ['--surname_column', 'custom_surname_col_name']
            argument_pairs = [
                [f'--{key}', value]
                for key, value
                in kwargs.items()
            ]
            # Add them to the subprocess arguments
            for argument_pair in argument_pairs:
                subprocess_commands.extend(argument_pair)
        subprocess.run(subprocess_commands, stderr=None)
        # Read the newly generated information
        df_generated = pd.read_csv(self._CSV_OUTPUT_PATH)
        # Read the true information
        df_true = pd.read_csv(self._DATA_FOLDER / true_output_name)
        # Compare values
        self._is_close_enough(df_generated, df_true)

    def _is_close_enough(self, df_generated, df_true):
        """Helper function to select floats, round them, and compare"""
        df_generated = df_generated.select_dtypes(np.float64).round(4)
        df_true = df_true.select_dtypes(np.float64).round(4)
        self.assertTrue(df_generated.equals(df_true))

    def test_surgeo_cli(self):
        """Test BISG model functionality of CLI"""
        self._compare(
            'surgeo_input.csv',
            'surgeo',
            'surgeo_output.csv',
        )

    def test_bifsg_cli(self):
        """Test bifsg model functionality of CLI"""
        self._compare(
            'bifsg_input.csv',
            'bifsg',
            'bifsg_output.csv',
            surname_column='surname',
            first_name_column='first_name'
        )

    def test_first_cli(self):
        """Test first name model functionality of CLI"""
        self._compare(
            'first_name_input.csv',
            'first',
            'first_name_output.csv',
        )

    def test_sur_cli(self):
        """Test surname model functionality of CLI"""
        self._compare(
            'surname_input.csv',
            'sur',
            'surname_output.csv',
        )

    def test_geo_cli(self):
        """Test geocode model functionality of CLI"""
        self._compare(
            'geocode_input.csv',
            'geo',
            'geocode_output.csv',
        )

    def test_excel(self):
        """Test Excel functionality of CLI"""
        # Generate input name based on input file
        input_path = str(self._DATA_FOLDER / 'surgeo_input.xlsx')
        # Run a process that writes to CSV output
        subprocess.run([
            sys.executable,
            self._CLI_SCRIPT,
            input_path,
            self._EXCEL_OUTPUT_PATH,
            'surgeo'
        ])
        # Read the newly generated information
        df_generated = pd.read_excel(self._EXCEL_OUTPUT_PATH, engine='openpyxl')
        # Read the true information
        df_true = pd.read_excel(self._DATA_FOLDER / 'surgeo_output.xlsx', engine='openpyxl')
        self._is_close_enough(df_generated, df_true)

    def test_malformed(self):
        """Test arguments to specify column names"""
        # Generate input name based on input file
        input_path = str(self._DATA_FOLDER / 'surgeo_input_misnamed.csv')
        # Run a process that writes to CSV output
        subprocess.run([
            sys.executable,
            self._CLI_SCRIPT,
            input_path,
            self._CSV_OUTPUT_PATH,
            'surgeo',
            '--zcta_column',
            'info_zip',
            '--surname_column',
            'info_name',
        ])
        # Read the newly generated information
        df_generated = pd.read_csv(self._CSV_OUTPUT_PATH)
        # Read the true information
        df_true = pd.read_csv(self._DATA_FOLDER / 'surgeo_output.csv')
        self._is_close_enough(df_generated, df_true)
# Allow running this test module directly with `python test_cli.py`.
if __name__ == '__main__':
    unittest.main()
| mit |
lneuhaus/pyrpl | pyrpl/software_modules/spectrum_analyzer.py | 1 | 29677 | ###############################################################################
# pyrpl - DSP servo controller for quantum optics with the RedPitaya
# Copyright (C) 2014-2016 Leonhard Neuhaus (neuhaus@spectro.jussieu.fr)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""
The spectrum analyzer measures the magnitude of an input signal versus
frequency. There are two working modes for the spectrum analyzer implemented in
pyrpl:
- iq mode: the input signal is demodulated around the center_frequency of
the analysis window (using iq2). The slowly varying quadratures are
subsequently sent to the 2 channels of the scope. The complex IQ time trace
is built from the sum I(t) + iQ(t). The spectrum is then evaluated by
  performing a Fourier transform of the complex iq signal.
- baseband mode: up to 2 channels are available in baseband mode. The
channels are digitized by the scope and the real traces are directly Fourier
transformed. Since both channels are acquired simultaneously, it is also
possible to retrieve the cross spectrum between channel 1 and channel 2 (the
relative phase of the fourier transform coefficients is meaningful)
At the moment, the iq mode is deactivated since we haven't yet implemented
the sharp antialiasing filters required to avoid polluting the analysis
windows from aliased noise originating from outside the Nyquist frequency of
the scope acquisition. However, we are planning on implementing such a
filter with the iir module in the near future.
In the following example, we are going to demonstrate how to measure a
sinusoidal signal and a white noise originating from an asg
.. code :: python
# let's use a module manager for the asg
with p.asgs.pop('user') as asg:
# setup a sine at 100 kHz
asg.setup(frequency=1e5, waveform='sin', trigger_source='immediately', amplitude=1., offset=0)
# setup the spectrumanalyzer in baseband mode
p.spectrumanalyzer.setup(input1_baseband=asg, #note that input1_baseband!=input)
                             baseband=True, # only mode available right now
span=1e6, # span of the analysis (/2 in iq mode)
window=blackman # filter window)
# the return format is (spectrum for channel 1, spectrum for channel 2,
# real part of cross spectrum, imaginary part of cross spectrum):
ch1, ch2, cross_re, cross_im = p.spectrumanalyzer.curve()
# plot the spectrum
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(p.spectrumanalyzer.frequencies, ch1)
We notice that the spectrum is peaked around 100 kHz (The width of the peak
is given by the residual bandwidth), and the height of the peak is 1.
The internal unit of the spectrum analyzer is V_pk^2, such that a 1 V sine
results in a 1 Vpk^2 peak in the spectrum. To convert the spectrum in units
of noise spectral density, a utility function is provided: data_to_unit()
.. code :: python
# let's use a module manager for the asg
with p.asgs.pop('user') as asg:
# setup a white noise of variance 0.1 V
asg.setup(frequency=1e5, waveform='noise', trigger_source='immediately', amplitude=0.1, offset=0)
# setup the spectrumanalyzer in baseband mode and full span
p.spectrumanalyzer.setup(input1_baseband=asg, baseband=True, span=125e6)
# the return format is (spectrum for channel 1, spectrum for channel 2,
# real part of cross spectrum, imaginary part of cross spectrum):
ch1, ch2, cross_re, cross_im = p.spectrumanalyzer.curve()
# convert to Vrms^2/Hz
data = p.spectrumanalyzer.data_to_unit(ch1, 'Vrms^2/Hz', p.spectrumanalyzer.rbw)
# plot the spectrum
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(p.spectrumanalyzer.frequencies, data)
# integrate spectrum from 0 to nyquist frequency
df = p.spectrumanalyzer.frequencies[1] - p.spectrumanalyzer.frequencies[0]
print(sum(data)*df)
As expected, the integral of the noise spectrum over the whole frequency
range gives the variance of the noise. To know more about spectrum analysis
in Pyrpl, and in particular, how the filtering windows are normalized, please
refer to the section :ref:`How a spectrum is computed in PyRPL`.
"""
import logging
logger = logging.getLogger(name=__name__)
from ..module_attributes import *
from ..hardware_modules import Scope
from ..hardware_modules.dsp import all_inputs, InputSelectProperty
from ..acquisition_module import AcquisitionModule
from ..widgets.module_widgets import SpecAnWidget
import sys
import scipy.signal as sig
import scipy.fft
# Some initial remarks about spectrum estimation:
# Main source: Oppenheim + Schaefer, Digital Signal Processing, 1975
class DisplayUnitProperty(SelectProperty):
    """Unit selector that notifies listeners whenever the display unit changes."""
    def set_value(self, obj, value):
        super(DisplayUnitProperty, self).set_value(obj, value)
        # let unit-dependent display elements (e.g. axis labels) refresh
        obj._emit_signal_by_name('unit_changed')
class CenterAttribute(FrequencyProperty):
    """Center frequency of the analysis window.

    In baseband mode the center is always 0 Hz; otherwise it maps directly
    to the demodulation frequency of the iq module.
    """
    def get_value(self, instance):
        # class-level access (instance is None) returns the descriptor itself
        if instance is None:
            return self
        if instance.baseband:
            return 0.0
        else:
            return instance.iq.frequency

    def set_value(self, instance, value):
        if instance.baseband and value != 0:
            # former solution:
            # raise ValueError("Nonzero center frequency not allowed in "
            #                  "baseband mode.")
            # more automatic way: silently leave baseband mode instead
            instance.baseband = False
        if not instance.baseband:
            instance.iq.frequency = value
        return value
class SpecAnAcBandwidth(FilterProperty):
    """AC-coupling cutoff frequency for the iq input filter."""
    def valid_frequencies(self, module):
        # only keep the non-negative cutoff options of the iq input filter
        return [freq for freq in
                module.iq.__class__.inputfilter.valid_frequencies(module.iq)
                if freq >= 0]
class RbwProperty(FilterProperty):
    """Residual bandwidth, derived from the scope decimation.

    rbw = equivalent_noise_bandwidth * sampling_rate; setting a value
    picks the closest achievable scope decimation.
    """
    def valid_frequencies(self, module):
        sample_rates = [1./st for st in Scope.sampling_times]
        return [module.equivalent_noise_bandwidth()*sr for sr in sample_rates]

    def get_value(self, obj):
        # 8 ns is the base sampling period of the scope
        sampling_rate = 1./(8e-9*obj.decimation)
        return obj.equivalent_noise_bandwidth()*sampling_rate

    def set_value(self, obj, value):
        # accept list-valued input (FilterProperty convention)
        if np.iterable(value):
            value = value[0]
        sample_rate = value/obj.equivalent_noise_bandwidth()
        obj.decimation = int(round(1./(sample_rate*8e-9)))
class SpanFilterProperty(FilterProperty):
    """Span of the analysis window, tied to the scope decimation."""
    def valid_frequencies(self, instance):
        return instance.spans

    def get_value(self, obj):
        #val = super(SpanFilterProperty, self).get_value(obj)
        #if np.iterable(val):
        #    return val[0] # maybe this should be the default behavior for
        #    # FilterAttributes... or make another Attribute type
        #else:
        #    return val
        # span is fully determined by the decimation (8 ns base period)
        return 1./(8e-9*obj.decimation)

    def set_value(self, obj, value):
        # accept list-valued input (FilterProperty convention)
        if np.iterable(value):
            value = value[0]
        sampling_time = 1./value
        obj.decimation = int(round(sampling_time/8e-9))
class DecimationProperty(SelectProperty):
    """
    Since the only integer number in [rbw, span, duration, decimation] is
    decimation, it is better to take it as the master property to avoid
    rounding problems.

    We don't want to use the scope property because when the scope is not
    slaved, the value could be anything.
    """
    def set_value(self, obj, value):
        super(DecimationProperty, self).set_value(obj, value)
        # span and rbw are both derived from decimation -> notify listeners
        obj.__class__.span.value_updated(obj, obj.span)
        obj.__class__.rbw.value_updated(obj, obj.rbw)
class WindowProperty(SelectProperty):
    """
    Changing the filter window requires to recalculate the bandwidth
    """
    def set_value(self, obj, value):
        super(WindowProperty, self).set_value(obj, value)
        # rbw options depend on the window's equivalent noise bandwidth
        obj.__class__.rbw.refresh_options(obj)
class SpectrumAnalyzer(AcquisitionModule):
"""
A spectrum analyzer is composed of an IQ demodulator, followed by a scope.
The spectrum analyzer connections are made upon calling the function setup.
"""
_widget_class = SpecAnWidget
_gui_attributes = ["input",
"center",
"baseband",
"span",
"rbw",
#"points",
"window",
"acbandwidth",
"display_unit",
"display_input1_baseband",
"display_input2_baseband",
"input1_baseband",
"input2_baseband",
"display_cross_amplitude",
]#"display_cross_phase"]
_setup_attributes =["input",
"center",
"baseband",
# better to set baseband after center,
# otherwise, asking for baseband explicitly would be
# overwritten by non-zero center
"span",
#"rbw",
#"points",
"window",
"acbandwidth",
"display_unit",
"curve_unit",
"display_input1_baseband",
"display_input2_baseband",
"input1_baseband",
"input2_baseband",
"display_cross_amplitude",]
#"display_cross_phase"]
PADDING_FACTOR = 16
# numerical values
nyquist_margin = 1.0
if_filter_bandwidth_per_span = 1.0
_enb_cached = dict() # Equivalent noise bandwidth for filter windows
quadrature_factor = 1.# 0.1*1024
# unit Vpk is such that the level of a peak in the spectrum indicates the
# correct voltage amplitude of a coherent signal (linear scale)
# more units can be added as needed, but need to guarantee that conversion
# is done as well (see implementation in lockbox for example)
display_unit = DisplayUnitProperty(default="dB(Vpk^2)",
options=["Vpk^2",
"dB(Vpk^2)",
"Vpk",
"Vrms^2",
"dB(Vrms^2)",
"Vrms",
"Vrms^2/Hz",
"dB(Vrms^2/Hz)",
"Vrms/sqrt(Hz)"],
ignore_errors=True)
# curve_unit is only used to have a parameter 'curve_unit' in saved curves.
# If >1 options are implemented, you should ensure that _get_curve takes it
# into account.
curve_unit = SelectProperty(default="Vpk^2", options=["Vpk^2"], ignore_errors=True)
# select_attributes list of options
    # select_attributes list of options: build the list of available spans
    # from the valid scope sampling times, then replace the helper function
    # by its result (class attribute `spans` ends up being a list).
    def spans(nyquist_margin):
        # see http://stackoverflow.com/questions/13905741/
        # [int(np.ceil(1. / nyquist_margin / s_time))
        return [1./s_time \
                for s_time in Scope.sampling_times]
    spans = spans(nyquist_margin)
windows = ['blackman', 'flattop', 'boxcar', 'hamming', 'gaussian'] # more
# can be
# added here (see http://docs.scipy.org/doc/scipy/reference/generated
# /scipy.signal.get_window.html#scipy.signal.get_window)
    @property
    def inputs(self):
        """Names of all available input signals."""
        return all_inputs(self).keys()
# attributes
baseband = BoolProperty(default=True, call_setup=True)
span = SpanFilterProperty(doc="""
Span can only be given by 1./sampling_time where sampling
time is a valid scope sampling time.
""",
call_setup=True)
center = CenterAttribute(call_setup=True)
# points = IntProperty(default=16384, call_setup=True)
window = WindowProperty(options=windows, call_setup=True)
input = InputSelectProperty(options=all_inputs,
default='in1',
call_setup=True,
ignore_errors=True)
input1_baseband = InputSelectProperty(options=all_inputs,
default='in1',
call_setup=True,
ignore_errors=True,
doc="input1 for baseband mode")
input2_baseband = InputSelectProperty(options=all_inputs,
default='in2',
call_setup=True,
ignore_errors=True,
doc="input2 for baseband mode")
display_input1_baseband = BoolProperty(default=True,
doc="should input1 spectrum be "
"displayed in "
"baseband-mode?")
display_input2_baseband = BoolProperty(default=True,
doc="should input2 spectrum be "
"displayed in "
"baseband-mode?")
display_cross_amplitude = BoolProperty(default=True,
doc="should cross-spectrum "
"amplitude be displayed "
"in baseband-mode?")
display_cross_phase = BoolProperty(default=False,
doc="should cross-spectrum amplitude"
" be displayed in "
"baseband-mode?")
rbw = RbwProperty(doc="Residual Bandwidth, this is a readonly "
"attribute, only span can be changed.")
decimation = DecimationProperty(options=Scope.decimations,
doc="Decimation setting for the "
"scope.")
acbandwidth = SpecAnAcBandwidth(call_setup=True)
    def __init__(self, parent, name=None):
        super(SpectrumAnalyzer, self).__init__(parent, name=name)
        # cache slot for the squared transfer function
        # NOTE(review): not filled anywhere in this part of the module
        self._transfer_function_square_cached = None
    @property
    def iq(self):
        """The iq module (iq2) used for demodulation, acquired lazily."""
        if not hasattr(self, '_iq'):
            self._iq = self.pyrpl.rp.iq2  # can't use the normal pop
            # mechanism because we specifically want the customized iq2
            self._iq.owner = self.name
        return self._iq
iq_quadraturesignal = 'iq2_2'
    def _remaining_duration(self):
        """
        Duration before next scope curve will be ready.

        NOTE(review): this method is redefined identically further down in
        the class body; one of the two definitions should be removed.
        """
        return self.scope._remaining_duration()
    @property
    def data_length(self):
        """Number of samples used for the spectrum (taken from the scope)."""
        return self.scope.data_length
        #return int(self.points) # *self.nyquist_margin)
    @property
    def sampling_time(self):
        """Time between two samples: 1 / (nyquist_margin * span)."""
        return 1. / self.nyquist_margin / self.span
    # duplicate of the definition above; being later, this one takes effect
    def _remaining_duration(self):
        return self.scope._remaining_duration()
    def curve_ready(self):
        """Whether the scope has finished acquiring the current curve."""
        return self.scope.curve_ready()
    @property
    def scope(self):
        """Shortcut to the redpitaya scope module used for acquisition."""
        return self.pyrpl.rp.scope
    @property
    def duration(self):
        """Duration of a single scope acquisition."""
        return self.scope.duration
    def filter_window(self):
        """
        :return: filter window of length ``data_length``, normalized such
            that ``sum(window) == 2``
        """
        if self.window=='gaussian':
            # a tuple with the std is needed for Gaussian window
            window_name = ('gaussian', self.data_length/10)
        else:
            window_name = self.window
        window = sig.get_window(window_name, self.data_length, fftbins=False)
        # empirical value for scaling flattop to sqrt(W)/V
        window/=(np.sum(window)/2)
        return window
    def _get_iq_data(self):
        """
        :return: complex iq time trace, truncated to ``data_length`` samples
        """
        res = self.scope._get_curve()
        # combine the two scope channels into one complex trace; both
        # branches are numerically equivalent (truncate before vs. after)
        if self.baseband:
            return res[0][:self.data_length] + 1j*res[1][:self.data_length]
        else:
            return (res[0] + 1j*res[1])[:self.data_length]
        # res += 1j*self.scope.curve(2, timeout=None)
        # return res[:self.data_length]
def _get_filtered_iq_data(self):
"""
:return: the product between the complex iq data and the filter_window
"""
return self._get_iq_data() * np.asarray(self.filter_window(),
dtype=np.complex)
    def useful_index_obsolete(self):
        """
        :return: a slice containing the portion of the spectrum between start
        and stop

        NOTE(review): marked obsolete by its name; only referenced from a
        commented-out expression in ``frequencies``.
        """
        middle = int(self.data_length / 2)
        length = self.points # self.data_length/self.nyquist_margin
        if self.baseband:
            return slice(middle-1, middle + length/2 + 1)#slice(middle,
            # int(middle + length /
            # 2 +
            # 1))
        else:
            return slice(int(middle - length/2), int(middle + length/2 + 1))
    @property
    def _real_points(self):
        """
        In baseband, only half of the points are returned
        :return: the real number of points that will eventually be returned

        NOTE(review): the ``points`` attribute is commented out in the class
        body above — confirm where ``self.points`` is defined.
        """
        return self.points/2 if self.baseband else self.points
    @property
    def frequencies(self):
        """
        :return: frequency axis of the spectrum (1-D array)
        """
        if self.baseband:
            # real input -> one-sided (rfft) frequency axis
            return np.fft.rfftfreq(self.data_length*self.PADDING_FACTOR,
                                   self.sampling_time)
        else:
            # complex iq trace -> two-sided axis, shifted to center frequency
            return self.center + scipy.fft.fftshift( scipy.fft.fftfreq(
                                  self.data_length*self.PADDING_FACTOR,
                                  self.sampling_time)) #[self.useful_index()]
    def data_to_dBm(self, data): # will become obsolete
        """Convert a power array to a logarithmic (dBm-style) scale.

        NOTE(review): modifies ``data`` in place — non-positive entries are
        clipped before taking the logarithm; pass a copy if the original
        array must be preserved.
        """
        # replace values whose log doesnt exist
        data[data <= 0] = 1e-100
        # conversion to dBm scale
        return 10.0 * np.log10(data) + 30.0
def data_to_unit(self, data, unit, rbw):
"""
Converts the array 'data', assumed to be in 'Vpk^2', into the
specified unit. Unit can be anything in ['Vpk^2', 'dB(Vpk^2)',
'Vrms^2', 'dB(Vrms^2)', 'Vrms', 'Vrms^2/Hz'].
Since some units require a rbw for the conversion, it is an explicit
argument of the function.
"""
data = abs(data)
if unit == 'Vpk^2':
return data
if unit == 'dB(Vpk^2)':
# need to add epsilon to avoid divergence of logarithm
return 10 * np.log10(data + sys.float_info.epsilon)
if unit == 'Vpk':
return np.sqrt(data)
if unit == 'Vrms^2':
return data / 2
if unit == 'dB(Vrms^2)':
# need to add epsilon to avoid divergence of logarithm
return 10 * np.log10(data / 2 + sys.float_info.epsilon)
if unit == 'Vrms':
return np.sqrt(data) / np.sqrt(2)
if unit == 'Vrms^2/Hz':
return data / 2 / rbw
if unit == 'dB(Vrms^2/Hz)':
# need to add epsilon to avoid divergence of logarithm
return 10 * np.log10(data / 2 / rbw + sys.float_info.epsilon)
if unit == 'Vrms/sqrt(Hz)':
return np.sqrt(data) / np.sqrt(2) / rbw
def data_to_display_unit(self, data, rbw):
    """
    Converts the array 'data', assumed to be in 'Vpk^2', into display
    units (whatever unit is currently selected in self.display_unit).

    Since some units require a rbw for the conversion, it is an explicit
    argument of the function.

    :param data: spectrum data in Vpk^2
    :param rbw: residual bandwidth in Hz
    :return: data converted to the currently selected display unit
    """
    return self.data_to_unit(data, self.display_unit, rbw)
def transfer_function_iq(self, frequencies):
    """
    Complex transfer function of the iq demodulation filter chain,
    evaluated at the given absolute frequencies.

    :param frequencies: array of frequencies in Hz
    :return: complex array of the same length; all ones in baseband
        mode, where no iq filter sits in the signal path
    """
    # transfer function calculations
    tf_iq = np.ones(len(frequencies), dtype=complex)
    # iq transfer_function
    if not self.baseband:
        # the iq filters act on the offset from the demodulation center
        displaced_freqs = frequencies - self.center
        for f in self._iq_bandwidth():
            if f == 0:
                # a zero cutoff means this filter stage is bypassed
                continue
            elif f > 0:  # positive cutoff: first-order lowpass
                tf_iq *= 1.0 / (
                    1.0 + 1j * displaced_freqs / f)
                # quadrature_delay += 2
            elif f < 0:  # negative cutoff: first-order highpass
                tf_iq *= 1.0 / (
                    1.0 + 1j * f / displaced_freqs)
                # quadrature_delay += 1  # one cycle extra delay per
                # highpass
    return tf_iq
def transfer_function_scope(self, frequencies):
    """
    Transfer function of the scope acquisition at the given frequencies.

    When the scope decimates (averages) samples, the averaging window
    acts as a sinc filter on the displaced frequencies; with no
    decimation the response is flat.
    """
    # scope transfer function
    if not self.baseband:
        displaced_freqs = frequencies - self.center
    else:
        displaced_freqs = frequencies
    if self._scope_decimation()>1:
        # normalize to the 125 MHz base sampling rate of the ADC
        norm_freq = self._scope_decimation()*displaced_freqs/125e6
        return np.sinc(norm_freq)
    else:
        return np.ones(len(displaced_freqs))
def transfer_function(self, frequencies):
    """
    Total transfer function from the generation of quadratures to their
    sampling, i.e. the product of the iq-filter response and the scope
    decimation response. At the moment, delays are not taken into
    account (and the phase response is not guaranteed to be exact).
    """
    tf_iq = self.transfer_function_iq(frequencies)
    tf_scope = self.transfer_function_scope(frequencies)
    return tf_iq * tf_scope
# Concrete implementation of AcquisitionModule methods
# ----------------------------------------------------
@property
def data_x(self):
    """x-axis of the displayed data: the frequency array in Hz."""
    return self.frequencies
def _new_run_future(self):
    """Create the next run future and attach the current rbw to it."""
    # Redefined because a SpecAnRun needs to know its rbw
    super(SpectrumAnalyzer, self)._new_run_future()
    self._run_future.rbw = self.rbw
def _free_up_resources(self):
    """Return the scope to the pool so that other modules can use it."""
    self.scope.free()
def _get_curve(self):
    """
    Acquire one raw spectrum from the scope.

    No transfer_function correction of the display unit is applied here;
    only the magnitude of the iq+scope transfer function is divided out.

    :return: in baseband, a (4, n) array
        [|fft1|^2, |fft2|^2, Re(cross), Im(cross)] for the two real
        inputs; otherwise a 1d power-spectrum array.
    """
    iq_data = self._get_filtered_iq_data()  # get iq data (from scope)
    if not self.running_state in ["running_single", "running_continuous"]:
        self.pyrpl.scopes.free(self.scope)  # free scope if not continuous
    if self.baseband:
        # In baseband, where the 2 real inputs are stored in the real and
        # imaginary part of iq_data, we need to make 2 different FFTs. Of
        # course, we could do it naively by calling twice fft, however,
        # this is not optimal:
        #   x = rand(10000)
        #   y = rand(10000)
        #   %timeit fft.fft(x)        # --> 74.3 us (143 us with numpy)
        #   %timeit fft.fft(x + 1j*y) # --> 163 us (182 us with numpy)
        # A convenient option described in Oppenheim/Schafer p.
        # 333-334 consists in taking the right combinations of
        # negative/positive/real/imaginary part of the complex fft,
        # however, an optimized function for real FFT is already provided:
        #   %timeit fft.rfft(x)       # --> 63 us (72.7 us with numpy)
        # --> In fact, we will use numpy.rfft instead of
        # scipy.fft.rfft because the output
        # format is directly a complex array, and thus, easier to handle.
        fft1 = np.fft.rfft(np.real(iq_data),
                           self.data_length*self.PADDING_FACTOR)
        fft2 = np.fft.rfft(np.imag(iq_data),
                           self.data_length*self.PADDING_FACTOR)
        # cross spectrum between the two channels
        cross_spectrum = np.conjugate(fft1)*fft2
        res = np.array([abs(fft1)**2,
                        abs(fft2)**2,
                        np.real(cross_spectrum),
                        np.imag(cross_spectrum)])
        # at some point, we need to cache the tf for performance
        self._last_curve_raw = res  # for debugging purpose
        return res/abs(self.transfer_function(self.frequencies))**2
    else:
        # Realize the complex fft of iq data
        res = scipy.fft.fftshift(scipy.fft.fft(iq_data,
                                 self.data_length*self.PADDING_FACTOR))
        # at some point we need to cache the tf for performance
        self._last_curve_raw = np.abs(res)**2  # for debugging purpose
        return self._last_curve_raw/abs(self.transfer_function(
            self.frequencies))**2
def _remaining_time(self):
    """
    :returns curve duration - elapsed duration since last setup() call.
    """
    return self.scope._remaining_time()
def _data_ready(self):
    """
    :return: True if curve is ready in the hardware, False otherwise.
    """
    return self.scope._data_ready()
def _iq_bandwidth(self):
    # Let the iq module's bandwidth property round the requested
    # 4-stage filter cutoffs to hardware-realizable values.
    return self.iq.__class__.bandwidth.validate_and_normalize(
        self.iq,
        [self.span*self.if_filter_bandwidth_per_span]*4)
def _scope_decimation(self):
    # Round the requested sampling time to an allowed scope decimation;
    # 8e-9 s is the ADC sampling period (125 MHz base clock).
    return self.scope.__class__.decimation.validate_and_normalize(
        self.scope,
        int(round(self.sampling_time/8e-9)))
def _scope_duration(self):
    # acquisition duration = decimation * ADC period (8 ns) * points
    return self._scope_decimation()*8e-9*self.data_length
def _start_acquisition(self):
    """
    Configure the iq demodulator (unless in baseband mode) and the
    scope, then launch a single scope acquisition.

    :return: the return value of the scope's _start_acquisition()
    """
    # removed unused local `autosave_backup = self._autosave_active`
    # (assigned but never read or restored)
    # setup iq module
    if not self.baseband:
        self.iq.setup(
            input=self.input,
            bandwidth=self._iq_bandwidth(),
            gain=0,
            phase=0,
            acbandwidth=self.acbandwidth,
            amplitude=0,
            output_direct='off',
            output_signal='quadrature',
            quadrature_factor=self.quadrature_factor)
    # change scope ownership in order not to mess up the scope
    # configuration
    if self.scope.owner != self.name:
        self.pyrpl.scopes.pop(self.name)
    # setup scope: in baseband the two real inputs are acquired
    # directly, otherwise the two iq quadratures are.
    if self.baseband:
        input1 = self.input1_baseband
        input2 = self.input2_baseband
    else:
        input1 = self.iq
        input2 = self.iq_quadraturesignal
    self.scope.setup(input1=input1,
                     input2=input2,
                     average=True,
                     duration=self.scope.data_length*self._scope_decimation()*8e-9,
                     trigger_source="immediately",
                     ch1_active=True,
                     ch2_active=True,
                     rolling_mode=False,
                     running_state='stopped')
    return self.scope._start_acquisition()
def save_curve(self):
    """
    Saves the curve(s) that is (are) currently displayed in the gui in
    the db_system. Also, returns the list [curve_ch1, curve_ch2]...
    """
    # (a verbatim duplicate of the docstring above was removed here)
    if not self.baseband:
        return super(SpectrumAnalyzer, self)._save_curve(
            self._run_future.data_x,
            self._run_future.data_avg,
            **self.setup_attributes)
    else:
        # copy so that updating with per-curve entries below cannot
        # mutate a dict that setup_attributes might hand out by reference
        d = dict(self.setup_attributes)
        curves = [None, None]
        # data_avg rows: [spectrum1, spectrum2, Re(cross), Im(cross)]
        for ch, active in [(0, self.display_input1_baseband),
                           (1, self.display_input2_baseband)]:
            if active:
                d.update({'ch': ch,
                          'name': self.curve_name + ' ch' + str(ch + 1)})
                curves[ch] = self._save_curve(self._run_future.data_x,
                                              self._run_future.data_avg[ch],
                                              **d)
        if self.display_cross_amplitude:
            d.update({'ch': 'cross',
                      'name': self.curve_name + ' cross'})
            curves.append(self._save_curve(self._run_future.data_x,
                                           self._run_future.data_avg[2] +
                                           1j*self._run_future.data_avg[3],
                                           **d))
        return curves
def equivalent_noise_bandwidth(self):
    """Returns the equivalent noise bandwidth of the current window. To
    get the residual bandwidth, this number has to be multiplied by the
    sample rate."""
    if self.window not in self._enb_cached:
        filter_window = self.filter_window()
        # ENBW computed as sum(w^2) / sum(w)^2 and cached per window
        # type. NOTE(review): the textbook ENBW definition carries an
        # extra factor of the window length; presumably that factor is
        # absorbed in the sample-rate multiplication mentioned above —
        # confirm against callers.
        self._enb_cached[self.window] = (sum(filter_window ** 2)) / \
                                        (sum(filter_window) ** 2)
    return self._enb_cached[self.window]
| gpl-3.0 |
nicococo/ClusterSvdd | scripts/test_ad_svdd.py | 1 | 7209 | import matplotlib.pyplot as plt
import sklearn.metrics as metrics
import sklearn.datasets as datasets
import numpy as np
from ClusterSVDD.svdd_primal_sgd import SvddPrimalSGD
from ClusterSVDD.svdd_dual_qp import SvddDualQP
from ClusterSVDD.cluster_svdd import ClusterSvdd
def generate_data_uniform(datapoints, cluster_dir_alphas=(10, 10, 10), outlier_frac=0.1, feats=2, noise_feats=0):
    """
    Generate gaussian clusters plus uniformly distributed outliers.

    :param datapoints: total number of samples (columns of X)
    :param cluster_dir_alphas: Dirichlet concentrations; the length of this
        tuple sets the number of clusters
    :param outlier_frac: fraction of samples drawn uniformly as outliers
    :param feats: total number of features (rows of X)
    :param noise_feats: number of trailing features filled with pure noise
    :return: (X, y) where X has shape (feats, datapoints), normalized to
        [-1, +1] per feature, and y contains cluster labels 1..k and -1
        for outliers
    """
    cluster = len(cluster_dir_alphas)
    X = np.zeros((feats, datapoints))
    y = np.zeros(datapoints)
    # np.int was removed from numpy; the builtin int is equivalent
    num_noise = int(np.floor(datapoints*outlier_frac))
    # draw cluster sizes from a Dirichlet distribution
    samples = np.random.dirichlet(cluster_dir_alphas, 1)[0]
    samples = np.array(samples*(datapoints-num_noise), dtype=int)
    print(samples, sum(samples))
    if np.sum(samples)+num_noise < datapoints:
        # rounding above can leave a shortfall; fill it with extra noise
        print('Add another sample..')
        num_noise += datapoints-(np.sum(samples)+num_noise)
    print(num_noise+np.sum(samples), datapoints)
    cnt = num_noise
    for i in range(cluster):
        m = np.random.randn(feats-noise_feats)*8.
        # isotropic covariance with a random scale
        cov = 2.*np.random.rand() * np.eye(feats-noise_feats)
        print(cov)
        X[:feats-noise_feats, cnt:cnt+samples[i]] = np.random.multivariate_normal(m, cov, samples[i]).T
        y[cnt:cnt+samples[i]] = i+1
        cnt += samples[i]
    mul = np.max(np.abs(X))*2.
    print(mul)
    # outliers: uniform over a box twice the current data extent
    X[:, :num_noise] = 2.*mul*(np.random.rand(feats, num_noise)-0.5)
    y[:num_noise] = -1
    # trailing features carry pure gaussian noise for all samples
    X[feats-noise_feats:, :] = 2.*mul*np.random.randn(noise_feats, datapoints)
    # normalize each feature to [-1, +1]
    X = X / np.repeat(np.max(np.abs(X), axis=1)[:, np.newaxis], datapoints, axis=1)
    return X, y
def generate_data_moons(datapoints, outlier_frac=0.1, noise_feats=0.05):
    """
    Generate the sklearn two-moons dataset plus uniform outliers.

    :param datapoints: total number of samples
    :param outlier_frac: fraction of samples drawn uniformly as outliers
    :param noise_feats: gaussian noise level passed to make_moons
    :return: (X, y) where X has shape (2, datapoints), normalized to
        [-1, +1] per feature; y is 1 or 2 for the two moons, -1 for outliers
    """
    X = np.zeros((datapoints, 2))
    y = np.zeros(datapoints)
    # np.int was removed from numpy; the builtin int is equivalent
    num_noise = int(np.floor(datapoints*outlier_frac))
    X[num_noise:, :], y[num_noise:] = datasets.make_moons(n_samples=datapoints-num_noise,
                                                          noise=noise_feats)
    X = X.T
    # shift moon labels from {0, 1} to {1, 2} so -1 is free for outliers
    y[num_noise:] += 1
    mul = np.max(np.abs(X))*1.5
    print(mul)
    # outliers: uniform over a box 1.5x the data extent
    X[:, :num_noise] = 2.*mul*(np.random.rand(2, num_noise)-0.5)
    y[:num_noise] = -1
    # normalize each feature to [-1, +1]
    X = X / np.repeat(np.max(np.abs(X), axis=1)[:, np.newaxis], datapoints, axis=1)
    return X, y
def generate_data(datapoints, norm_dir_alpha=10., anom_dir_alpha=4., anom_cluster=[0, 0, 0, 1, 1, 1], feats=2):
    """
    Generate gaussian clusters, some of which are marked as anomalous.

    :param datapoints: total number of samples (columns of X)
    :param norm_dir_alpha: Dirichlet concentration for normal clusters
    :param anom_dir_alpha: Dirichlet concentration for anomalous clusters
    :param anom_cluster: 0/1 flags; 1 marks a cluster as anomalous (tight,
        negative labels), 0 as normal (spread out, positive labels)
    :param feats: number of features (rows of X)
    :return: (X, y) where X has shape (feats, datapoints), normalized to
        [-1, +1] per feature; y holds labels 1, 2, ... for normal clusters
        and -1, -2, ... for anomalous clusters
    """
    cluster = len(anom_cluster)
    X = np.zeros((feats, datapoints))
    y = np.zeros(datapoints)
    cluster_dir_alphas = np.array(anom_cluster)*anom_dir_alpha + (1-np.array(anom_cluster))*norm_dir_alpha
    samples = np.random.dirichlet(cluster_dir_alphas, 1)[0]
    # np.int was removed from numpy; the builtin int is equivalent
    samples = np.array(samples*datapoints, dtype=int)
    # flooring can lose up to (cluster-1) samples in total; previously only
    # a single sample was added back, leaving all-zero columns behind
    deficit = datapoints - np.sum(samples)
    if deficit > 0:
        print('Add another sample..')
        samples[-1] += deficit
    cnt = 0
    anom_lbl = -1
    norm_lbl = 1
    for i in range(cluster):
        # anomalous clusters are tight (sigma=1), normal ones spread out
        sigma = 8.
        if anom_cluster[i] == 1:
            sigma = 1.
        m = np.random.randn(feats)*sigma
        cov = np.diag(np.random.rand(feats))
        print(cov)
        X[:, cnt:cnt+samples[i]] = np.random.multivariate_normal(m, cov, samples[i]).T
        if anom_cluster[i] == 1:
            label = anom_lbl
            anom_lbl -= 1
        else:
            label = norm_lbl
            norm_lbl += 1
        y[cnt:cnt+samples[i]] = label
        cnt += samples[i]
    # normalize each feature to [-1, +1]
    X = X / np.repeat(np.max(np.abs(X), axis=1)[:, np.newaxis], datapoints, axis=1)
    return X, y
def evaluate(nu, k, data, y, train, test, use_kernel=False, kparam=0.1, plot=False):
    """
    Train a ClusterSvdd with k SVDD models and evaluate clustering quality
    (adjusted Rand index) and anomaly detection (ROC AUC) on the test split.

    :param nu: SVDD outlier-fraction parameter; nu=1.0 disables rejection
    :param k: number of clusters / SVDD models
    :param data: (feats, samples) data matrix
    :param y: labels; negative values mark anomalies
    :param train: index array of training samples
    :param test: index array of test samples
    :param use_kernel: use the dual RBF QP solver instead of the primal SGD
    :param kparam: RBF kernel parameter (only used if use_kernel=True)
    :param plot: show a scatter plot with the learned spheres
    :return: (ari, auc)
    """
    # fix the initialization for all methods
    membership = np.random.randint(0, k, y.size)
    svdds = list()
    for l in range(k):
        if use_kernel:
            svdds.append(SvddDualQP('rbf', kparam, nu))
        else:
            svdds.append(SvddPrimalSGD(nu))
    svdd = ClusterSvdd(svdds)
    svdd.fit(data[:, train].copy(), max_iter=60, init_membership=membership[train])
    scores, classes = svdd.predict(data[:, test].copy())

    # normal classes are positive (e.g. 1,2,3,..) anomalous class is -1
    print(y[test])
    true_lbl = y[test]
    true_lbl[true_lbl < 0] = -1  # convert outliers to single outlier class
    ari = metrics.cluster.adjusted_rand_score(true_lbl, classes)
    if nu < 1.0:
        # samples outside their sphere count as the anomaly cluster
        classes[scores > 0.] = -1
        ari = metrics.cluster.adjusted_rand_score(true_lbl, classes)
    print('ARI=', ari)

    fpr, tpr, _ = metrics.roc_curve(y[test] < 0., scores, pos_label=1)
    auc = metrics.auc(fpr, tpr)
    print('AUC=', auc)

    if plot:
        plt.figure(1)
        anom_inds = np.where(y == -1)[0]
        plt.plot(data[0, anom_inds], data[1, anom_inds], '.g', markersize=2)
        nom_inds = np.where(y != -1)[0]
        plt.plot(data[0, nom_inds], data[1, nom_inds], '.r', markersize=6)
        # draw the sphere of each svdd (center + circle)
        an = np.linspace(0, 2*np.pi, 100)
        for l in range(k):
            r = np.sqrt(svdd.svdds[l].radius2)
            if hasattr(svdd.svdds[l], 'c'):
                plt.plot(svdd.svdds[l].c[0], svdd.svdds[l].c[1],
                         'xb', markersize=6, linewidth=2, alpha=0.7)
                plt.plot(r*np.sin(an)+svdd.svdds[l].c[0], r*np.cos(an)+svdd.svdds[l].c[1],
                         '-b', linewidth=2, alpha=0.7)
        plt.show()
    return ari, auc
if __name__ == '__main__':
    # experiment configuration: fixed train/test split over the shuffled data
    num_train = 600
    num_test = 600
    train = np.array(range(num_train), dtype='i')
    test = np.array(range(num_train, num_train + num_test), dtype='i')
    reps = 1

    nus = [0.1, 0.5, 0.8, 1.0]
    ks = [3]

    aris = np.zeros((reps, len(nus), len(ks)))
    aucs = np.zeros((reps, len(nus), len(ks)))
    data, y = generate_data_uniform(num_train + num_test, cluster_dir_alphas=(10, 10, 10), outlier_frac=0.5, feats=2, noise_feats=0)
    for r in range(reps):
        # shuffle samples anew for every repetition
        inds = np.random.permutation((num_test + num_train))
        data = data[:, inds]
        y = y[inds]
        # reuse the same rng seed for every (nu, k) combination so that
        # results are comparable across parameter settings
        ssseeed = np.random.randint(low=0, high=1101010)
        for nu in range(len(nus)):
            for k in range(len(ks)):
                np.random.seed(ssseeed)
                aris[r, nu, k], aucs[r, nu, k] = evaluate(nus[nu], ks[k], data, y, train, test, use_kernel=False, kparam=1., plot=False)

    print('\n')
    for nu in range(len(nus)):
        print('')
        for k in range(len(ks)):
            # the AUC std is now {5}; previously {4} (the ARI std) was
            # printed twice and the AUC std argument was never used
            print('k={0} nu={1}: ARI = {2:1.2f}+/-{4:1.2f} AUC = {3:1.2f}+/-{5:1.2f}'.format(
                ks[k], nus[nu],
                np.mean(aris[:, nu, k]), np.mean(aucs[:, nu, k]),
                np.std(aris[:, nu, k]), np.std(aucs[:, nu, k])))

    print('\nDONE :)')
| mit |
michrawson/nyu_ml_lectures | notebooks/figures/plot_rbf_svm_parameters.py | 19 | 2018 | import matplotlib.pyplot as plt
import numpy as np
from sklearn.svm import SVC
from sklearn.datasets import make_blobs
from .plot_2d_separator import plot_2d_separator
def make_handcrafted_dataset():
    """Return a small 2-class blob dataset with a few points relabeled and
    removed so that the decision boundary is interesting to plot."""
    # a carefully hand-designed dataset
    X, y = make_blobs(centers=2, random_state=4, n_samples=30)
    y[np.array([7, 27])] = 0
    # np.bool was removed from numpy; the builtin bool is equivalent
    mask = np.ones(len(X), dtype=bool)
    mask[np.array([0, 1, 5, 26])] = 0
    X, y = X[mask], y[mask]
    return X, y
def plot_rbf_svm_parameters():
    """Plot RBF-SVM decision boundaries on the hand-crafted toy dataset,
    varying C in the first figure and gamma in the second."""
    X, y = make_handcrafted_dataset()

    # one axis per C value; previously only 3 axes were created for 4
    # values of C, so zip silently dropped C=100
    fig, axes = plt.subplots(1, 4, figsize=(15, 3))
    for ax, C in zip(axes, [1e0, 5, 10, 100]):
        ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
        svm = SVC(kernel='rbf', C=C).fit(X, y)
        plot_2d_separator(svm, X, ax=ax, eps=.5)
        ax.set_title("C = %f" % C)

    fig, axes = plt.subplots(1, 4, figsize=(15, 3))
    for ax, gamma in zip(axes, [0.1, .5, 1, 10]):
        ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
        svm = SVC(gamma=gamma, kernel='rbf', C=1).fit(X, y)
        plot_2d_separator(svm, X, ax=ax, eps=.5)
        ax.set_title("gamma = %f" % gamma)
def plot_svm(log_C, log_gamma):
    """Fit and plot a single RBF SVM with C = 10**log_C and
    gamma = 10**log_gamma, highlighting the support vectors."""
    X, y = make_handcrafted_dataset()
    # sliders work on a log scale; convert to the actual parameters
    C = 10. ** log_C
    gamma = 10. ** log_gamma
    svm = SVC(kernel='rbf', C=C, gamma=gamma).fit(X, y)
    ax = plt.gca()
    plot_2d_separator(svm, X, ax=ax, eps=.5)
    # plot data
    ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
    # plot support vectors as larger hollow circles
    sv = svm.support_vectors_
    ax.scatter(sv[:, 0], sv[:, 1], s=230, facecolors='none', zorder=10, linewidth=3)
    ax.set_title("C = %.4f gamma = %.4f" % (C, gamma))
def plot_svm_interactive():
    """Return an interactive widget with log-scale C and gamma sliders
    driving plot_svm."""
    try:
        # IPython.html.widgets was removed from IPython; the widgets now
        # live in the standalone ipywidgets package
        from ipywidgets import interactive, FloatSlider
    except ImportError:
        # fallback for very old IPython installations
        from IPython.html.widgets import interactive, FloatSlider
    C_slider = FloatSlider(min=-3, max=3, step=.1, value=0, readout=False)
    gamma_slider = FloatSlider(min=-2, max=2, step=.1, value=0, readout=False)
    return interactive(plot_svm, log_C=C_slider, log_gamma=gamma_slider)
| cc0-1.0 |
wzbozon/statsmodels | statsmodels/sandbox/examples/try_multiols.py | 33 | 1243 | # -*- coding: utf-8 -*-
"""
Created on Sun May 26 13:23:40 2013
Author: Josef Perktold, based on Enrico Giampieri's multiOLS
"""
#import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.sandbox.multilinear import multiOLS, multigroup
data = sm.datasets.longley.load_pandas()
df = data.exog
df['TOTEMP'] = data.endog
#This will perform the specified linear model on all the
#other columns of the dataframe
res0 = multiOLS('GNP + 1', df)
#This select only a certain subset of the columns
res = multiOLS('GNP + 0', df, ['GNPDEFL', 'TOTEMP', 'POP'])
print(res.to_string())
url = "http://vincentarelbundock.github.com/"
url = url + "Rdatasets/csv/HistData/Guerry.csv"
df = pd.read_csv(url, index_col=1) #'dept')
#evaluate the relationship between the various parameters whith the Wealth
pvals = multiOLS('Wealth', df)['adj_pvals', '_f_test']
#define the groups
groups = {}
groups['crime'] = ['Crime_prop', 'Infanticide',
'Crime_parents', 'Desertion', 'Crime_pers']
groups['religion'] = ['Donation_clergy', 'Clergy', 'Donations']
groups['wealth'] = ['Commerce', 'Lottery', 'Instruction', 'Literacy']
#do the analysis of the significance
res3 = multigroup(pvals < 0.05, groups)
print(res3)
| bsd-3-clause |
End of preview. Expand
in Data Studio
README.md exists but content is empty.
- Downloads last month
- 2