repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
rafwiewiora/msmbuilder | msmbuilder/tests/test_preprocessing.py | 6 | 8042 | import numpy as np
from numpy.testing.decorators import skipif
# Feature-availability probes: each msmbuilder wrapper below only exists when
# a sufficiently recent scikit-learn is installed, so try the import pair and
# record whether it succeeded.  Catch ImportError specifically — a bare
# ``except:`` would also swallow KeyboardInterrupt/SystemExit.
try:
    from sklearn.preprocessing import (FunctionTransformer as
                                       FunctionTransformerR)
    from msmbuilder.preprocessing import FunctionTransformer
    HAVE_FT = True
except ImportError:
    HAVE_FT = False

try:
    from sklearn.preprocessing import MinMaxScaler as MinMaxScalerR
    from msmbuilder.preprocessing import MinMaxScaler
    HAVE_MMS = True
except ImportError:
    HAVE_MMS = False

try:
    from sklearn.preprocessing import MaxAbsScaler as MaxAbsScalerR
    from msmbuilder.preprocessing import MaxAbsScaler
    HAVE_MAS = True
except ImportError:
    HAVE_MAS = False

try:
    from sklearn.preprocessing import RobustScaler as RobustScalerR
    from msmbuilder.preprocessing import RobustScaler
    HAVE_RS = True
except ImportError:
    HAVE_RS = False

try:
    from sklearn.preprocessing import StandardScaler as StandardScalerR
    from msmbuilder.preprocessing import StandardScaler
    HAVE_SS = True
except ImportError:
    HAVE_SS = False
from sklearn.preprocessing import (Binarizer as BinarizerR,
Imputer as ImputerR,
KernelCenterer as KernelCentererR,
LabelBinarizer as LabelBinarizerR,
MultiLabelBinarizer as MultiLabelBinarizerR,
Normalizer as NormalizerR,
PolynomialFeatures as PolynomialFeaturesR)
from ..preprocessing import (Binarizer, Imputer, KernelCenterer,
LabelBinarizer, MultiLabelBinarizer,
Normalizer, PolynomialFeatures, Butterworth,
EWMA, DoubleEWMA)
random = np.random.RandomState(42)
trajs = [random.randn(100, 3) for _ in range(5)]
labels = [random.randint(low=0, high=5, size=100).reshape(-1, 1)
for _ in range(5)]
def test_butterworth():
    """Butterworth filtering keeps the trajectory count but alters values."""
    smoothed = Butterworth().fit_transform(trajs)
    assert len(smoothed) == len(trajs)
    assert any(np.abs(smoothed[0] - trajs[0]).ravel() > 1E-5)
def test_ewma():
    """EWMA smoothing keeps the trajectory count but alters values."""
    smoothed = EWMA(span=5).fit_transform(trajs)
    assert len(smoothed) == len(trajs)
    assert any(np.abs(smoothed[0] - trajs[0]).ravel() > 1E-5)
def test_doubleewma():
    """Double-EWMA smoothing keeps the trajectory count but alters values."""
    smoothed = DoubleEWMA(span=5).fit_transform(trajs)
    assert len(smoothed) == len(trajs)
    assert any(np.abs(smoothed[0] - trajs[0]).ravel() > 1E-5)
def test_binarizer_vs_sklearn():
    """msmbuilder's Binarizer must match sklearn's on the first trajectory."""
    reference = BinarizerR()
    reference.fit(np.concatenate(trajs))
    wrapped = Binarizer()
    wrapped.fit(trajs)
    expected = reference.transform(trajs[0])
    actual = wrapped.transform(trajs)[0]
    np.testing.assert_array_almost_equal(expected, actual)
@skipif(not HAVE_FT, 'this test requires sklearn >0.17.0')
def test_functiontransformer_vs_sklearn():
    """msmbuilder's FunctionTransformer must match sklearn's."""
    reference = FunctionTransformerR()
    reference.fit(np.concatenate(trajs))
    wrapped = FunctionTransformer()
    wrapped.fit(trajs)
    expected = reference.transform(trajs[0])
    actual = wrapped.transform(trajs)[0]
    np.testing.assert_array_almost_equal(expected, actual)
def test_imputer_vs_sklearn():
    """msmbuilder's Imputer must match sklearn's on the first trajectory."""
    reference = ImputerR()
    reference.fit(np.concatenate(trajs))
    wrapped = Imputer()
    wrapped.fit(trajs)
    expected = reference.transform(trajs[0])
    actual = wrapped.transform(trajs)[0]
    np.testing.assert_array_almost_equal(expected, actual)
def test_kernelcenterer_vs_sklearn():
    """msmbuilder's KernelCenterer must match sklearn's."""
    reference = KernelCentererR()
    reference.fit(np.concatenate(trajs))
    wrapped = KernelCenterer()
    wrapped.fit(trajs)
    expected = reference.transform(trajs[0])
    actual = wrapped.transform(trajs)[0]
    np.testing.assert_array_almost_equal(expected, actual)
def test_labelbinarizer_vs_sklearn():
    """msmbuilder's LabelBinarizer must match sklearn's on the label arrays."""
    reference = LabelBinarizerR()
    reference.fit(np.concatenate(labels))
    wrapped = LabelBinarizer()
    wrapped.fit(labels)
    expected = reference.transform(labels[0])
    actual = wrapped.transform(labels)[0]
    np.testing.assert_array_almost_equal(expected, actual)
def test_multilabelbinarizer_vs_sklearn():
    """msmbuilder's MultiLabelBinarizer must match sklearn's."""
    reference = MultiLabelBinarizerR()
    reference.fit(np.concatenate(trajs))
    wrapped = MultiLabelBinarizer()
    wrapped.fit(trajs)
    expected = reference.transform(trajs[0])
    actual = wrapped.transform(trajs)[0]
    np.testing.assert_array_almost_equal(expected, actual)
@skipif(not HAVE_MMS, 'this test requires sklearn >0.17.0')
def test_minmaxscaler_vs_sklearn():
    """msmbuilder's MinMaxScaler must match sklearn's."""
    reference = MinMaxScalerR()
    reference.fit(np.concatenate(trajs))
    wrapped = MinMaxScaler()
    wrapped.fit(trajs)
    expected = reference.transform(trajs[0])
    actual = wrapped.transform(trajs)[0]
    np.testing.assert_array_almost_equal(expected, actual)
@skipif(not HAVE_MAS, 'this test requires sklearn >0.17.0')
def test_maxabsscaler_vs_sklearn():
    """msmbuilder's MaxAbsScaler must match sklearn's."""
    reference = MaxAbsScalerR()
    reference.fit(np.concatenate(trajs))
    wrapped = MaxAbsScaler()
    wrapped.fit(trajs)
    expected = reference.transform(trajs[0])
    actual = wrapped.transform(trajs)[0]
    np.testing.assert_array_almost_equal(expected, actual)
def test_normalizer_vs_sklearn():
    """msmbuilder's Normalizer must match sklearn's."""
    reference = NormalizerR()
    reference.fit(np.concatenate(trajs))
    wrapped = Normalizer()
    wrapped.fit(trajs)
    expected = reference.transform(trajs[0])
    actual = wrapped.transform(trajs)[0]
    np.testing.assert_array_almost_equal(expected, actual)
@skipif(not HAVE_RS, 'this test requires sklearn >0.17.0')
def test_robustscaler_vs_sklearn():
    """msmbuilder's RobustScaler must match sklearn's."""
    reference = RobustScalerR()
    reference.fit(np.concatenate(trajs))
    wrapped = RobustScaler()
    wrapped.fit(trajs)
    expected = reference.transform(trajs[0])
    actual = wrapped.transform(trajs)[0]
    np.testing.assert_array_almost_equal(expected, actual)
@skipif(not HAVE_SS, 'this test requires sklearn >0.17.0')
def test_standardscaler_vs_sklearn():
    """msmbuilder's StandardScaler must match sklearn's."""
    reference = StandardScalerR()
    reference.fit(np.concatenate(trajs))
    wrapped = StandardScaler()
    wrapped.fit(trajs)
    expected = reference.transform(trajs[0])
    actual = wrapped.transform(trajs)[0]
    np.testing.assert_array_almost_equal(expected, actual)
def test_polynomialfeatures_vs_sklearn():
    """msmbuilder's PolynomialFeatures must match sklearn's."""
    reference = PolynomialFeaturesR()
    reference.fit(np.concatenate(trajs))
    wrapped = PolynomialFeatures()
    wrapped.fit(trajs)
    expected = reference.transform(trajs[0])
    actual = wrapped.transform(trajs)[0]
    np.testing.assert_array_almost_equal(expected, actual)
| lgpl-2.1 |
equialgo/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 112 | 3203 | # Author: Christian Osendorfer <osendorf@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.exceptions import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
from sklearn.utils.testing import ignore_warnings
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_factor_analysis():
    """FactorAnalysis must recover the data covariance structure.

    Builds a small linear-Gaussian generative model, fits FA with both SVD
    back-ends, and checks shapes, log-likelihood monotonicity, covariance
    recovery, back-end agreement and error/warning behaviour.
    """
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 20, 5, 3

    # Some random settings for the generative model
    W = rng.randn(n_components, n_features)
    # latent variable of dim 3, 20 of it
    h = rng.randn(n_samples, n_components)
    # using gamma to model different noise variance
    # per component
    noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
    # generate observations
    # wlog, mean is 0
    X = np.dot(h, W) + noise

    # An unknown SVD method must be rejected at construction and fit time.
    assert_raises(ValueError, FactorAnalysis, svd_method='foo')
    fa_fail = FactorAnalysis()
    fa_fail.svd_method = 'foo'
    assert_raises(ValueError, fa_fail.fit, X)

    fas = []
    for method in ['randomized', 'lapack']:
        fa = FactorAnalysis(n_components=n_components, svd_method=method)
        fa.fit(X)
        fas.append(fa)

        X_t = fa.transform(X)
        assert_equal(X_t.shape, (n_samples, n_components))

        assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
        assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))

        # Bug fix: ``np.all(np.diff(fa.loglike_))`` evaluates to a *bool*
        # (any non-zero diff is truthy), so a decreasing log-likelihood
        # would still satisfy ``assert_greater(True, 0.)``.  Check that the
        # smallest per-iteration increment is strictly positive instead.
        diff = np.min(np.diff(fa.loglike_))
        assert_greater(diff, 0., 'Log likelihood dif not increase')

        # Sample Covariance
        scov = np.cov(X, rowvar=0., bias=1.)

        # Model Covariance
        mcov = fa.get_covariance()
        diff = np.sum(np.abs(scov - mcov)) / W.size
        assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)

        fa = FactorAnalysis(n_components=n_components,
                            noise_variance_init=np.ones(n_features))
        assert_raises(ValueError, fa.fit, X[:, :2])

    # Both SVD back-ends should agree up to sign on the fitted quantities.
    f = lambda x, y: np.abs(getattr(x, y))  # sign will not be equal
    fa1, fa2 = fas
    for attr in ['loglike_', 'components_', 'noise_variance_']:
        assert_almost_equal(f(fa1, attr), f(fa2, attr))

    # A single EM iteration cannot converge -> ConvergenceWarning expected.
    fa1.max_iter = 1
    fa1.verbose = True
    assert_warns(ConvergenceWarning, fa1.fit, X)

    # Test get_covariance and get_precision with n_components == n_features
    # with n_components < n_features and with n_components == 0
    for n_components in [0, 2, X.shape[1]]:
        fa.n_components = n_components
        fa.fit(X)
        cov = fa.get_covariance()
        precision = fa.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)
| bsd-3-clause |
IniterWorker/epitech-stats-notes | gui/gui.py | 1 | 1264 | import tkinter as tk
import matplotlib
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from config import Configure
matplotlib.use("TkAgg")
class Application(tk.Frame):
    """Demo frame with a greeting button, a quit button and an embedded plot."""

    def __init__(self, master=None):
        super().__init__(master)
        self.pack()
        self.create_widgets()
        self.master.wm_minsize(800, 600)

    def create_widgets(self):
        """Build the two buttons and embed a matplotlib figure."""
        self.hi_there = tk.Button(self)
        self.hi_there["text"] = "Hello World\n(click me)"
        self.hi_there["command"] = self.say_hi
        self.hi_there.pack(side="top")
        # Bug fix: the original used ``command=root.destroy`` but ``root`` is
        # local to run_gui_mode(), so clicking QUIT raised NameError.  Destroy
        # the toplevel that owns this frame instead.
        self.quit = tk.Button(self, text="QUIT", fg="red",
                              command=self.master.destroy)
        self.quit.pack(side="bottom")
        f = Figure(figsize=(5, 5), dpi=100)
        a = f.add_subplot(111)
        a.plot([1, 2, 3, 4, 5, 6, 7, 8], [5, 6, 1, 3, 8, 9, 3, 5])
        canvas = FigureCanvasTkAgg(f, self)
        # FigureCanvasTkAgg.show() was deprecated/removed; draw() is the API.
        canvas.draw()
        canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)

    def say_hi(self):
        print("hi there, everyone!")
def run_gui_mode():
    """Create the Tk root window, attach the Application and run the loop."""
    root = tk.Tk()
    root.geometry("800x600")
    app = Application(master=root)
    app.master.title("Epitech Stats")
    app.mainloop()
| mit |
bcantarel/bcantarel.github.io | bicf_nanocourses/courses/python_1/check_versions.py | 1 | 2240 | #!/usr/bin/env python3
'''Check if course required python version and packages are installed.'''
# 2018-01-30 David.Trudgian@UTSouthwestern.edu
# Quick and dirty script to check for course-required
# python version and packages.
import logging
import sys
logger = logging.getLogger("check_versions")
logger.setLevel(logging.INFO)
logger_stream = logging.StreamHandler()
formatter = logging.Formatter("[%(levelname)6s] %(message)s")
logger_stream.setFormatter(formatter)
logger.addHandler(logger_stream)
REQUIRED_PYTHON = '3.6.4'
REQUIRED_PACKAGES = {
'numpy': '1.14.0',
'scipy': '1.0.0',
'pandas': '0.22.0',
'matplotlib': '2.1.2',
'seaborn': '0.8.1',
'bokeh': '0.12.13',
'spyder': '3.2.6',
}
def check_package_version(package_name, required_version):
    """Return True when *package_name* imports and reports *required_version*.

    Logs an error (and returns False) when the package is missing or its
    ``__version__`` differs from the required one.
    """
    logger.info("Checking for %s %s", package_name, required_version)
    try:
        module = __import__(package_name)
    except ImportError:
        logger.error("Package %s not available", package_name)
        return False
    found = module.__version__
    if found == required_version:
        return True
    logger.error("%s should be %s, found %s",
                 package_name, required_version, found)
    return False
def main():
    """Run all environment checks and print an overall verdict."""
    environment_ok = True
    logger.info(
        "Hello - we're checking if your system is ready for the Python 1 Nanocourse")

    # Check Python Version.  Compare the full version token instead of the
    # original fixed-width slice (``sys.version[:5]``), which breaks for
    # versions such as '3.10.1'.
    if sys.version.split()[0] != REQUIRED_PYTHON:
        logger.error("Python should be %s, found %s",
                     REQUIRED_PYTHON, sys.version.splitlines()[0])
        # Bug fix: a wrong interpreter version previously did NOT flip
        # environment_ok, so the success message could still be printed.
        environment_ok = False
    else:
        logger.info("Python version OK!")

    # Check Package Versions
    for package_name, required_version in REQUIRED_PACKAGES.items():
        if not check_package_version(package_name, required_version):
            environment_ok = False

    if environment_ok:
        print("\nWoo! - Ready to go, see you at the nanocourse :-)\n")
    else:
        print("\nUh-oh! - Your environment isn't quite right")
        print("Please check you followed the setup instructions and contact the")
        print("course co-ordinator if you continue to have problems.\n")


if __name__ == "__main__":
    main()
| gpl-3.0 |
shikhardb/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)

# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung
# License: BSD 3 clause

import time

import numpy as np
import scipy as sp
import matplotlib.pyplot as plt

from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering

# NOTE(review): scipy.misc.lena() was removed from modern SciPy releases --
# this example presumably targets an old pinned SciPy; confirm before running.
lena = sp.misc.lena()
# Downsample the image by a factor of 4 (each statement halves both axes by
# summing 2x2 pixel blocks).
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]

# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)

# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps

# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11

###############################################################################
# Visualize the resulting regions: run both label-assignment strategies and
# time each one.
for assign_labels in ('kmeans', 'discretize'):
    t0 = time.time()
    labels = spectral_clustering(graph, n_clusters=N_REGIONS,
                                 assign_labels=assign_labels,
                                 random_state=1)
    t1 = time.time()
    labels = labels.reshape(lena.shape)
    plt.figure(figsize=(5, 5))
    plt.imshow(lena, cmap=plt.cm.gray)
    for l in range(N_REGIONS):
        # NOTE(review): ``contours`` is not a documented Axes.contour keyword
        # -- presumably intended as a level count; verify against the pinned
        # matplotlib version.
        plt.contour(labels == l, contours=1,
                    colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
    plt.xticks(())
    plt.yticks(())
    plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
euri10/zipline | tests/risk/answer_key.py | 39 | 11989 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import hashlib
import os
import numpy as np
import pandas as pd
import pytz
import xlrd
import requests
from six.moves import map
def col_letter_to_index(col_letter):
    """Convert a spreadsheet column label to its 1-based column number.

    Handles any number of letters ('A' -> 1, 'Z' -> 26, 'AA' -> 27, ...);
    the old "single letter only" comment was stale -- the base-26 loop
    already supported multi-letter labels.  Lower-case input is accepted
    as a backward-compatible generalization.
    """
    index = 0
    for i, char in enumerate(reversed(col_letter.upper())):
        index += (ord(char) - ord('A') + 1) * pow(26, i)
    return index
DIR = os.path.dirname(os.path.realpath(__file__))
ANSWER_KEY_CHECKSUMS_PATH = os.path.join(DIR, 'risk-answer-key-checksums')
ANSWER_KEY_CHECKSUMS = open(ANSWER_KEY_CHECKSUMS_PATH, 'r').read().splitlines()
ANSWER_KEY_FILENAME = 'risk-answer-key.xlsx'
ANSWER_KEY_PATH = os.path.join(DIR, ANSWER_KEY_FILENAME)
ANSWER_KEY_BUCKET_NAME = 'zipline-test_data'
ANSWER_KEY_DL_TEMPLATE = """
https://s3.amazonaws.com/zipline-test-data/risk/{md5}/risk-answer-key.xlsx
""".strip()
LATEST_ANSWER_KEY_URL = ANSWER_KEY_DL_TEMPLATE.format(
md5=ANSWER_KEY_CHECKSUMS[-1])
def answer_key_signature():
    """Return the hex MD5 digest of the local answer-key spreadsheet."""
    md5 = hashlib.md5()
    with open(ANSWER_KEY_PATH, 'rb') as f:
        # Stream the file in 1 KiB chunks; iter() stops at the b"" sentinel.
        for chunk in iter(lambda: f.read(1024), b""):
            md5.update(chunk)
    return md5.hexdigest()
def ensure_latest_answer_key():
    """
    Get the latest answer key from a publically available location.

    Download rules:
    - No local spreadsheet: fetch the latest key (last row of the checksum
      file).
    - Local spreadsheet whose checksum appears in the checksum file: download
      only when it is not the latest entry (otherwise use it as a cache).
    - Local spreadsheet with an unknown checksum: leave it alone -- it is
      assumed to be a locally modified development copy.

    The spreadsheet is not kept in SCM since every edit would grow the repo
    by the full (binary) file size.  Should MD5 collisions ever occur, an
    alternative naming scheme would be needed.
    """
    latest = ANSWER_KEY_CHECKSUMS[-1]
    target_md5 = None

    if not os.path.exists(ANSWER_KEY_PATH):
        # Nothing local at all: grab the newest published key.
        target_md5 = latest
    else:
        local_hash = answer_key_signature()
        if local_hash not in ANSWER_KEY_CHECKSUMS:
            # Unknown checksum -> assume in-development local edits; keep it.
            target_md5 = None
        elif local_hash != latest:
            # Known but stale copy -> refresh to the newest key.
            target_md5 = latest

    if target_md5:
        res = requests.get(
            ANSWER_KEY_DL_TEMPLATE.format(md5=target_md5))
        with open(ANSWER_KEY_PATH, 'wb') as f:
            f.write(res.content)
# Get latest answer key on load.
ensure_latest_answer_key()
class DataIndex(object):
    """
    Coordinates for the spreadsheet, using the values as seen in the notebook.

    The python-excel libraries are 0-indexed while a spreadsheet GUI is
    1-indexed; the ``*_index`` properties perform the conversion.
    """

    def __init__(self, sheet_name, col, row_start, row_end,
                 value_type='float'):
        self.sheet_name = sheet_name
        self.col = col
        self.row_start = row_start
        self.row_end = row_end
        self.value_type = value_type

    @property
    def col_index(self):
        # 0-based column position for python-excel.
        return col_letter_to_index(self.col) - 1

    @property
    def row_start_index(self):
        # 0-based first row for python-excel.
        return self.row_start - 1

    @property
    def row_end_index(self):
        # 0-based last row for python-excel.
        return self.row_end - 1

    def __str__(self):
        # GUI-style range, e.g. "'s_p'!H4:H254".
        template = "'{0}'!{1}{2}:{1}{3}"
        return template.format(self.sheet_name, self.col,
                               self.row_start, self.row_end)
class AnswerKey(object):
    """Typed access to the expected-results spreadsheet.

    Every entry of ``INDEXES`` is materialized as an instance attribute at
    construction time: plain DataIndex entries become lists of parsed values,
    nested dicts become dicts of such lists.
    """

    # Spreadsheet coordinates for every expected quantity.
    INDEXES = {
        'RETURNS': DataIndex('Sim Period', 'D', 4, 255),

        'BENCHMARK': {
            'Dates': DataIndex('s_p', 'A', 4, 254, value_type='date'),
            'Returns': DataIndex('s_p', 'H', 4, 254)
        },

        # Below matches the inconsistent capitalization in spreadsheet
        'BENCHMARK_PERIOD_RETURNS': {
            'Monthly': DataIndex('s_p', 'R', 8, 19),
            '3-Month': DataIndex('s_p', 'S', 10, 19),
            '6-month': DataIndex('s_p', 'T', 13, 19),
            'year': DataIndex('s_p', 'U', 19, 19),
        },

        'BENCHMARK_PERIOD_VOLATILITY': {
            'Monthly': DataIndex('s_p', 'V', 8, 19),
            '3-Month': DataIndex('s_p', 'W', 10, 19),
            '6-month': DataIndex('s_p', 'X', 13, 19),
            'year': DataIndex('s_p', 'Y', 19, 19),
        },

        'ALGORITHM_PERIOD_RETURNS': {
            'Monthly': DataIndex('Sim Period', 'Z', 23, 34),
            '3-Month': DataIndex('Sim Period', 'AA', 25, 34),
            '6-month': DataIndex('Sim Period', 'AB', 28, 34),
            'year': DataIndex('Sim Period', 'AC', 34, 34),
        },

        'ALGORITHM_PERIOD_VOLATILITY': {
            'Monthly': DataIndex('Sim Period', 'AH', 23, 34),
            '3-Month': DataIndex('Sim Period', 'AI', 25, 34),
            '6-month': DataIndex('Sim Period', 'AJ', 28, 34),
            'year': DataIndex('Sim Period', 'AK', 34, 34),
        },

        'ALGORITHM_PERIOD_SHARPE': {
            'Monthly': DataIndex('Sim Period', 'AL', 23, 34),
            '3-Month': DataIndex('Sim Period', 'AM', 25, 34),
            '6-month': DataIndex('Sim Period', 'AN', 28, 34),
            'year': DataIndex('Sim Period', 'AO', 34, 34),
        },

        'ALGORITHM_PERIOD_BETA': {
            'Monthly': DataIndex('Sim Period', 'AP', 23, 34),
            '3-Month': DataIndex('Sim Period', 'AQ', 25, 34),
            '6-month': DataIndex('Sim Period', 'AR', 28, 34),
            'year': DataIndex('Sim Period', 'AS', 34, 34),
        },

        'ALGORITHM_PERIOD_ALPHA': {
            'Monthly': DataIndex('Sim Period', 'AT', 23, 34),
            '3-Month': DataIndex('Sim Period', 'AU', 25, 34),
            '6-month': DataIndex('Sim Period', 'AV', 28, 34),
            'year': DataIndex('Sim Period', 'AW', 34, 34),
        },

        'ALGORITHM_PERIOD_BENCHMARK_VARIANCE': {
            'Monthly': DataIndex('Sim Period', 'BJ', 23, 34),
            '3-Month': DataIndex('Sim Period', 'BK', 25, 34),
            '6-month': DataIndex('Sim Period', 'BL', 28, 34),
            'year': DataIndex('Sim Period', 'BM', 34, 34),
        },

        'ALGORITHM_PERIOD_COVARIANCE': {
            'Monthly': DataIndex('Sim Period', 'BF', 23, 34),
            '3-Month': DataIndex('Sim Period', 'BG', 25, 34),
            '6-month': DataIndex('Sim Period', 'BH', 28, 34),
            'year': DataIndex('Sim Period', 'BI', 34, 34),
        },

        'ALGORITHM_PERIOD_DOWNSIDE_RISK': {
            'Monthly': DataIndex('Sim Period', 'BN', 23, 34),
            '3-Month': DataIndex('Sim Period', 'BO', 25, 34),
            '6-month': DataIndex('Sim Period', 'BP', 28, 34),
            'year': DataIndex('Sim Period', 'BQ', 34, 34),
        },

        'ALGORITHM_PERIOD_SORTINO': {
            'Monthly': DataIndex('Sim Period', 'BR', 23, 34),
            '3-Month': DataIndex('Sim Period', 'BS', 25, 34),
            '6-month': DataIndex('Sim Period', 'BT', 28, 34),
            'year': DataIndex('Sim Period', 'BU', 34, 34),
        },

        'ALGORITHM_RETURN_VALUES': DataIndex(
            'Sim Cumulative', 'D', 4, 254),

        'ALGORITHM_CUMULATIVE_VOLATILITY': DataIndex(
            'Sim Cumulative', 'P', 4, 254),

        'ALGORITHM_CUMULATIVE_SHARPE': DataIndex(
            'Sim Cumulative', 'R', 4, 254),

        'CUMULATIVE_DOWNSIDE_RISK': DataIndex(
            'Sim Cumulative', 'U', 4, 254),

        'CUMULATIVE_SORTINO': DataIndex(
            'Sim Cumulative', 'V', 4, 254),

        'CUMULATIVE_INFORMATION': DataIndex(
            'Sim Cumulative', 'AA', 4, 254),

        'CUMULATIVE_BETA': DataIndex(
            'Sim Cumulative', 'AD', 4, 254),

        'CUMULATIVE_ALPHA': DataIndex(
            'Sim Cumulative', 'AE', 4, 254),

        'CUMULATIVE_MAX_DRAWDOWN': DataIndex(
            'Sim Cumulative', 'AH', 4, 254),
    }

    def __init__(self):
        """Open the workbook and expose every INDEXES entry as an attribute."""
        self.workbook = xlrd.open_workbook(ANSWER_KEY_PATH)

        self.sheets = {}
        self.sheets['Sim Period'] = self.workbook.sheet_by_name('Sim Period')
        self.sheets['Sim Cumulative'] = self.workbook.sheet_by_name(
            'Sim Cumulative')
        self.sheets['s_p'] = self.workbook.sheet_by_name('s_p')

        for name, index in self.INDEXES.items():
            if isinstance(index, dict):
                # Nested tables (e.g. per-period stats) -> dict of value lists.
                subvalues = {}
                for subkey, subindex in index.items():
                    subvalues[subkey] = self.get_values(subindex)
                setattr(self, name, subvalues)
            else:
                setattr(self, name, self.get_values(index))

    def parse_date_value(self, value):
        # Excel stores dates as serial floats; datemode 0 is the 1900 system.
        return xlrd.xldate_as_tuple(value, 0)

    def parse_float_value(self, value):
        # Empty cells read back as '' -> represent missing data as NaN.
        return value if value != '' else np.nan

    def get_raw_values(self, data_index):
        """Return the raw cell values of the column slice *data_index*."""
        return self.sheets[data_index.sheet_name].col_values(
            data_index.col_index,
            data_index.row_start_index,
            data_index.row_end_index + 1)

    @property
    def value_type_to_value_func(self):
        # Dispatch table from DataIndex.value_type to its parser method.
        return {
            'float': self.parse_float_value,
            'date': self.parse_date_value,
        }

    def get_values(self, data_index):
        """Return the parsed values of the column slice *data_index*."""
        value_parser = self.value_type_to_value_func[data_index.value_type]
        return [value for value in
                map(value_parser, self.get_raw_values(data_index))]
# Load the workbook once at import time; every expectation below derives
# from this singleton.
ANSWER_KEY = AnswerKey()

BENCHMARK_DATES = ANSWER_KEY.BENCHMARK['Dates']
BENCHMARK_RETURNS = ANSWER_KEY.BENCHMARK['Returns']
# xlrd date tuples -> timezone-aware datetimes used as the common index.
DATES = [datetime.datetime(*x, tzinfo=pytz.UTC) for x in BENCHMARK_DATES]
BENCHMARK = pd.Series(dict(zip(DATES, BENCHMARK_RETURNS)))
ALGORITHM_RETURNS = pd.Series(
    dict(zip(DATES, ANSWER_KEY.ALGORITHM_RETURN_VALUES)))
# Side-by-side daily returns for benchmark and algorithm.
RETURNS_DATA = pd.DataFrame({'Benchmark Returns': BENCHMARK,
                             'Algorithm Returns': ALGORITHM_RETURNS})
# Expected cumulative risk metrics, one column per statistic, indexed by date.
RISK_CUMULATIVE = pd.DataFrame({
    'volatility': pd.Series(dict(zip(
        DATES, ANSWER_KEY.ALGORITHM_CUMULATIVE_VOLATILITY))),
    'sharpe': pd.Series(dict(zip(
        DATES, ANSWER_KEY.ALGORITHM_CUMULATIVE_SHARPE))),
    'downside_risk': pd.Series(dict(zip(
        DATES, ANSWER_KEY.CUMULATIVE_DOWNSIDE_RISK))),
    'sortino': pd.Series(dict(zip(
        DATES, ANSWER_KEY.CUMULATIVE_SORTINO))),
    'information': pd.Series(dict(zip(
        DATES, ANSWER_KEY.CUMULATIVE_INFORMATION))),
    'alpha': pd.Series(dict(zip(
        DATES, ANSWER_KEY.CUMULATIVE_ALPHA))),
    'beta': pd.Series(dict(zip(
        DATES, ANSWER_KEY.CUMULATIVE_BETA))),
    'max_drawdown': pd.Series(dict(zip(
        DATES, ANSWER_KEY.CUMULATIVE_MAX_DRAWDOWN))),
})
| apache-2.0 |
kcrandall/Kaggle_Mercedes_Manufacturing | spark/experiements/reza/get_type_lists.py | 6 | 1232 | def get_type_lists(frame, rejects=['Id', 'ID','id'],frame_type='h2o'):
"""Creates lists of numeric and categorical variables.
:param frame: The frame from which to determine types.
:param rejects: Variable names not to be included in returned lists.
:param frame_type: The type of frame being used. Accepted: ['h2o','pandas','spark']
:return: Tuple of lists for numeric and categorical variables in the frame.
"""
#Handle spark type data frames
if frame_type == 'spark':
nums, cats = [], []
for key, val in frame.dtypes:
if key not in rejects:
if val == 'string':
cats.append(key)
else: # ['int','double']
nums.append(key)
print('Numeric =', nums)
print()
print('Categorical =', cats)
return nums, cats
else:
nums, cats = [], []
for key, val in frame.types.items():
if key not in rejects:
if val == 'enum':
cats.append(key)
else:
nums.append(key)
print('Numeric =', nums)
print()
print('Categorical =', cats)
return nums, cats
| mit |
nok/sklearn-porter | examples/estimator/classifier/RandomForestClassifier/js/basics_embedded.pct.py | 1 | 1213 | # %% [markdown]
# # sklearn-porter
#
# Repository: [https://github.com/nok/sklearn-porter](https://github.com/nok/sklearn-porter)
#
# ## RandomForestClassifier
#
# Documentation: [sklearn.ensemble.RandomForestClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)
# %%
import sys
# Make the local sklearn-porter checkout importable when running from the
# examples tree.
sys.path.append('../../../../..')

# %% [markdown]
# ### Load data

# %%
from sklearn.datasets import load_iris

iris_data = load_iris()
X = iris_data.data
y = iris_data.target

print(X.shape, y.shape)

# %% [markdown]
# ### Train classifier

# %%
from sklearn.ensemble import RandomForestClassifier

clf = RandomForestClassifier(n_estimators=15, max_depth=None,
                             min_samples_split=2, random_state=0)
clf.fit(X, y)

# %% [markdown]
# ### Transpile classifier

# %%
from sklearn_porter import Porter

# embed_data=True inlines the training data into the generated JavaScript.
porter = Porter(clf, language='js')
output = porter.export(embed_data=True)

print(output)

# %% [markdown]
# ### Run classification in JavaScript

# %%
# Save classifier:
# with open('RandomForestClassifier.js', 'w') as f:
#     f.write(output)

# Run classification:
# if hash node 2/dev/null; then
#     node RandomForestClassifier.js 1 2 3 4
# fi
| mit |
luchko/latticegraph_designer | latticegraph_designer/app/main.py | 1 | 19862 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2017, Ivan Luchko and Project Contributors
Licensed under the terms of the MIT License
https://github.com/luchko/latticegraph_designer
This module contains the definition of the app MainWindow.
class MainWindow(QMainWindow, Ui_MainWindow):
def run():
Module is compatible with both pyQt4 and pyQt5
"""
from __future__ import division
import matplotlib
# define pyQt version
try:
import PyQt4 as PyQt
pyQtVersion = "PyQt4"
except ImportError:
try:
import PyQt5 as PyQt
pyQtVersion = "PyQt5"
except ImportError:
raise ImportError("neither PyQt4 or PyQt5 is found")
# imports requied PyQt modules
if pyQtVersion == "PyQt4":
# Make sure that we are using QT5
matplotlib.use('Qt4Agg')
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
# impoort pyQt modules
from PyQt4.uic import loadUiType
from PyQt4.QtCore import Qt, pyqtSignal
from PyQt4.QtGui import (QApplication, QMessageBox, QFileDialog,
QListWidgetItem, QPushButton, QHBoxLayout,
QVBoxLayout)
else:
# Make sure that we are using QT5
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
# impoort pyQt modules
from PyQt5.uic import loadUiType
from PyQt5.QtCore import Qt, pyqtSignal
from PyQt5.QtWidgets import (QApplication, QMessageBox, QFileDialog,
QListWidgetItem, QPushButton, QHBoxLayout,
QVBoxLayout)
def getPathString(output):
    '''
    returns a path string of the QFileDialog output

    pyQt5 returns a tuple (path, filter) not just a path QString like pyQt4
    '''
    if pyQtVersion == "PyQt4":
        return str(output)
    return str(output[0])
# import python libs
import os
import sys
import webbrowser
import xml.etree.ElementTree as ET
from matplotlib.figure import Figure
# import project modules
from mpl_animationmanager import QDialogAnimManager
from latticegraph_designer.app.mpl_pane import GraphEdgesEditor
from latticegraph_designer.app.core import CrystalCluster, ParseXML, ExportXML, DealXML
from latticegraph_designer.app.dialogs import (QNotImplemented, DialogExportLG,
DialogSelectLG, DialogImportCryst,
DialogEditXML, MyDialogPreferences,
DialogDistSearch, DialogChangeEdgeType)
# import UI layout created in designer
ui_folder = os.path.dirname(__file__)+'/../resources/ui_layout/'
#ui_folder = 'latticegraph_designer/resources/ui_layout/'
Ui_MainWindow, QMainWindow = loadUiType(ui_folder+'MainWindow_GUI.ui')
class MainWindow(QMainWindow, Ui_MainWindow):
'''Main application window'''
selectedEdgeChanged = pyqtSignal(object)
selectedEdgeChangedList = pyqtSignal(object) #when edge selected in QListWidget
unitCellChanged = pyqtSignal()
latticeVisibleChanged = pyqtSignal(object) # used to bind with mpl.event
arrowsVisibleChanged = pyqtSignal(object) # used to bind with mpl.event
    def __init__(self, fileName=None, TEXT_MODE=True):
        """Build the main window, load preferences, and wire signals to slots.

        fileName: optional lattice-graph XML library to open at startup;
            falls back to the bundled preferences file when None.
        TEXT_MODE: when True, verbose output is printed to the console.
        """
        super(MainWindow, self).__init__()
        self.setupUi(self)
        # Load persisted settings and the active colour theme from the
        # bundled preferences XML.
        self.prefFileName = os.path.dirname(__file__)+'/../resources/preferences.xml'
        self.SETTINGS = ET.parse(self.prefFileName).getroot()
        self.CURRENT_THEME = DealXML.get_child_by_name(self.SETTINGS,"THEME","Current theme")
        self.TEXT_MODE = TEXT_MODE
        # Default cluster size (L, W, H), mirrored into the three spin boxes.
        self.size = (2,2,2)
        self.spinBox_sizeL.setValue(self.size[0])
        self.spinBox_sizeW.setValue(self.size[1])
        self.spinBox_sizeH.setValue(self.size[2])
        self.spinBox_type.clear()
        self.radioButton_output.setChecked(TEXT_MODE)
        self.setup_mpl_canvas()
        # initialize canvas: use the preferences file when no XML is given.
        path = self.prefFileName if fileName is None else fileName
        self.importXML_fromFile(path)
        self.fileNameXML = fileName
        self.label_fileNameXML.setText("XML library file: "
                                       + self.getFileLabelText())
        # Reusable "no edge selected" error dialog shown by edge operations.
        self.msb_noActiveEdge = QMessageBox()
        self.msb_noActiveEdge.setIcon(QMessageBox.Critical)
        self.msb_noActiveEdge.setWindowTitle("Message")
        self.msb_noActiveEdge.setStandardButtons(QMessageBox.Ok)
        self.msb_noActiveEdge.setText("No edge is selected")

        # setup signals and slots
        self.btnEditXML.clicked.connect(self.editXML_callback)
        self.spinBox_sizeL.valueChanged.connect(self.changeSize_callback)
        self.spinBox_sizeW.valueChanged.connect(self.changeSize_callback)
        self.spinBox_sizeH.valueChanged.connect(self.changeSize_callback)
        self.btnDel.clicked.connect(self.delteEdge_callback)
        self.btnClear.clicked.connect(self.gee.clearEdges_callback)
        self.btnChangeType.clicked.connect(self.changeType_callback)
        self.btnLength.clicked.connect(self.addDistEdges_callback)
        self.listEdges.currentItemChanged.connect(self.selectEdgeList_callback)
        self.radioButton_output.toggled.connect(self.change_textMode)
        self.selectedEdgeChanged.connect(self.selectEdgeSignal_slot)
        self.unitCellChanged.connect(self.update_listEdges)
        self.setup_menu()

        if self.TEXT_MODE:
            # Print the graph editor's usage help on startup.
            print(self.gee.__doc__)
def setup_menu(self):
'''setup slot for menu actions'''
# configure menuFile
self.action_ImportXML.triggered.connect(self.importXMLdlg_callback)
self.action_ImportCryst.triggered.connect(self.importCryst_callback)
self.action_SaveXML.triggered.connect(self.saveXML_callback)
self.action_SaveXML_as.triggered.connect(self.saveXML_as_callback)
self.action_ExportIMG.triggered.connect(self.exportIMG_callback)
self.action_ExportAnim.triggered.connect(self.exportAnim_callback)
self.action_Quit.triggered.connect(self.quit_callback)
# configure menuEdit
self.action_EditXML.triggered.connect(self.editXML_callback)
self.action_AddSimEdges.triggered.connect(self.addSimEdges_callback)
self.action_AddDistEdges.triggered.connect(self.addDistEdges_callback)
self.action_ChangeType.triggered.connect(self.menuChangeType_callback)
self.action_DelEdge.triggered.connect(self.delteEdge_callback)
self.action_ClearEdges.triggered.connect(self.gee.clearEdges_callback)
self.action_Pref.triggered.connect(self.preferences_callback)
# configure menuHelo
self.action_About.triggered.connect(self.about_callback)
self.action_Doc.triggered.connect(self.doc_callback)
def setup_mpl_canvas(self):
'''
setup matplotlib manipulation pane widget
for displaying and editing lattice graph
'''
self.dpi = 100
self.fig = Figure((5.0, 5.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.ax = self.fig.gca(projection='3d')
self.fig.subplots_adjust(left=0, bottom=0, right=1, top=1)
self.canvas.setParent(self.mplWidget)
self.mplLayout.addWidget(self.canvas)
self.canvas.setFocusPolicy(Qt.ClickFocus)
self.canvas.setFocus()
# add aniation button
self.btnPlay = QPushButton("Animate")
self.btnPlay.setStatusTip("Open animation manager.")
self.btnPlay.clicked.connect(self.exportAnim_callback)
self.btnPlay.setFocusPolicy(Qt.NoFocus)
mplHbox = QHBoxLayout()
mplHbox.addWidget(self.btnPlay)
mplHbox.addStretch()
mplVbox = QVBoxLayout()
mplVbox.addLayout(mplHbox)
mplVbox.addStretch()
self.canvas.setLayout(mplVbox)
def importXML_fromFile(self, path):
'''import lattice graph form xml file'''
self.fileNameXML = path
self.parser = ParseXML(fileName = self.fileNameXML)
LG_name_list = self.parser.get_LATTICEGRAPH_names()
if len(LG_name_list) > 1:
self.dlgSelectLG = DialogSelectLG(self, LG_name_list)
self.dlgSelectLG.show()
else:
self.importXml(LG_name_list[0])
def importXml(self, LG_name):
'''import lattice graph with given name from predefined parser - ParseXML object'''
self.LATTICEGRAPH_name = LG_name
self.label_fileNameXML.setText("XML library file: "+self.getFileLabelText())
self.label_LG_name.setText("Lattice graph name: "+self.LATTICEGRAPH_name)
if self.parser is None:
raise ValueError("Parser is not defined")
self.lattice, self.UC = self.parser.parse_LATTICEGRAPH(self.LATTICEGRAPH_name)
self.cluster = CrystalCluster(self.UC, self.lattice, self.size)
self.ax.clear()
self.gee = GraphEdgesEditor(self.ax, self.cluster, parent=self,
display_report=True)
self.canvas.draw()
self.update_listEdges()
self.unitCellChanged.emit()
def update_listEdges(self):
'''is used to update QListWidget when unit cell is changed'''
self.initialization = True # block QListWidget valuechanged callback
self.listEdges.clear()
defaultListItem = QListWidgetItem('')
self.listEdges_idToItem = {None: defaultListItem}
self.listEdges_ItemToId = {defaultListItem.text(): None}
for key, edge in self.gee.UC.edges.items():
newItem = QListWidgetItem(str(edge))
self.listEdges.addItem(newItem)
self.listEdges_idToItem[key] = newItem
self.listEdges_ItemToId[newItem.text()] = key
self.listEdges.addItem(defaultListItem)
self.listEdges.setCurrentItem(defaultListItem)
self.initialization = False # relieze QListWidget valuechanged callback
def changeSize_callback(self):
'''called when cluter size in spinBox is chanaged'''
self.size = (self.spinBox_sizeL.value(),
self.spinBox_sizeW.value(),
self.spinBox_sizeH.value())
self.gee.reset_size(self.size)
def changeType_callback(self):
'''called when value of self.spinBox_type is changed'''
if self.gee.e_active_ind is None:
self.msb_noActiveEdge.exec_()
else:
self.gee.change_active_edge_type(self.spinBox_type.value())
def selectEdgeList_callback(self, selectedItem):
'''called when edge is selected in QListWidget'''
if not self.initialization:
activeEdge_id = self.listEdges_ItemToId[selectedItem.text()]
self.gee.select_edge(activeEdge_id)
self.selectedEdgeChangedList.emit(activeEdge_id)
if activeEdge_id is None:
msg = " active edge unselected"
self.spinBox_type.clear()
else:
msg = " selected edge: {}".format(self.cluster.UC.edges[activeEdge_id])
_type = self.cluster.UC.edges[activeEdge_id].type
self.spinBox_type.setValue(_type)
self.statusBar().showMessage(msg, 2000)
if self.TEXT_MODE:
print(msg)
def selectEdgeSignal_slot(self, activeEdge_id):
'''Process selecting edge signal'''
activeItem = self.listEdges_idToItem[activeEdge_id]
self.listEdges.setCurrentItem(activeItem)
def change_textMode(self, _bool):
'''turn on/off printing actions into terminal'''
self.TEXT_MODE = _bool
self.gee.display_report = _bool
msg = " displaying actions in terminal is turned {}".format("on" if _bool else "off")
self.statusBar().showMessage(msg, 2000)
print(msg)
def getFileLabelText(self):
'''Returns the label string of the xml library file'''
if self.fileNameXML is None:
return "None"
else:
fileName = os.path.basename(self.fileNameXML)
dirName = os.path.basename(os.path.dirname(self.fileNameXML))
return os.path.join("...", dirName, fileName)
def importXMLdlg_callback(self):
'''when import acttion is activated'''
output = QFileDialog.getOpenFileName(self,
'Open xml library containing Lattice Graph',
filter = "XML files (*.xml);;All files (*.*)")
path = getPathString(output)
if path != "":
self.importXML_fromFile(path)
def importCryst_callback(self):
'''import crystal providing lattice and unit cell parameters'''
self.dlgImportCryst = DialogImportCryst(self)
self.dlgImportCryst.show()
def saveXML_callback(self):
'''save changes to lattice graph xml library file'''
if self.fileNameXML == None:
self.saveXML_as_callback()
else:
self.exporter = ExportXML(self.gee.cluster.lattice,
self.gee.cluster.UC,
self.LATTICEGRAPH_name)
self.exporter.export_to_lib(self.fileNameXML)
def saveXML_as_callback(self):
'''save lattice graph to xml library file'''
dialog = DialogExportLG(self, self.LATTICEGRAPH_name,
self.cluster.lattice.atrib["BOUNDARY"])
if dialog.exec_():
self.LATTICEGRAPH_name = str(dialog.lineEdit_LGname.text())
self.gee.cluster.lattice.atrib["BOUNDARY"]= \
str(dialog.comboBox_boundary.currentText())
output = QFileDialog.getSaveFileName(self, filter="XML files (*.xml)")
path = getPathString(output)
# if not canceled
if path != '':
self.fileNameXML = path
self.exporter = ExportXML(self.gee.cluster.lattice,
self.gee.cluster.UC,
self.LATTICEGRAPH_name)
self.exporter.export_to_lib(self.fileNameXML)
self.label_fileNameXML.setText("XML library file: "+self.getFileLabelText())
def exportIMG_callback(self):
'''Savve image of the Heisenberg model (lattice graph)'''
output = QFileDialog.getSaveFileName(self,caption='Save model image',
filter="Images (*.png *.xpm *.jpg);;All files (*.*)")
path = getPathString(output)
if path != '':
self.exportIMG(path)
def exportIMG(self, path):
'''Savve image of the Heisenberg model (lattice graph)'''
self.canvas.print_figure(path, dpi=self.dpi, bbox_inches='tight', pad_inches=0)
self.statusBar().showMessage('Saved to %s' % path, 2000)
def exportAnim_callback(self):
'''animate lattice graph mpl_pane and open animation manager'''
self.dlgExportAnim = QDialogAnimManager(self.ax)
self.dlgExportAnim.show()
# disable animated GraphEdgeEditor artists
self.gee.sc_active.set_visible(False)
self.gee.new_edge.set_visible(False)
# enabele animated GraphEdgeEditor artists
self.dlgExportAnim.closed.connect(self.gee.sc_active.set_visible)
self.dlgExportAnim.closed.connect(self.gee.new_edge.set_visible)
def quit_callback(self):
self.close()
def editXML_callback(self):
''' open lattice graph xml code editor'''
self.dlgEditXML = DialogEditXML(self)
self.dlgEditXML.show()
if self.TEXT_MODE:
print(" open lattice graph xml code editor")
def addSimEdges_callback(self):
'''search for and add edges that have the same length as selected one'''
if self.gee.e_active_ind is None:
self.msb_noActiveEdge.exec_()
else:
self.gee.searchActiveDistEdge_callback()
def addDistEdges_callback(self):
'''opens edge length manipulation manager'''
self.gee.select_edge(None)
self.selectEdgeSignal_slot(None)
self.dlgDistSearch = DialogDistSearch(self)
self.dlgDistSearch.show()
def menuChangeType_callback(self):
'''change selected edge type'''
if self.gee.e_active_ind is None:
self.msb_noActiveEdge.exec_()
else:
self.dlg = DialogChangeEdgeType(self)
self.dlg.show()
def delteEdge_callback(self):
'''delete selected edge'''
if self.gee.e_active_ind is None:
self.msb_noActiveEdge.exec_()
else:
self.gee.delete_active_edge_callback()
def preferences_callback(self):
'''Calls preference dialog'''
self.dlgPref = MyDialogPreferences(parent = self)
self.dlgPref.applySignal.connect(self.applyPref_callback)
self.arrowsVisibleChanged.connect(self.dlgPref.prefWidget.checkBox_arrows.setChecked)
self.latticeVisibleChanged.connect(self.dlgPref.prefWidget.checkBox_lattice.setChecked)
self.dlgPref.show()
def applyPref_callback(self):
'''when apply button is cklicked in DialogPreferences'''
self.gee.initialize_theme(self.CURRENT_THEME)
self.gee.set_artists_properties()
def about_callback(self):
'''display app help'''
self.msg = QMessageBox()
self.msg.setIcon(QMessageBox.Information)
self.msg.setTextFormat(Qt.RichText)
text = '''
<b>Lattice graph designer 1.0a1</b>
<br>
Copyright © 2017, Ivan Luchko and Project Contributors
<br>
Licensed under the terms of the MIT License
<br><br>
Lattice graph designer is a tool which allows to visualize and create
a lattice graph model using the intuitive GUI and interactive 3D drag-and-drop
graph manipulation pane.
<br><br>
It was primarily created for the
<a href="http://alps.comp-phys.org">ALPS project</a> to deal with a lattice graph of the
<a href="https://en.wikipedia.org/wiki/Heisenberg_model_(quantum)">Heisenberg model</a>
defined in <a href="http://alps.comp-phys.org/mediawiki/index.php/Tutorials:LatticeHOWTO">
ALPS xml graph format</a>.
<br><br>
Support of the other formats and projects can be extended.
<br><br>
For bug reports and feature requests, please go to our
<a href="https://github.com/luchko/latticegraph_designer">Github website</a>.
'''
self.msg.setText(text)
self.msg.setWindowTitle("About Lattice graph designer")
self.msg.setStandardButtons(QMessageBox.Ok)
self.msg.exec_()
def doc_callback(self):
'''open documentation'''
webbrowser.open_new_tab("https://latticegraph-designer.readthedocs.io")
def run():
'''run the application'''
# check if xml codefile is passed as an input
if len(sys.argv) == 2:
fn = sys.argv[1]
if os.path.exists(fn):
fn = os.path.abspath(fn)
else:
raise ValueError("file {} doesn't exist.".format(fn))
else:
fn = None
app = QApplication(sys.argv)
mainWindow = MainWindow(fileName=fn)
mainWindow.show()
sys.exit(app.exec_())
if __name__ == '__main__':
sys.exit(run()) | mit |
wwf5067/statsmodels | statsmodels/stats/anova.py | 25 | 13433 | from statsmodels.compat.python import lrange, lmap
import numpy as np
from scipy import stats
from pandas import DataFrame, Index
from statsmodels.formula.formulatools import (_remove_intercept_patsy,
_has_intercept, _intercept_idx)
def _get_covariance(model, robust):
if robust is None:
return model.cov_params()
elif robust == "hc0":
se = model.HC0_se
return model.cov_HC0
elif robust == "hc1":
se = model.HC1_se
return model.cov_HC1
elif robust == "hc2":
se = model.HC2_se
return model.cov_HC2
elif robust == "hc3":
se = model.HC3_se
return model.cov_HC3
else: # pragma: no cover
raise ValueError("robust options %s not understood" % robust)
#NOTE: these need to take into account weights !
def anova_single(model, **kwargs):
"""
ANOVA table for one fitted linear model.
Parameters
----------
model : fitted linear model results instance
A fitted linear model
typ : int or str {1,2,3} or {"I","II","III"}
Type of sum of squares to use.
**kwargs**
scale : float
Estimate of variance, If None, will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
Notes
-----
Use of this function is discouraged. Use anova_lm instead.
"""
test = kwargs.get("test", "F")
scale = kwargs.get("scale", None)
typ = kwargs.get("typ", 1)
robust = kwargs.get("robust", None)
if robust:
robust = robust.lower()
endog = model.model.endog
exog = model.model.exog
nobs = exog.shape[0]
response_name = model.model.endog_names
design_info = model.model.data.design_info
exog_names = model.model.exog_names
# +1 for resids
n_rows = (len(design_info.terms) - _has_intercept(design_info) + 1)
pr_test = "PR(>%s)" % test
names = ['df', 'sum_sq', 'mean_sq', test, pr_test]
table = DataFrame(np.zeros((n_rows, 5)), columns = names)
if typ in [1,"I"]:
return anova1_lm_single(model, endog, exog, nobs, design_info, table,
n_rows, test, pr_test, robust)
elif typ in [2, "II"]:
return anova2_lm_single(model, design_info, n_rows, test, pr_test,
robust)
elif typ in [3, "III"]:
return anova3_lm_single(model, design_info, n_rows, test, pr_test,
robust)
elif typ in [4, "IV"]:
raise NotImplemented("Type IV not yet implemented")
else: # pragma: no cover
raise ValueError("Type %s not understood" % str(typ))
def anova1_lm_single(model, endog, exog, nobs, design_info, table, n_rows, test,
pr_test, robust):
"""
ANOVA table for one fitted linear model.
Parameters
----------
model : fitted linear model results instance
A fitted linear model
**kwargs**
scale : float
Estimate of variance, If None, will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
Notes
-----
Use of this function is discouraged. Use anova_lm instead.
"""
#maybe we should rethink using pinv > qr in OLS/linear models?
effects = getattr(model, 'effects', None)
if effects is None:
q,r = np.linalg.qr(exog)
effects = np.dot(q.T, endog)
arr = np.zeros((len(design_info.terms), len(design_info.column_names)))
slices = [design_info.slice(name) for name in design_info.term_names]
for i,slice_ in enumerate(slices):
arr[i, slice_] = 1
sum_sq = np.dot(arr, effects**2)
#NOTE: assumes intercept is first column
idx = _intercept_idx(design_info)
sum_sq = sum_sq[~idx]
term_names = np.array(design_info.term_names) # want boolean indexing
term_names = term_names[~idx]
index = term_names.tolist()
table.index = Index(index + ['Residual'])
table.ix[index, ['df', 'sum_sq']] = np.c_[arr[~idx].sum(1), sum_sq]
if test == 'F':
table.ix[:n_rows, test] = ((table['sum_sq']/table['df'])/
(model.ssr/model.df_resid))
table.ix[:n_rows, pr_test] = stats.f.sf(table["F"], table["df"],
model.df_resid)
# fill in residual
table.ix['Residual', ['sum_sq','df', test, pr_test]] = (model.ssr,
model.df_resid,
np.nan, np.nan)
table['mean_sq'] = table['sum_sq'] / table['df']
return table
#NOTE: the below is not agnostic about formula...
def anova2_lm_single(model, design_info, n_rows, test, pr_test, robust):
"""
ANOVA type II table for one fitted linear model.
Parameters
----------
model : fitted linear model results instance
A fitted linear model
**kwargs**
scale : float
Estimate of variance, If None, will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
Notes
-----
Use of this function is discouraged. Use anova_lm instead.
Type II
Sum of Squares compares marginal contribution of terms. Thus, it is
not particularly useful for models with significant interaction terms.
"""
terms_info = design_info.terms[:] # copy
terms_info = _remove_intercept_patsy(terms_info)
names = ['sum_sq', 'df', test, pr_test]
table = DataFrame(np.zeros((n_rows, 4)), columns = names)
cov = _get_covariance(model, None)
robust_cov = _get_covariance(model, robust)
col_order = []
index = []
for i, term in enumerate(terms_info):
# grab all varaibles except interaction effects that contain term
# need two hypotheses matrices L1 is most restrictive, ie., term==0
# L2 is everything except term==0
cols = design_info.slice(term)
L1 = lrange(cols.start, cols.stop)
L2 = []
term_set = set(term.factors)
for t in terms_info: # for the term you have
other_set = set(t.factors)
if term_set.issubset(other_set) and not term_set == other_set:
col = design_info.slice(t)
# on a higher order term containing current `term`
L1.extend(lrange(col.start, col.stop))
L2.extend(lrange(col.start, col.stop))
L1 = np.eye(model.model.exog.shape[1])[L1]
L2 = np.eye(model.model.exog.shape[1])[L2]
if L2.size:
LVL = np.dot(np.dot(L1,robust_cov),L2.T)
from scipy import linalg
orth_compl,_ = linalg.qr(LVL)
r = L1.shape[0] - L2.shape[0]
# L1|2
# use the non-unique orthogonal completion since L12 is rank r
L12 = np.dot(orth_compl[:,-r:].T, L1)
else:
L12 = L1
r = L1.shape[0]
#from IPython.core.debugger import Pdb; Pdb().set_trace()
if test == 'F':
f = model.f_test(L12, cov_p=robust_cov)
table.ix[i, test] = test_value = f.fvalue
table.ix[i, pr_test] = f.pvalue
# need to back out SSR from f_test
table.ix[i, 'df'] = r
col_order.append(cols.start)
index.append(term.name())
table.index = Index(index + ['Residual'])
table = table.ix[np.argsort(col_order + [model.model.exog.shape[1]+1])]
# back out sum of squares from f_test
ssr = table[test] * table['df'] * model.ssr/model.df_resid
table['sum_sq'] = ssr
# fill in residual
table.ix['Residual', ['sum_sq','df', test, pr_test]] = (model.ssr,
model.df_resid,
np.nan, np.nan)
return table
def anova3_lm_single(model, design_info, n_rows, test, pr_test, robust):
n_rows += _has_intercept(design_info)
terms_info = design_info.terms
names = ['sum_sq', 'df', test, pr_test]
table = DataFrame(np.zeros((n_rows, 4)), columns = names)
cov = _get_covariance(model, robust)
col_order = []
index = []
for i, term in enumerate(terms_info):
# grab term, hypothesis is that term == 0
cols = design_info.slice(term)
L1 = np.eye(model.model.exog.shape[1])[cols]
L12 = L1
r = L1.shape[0]
if test == 'F':
f = model.f_test(L12, cov_p=cov)
table.ix[i, test] = test_value = f.fvalue
table.ix[i, pr_test] = f.pvalue
# need to back out SSR from f_test
table.ix[i, 'df'] = r
#col_order.append(cols.start)
index.append(term.name())
table.index = Index(index + ['Residual'])
#NOTE: Don't need to sort because terms are an ordered dict now
#table = table.ix[np.argsort(col_order + [model.model.exog.shape[1]+1])]
# back out sum of squares from f_test
ssr = table[test] * table['df'] * model.ssr/model.df_resid
table['sum_sq'] = ssr
# fill in residual
table.ix['Residual', ['sum_sq','df', test, pr_test]] = (model.ssr,
model.df_resid,
np.nan, np.nan)
return table
def anova_lm(*args, **kwargs):
"""
ANOVA table for one or more fitted linear models.
Parameters
----------
args : fitted linear model results instance
One or more fitted linear models
scale : float
Estimate of variance, If None, will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
typ : str or int {"I","II","III"} or {1,2,3}
The type of ANOVA test to perform. See notes.
robust : {None, "hc0", "hc1", "hc2", "hc3"}
Use heteroscedasticity-corrected coefficient covariance matrix.
If robust covariance is desired, it is recommended to use `hc3`.
Returns
-------
anova : DataFrame
A DataFrame containing.
Notes
-----
Model statistics are given in the order of args. Models must have
been fit using the formula api.
See Also
--------
model_results.compare_f_test, model_results.compare_lm_test
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.formula.api import ols
>>> moore = sm.datasets.get_rdataset("Moore", "car",
... cache=True) # load data
>>> data = moore.data
>>> data = data.rename(columns={"partner.status" :
... "partner_status"}) # make name pythonic
>>> moore_lm = ols('conformity ~ C(fcategory, Sum)*C(partner_status, Sum)',
... data=data).fit()
>>> table = sm.stats.anova_lm(moore_lm, typ=2) # Type 2 ANOVA DataFrame
>>> print table
"""
typ = kwargs.get('typ', 1)
### Farm Out Single model ANOVA Type I, II, III, and IV ###
if len(args) == 1:
model = args[0]
return anova_single(model, **kwargs)
try:
assert typ in [1,"I"]
except:
raise ValueError("Multiple models only supported for type I. "
"Got type %s" % str(typ))
### COMPUTE ANOVA TYPE I ###
# if given a single model
if len(args) == 1:
return anova_single(*args, **kwargs)
# received multiple fitted models
test = kwargs.get("test", "F")
scale = kwargs.get("scale", None)
n_models = len(args)
model_formula = []
pr_test = "Pr(>%s)" % test
names = ['df_resid', 'ssr', 'df_diff', 'ss_diff', test, pr_test]
table = DataFrame(np.zeros((n_models, 6)), columns = names)
if not scale: # assume biggest model is last
scale = args[-1].scale
table["ssr"] = lmap(getattr, args, ["ssr"]*n_models)
table["df_resid"] = lmap(getattr, args, ["df_resid"]*n_models)
table.ix[1:, "df_diff"] = -np.diff(table["df_resid"].values)
table["ss_diff"] = -table["ssr"].diff()
if test == "F":
table["F"] = table["ss_diff"] / table["df_diff"] / scale
table[pr_test] = stats.f.sf(table["F"], table["df_diff"],
table["df_resid"])
# for earlier scipy - stats.f.sf(np.nan, 10, 2) -> 0 not nan
table[pr_test][table['F'].isnull()] = np.nan
return table
if __name__ == "__main__":
import pandas
from statsmodels.formula.api import ols
# in R
#library(car)
#write.csv(Moore, "moore.csv", row.names=FALSE)
moore = pandas.read_table('moore.csv', delimiter=",", skiprows=1,
names=['partner_status','conformity',
'fcategory','fscore'])
moore_lm = ols('conformity ~ C(fcategory, Sum)*C(partner_status, Sum)',
data=moore).fit()
mooreB = ols('conformity ~ C(partner_status, Sum)', data=moore).fit()
# for each term you just want to test vs the model without its
# higher-order terms
# using Monette-Fox slides and Marden class notes for linear algebra /
# orthogonal complement
# https://netfiles.uiuc.edu/jimarden/www/Classes/STAT324/
table = anova_lm(moore_lm, typ=2)
| bsd-3-clause |
hitszxp/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 19 | 2844 | """
Testing for mean shift clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
"""Test estimate_bandwidth"""
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
""" Test MeanShift algorithm """
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_meanshift_predict():
"""Test MeanShift.predict"""
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_unfitted():
"""Non-regression: before fit, there should be not fitted attributes."""
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
"""
Test the bin seeding technique which can be used in the mean shift
algorithm
"""
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.5, 1.5], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
test_bins = get_bin_seeds(X, 0.01, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(test_result) == 6)
| bsd-3-clause |
aewallin/allantools | examples/ieee1139_randomwalk_fm.py | 2 | 3783 | import allantools
import allantools.noise as noise
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
import math
# ieee1139-table for random walk (Brownian) FM
# produces synthetic dataset with given PSD and compares against
# the predicted S_y, S_fi, S_x, and ADEV given in the table
# AW 2015-08-06
# from the ieee1139 table
# PSD_y(f) = h2 * f^-2 fractional frequency PSD
# PSD_fi(f) = h2 * vo^2 * f^-4 phase (radians) PSD
# PSD_x(f) = h2 * (2 pi)^-2 * f^-4 phase (time) PSD
# ADEV_y(tau) = sqrt{ 2*pi^2/3 * h2 * tau } Allan deviation
# sampling frequency in Hz (code should work for any non-zero value here)
fs = 12.8
h2 = 2e-20 # PSD f^-2 coefficient
N = 10*4096 # number of samples
v0 = 1.2345e6 # nominal oscillator frequency
y = noise.brown(num_points=N, b2=h2, fs=fs) # fractional frequency
x = allantools.frequency2phase(y, fs) # phase in seconds
fi = [2*math.pi*v0*xx for xx in x] # phase in radians
t = np.linspace(0, (1.0/fs)*N, len(y)) # time-series time axis
# time-series figure
plt.figure()
fig, ax1 = plt.subplots()
ax1.plot(t, y, label='y')
ax2 = ax1.twinx()
ax2.plot(t, x[1:], label='x')
plt.legend()
plt.xlabel('Time / s')
plt.ylabel('Fractional frequency')
# note: calculating the PSD of an 1/f^4 signal using fft seems to be difficult
# the welch method returns a correct 1/f^4 shaped PSD, but fft often does not
# things that may help
# - using a longer time-series (increase N above)
# - detrend using signal.detrend()
# - calculate PSD for a short window of data, and average over windows
# (this is done in the Welch method)
# - read the Welch code to see what is going on
f_y, psd_y = noise.numpy_psd(y, fs)
f_fi, psd_fi = noise.numpy_psd(signal.detrend(fi[:len(fi)/20]), fs)
f_x, psd_x = noise.numpy_psd(x[:len(x)/20], fs)
fxx, Pxx_den = noise.scipy_psd(y, fs)
f_fi2, psd_fi2 = noise.scipy_psd(fi, fs)
f_x2, psd_x2 = noise.scipy_psd(x, fs)
# Fractional frequency PSD
plt.figure()
plt.loglog(f_y, psd_y, label='numpy.fft()')
plt.loglog(fxx, Pxx_den, label='scipy.signal.welch()')
plt.loglog(f_y[1:], [h2/(ff*ff) for ff in f_y[1:]],
label='h_2/f^2 with h_2 = %.3g' % h2)
plt.legend(framealpha=0.5)
plt.title('PSD of fractional frequency')
plt.grid()
plt.xlabel('Frequeny / Hz')
plt.ylabel('one-sided PSD / S_y(f)')
# Phase (radians) PSD
plt.figure()
plt.loglog(f_fi, psd_fi, label='numpy.fft()')
plt.loglog(f_fi2, psd_fi2, label='scipy.signal.welch()')
plt.loglog(f_fi[1:], [h2*v0*v0/(ff**4.0)
for ff in f_fi[1:]], label='h_2 * v0^2 * f^-4')
plt.legend(framealpha=0.5)
plt.title('PSD of phase (radians)')
plt.xlabel('Frequeny / Hz')
plt.ylabel('one-sided PSD / S_fi(f)')
plt.grid()
# Phase (time) PSD
plt.figure()
plt.loglog(f_x, psd_x, label='numpy.fft()')
plt.loglog(f_x2, psd_x2, label='scipy.signal.welch()')
plt.loglog(f_x[1:], [h2/((2*math.pi)**2 * ff**4.0)
for ff in f_x[1:]], label='h2 * (2 pi)^-2 * f^-4')
plt.legend(framealpha=0.5)
plt.title('PSD of phase (time)')
plt.xlabel('Frequeny / Hz')
plt.ylabel('one-sided PSD / S_x(f)')
plt.grid()
plt.figure()
taus = [tt for tt in np.logspace(-2.2, 4, 100)]
(taus_y, devs_y, errs_y, ns_y) = allantools.oadev(
y, rate=fs, data_type='freq', taus=taus)
(taus_x, devs_x, errs_x, ns_x) = allantools.oadev(x, rate=fs, taus=taus)
plt.loglog(taus_y, devs_y, 'o', label='ADEV from y')
plt.loglog(taus_x, devs_x, '*', label='ADEV from x')
# sqrt{ 2*pi^2/3 * h2 * tau }
adev_y = [math.sqrt(((2*math.pi**2)/3)*h2*tt) for tt in taus]
plt.loglog(taus, adev_y, label='sqrt{ 2*pi^2/3 * h2 * tau }')
# plt.xlim((8e-3,1e3))
plt.legend(framealpha=0.6)
plt.title('Allan deviation')
plt.xlabel('Tau / s')
plt.ylabel('Allan deviation')
plt.grid()
plt.show()
| lgpl-3.0 |
gimli-org/gimli | doc/tutorials/dev/plot_XX_mod_fd_stokes-2d.py | 1 | 4526 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pygimli as pg
# from solverFVM import (solveFiniteVolume,
# createFVPostProzessMesh, diffusionConvectionKernel()
def buildUpB(b, rho, dt, u, v, dx, dy):
b[1:-1, 1:-1] = rho*(1/dt*((u[2:, 1:-1] - u[0:-2, 1:-1])/(2*dx) +
(v[1:-1, 2:]-v[1:-1, 0:-2])/(2*dy)) -
((u[2:, 1:-1]-u[0:-2, 1:-1])/(2*dx))**2 -
2*((u[1:-1, 2:]-u[1:-1, 0:-2])/(2*dy) *
(v[2:, 1:-1]-v[0:-2, 1:-1])/(2*dx)) -
((v[1:-1, 2:]-v[1:-1, 0:-2])/(2*dy))**2)
return b
def presPoisson(p, dx, dy, b):
pn = np.empty_like(p)
pn = p.copy()
for q in range(nit):
pn = p.copy()
p[1:-1, 1:-1] = ((pn[2:, 1:-1]+pn[0:-2, 1:-1])*dy**2 +
(pn[1:-1, 2:]+pn[1:-1, 0:-2])*dx**2) / \
(2*(dx**2+dy**2)) - \
dx**2*dy**2/(2*(dx**2+dy**2))*b[1:-1, 1:-1]
p[-1, :] = p[-2, :] # dp/dy = 0 at y = 2
p[0, :] = p[1, :] # dp/dy = 0 at y = 0
p[:, 0] = p[:, 1] # dp/dx = 0 at x = 0
p[:, -1] = 0 # p = 0 at x = 2
return p
def cavityFlow(nt, u, v, dt, dx, dy, p, rho, nu):
un = np.empty_like(u)
vn = np.empty_like(v)
b = np.zeros((ny, nx))
for n in range(nt):
un = u.copy()
vn = v.copy()
b = buildUpB(b, rho, dt, u, v, dx, dy)
p = presPoisson(p, dx, dy, b)
u[1:-1, 1:-1] = un[1:-1, 1:-1] - \
un[1:-1, 1:-1]*dt/dx*(un[1:-1, 1:-1] - un[0:-2, 1:-1]) - \
vn[1:-1, 1:-1]*dt/dy*(un[1:-1, 1:-1] - un[1:-1, 0:-2]) - \
dt/(2*rho*dx)*(p[2:, 1:-1] - p[0:-2, 1:-1]) + \
nu*(dt/dx**2*(un[2:, 1:-1]-2*un[1:-1, 1:-1] + un[0:-2, 1:-1]) +
dt/dy**2*(un[1:-1, 2:]-2*un[1:-1, 1:-1] + un[1:-1, 0:-2]))
v[1:-1, 1:-1] = vn[1:-1, 1:-1] - \
un[1:-1, 1:-1]*dt/dx*(vn[1:-1, 1:-1] - vn[0:-2, 1:-1]) - \
vn[1:-1, 1:-1]*dt/dy*(vn[1:-1, 1:-1] - vn[1:-1, 0:-2]) - \
dt/(2*rho*dy)*(p[1:-1, 2:]-p[1:-1, 0:-2]) + \
nu*(dt/dx**2*(vn[2:, 1:-1] - 2*vn[1:-1, 1:-1] + vn[0:-2, 1:-1]) +
dt/dy**2*(vn[1:-1, 2:]-2*vn[1:-1, 1:-1] + vn[1:-1, 0:-2]))
u[0, :] = 0
u[:, 0] = 0
u[:, -1] = 1
u[-1, :] = 0
v[0, :] = 0
v[-1, :] = 0
v[:, 0] = 0
v[:, -1] = 0
return u, v, p
# ---- Lid-driven cavity flow: simulation parameters and driver ----
# Grid resolution (nodes per axis) for a 2 x 2 physical domain.
nx = 41
ny = 41
nt = 500
nit = 50   # pseudo-pressure iterations per time step -- presumably used inside cavityFlow; TODO confirm
c = 1      # NOTE(review): appears unused in the visible code -- confirm
dx = 2.0 / (nx-1)
dy = 2.0 / (ny-1)
x = np.linspace(0, 2, nx)
y = np.linspace(0, 2, ny)
Y, X = np.meshgrid(y, x)
# Fluid properties: density (rho), kinematic viscosity (nu), time step (dt).
rho = 1
nu = .1
dt = .001
# Solution fields: velocity components (u, v), pressure (p), source term (b).
u = np.zeros((ny, nx))
v = np.zeros((ny, nx))
p = np.zeros((ny, nx))
b = np.zeros((ny, nx))
# NOTE(review): nt is re-assigned here, so the value 500 above is never used.
nt = 200
u, v, p = cavityFlow(nt, u, v, dt, dx, dy, p, rho, nu)
# fig = plt.figure(figsize=(11,7), dpi=100)
# ax1 = fig.add_subplot(1,3,1)
# ax2 = fig.add_subplot(1,3,2)
# ax3 = fig.add_subplot(1,3,3)
# Visualisation with pygimli (pg): pressure and velocity fields side by side.
grid = pg.createGrid(x, y)
fig = plt.figure()
ax1 = fig.add_subplot(1, 3, 1)
ax2 = fig.add_subplot(1, 3, 2)
ax3 = fig.add_subplot(1, 3, 3)
# Transpose-and-flatten converts the (ny, nx) arrays to pygimli's node order;
# logTransDropTol suppresses magnitudes below the 1e-2 tolerance for display.
pl = pg.logTransDropTol(np.array((p.T).flat), 1e-2)
ul = pg.logTransDropTol(np.array((u.T).flat), 1e-2)
vl = pg.logTransDropTol(np.array((v.T).flat), 1e-2)
pg.show(grid, pl, logScale=False, showLater=True, colorBar=True, axes=ax1,
        cmap='b2r')
pg.show(grid, ul, logScale=False, showLater=True, colorBar=True, axes=ax2)
pg.show(grid, vl, logScale=False, showLater=True, colorBar=True, axes=ax3)
# Velocity vectors stacked as (n_nodes, 2) for the streamline overlay.
vel = np.vstack([np.array((u.T).flat), np.array((v.T).flat)]).T
pg.viewer.mpl.drawStreams(ax1, grid, vel)
#im1 = ax1.contourf(X,Y,p,alpha=0.5) ###plnttong the pressure field as a contour
#divider1 = make_axes_locatable(ax1)
#cax1 = divider1.append_axes("right", size="20%", pad=0.05)
#cbar1 = plt.colorbar(im1, cax=cax1)
#im2 = ax2.contourf(X,Y,u,alpha=0.5) ###plnttong the pressure field as a contour
#divider2 = make_axes_locatable(ax2)
#cax2 = divider2.append_axes("right", size="20%", pad=0.05)
#cbar2 = plt.colorbar(im2, cax=cax2)
#im3 = ax3.contourf(X,Y,v,alpha=0.5) ###plnttong the pressure field as a contour
#divider3 = make_axes_locatable(ax3)
#cax3 = divider3.append_axes("right", size="20%", pad=0.05)
#cbar3 = plt.colorbar(im3, cax=cax3)
#ax1.contour(X,Y,p) ###plotting the pressure field outlines
#ax1.quiver(X[::2,::2],Y[::2,::2],u[::2,::2],v[::2,::2]) ##plotting velocity
#ax1.xlabel('X')
#ax1.ylabel('Y')
plt.show()
#drawMesh(ax, grid) | apache-2.0 |
MuhammedHasan/metabolitics | metabolitics/preprocessing/metabolitics_transformer.py | 1 | 1184 | from joblib import Parallel, delayed
from sklearn.base import TransformerMixin
from metabolitics.analysis import MetaboliticsAnalysis
class MetaboliticsTransformer(TransformerMixin):
    """Scikit-learn transformer that turns metabolite measurements into
    reaction-level flux-variability (min/max) features."""

    def __init__(self, network_model="recon2", n_jobs=-1):
        '''
        :param network_model: cobra.Model or name of the model.
        :param n_jobs: the maximum number of concurrently running jobs.
        '''
        self.analyzer = MetaboliticsAnalysis(network_model)
        self.n_jobs = n_jobs

    def fit(self, X, y=None):
        """No fitting required; present only to satisfy the sklearn API."""
        return self

    def transform(self, X, y=None):
        '''
        :param X: list of dict which contains metabolic measurements.
        '''
        # One joblib task per sample; results come back in input order.
        tasks = (delayed(self._transform)(sample) for sample in X)
        return Parallel(n_jobs=self.n_jobs)(tasks)

    def _transform(self, x):
        """Run variability analysis on a single sample and flatten the
        resulting frame into a {reaction_max/min: value} dict."""
        analysis = self.analyzer.copy().variability_analysis(x)
        features = {}
        for row in analysis.itertuples():
            features['%s_max' % row.Index] = row.maximum
            features['%s_min' % row.Index] = row.minimum
        return features
| gpl-3.0 |
rahulremanan/python_tutorial | Machine_Vision/01_Transfer_Learning/src/transfer_learning.py | 1 | 65186 | # !/usr/bin/python3.6
# -*- coding: utf-8 -*-
# Transfer learning using Keras and Tensorflow.
# Written by Rahul Remanan and MOAD (https://www.moad.computer) machine vision team.
# For more information contact: info@moad.computer
# License: MIT open source license
# Repository: https://github.com/rahulremanan/python_tutorial
import argparse
import os
import random
import time
import sys
import glob
try:
import h5py
except:
print ('Package h5py needed for saving model weights ...')
sys.exit(1)
import json
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
try:
import tensorflow
import keras
except:
print ('This code uses tensorflow deep-learning framework and keras api ...')
print ('Install tensorflow and keras to train the classifier ...')
sys.exit(1)
import PIL
from collections import defaultdict
from keras.applications.inception_v3 import InceptionV3, \
preprocess_input as preprocess_input_inceptionv3
from keras.applications.inception_resnet_v2 import InceptionResNetV2, \
preprocess_input as preprocess_input_inceptionv4
from keras.models import Model, \
model_from_json, \
load_model
from keras.layers import Dense, \
GlobalAveragePooling2D, \
Dropout, \
BatchNormalization
from keras.layers.merge import concatenate
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras.optimizers import SGD, \
RMSprop, \
Adagrad, \
Adadelta, \
Adam, \
Adamax, \
Nadam
from keras.callbacks import EarlyStopping, \
ModelCheckpoint, \
ReduceLROnPlateau
from multiprocessing import Process
from execute_in_shell import execute_in_shell
IM_WIDTH, IM_HEIGHT = 299, 299 # Default input image size for Inception v3 and v4 architecture
DEFAULT_EPOCHS = 100  # Fallback epoch count when --epochs is not supplied
DEFAULT_BATCHES = 20  # Fallback batch size when --batches is not supplied
FC_SIZE = 4096  # Width of the fully connected layers in the new top
DEFAULT_DROPOUT = 0.1  # Fallback dropout rate for the new top layers
DEFAULT_NB_LAYERS_TO_FREEZE = 169  # Base layers kept frozen during fine tuning
verbose = False  # Controls shell-command logging (see normalize())
# Pre-built optimizer instances; DEFAULT_OPTIMIZER is the fallback used by
# select_optimizer() when the requested optimizer name is not recognised.
sgd = SGD(lr=1e-7, decay=0.5, momentum=1, nesterov=True)
rms = RMSprop(lr=1e-7, rho=0.9, epsilon=1e-08, decay=0.0)
ada = Adagrad(lr=1e-3, epsilon=1e-08, decay=0.0)
DEFAULT_OPTIMIZER = sgd
def generate_timestamp():
    """Return the current system time formatted as YYYY_MM_DD-HH_MM_SS.

    The stamp is also printed, and is used elsewhere to tag output
    file names (weights, configs, plots) for a single run.
    """
    stamp = time.strftime("%Y_%m_%d-%H_%M_%S")
    print ("Time stamp generated: " + stamp)
    return stamp
# Run-wide timestamp; baked into every weight/config/plot file name below.
timestr = generate_timestamp()
def is_valid_file(parser, arg):
    """
    Check whether *arg* points to an existing file.

    Returns *arg* unchanged when the file exists.  When it does not, the
    problem is reported through ``parser.error`` (which raises SystemExit
    for a real argparse parser) and None is returned; a missing parser
    falls back to plain prints.

    :param parser: argparse.ArgumentParser instance, or None.
    :param arg: file path to validate.
    """
    if os.path.isfile(arg):
        return arg
    try:
        # parser.error() raises SystemExit; parser=None raises AttributeError.
        parser.error("The file %s does not exist ..." % arg)
    except (SystemExit, AttributeError):
        # Bug fix: the original condition was inverted -- it printed
        # "No valid argument parser found" when a parser WAS present.
        if parser is None:
            print ("No valid argument parser found ...")
        print ("The file %s does not exist ..." % arg)
    return None
def is_valid_dir(parser, arg):
    """
    Check whether *arg* points to an existing directory.

    Returns *arg* unchanged when the folder exists.  When it does not,
    the problem is reported through ``parser.error`` (which raises
    SystemExit for a real argparse parser) and None is returned; a
    missing parser falls back to plain prints.

    :param parser: argparse.ArgumentParser instance, or None.
    :param arg: directory path to validate.
    """
    if os.path.isdir(arg):
        return arg
    try:
        # parser.error() raises SystemExit; parser=None raises AttributeError.
        return parser.error("The folder %s does not exist ..." % arg)
    except (SystemExit, AttributeError):
        # Bug fix: the original condition was inverted -- it printed
        # "No valid argument parser found" when a parser WAS present.
        if parser is None:
            print ("No valid argument parser found")
        print ("The folder %s does not exist ..." % arg)
    return None
def string_to_bool(val):
    """Parse a human-friendly boolean string (case-insensitive).

    Accepts yes/true/t/y/1/yeah/yup as True and
    no/false/f/n/0/none/nope as False; anything else raises
    argparse.ArgumentTypeError, which makes this usable as an
    argparse ``type=`` callback.
    """
    token = val.lower()
    if token in ('yes', 'true', 't', 'y', '1', 'yeah', 'yup'):
        return True
    if token in ('no', 'false', 'f', 'n', '0', 'none', 'nope'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected ...')
def activation_val(val):
    """Validate a user-supplied activation function name (case-insensitive).

    Returns *val* unchanged when it names a supported Keras activation;
    raises argparse.ArgumentTypeError otherwise, making this usable as an
    argparse ``type=`` callback.
    """
    activation_function_options = ('hard_sigmoid',
                                   'elu',
                                   'linear',
                                   'relu',
                                   'selu',
                                   'sigmoid',
                                   'softmax',
                                   'softplus',
                                   # Bug fix: the correct Keras spelling is
                                   # 'softsign'; the 'sofsign' typo is kept
                                   # so existing command lines still parse.
                                   'softsign',
                                   'sofsign',
                                   'tanh')
    if val.lower() in activation_function_options:
        return val
    raise argparse.ArgumentTypeError('Unexpected activation function. \
                                     \nExpected values are: {} ...'.format(activation_function_options))
def loss_val(val):
    """Validate a user-supplied Keras loss-function name (case-insensitive).

    Returns *val* unchanged when recognised; raises
    argparse.ArgumentTypeError otherwise, making this usable as an
    argparse ``type=`` callback.
    """
    loss_function_options = ('mean_squared_error',
                             'mean_absolute_error',
                             'mean_absolute_percentage_error',
                             'mean_squared_logarithmic_error',
                             'squared_hinge',
                             'hinge',
                             'categorical_hinge',
                             'logcosh',
                             'categorical_crossentropy',
                             'sparse_categorical_crossentropy',
                             'binary_crossentropy',
                             'kullback_leibler_divergence',
                             'poisson',
                             'cosine_proximity')
    if val.lower() not in loss_function_options:
        raise argparse.ArgumentTypeError('Unexpected loss function. \
                                 \nExpected values are: {} ...'.format(loss_function_options))
    return val
def get_nb_files(directory):
    """Count entries one level inside every sub-directory of *directory*.

    Each sub-folder is treated as a class folder, so this effectively
    counts the samples under a train/validation root.  Returns 0 when
    the directory does not exist.
    """
    if not os.path.exists(directory):
        return 0
    total = 0
    for root, subdirs, _ in os.walk(directory):
        total += sum(len(glob.glob(os.path.join(root, name + "/*")))
                     for name in subdirs)
    return total
def add_top_layer(args, base_model, nb_classes):
    """
    This functions adds a fully connected convolutional neural network layer to a base model.
    The required input arguments for this function are: args, base_model and nb_classes.
       args: argument inputs the user arguments to be passed to the function,
       base_model: argument inputs the base model architecture to be added to the top layer,
       nb_classes: argument inputs the total number of classes for the output layer.
    Returns a keras Model whose input is base_model.input and whose output is
    a softmax layer over nb_classes classes.
    """
    # Read top-layer hyper-parameters from the user arguments, falling back
    # to module defaults when any of them is missing or malformed.
    try:
        dropout = float(args.dropout[0])
        weight_decay = float(args.decay[0])
        enable_dropout = args.enable_dropout[0]
    except:
        dropout = DEFAULT_DROPOUT
        weight_decay = 0.01
        enable_dropout = True
        print ('Invalid user input ...')
    try:
        activation = str(args.activation[0]).lower()
        print ('Building model using activation function: ' + str(activation))
    except:
        activation = 'relu'
        print ('Invalid user input for activation function ...')
        print ('Choice of activation functions: hard_sigmoid, elu, linear, relu, selu, sigmoid, softmax, softplus, sofsign, tanh ...')
        print ('Building model using default activation function: relu')
    # Freeze the pre-trained base; only the new top learns in this function.
    base_model.trainable = False
    bm = base_model.output
    # NOTE: the misspelled 'gloabl*' layer names are kept as-is -- they are
    # part of saved model configurations and renaming would break loading.
    x = Dropout(dropout,
                name='gloablDropout')(bm,
                                      training=enable_dropout)
    gap = GlobalAveragePooling2D(name='gloablAveragePooling2D')(x)
    bn = BatchNormalization(name='gloabl_batchNormalization')(x)
    enable_attention = args.enable_attention[0]
    enable_multilayerDense = args.enable_multilayerDense[0]
    ATTN_UNIT_SIZE = 256        # filters per attention conv layer
    ATTN_CONV_LAYER_DEPTH = 2   # number of stacked attention conv layers
    if enable_attention:
        """
        Covolutional attention layers
        """
        # NOTE(review): Conv2D, multiply, Lambda and np (numpy) are used in
        # this branch but are not imported in this file's visible header, so
        # enable_attention=True would raise NameError -- confirm imports.
        preTrained_featureSize = base_model.get_output_shape_at(0)[-1]
        x = bn
        for i in range(ATTN_CONV_LAYER_DEPTH):
            x = Conv2D(ATTN_UNIT_SIZE,
                       kernel_size=(1,1),
                       padding='same',
                       activation=activation,
                       name='convAttentionLayer_{}'.format(i))(x)
            x = Dropout(dropout,
                        name='attentionDropout_{}'.format(i))(x,
                                                              training=enable_dropout)
        # Collapse the attention stack to a single-channel mask.
        x = Conv2D(1,
                   kernel_size=(1,1),
                   padding='valid',
                   activation=activation,
                   name='convAttentionLayer_1D')(x)
        x = Dropout(dropout,
                    name='attentionDropout_1D')(x,
                                                training=enable_dropout)
        # Fixed (non-trainable) 1x1 conv that broadcasts the 1-channel mask
        # back up to the base model's feature depth.
        # NOTE(review): `weights` is given a bare 5-D array; Keras expects a
        # list of kernel arrays -- confirm this branch was ever exercised.
        upConv2d_weights = np.ones((1, 1, 1, 1, preTrained_featureSize))
        upConv2d = Conv2D(preTrained_featureSize,
                          kernel_size = (1,1),
                          padding = 'same',
                          activation = 'linear',
                          use_bias = False,
                          weights = upConv2d_weights,
                          name='upConv2d')
        upConv2d.trainable = False
        x = upConv2d(x)
        # Apply the mask to the normalised features, then rescale the pooled
        # features by the pooled mask so attention weights sum out.
        maskFeatures = multiply([x,
                                 bn],
                                name='multiply_maskFeature')
        gapFeatures = GlobalAveragePooling2D(name='attentionGlobalAveragePooling_features')(maskFeatures)
        gapMask = GlobalAveragePooling2D(name='attentionGlobalAveragePooling_mask')(x)
        gap = Lambda(lambda x: x[0]/x[1],
                     name = 'rescaleGlobalAeragePooling')([gapFeatures,
                                                           gapMask])
    if enable_multilayerDense:
        # Two parallel wide branches (x1, x2) off a shared stem, merged with
        # a narrower branch (x3) fed directly from the pooled features.
        x = Dropout(dropout,
                    name='dropout_fc1')(gap,
                                        training=enable_dropout)
        x = BatchNormalization(name='batchNormalization_fc1')(x)
        x = Dense(FC_SIZE,
                  activation=activation,
                  kernel_regularizer=l2(weight_decay),
                  name='dense_fc1')(x)
        x = Dropout(dropout,
                    name='dropout_fc2')(x,
                                        training=enable_dropout)
        x1 = Dense(FC_SIZE,
                   activation=activation,
                   kernel_regularizer=l2(weight_decay),
                   name="dense_fc2")(x)
        x1 = Dropout(dropout,
                     name = 'dropout_fc3')(x1,
                                           training=enable_dropout)
        x1 = BatchNormalization(name="batchNormalization_fc2")(x1)
        x1 = Dense(FC_SIZE,
                   activation=activation,
                   kernel_regularizer=l2(weight_decay),
                   name="dense_fc3")(x1)
        x1 = Dropout(dropout,
                     name = 'dropout_fc4')(x1,
                                           training=enable_dropout)
        x2 = Dense(FC_SIZE,
                   activation=activation,
                   kernel_regularizer=l2(weight_decay),
                   name="dense_fc4")(x)
        x2 = Dropout(dropout,
                     name = 'dropout_fc5')(x2,
                                           training=enable_dropout)
        x2 = BatchNormalization(name="batchNormalization_fc3")(x2)
        x2 = Dense(FC_SIZE,
                   activation=activation,
                   kernel_regularizer=l2(weight_decay),
                   name="dense_fc5")(x2)
        x2 = Dropout(dropout,
                     name = 'dropout_fc6')(x2,
                                           training=enable_dropout)
        # Merge the two wide branches and squeeze down.
        x12 = concatenate([x1, x2], name = 'mixed11')
        x12 = Dropout(dropout,
                      name = 'dropout_fc7')(x12,
                                            training=enable_dropout)
        x12 = Dense(FC_SIZE//16,
                    activation=activation,
                    kernel_regularizer=l2(weight_decay),
                    name = 'dense_fc6')(x12)
        x12 = Dropout(dropout,
                      name = 'dropout_fc8')(x12,
                                            training=enable_dropout)
        x12 = BatchNormalization(name="batchNormalization_fc4")(x12)
        x12 = Dense(FC_SIZE//32,
                    activation=activation,
                    kernel_regularizer=l2(weight_decay),
                    name = 'dense_fc7')(x12)
        x12 = Dropout(dropout,
                      name = 'dropout_fc9')(x12,
                                            training=enable_dropout)
        # Skip-style branch straight from the pooled features.
        x3 = Dense(FC_SIZE//2,
                   activation=activation,
                   kernel_regularizer=l2(weight_decay),
                   name = 'dense_fc8')(gap)
        x3 = Dropout(dropout,
                     name = 'dropout_fc11')(x3,
                                            training=enable_dropout)
        x3 = BatchNormalization(name="batchNormalization_fc5")(x3)
        x3 = Dense(FC_SIZE//2,
                   activation=activation,
                   kernel_regularizer=l2(weight_decay),
                   name = 'dense_fc9')(x3)
        x3 = Dropout(dropout,
                     name = 'dropout_fc12')(x3,
                                            training=enable_dropout)
        xout = concatenate([x12, x3], name ='mixed12')
        xout = Dense(FC_SIZE//32,
                     activation= activation,
                     kernel_regularizer=l2(weight_decay),
                     name = 'dense_fc10')(xout)
        xout = Dropout(dropout,
                       name = 'dropout_fc13')(xout,
                                              training=enable_dropout)
    else:
        # Simple head: one wide fully connected layer over the pooled features.
        x = BatchNormalization(name='batchNormalization_fc1')(gap)
        xout = Dense(FC_SIZE,
                     activation=activation,
                     kernel_regularizer=l2(weight_decay),
                     name='dense_fc1')(x)
        xout = Dropout(dropout,
                       name = 'dropout_fc13')(xout,
                                              training=enable_dropout)
    predictions = Dense(nb_classes, \
                        activation='softmax', \
                        kernel_regularizer=l2(weight_decay),
                        name='prediction')(xout) # Softmax output layer
    model = Model(inputs=base_model.input,
                  outputs=predictions)
    return model
def finetune_model(model, base_model, optimizer, loss, NB_FROZEN_LAYERS):
    """
    Prepare *model* for fine tuning: the first NB_FROZEN_LAYERS layers of
    *base_model* are frozen, every later layer is unfrozen, and the model
    is compiled.

    :param model: the full model (base + new top) to compile.
    :param base_model: the pre-trained base whose layers are (un)frozen.
    :param optimizer: optimizer instance or name for compilation.
    :param loss: loss function name for compilation.
    :param NB_FROZEN_LAYERS: count of leading base layers to keep frozen.
    :returns: the compiled model.
    """
    frozen_part = base_model.layers[:NB_FROZEN_LAYERS]
    trainable_part = base_model.layers[NB_FROZEN_LAYERS:]
    for layer in frozen_part:
        layer.trainable = False
    for layer in trainable_part:
        layer.trainable = True
    model.compile(optimizer=optimizer,
                  loss=loss,
                  metrics=['accuracy'])
    return model
def transferlearn_model(model, base_model, optimizer, loss):
    """
    Prepare *model* for transfer learning: every layer of *base_model* is
    frozen so only the newly added top trains, then the model is compiled.

    :param model: the full model (base + new top) to compile.
    :param base_model: the pre-trained base whose layers are frozen.
    :param optimizer: optimizer instance or name for compilation.
    :param loss: loss function name for compilation.
    :returns: the compiled model.
    """
    for frozen_layer in base_model.layers:
        frozen_layer.trainable = False
    model.compile(optimizer=optimizer,
                  loss=loss,
                  metrics=['accuracy'])
    return model
def save_model(args, name, model):
    """
    Persist the trained model to the output directory: weights go to a
    ``trained_<timestamp>_weights<name>.model`` file and the architecture
    to a ``trained_<timestamp>_config<name>.json`` file.

    :param args: parsed user arguments (uses args.output_dir).
    :param name: tag inserted into the file names (e.g. "_ft_" / "_tl_").
    :param model: the keras model to save.
    """
    prefix = os.path.join(args.output_dir[0]+"//trained_"+ timestr)
    weights_path = os.path.join(prefix + "_weights"+str(name)+".model")
    config_path = os.path.join(prefix+"_config"+str(name)+".json")
    model.save_weights(weights_path)
    # Serialize model to JSON
    with open(config_path, "w") as json_file:
        json_file.write(model.to_json())
    print ("Saved the trained model weights to: " +
           str(weights_path))
    print ("Saved the trained model configuration as a json file to: " +
           str(config_path))
def generate_labels(args):
    """
    Derive class labels from the sub-folder names of the train and
    validation directories, write them to ``trained_labels.json`` in the
    output directory, and return them sorted.  Exits with status 1 when
    the two directories disagree on the label set.
    """
    file_pointer = os.path.join(args.output_dir[0]+"//trained_labels")

    def _labels_under(top):
        # Map each first-level sub-folder name to the files below it.
        found = defaultdict(list)
        for root, subdirs, files in os.walk(top):
            for fname in files:
                full_path = os.path.join(root, fname)
                assert full_path.startswith(top)
                relative = full_path[len(top):].lstrip("/")
                found[relative.split("/")[0]].append(full_path)
        return found

    labels = sorted(_labels_under(args.train_dir[0]).keys())
    val_labels = sorted(_labels_under(args.val_dir[0]).keys())
    print("\nTraining labels: " + str(labels))
    print("\nValidation labels: " + str(val_labels))
    if set(labels) == set(val_labels):
        with open(os.path.join(file_pointer+".json"), "w") as json_file:
            json.dump(labels, json_file)
    else:
        print ("Mismatched training and validation data labels ...")
        print ("Sub-folder names do not match between training and validation directories ...")
        sys.exit(1)
    return labels
def normalize(args,
              labels,
              move = False,
              sub_sample = False):
    """Balance class sizes by copying (or moving) an equal random sample of
    each class into .tmp_train/.tmp_validation under args.root_dir.

    Every class is trimmed to the size of the smallest class (optionally
    scaled down further by the sub-sample fractions).  File operations are
    issued as shell commands via execute_in_shell.

    :param args: parsed user arguments (normalize, root_dir, train_dir, ...).
    :param labels: class label list (sub-folder names).
    :param move: move files instead of copying them.
    :param sub_sample: additionally scale class sizes by the
        train/val sub-sample fractions when they are in [0, 1].
    :returns: (True, train_size, val_size) on success,
        (False, None, None) when normalization could not run.
    """
    if args.normalize[0] and os.path.exists(args.root_dir[0]):
        # Recreate the temporary balanced directories from scratch.
        commands = ["rm -r {}/.tmp_train/".format(args.root_dir[0]),
                    "rm -r {}/.tmp_validation/".format(args.root_dir[0]),
                    "mkdir {}/.tmp_train/".format(args.root_dir[0]),
                    "mkdir {}/.tmp_validation/".format(args.root_dir[0])]
        execute_in_shell(command=commands,
                         verbose=verbose)
        del commands
        mk_train_folder = "mkdir -p {}/.tmp_train/".format(args.root_dir[0]) + "{}"
        mk_val_folder = "mkdir -p {}/.tmp_validation/".format(args.root_dir[0]) + "{}"
        train_class_sizes = []
        val_class_sizes = []
        for label in labels:
            train_class_sizes.append(len(glob.glob(args.train_dir[0] + "/{}/*".format(label))))
            val_class_sizes.append(len(glob.glob(args.val_dir[0] + "/{}/*".format(label))))
        # Normalize every class to the size of the smallest class.
        train_size = min(train_class_sizes)
        val_size = min(val_class_sizes)
        try:
            if sub_sample and 0 <= args.train_sub_sample[0] <=1 and 0 <= args.val_sub_sample[0] <=1 :
                train_size = int(train_size * args.train_sub_sample[0])
                val_size = int(val_size * args.val_sub_sample[0])
        except:
            print ('Sub sample mode disabled ...')
        print ("Normalized training class size {}".format(train_size))
        print ("Normalized validation class size {}".format(val_size))
        for label in labels:
            commands = [mk_train_folder.format(label),
                        mk_val_folder.format(label)]
            execute_in_shell(command=commands,
                             verbose=verbose)
            del commands
        commands = []
        for label in labels:
            train_images = (glob.glob('{}/{}/*.*'.format(args.train_dir[0], label), recursive=True))
            val_images = (glob.glob('{}/{}/*.*'.format(args.val_dir[0], label), recursive=True))
            # SystemRandom draws the sample without seeding from global state.
            sys_rnd = random.SystemRandom()
            if move:
                cmd = 'mv'
            else:
                cmd = 'cp'
            for file in sys_rnd.sample(train_images, train_size):
                if os.path.exists(file):
                    commands.append('{} {} {}/.tmp_train/{}/'.format(cmd, file, args.root_dir[0], label))
            for file in sys_rnd.sample(val_images, val_size):
                if os.path.exists(file):
                    commands.append('{} {} {}/.tmp_validation/{}/'.format(cmd, file, args.root_dir[0], label))
        # Run all copy/move commands in a separate process and wait for it.
        p = Process(target=execute_in_shell, args=([commands]))
        p.start()
        p.join()
        print ("\nData normalization pipeline completed successfully ...")
    else:
        print ("\nFailed to initiate data normalization pipeline ...")
        return False, None, None
    return True, train_size, val_size
def generate_plot(args, name, model_train):
    """Create training-summary plots when the --plot flag is set;
    otherwise explain how to enable them.

    :param args: parsed user arguments (uses args.plot).
    :param name: tag inserted into plot file names.
    :param model_train: keras History object from model training.
    """
    if args.plot[0] == True:
        plot_training(args, name, model_train)
        return
    print ("\nNo training summary plots generated ...")
    print ("Set: --plot True for creating training summary plots")
def plot_training(args, name, history):
    """Write accuracy and loss curves for a training run as PNG files in
    the output directory, tagged with the run timestamp and *name*.

    :param args: parsed user arguments (uses args.output_dir).
    :param name: tag inserted into the plot file names.
    :param history: keras History object with 'acc'/'val_acc'/'loss'/'val_loss'.
    """
    output_loc = args.output_dir[0]
    output_file_acc = os.path.join(output_loc+
                                   "//training_plot_acc_" +
                                   timestr+str(name)+".png")
    output_file_loss = os.path.join(output_loc+
                                    "//training_plot_loss_" +
                                    timestr+str(name)+".png")
    curves = ((output_file_acc, 'acc', 'val_acc', 'model accuracy', 'accuracy',
               "Successfully created the training accuracy plot: "),
              (output_file_loss, 'loss', 'val_loss', 'model loss', 'loss',
               "Successfully created the loss function plot: "))
    for path, key, val_key, title, ylabel, message in curves:
        fig = plt.figure()
        plt.plot(history.history[key])
        plt.plot(history.history[val_key])
        plt.title(title)
        plt.ylabel(ylabel)
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        fig.savefig(path, dpi=fig.dpi)
        print (message + str(path))
        plt.close()
def select_optimizer(args):
    """Build and return a keras optimizer instance from the user arguments.

    Recognised names (case-insensitive): sgd, nsgd, rms/rmsprop,
    ada/adagrad, adelta/adadelta, adam, amsgrad, adamax, nadam.  Unknown
    names fall back to DEFAULT_OPTIMIZER (SGD with Nesterov momentum).
    Hyper-parameters (lr, decay, epsilon, rho, beta_1, beta_2) are taken
    from args; not every optimizer uses all of them.
    """
    optimizer_val = args.optimizer_val[0]
    lr = args.learning_rate[0]
    decay = args.decay[0]
    epsilon = args.epsilon[0]
    rho = args.rho[0]
    beta_1 = args.beta_1[0]
    beta_2 = args.beta_2[0]
    if optimizer_val.lower() == 'sgd' :
        optimizer = SGD(lr=lr, \
                        decay=decay, \
                        momentum=1, \
                        nesterov=False)
        print ("Using SGD as the optimizer ...")
    elif optimizer_val.lower() == 'nsgd':
        optimizer = SGD(lr=lr, \
                        decay=decay,\
                        momentum=1, \
                        nesterov=True)
        print ("Using SGD as the optimizer with Nesterov momentum ...")
    elif optimizer_val.lower() == 'rms' \
         or \
         optimizer_val.lower() == 'rmsprop':
        optimizer = RMSprop(lr=lr, \
                            rho=rho, \
                            epsilon=epsilon,\
                            decay=decay)
        print ("Using RMSProp as the optimizer ...")
    elif optimizer_val.lower() == 'ada' \
         or \
         optimizer_val.lower() == 'adagrad':
        optimizer = Adagrad(lr=lr, \
                            epsilon=epsilon, \
                            decay=decay)
        print ("Using Adagrad as the optimizer ...")
    elif optimizer_val.lower() == 'adelta' \
         or \
         optimizer_val.lower() == 'adadelta':
        optimizer = Adadelta(lr=lr, \
                             rho=rho, \
                             epsilon=epsilon, \
                             decay=decay)
        print ("Using Adadelta as the optimizer ...")
    elif optimizer_val.lower() == 'adam':
        optimizer = Adam(lr=lr, \
                         beta_1=beta_1, \
                         beta_2=beta_2, \
                         epsilon=epsilon, \
                         decay=decay, \
                         amsgrad=False)
        print ("Using Adam as the optimizer ...")
        print ("Optimizer parameters (recommended default): ")
        print ("\n lr={} (0.001), \
                \n beta_1={} (0.9), \
                \n beta_2={} (0.999), \
                \n epsilon={} (1e-08), \
                \n decay={} (0.0)".format(lr,
                                          beta_1,
                                          beta_2,
                                          epsilon,
                                          decay))
    elif optimizer_val.lower() == 'amsgrad':
        # Same hyper-parameters as Adam, with the AMSGrad variant enabled.
        optimizer = Adam(lr=lr, \
                         beta_1=beta_1, \
                         beta_2=beta_2, \
                         epsilon=epsilon, \
                         decay=decay, \
                         amsgrad=True)
        print ("Using AmsGrad variant of Adam as the optimizer ...")
        print ("Optimizer parameters (recommended default): ")
        print ("\n lr={} (0.001), \
                \n beta_1={} (0.9), \
                \n beta_2={} (0.999), \
                \n epsilon={} (1e-08), \
                \n decay={} (0.0)".format(lr,
                                          beta_1,
                                          beta_2,
                                          epsilon,
                                          decay))
    elif optimizer_val.lower() == 'adamax':
        optimizer = Adamax(lr=lr, \
                           beta_1=beta_1, \
                           beta_2=beta_2, \
                           epsilon=epsilon, \
                           decay=decay)
        print ("Using Adamax variant of Adam as the optimizer ...")
        print ("Optimizer parameters (recommended default): ")
        print ("\n lr={} (0.002), \
                \n beta_1={} (0.9), \
                \n beta_2={} (0.999), \
                \n epsilon={} (1e-08), \
                \n schedule_decay={} (0.0)".format(lr,
                                                   beta_1,
                                                   beta_2,
                                                   epsilon,
                                                   decay))
    elif optimizer_val.lower() == 'nadam':
        optimizer = Nadam(lr=lr, \
                          beta_1=beta_1, \
                          beta_2=beta_2, \
                          epsilon=epsilon, \
                          schedule_decay=decay)
        print ("Using Nesterov Adam optimizer ...\
                \n decay arguments is passed on to schedule_decay variable ...")
        print ("Optimizer parameters (recommended default): ")
        print ("\n lr={} (0.002), \
                \n beta_1={} (0.9), \
                \n beta_2={} (0.999), \
                \n epsilon={} (1e-08), \
                \n schedule_decay={} (0.004)".format(lr,
                                                     beta_1,
                                                     beta_2,
                                                     epsilon,
                                                     decay))
    else:
        # Unknown name: fall back to the module-level default.
        optimizer = DEFAULT_OPTIMIZER
        print ("Using stochastic gradient descent with Nesterov momentum ('nsgd') as the default optimizer ...")
        print ("Options for optimizer are: 'sgd', \
                \n'nsgd', \
                \n'rmsprop', \
                \n'adagrad', \
                \n'adadelta', \
                \n'adam', \
                \n'nadam', \
                \n'amsgrad', \
                \n'adamax' ...")
    return optimizer
def process_model(args,
                  model,
                  base_model,
                  optimizer,
                  loss,
                  checkpointer_savepath):
    """Restore weights/checkpoints into *model* if requested, then compile
    it for either fine tuning or transfer learning.

    Precedence: explicit weights file (--load_weights) beats checkpoint
    (--load_checkpoint) beats fresh ImageNet weights.  Every load failure
    falls back to the default weights ("tabula rasa").

    :returns: the compiled model ready for training.
    """
    load_weights_ = args.load_weights[0]
    fine_tune_model = args.fine_tune[0]
    load_checkpoint = args.load_checkpoint[0]
    if load_weights_ == True:
        # Optionally rebuild the architecture from a saved JSON config
        # before loading the weights into it.
        try:
            with open(args.config_file[0]) as json_file:
                model_json = json_file.read()
            model = model_from_json(model_json)
        except:
            model = model
        try:
            model.load_weights(args.weights_file[0])
            print ("\nLoaded model weights from: " + str(args.weights_file[0]))
        except:
            print ("\nError loading model weights ...")
            print ("Tabula rasa ...")
            print ("Loaded default model weights ...")
    elif load_checkpoint == True and os.path.exists(checkpointer_savepath):
        try:
            model = load_model(checkpointer_savepath)
            print ("\nLoaded model from checkpoint: " + str(checkpointer_savepath))
        except:
            # Fall back to an explicitly supplied checkpoint file, if any.
            if os.path.exists(args.saved_chkpnt[0]):
                model = load_model(args.saved_chkpnt[0])
                print ('\nLoaded saved checkpoint file ...')
            else:
                print ("\nError loading model checkpoint ...")
                print ("Tabula rasa ...")
                print ("Loaded default model weights ...")
    else:
        model = model
        print ("\nTabula rasa ...")
        print ("Loaded default model weights ...")
    try:
        NB_FROZEN_LAYERS = args.frozen_layers[0]
    except:
        NB_FROZEN_LAYERS = DEFAULT_NB_LAYERS_TO_FREEZE
    # Compile for fine tuning (partially unfrozen base) or transfer
    # learning (fully frozen base).
    if fine_tune_model == True:
        print ("\nFine tuning Inception architecture ...")
        print ("Frozen layers: " + str(NB_FROZEN_LAYERS))
        model = finetune_model(model, base_model, optimizer, loss, NB_FROZEN_LAYERS)
    else:
        print ("\nTransfer learning using Inception architecture ...")
        model = transferlearn_model(model, base_model, optimizer, loss)
    return model
def process_images(args):
    """Build the training and validation ImageDataGenerator pipelines.

    The Inception-version-appropriate preprocessing function is always
    applied; geometric augmentation (rotation, shifts, shear, zoom, flips)
    is added when --train_augmentation / --test_augmentation are set, with
    per-parameter user overrides and safe defaults.

    :returns: [train_datagen, test_datagen]
    """
    train_aug = args.train_aug[0]
    test_aug = args.test_aug[0]
    # Pick the preprocessing that matches the chosen base architecture.
    if str((args.base_model[0]).lower()) == 'inceptionv4' or \
       str((args.base_model[0]).lower()) == 'inception_v4' or \
       str((args.base_model[0]).lower()) == 'inception_resnet':
        preprocess_input = preprocess_input_inceptionv4
    else:
        preprocess_input = preprocess_input_inceptionv3
    if train_aug==True:
        try:
            train_rotation_range = args.train_rot[0]
            train_width_shift_range = args.train_w_shift[0]
            train_height_shift_range = args.train_ht_shift[0]
            train_shear_range = args.train_shear[0]
            train_zoom_range = args.train_zoom[0]
            train_vertical_flip = args.train_vflip[0]
            train_horizontal_flip = args.train_hflip[0]
        except:
            # Safe defaults when any augmentation argument is missing.
            train_rotation_range = 30
            train_width_shift_range = 0.2
            train_height_shift_range = 0.2
            train_shear_range = 0.2
            train_zoom_range = 0.2
            train_vertical_flip = True
            train_horizontal_flip = True
            print ("\nFailed to load custom training image augmentation parameters ...")
            print ("Loaded pre-set defaults ...")
            print ("To switch off image augmentation during training, set --train_augmentation flag to False")
        train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input,
                                           rotation_range=train_rotation_range,
                                           width_shift_range=train_width_shift_range,
                                           height_shift_range=train_height_shift_range,
                                           shear_range=train_shear_range,
                                           zoom_range=train_zoom_range,
                                           vertical_flip=train_vertical_flip,
                                           horizontal_flip=train_horizontal_flip)
        print ("\nCreated image augmentation pipeline for training images ...")
        print ("Image augmentation parameters for training images: \
                \n image rotation range = {},\
                \n width shift range = {},\
                \n height shift range = {}, \
                \n shear range = {} ,\
                \n zoom range = {}, \
                \n enable vertical flip = {}, \
                \n enable horizontal flip = {}".format(train_rotation_range,
                                                       train_width_shift_range,
                                                       train_height_shift_range,
                                                       train_shear_range,
                                                       train_zoom_range,
                                                       train_vertical_flip,
                                                       train_horizontal_flip))
    else:
        # No augmentation: only the architecture-specific preprocessing.
        train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
    if test_aug==True:
        try:
            test_rotation_range = args.test_rot[0]
            test_width_shift_range = args.test_w_shift[0]
            test_height_shift_range = args.test_ht_shift[0]
            test_shear_range = args.test_shear[0]
            test_zoom_range = args.test_zoom[0]
            test_vertical_flip = args.test_vflip[0]
            test_horizontal_flip = args.test_hflip[0]
        except:
            test_rotation_range = 30
            test_width_shift_range = 0.2
            test_height_shift_range = 0.2
            test_shear_range = 0.2
            test_zoom_range = 0.2
            test_vertical_flip = True
            test_horizontal_flip = True
            print ("\nFailed to load custom training image augmentation parameters ...")
            print ("Loaded pre-set defaults ...")
            print ("To switch off image augmentation during training, set --train_augmentation flag to False")
        test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input,
                                          rotation_range=test_rotation_range,
                                          width_shift_range=test_width_shift_range,
                                          height_shift_range=test_height_shift_range,
                                          shear_range=test_shear_range,
                                          zoom_range=test_zoom_range,
                                          vertical_flip=test_vertical_flip,
                                          horizontal_flip=test_horizontal_flip)
        print ("\nCreated image augmentation pipeline for training images ...")
        print ("\nImage augmentation parameters for training images:")
        print( "\n image rotation range = {},\
                \n width shift range = {},\
                \n height shift range = {}, \
                \n shear range = {} ,\
                \n zoom range = {}, \
                \n enable vertical flip = {}, \
                \n enable horizontal flip = {}".format(test_rotation_range,
                                                       test_width_shift_range,
                                                       test_height_shift_range,
                                                       test_shear_range,
                                                       test_zoom_range,
                                                       test_vertical_flip,
                                                       test_horizontal_flip))
    else:
        test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
    return [train_datagen, test_datagen]
def gen_model(args, enable_dropout):
    """Instantiate the requested Inception base (v3 or v4/ResNet) with
    ImageNet weights and attach the custom classification top.

    NOTE: *enable_dropout* is accepted for caller compatibility but is not
    used here; dropout behaviour is read from args inside add_top_layer.

    :returns: [model, base_model]
    """
    requested = str((args.base_model[0]).lower())
    if requested in ('inceptionv4', 'inception_v4', 'inception_resnet'):
        base_model = InceptionResNetV2(weights='imagenet',
                                       include_top=False)
        base_model_name = 'Inception version 4'
    else:
        base_model = InceptionV3(weights='imagenet',
                                 include_top=False)
        base_model_name = 'Inception version 3'
    print ('\nBase model: ' + str(base_model_name))
    # One class per sub-folder of the training directory.
    nb_classes = len(glob.glob(args.train_dir[0] + "/*"))
    model = add_top_layer(args,
                          base_model,
                          nb_classes)
    print ("New top layer added to: " + str(base_model_name))
    return [model, base_model]
def train(args):
    """
    A function that takes the user arguments and initiates a training session of the neural network.
    This function takes only one input: args
    Example usage:
        if train_model == True:
            print ("Training sesssion initiated ...")
            train(args)
    """
    if not os.path.exists(args.output_dir[0]):
        os.makedirs(args.output_dir[0])
    optimizer = select_optimizer(args)
    loss = args.loss[0]
    checkpointer_savepath = os.path.join(args.output_dir[0] +
                                         '/checkpoint/Transfer_learn_' +
                                         str(IM_WIDTH) + '_' +
                                         str(IM_HEIGHT) + '_' + '.h5')
    # Sanity-check the data layout: one sub-folder per class, and the same
    # class count in the train and validation directories.
    nb_train_samples = get_nb_files(args.train_dir[0])
    nb_classes = len(glob.glob(args.train_dir[0] + "/*"))
    print ("\nTotal number of training samples = " + str(nb_train_samples))
    print ("Number of training classes = " + str(nb_classes))
    nb_val_samples = get_nb_files(args.val_dir[0])
    nb_val_classes = len(glob.glob(args.val_dir[0] + "/*"))
    print ("\nTotal number of validation samples = " + str(nb_val_samples))
    print ("Number of validation classes = " + str(nb_val_classes))
    if nb_val_classes == nb_classes:
        print ("\nInitiating training session ...")
    else:
        print ("\nMismatched number of training and validation data classes ...")
        print ("Unequal number of sub-folders found between train and validation directories ...")
        print ("Each sub-folder in train and validation directroies are treated as a separate class ...")
        print ("Correct this mismatch and re-run ...")
        print ("\nNow exiting ...")
        sys.exit(1)
    nb_epoch = int(args.epoch[0])
    batch_size = int(args.batch[0])
    [train_datagen, validation_datagen] = process_images(args)
    labels = generate_labels(args)
    train_dir = args.train_dir[0]
    val_dir = args.val_dir[0]
    # Bug fix: remember whether normalization actually ran, so the
    # steps-per-epoch calculation below cannot reference train_size/val_size
    # when it did not (the original second check only tested args.normalize[0],
    # risking a NameError when root_dir was missing).
    normalized = args.normalize[0] and os.path.exists(args.root_dir[0])
    if normalized:
        _, train_size, val_size = normalize(args,
                                            labels,
                                            move = False,
                                            sub_sample = args.sub_sample[0])
        train_dir = os.path.join(args.root_dir[0] +
                                 str ('/.tmp_train/'))
        val_dir = os.path.join(args.root_dir[0] +
                               str ('/.tmp_validation/'))
    print ("\nGenerating training data: ... ")
    train_generator = train_datagen.flow_from_directory(train_dir,
                                                        target_size=(IM_WIDTH, IM_HEIGHT),
                                                        batch_size=batch_size,
                                                        class_mode='categorical')
    print ("\nGenerating validation data: ... ")
    validation_generator = validation_datagen.flow_from_directory(val_dir,
                                                                  target_size=(IM_WIDTH, IM_HEIGHT),
                                                                  batch_size=batch_size,
                                                                  class_mode='categorical')
    enable_dropout = args.enable_dropout[0]
    [model, base_model] = gen_model(args, enable_dropout)
    model = process_model(args,
                          model,
                          base_model,
                          optimizer,
                          loss,
                          checkpointer_savepath)
    print ("\nInitializing training with class labels: " +
           str(labels))
    model_summary_ = args.model_summary[0]
    if model_summary_ == True:
        print (model.summary())
    else:
        print ("\nSuccessfully loaded deep neural network classifier for training ...")
    print ("\nReady, Steady, Go ...")
    print ("\n")
    if not os.path.exists(os.path.join(args.output_dir[0] + '/checkpoint/')):
        os.makedirs(os.path.join(args.output_dir[0] + '/checkpoint/'))
    lr = args.learning_rate[0]
    # Callbacks: stop early on plateau, keep the best checkpoint, and halve
    # the learning rate when validation accuracy stalls.
    earlystopper = EarlyStopping(patience=6,
                                 verbose=1)
    checkpointer = ModelCheckpoint(checkpointer_savepath,
                                   verbose=1,
                                   save_best_only=True)
    learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
                                                patience=2,
                                                mode = 'max',
                                                epsilon=1e-4,
                                                cooldown=1,
                                                verbose=1,
                                                factor=0.5,
                                                min_lr=lr*1e-2)
    steps_per_epoch = nb_train_samples//batch_size
    validation_steps = nb_val_samples//batch_size
    if normalized:
        steps_per_epoch = (train_size*len(labels))//batch_size
        # Bug fix: the original computed validation_steps from train_size,
        # so validation ran for the wrong number of steps after
        # class-size normalization.
        validation_steps = (val_size*len(labels))//batch_size
    model_train = model.fit_generator(train_generator,
                                      epochs=nb_epoch,
                                      steps_per_epoch=steps_per_epoch,
                                      validation_data=validation_generator,
                                      validation_steps=validation_steps,
                                      class_weight='auto',
                                      callbacks=[earlystopper,
                                                 learning_rate_reduction,
                                                 checkpointer])
    # Tag outputs "_ft_" (fine tuned) or "_tl_" (transfer learned).
    if args.fine_tune[0] == True:
        save_model(args, "_ft_", model)
        generate_plot(args, "_ft_", model_train)
    else:
        save_model(args, "_tl_", model)
        generate_plot(args, "_tl_", model_train)
def get_user_options():
    """Build and parse the command-line options for the training script.

    Every option is declared with ``nargs=1`` and a single-element list
    default, so callers consistently access values as ``args.<dest>[0]``.

    Example usage:
        args = get_user_options()
        if ((not os.path.exists(args.train_dir[0]))
            or
            (not os.path.exists(args.val_dir[0]))
            or
            (not os.path.exists(args.output_dir[0]))):
            print("Specified directories do not exist ...")
            sys.exit(1)

    Returns:
        argparse.Namespace whose attributes are single-element lists.
    """
    a = argparse.ArgumentParser()

    # Local helpers: every option shares the same shape (nargs=1,
    # list-wrapped default), so factor out the boilerplate instead of
    # repeating it ~50 times.  Options are registered in the same order
    # as before, so --help output is unchanged.
    def add_dir(flag, dest, help_text, required=True, default=None):
        # Directory-valued option validated through is_valid_dir.
        kwargs = dict(help=help_text, dest=dest, required=required,
                      type=lambda x: is_valid_dir(a, x), nargs=1)
        if default is not None:
            kwargs['default'] = default
        a.add_argument(flag, **kwargs)

    def add_file(flag, dest, help_text):
        # Optional file-valued option validated through is_valid_file.
        a.add_argument(flag, help=help_text, dest=dest, required=False,
                       type=lambda x: is_valid_file(a, x), nargs=1)

    def add_bool(flag, dest, default, help_text, required=False):
        # Boolean flag parsed through string_to_bool.
        a.add_argument(flag, help=help_text, dest=dest, required=required,
                       default=[default], nargs=1, type=string_to_bool)

    def add_num(flag, dest, default, help_text, num_type=float):
        # Numeric option (float unless num_type says otherwise).
        a.add_argument(flag, help=help_text, dest=dest, required=False,
                       default=[default], type=num_type, nargs=1)

    add_dir("--training_directory", "train_dir",
            "Specify folder containing the training files ...")
    add_dir("--validation_directory", "val_dir",
            "Specify folder containing the validation files ...")
    add_dir("--root_directory", "root_dir",
            "Specify the root folder for sub-sampling and normalization ...",
            required=False, default=['./'])
    add_num("--epochs", "epoch", DEFAULT_EPOCHS,
            "Specify epochs for training ...", num_type=int)
    add_num("--batches", "batch", DEFAULT_BATCHES,
            "Specify batches for training ...", num_type=int)
    add_file("--weights_file", "weights_file",
             "Specify pre-trained model weights for training ...")
    add_file("--checkpoints_file", "saved_chkpnt",
             "Specify saved checkpoint weights for resuming training ...")
    add_file("--config_file", "config_file",
             "Specify pre-trained model configuration file ...")
    add_dir("--output_directory", "output_dir",
            "Specify output folder ...")
    add_bool("--train_model", "train_model", True,
             "Specify if the model should be trained ...", required=True)
    add_bool("--enable_dropout", "enable_dropout", True,
             "Specify if the dropout layer should be enabled during inference ...")
    # BUG FIX: the next two help strings were copy-pasted from
    # --enable_dropout and described the wrong option.
    add_bool("--enable_attention", "enable_attention", True,
             "Specify if the attention mechanism should be enabled ...")
    add_bool("--enable_multilayerDense", "enable_multilayerDense", False,
             "Specify if multiple dense layers should be used ...")
    add_bool("--load_truncated", "load_truncated", False,
             "Specify if truncated image loading should be supported ...")
    add_bool("--load_weights", "load_weights", False,
             "Specify if pre-trained model should be loaded ...")
    add_bool("--load_checkpoint", "load_checkpoint", True,
             "Specify if checkpointed weights are to be used ...")
    add_bool("--fine_tune", "fine_tune", True,
             "Specify model should be fine tuned ...")
    add_bool("--test_augmentation", "test_aug", False,
             "Specify image augmentation for test dataset ...")
    add_bool("--train_augmentation", "train_aug", False,
             "Specify image augmentation for train dataset ...")
    add_bool("--normalize", "normalize", False,
             "Specify if a training and validation data should be normalized ...")
    # BUG FIX: dest was "normalize", so passing --sub_sample silently
    # overwrote the --normalize flag instead of setting its own option.
    add_bool("--sub_sample", "sub_sample", False,
             "Specify if a training and validation data should be should be sub sampled ...")
    add_num("--train_sub_sample", "train_sub_sample", 0.8,
            "Specify the sub sampling fraction for training data ...")
    add_num("--validation_sub_sample", "val_sub_sample", 0.8,
            "Specify the sub sampling fraction for validation data ...")
    add_bool("--plot", "plot", True,
             "Specify if a plot should be generated ...")
    add_bool("--summary", "model_summary", False,
             "Specify if a summary should be generated ...")
    add_num("--train_image_rotation", "train_rot", 30,
            "Specify values for rotation range to be applied to training images during pre-processing ...")
    add_num("--train_image_width_shift", "train_w_shift", 0.2,
            "Specify values for width shift range to be applied to training images during pre-processing ...")
    add_num("--train_image_height_shift", "train_ht_shift", 0.2,
            "Specify values for height shift range to be applied to training images during pre-processing ...")
    add_num("--train_image_shear", "train_shear", 0.2,
            "Specify values for shear transformation range to be applied to training images during pre-processing ...")
    add_num("--train_image_zoom", "train_zoom", 0.2,
            "Specify values for zooming transformation range to be applied to training images during pre-processing ...")
    add_bool("--train_image_vertical_flip", "train_vflip", False,
             "Specify if training image should be randomly flipped vertical during pre-processing ...")
    add_bool("--train_image_horizontal_flip", "train_hflip", False,
             "Specify if training image should be randomly flipped horizontal during pre-processing ...")
    # BUG FIX: the --test_image_* help strings previously said
    # "training images"; they apply to test images.
    add_num("--test_image_rotation", "test_rot", 30,
            "Specify values for rotation range to be applied to test images during pre-processing ...")
    add_num("--test_image_width_shift", "test_w_shift", 0.2,
            "Specify values for width shift range to be applied to test images during pre-processing ...")
    add_num("--test_image_height_shift", "test_ht_shift", 0.2,
            "Specify values for height shift range to be applied to test images during pre-processing ...")
    add_num("--test_image_shear", "test_shear", 0.2,
            "Specify values for shear transformation range to be applied to test images during pre-processing ...")
    add_num("--test_image_zoom", "test_zoom", 0.2,
            "Specify values for zooming transformation range to be applied to test images during pre-processing ...")
    add_bool("--test_image_vertical_flip", "test_vflip", False,
             "Specify if test image should be randomly flipped vertical during pre-processing ...")
    add_bool("--test_image_horizontal_flip", "test_hflip", False,
             "Specify if test image should be randomly flipped horizontal during pre-processing ...")
    add_num("--dropout", "dropout", 0.4,
            "Specify values for dropout function ...")
    add_num("--epsilon", "epsilon", 1e-8,
            "Specify values for epsilon function ...")
    a.add_argument("--activation",
                   help="Specify activation function.\
                   \nAvailable activation functions are: 'hard_sigmoid', \
                   'elu', \
                   'linear', \
                   'relu', \
                   'selu', \
                   'sigmoid', \
                   'softmax', \
                   'softplus', \
                   'softsign', \
                   'tanh' ...",
                   dest="activation",
                   required=False,
                   default=['relu'],
                   type=activation_val,
                   nargs=1)
    a.add_argument("--loss",
                   help="Specify loss function.\
                   \nAvailable loss functions are: 'mean_squared_error', \
                   'mean_absolute_error' \
                   'mean_absolute_percentage_error' \
                   'mean_squared_logarithmic_error' \
                   'squared_hinge' \
                   'hinge', \
                   'categorical_hinge', \
                   'logcosh', \
                   'categorical_crossentropy', \
                   'sparse_categorical_crossentropy', \
                   'binary_crossentropy', \
                   'kullback_leibler_divergence', \
                   'poisson', \
                   'cosine_proximity' ...",
                   dest="loss",
                   required=False,
                   default=['categorical_crossentropy'],
                   type=loss_val,
                   nargs=1)
    add_num("--learning_rate", "learning_rate", 1e-07,
            "Specify values for learning rate ...")
    add_num("--rho", "rho", 0.9,
            "Specify values for rho\
            \n Applied to RMSprop and Adadelta ...")
    add_num("--beta_1", "beta_1", 0.9,
            "Specify values for beta_1\
            \n Applied to Adam, AmsGrad, Adadelta and Nadam ...")
    add_num("--beta_2", "beta_2", 0.999,
            "Specify values for beta_2\
            \n Applied to Adam, AmsGrad, Adadelta and Nadam ...")
    add_num("--decay", "decay", 0.0,
            "Specify values for decay function ...")
    a.add_argument("--optimizer",
                   help="Specify the type of optimizer to choose from. \
                   \nOptions for optimizer are: 'sgd', \
                   'nsgd', \
                   'rmsprop', \
                   'adagrad', \
                   'adadelta',\
                   'adam', \
                   'nadam', \
                   'amsgrad', \
                   'adamax' ...",
                   dest="optimizer_val",
                   required=False,
                   default=['adam'],
                   nargs=1)
    a.add_argument("--base_model",
                   help="Specify the type of base model classifier to build the neural network. \
                   \nOptions are: Inception_v4 or Inception_v3 ...",
                   dest="base_model",
                   required=False,
                   default=['Inception_V4'],
                   nargs=1)
    add_num("--frozen_layers", "frozen_layers", DEFAULT_NB_LAYERS_TO_FREEZE,
            "Specify the number of frozen bottom layers during fine-tuning ...",
            num_type=int)
    args = a.parse_args()
    return args
if __name__ == "__main__":
    args = get_user_options()
    # Bail out early unless all three user-supplied directories exist.
    required_dirs = (args.train_dir[0], args.val_dir[0], args.output_dir[0])
    if not all(os.path.exists(d) for d in required_dirs):
        print("Specified directories do not exist ...")
        sys.exit(1)
    train_model = args.train_model[0]
    # Optionally let PIL tolerate truncated image files.
    if args.load_truncated[0]:
        from PIL import ImageFile
        ImageFile.LOAD_TRUNCATED_IMAGES = True
    if train_model == True:
        print("Training sesssion initiated ...")
        train(args)
    else:
        print("Nothing to do here ...")
        print("Try setting the --train_model flag to True ...")
        print("For more help, run with -h flag ...")
        sys.exit(1)
| mit |
gclenaghan/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 284 | 3265 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
    """Count the distinct columns of CSR matrix X holding at least one non-zero."""
    _, col_indices = X.nonzero()
    return len(np.unique(col_indices))
def tokens(doc):
    """Lazily yield lower-cased word tokens extracted from doc.

    A bare ``\\w+`` regex stands in for a real tokenizer here; see
    CountVectorizer or TfidfVectorizer for more principled approaches.
    """
    for word in re.findall(r"\w+", doc):
        yield word.lower()
def token_freqs(doc):
    """Map each lower-cased \\w+ token in doc to its occurrence count.

    The tokenization is inlined here (identical to ``tokens``): split on
    ``\\w+`` matches and lower-case each hit.
    """
    counts = defaultdict(int)
    for word in re.findall(r"\w+", doc):
        counts[word.lower()] += 1
    return counts
# Restrict the demo to a handful of newsgroups so it runs quickly.
categories = [
    'alt.atheism',
    'comp.graphics',
    'comp.sys.ibm.pc.hardware',
    'misc.forsale',
    'rec.autos',
    'sci.space',
    'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None

print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print("    The default number of features is 2**18.")
print()

# Optional single CLI argument: the FeatureHasher output dimensionality.
try:
    n_features = int(sys.argv[1])
except IndexError:
    n_features = 2 ** 18
except ValueError:
    print("not a valid number of features: %r" % sys.argv[1])
    sys.exit(1)

print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
# Size in MB of the UTF-8 encoded corpus, used to report throughput below.
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()

# Benchmark 1: DictVectorizer on precomputed token-frequency dicts.
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()

# Benchmark 2: FeatureHasher on the same frequency dicts.
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()

# Benchmark 3: FeatureHasher directly on raw token streams.
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
clawpack/adjoint | paper2_examples/acoustics_2d_ex3/generate_tolplots.py | 1 | 7318 | from numpy import *
from matplotlib.pyplot import *
from pylab import *
# Setting up local variables
# Refinement tolerances swept in the experiment, largest to smallest.
# NOTE(review): these are strings, not floats, yet they are passed to
# loglog() below -- on modern matplotlib string inputs are treated as
# categorical, which would break the log axis.  Confirm the intended
# matplotlib version or convert to floats.
tols = ['1e-0','6e-1','3e-1',
        '1e-1','6e-2','3e-2',
        '1e-2','6e-3','3e-3',
        '1e-3','6e-4','3e-4',
        '1e-4','6e-5','3e-5',
        '1e-5']
## ---------------------------------
## Setting up vectors of amount of work done:
## ---------------------------------
# Timing and cells from adjoint-magnitude flagging
amag_regrid = [ 168.3828    ,  160.60822222,  150.9964    ,  138.6031    ,
         82.6206    ,   47.2141    ,   35.9217    ,   14.6139    ,
          2.756     ,    0.796     ,    0.3159    ,    0.2619    ,
          0.2592    ,    0.2268    ,    0.2229    ,    0.2215    ]
amag_times = [  1.58622970e+03,   1.49281189e+03,   1.34246280e+03,
         1.16435800e+03,   6.55311900e+02,   3.61060100e+02,
         2.66386500e+02,   1.03079700e+02,   1.77373000e+01,
         5.11060000e+00,   1.97270000e+00,   1.55870000e+00,
         1.49700000e+00,   1.25520000e+00,   1.24660000e+00,
         1.23730000e+00]
# Reverse so the work vectors run from smallest to largest tolerance.
amag_regrid = amag_regrid[::-1]
amag_times = amag_times[::-1]
# Timing and cells from adjoint-error flagging
aerr_regrid = [ 222.4333,  221.5709,  219.9372,  219.7524,  215.9752,  213.4921,
        213.1097,  202.8152,  104.2194,    0.3299,    0.3157,    0.2973,
          0.322 ,    0.3236,    0.3348,    0.3322]
aerr_times = [  1.65396950e+03,   1.64503170e+03,   1.63447290e+03,
         1.63348660e+03,   1.58072580e+03,   1.55252160e+03,
         1.54025240e+03,   1.39645700e+03,   6.43393800e+02,
         1.42510000e+00,   1.40220000e+00,   1.29830000e+00,
         1.38490000e+00,   1.37970000e+00,   1.42630000e+00,
         1.41970000e+00]
aerr_regrid = aerr_regrid[::-1]
aerr_times = aerr_times[::-1]
# Timing and cells from difference-flagging
diff_regrid = [ 101.3334,  100.4365,   99.8504,  101.3604,   99.7821,   96.9741,
         94.8886,   88.6307,   80.675 ,   76.9986,   49.4936,   29.1913,
         14.6271,    1.3023,    0.2764,    0.1175]
diff_times = [  1.71767820e+03,   1.71282980e+03,   1.70584980e+03,
         1.70690280e+03,   1.71527140e+03,   1.67923300e+03,
         1.63604370e+03,   1.54464870e+03,   1.39813830e+03,
         1.29744650e+03,   7.76082100e+02,   4.37399700e+02,
         2.10789500e+02,   1.55218000e+01,   3.33560000e+00,
         1.47840000e+00]
diff_regrid = diff_regrid[::-1]
diff_times = diff_times[::-1]
# Timing and cells from error-flagging
err_regrid = [ 207.3158,  194.2371,  182.5131,  164.8369,  103.2788,   59.2029,
         22.7793,    2.6414,    0.7793,    0.446 ,    0.2799,    0.2688,
          0.2807,    0.2664,    0.2709,    0.2642]
err_times = [  1.70307200e+03,   1.48634670e+03,   1.28684730e+03,
         1.04123930e+03,   5.06128100e+02,   2.31970000e+02,
         7.67219000e+01,   8.73310000e+00,   2.72430000e+00,
         1.68660000e+00,   1.16740000e+00,   1.12400000e+00,
         1.18160000e+00,   1.16350000e+00,   1.19180000e+00,
         1.16100000e+00]
err_regrid = err_regrid[::-1]
err_times = err_times[::-1]
## ---------------------------------
## Setting up vectors with error in solutions:
## ---------------------------------
# Error in the functional J for each flagging strategy, one entry per
# tolerance in `tols` (these are NOT reversed, unlike the work vectors).
diff_errors_fine = [  1.88803323e-02,   1.80756249e-02,   1.31571836e-02,
         2.44708545e-03,   6.67765406e-04,   1.13649307e-04,
         4.30387844e-05,   2.34284826e-06,   8.14584907e-06,
         5.12725708e-07,   5.49981515e-06,   3.11343017e-07,
         3.09194313e-07,   3.08818669e-07,   3.08488960e-07,
         3.08261548e-07]
amag_errors_fine = [  1.66309793e-02,   1.66309793e-02,   1.66309793e-02,
         1.63869141e-02,   1.63830704e-02,   1.63455552e-02,
         1.76997206e-02,   1.46060597e-02,   1.13565015e-03,
         5.12797344e-04,   2.74694425e-04,   3.40168469e-04,
         3.38413701e-05,   1.26387074e-07,   8.35988978e-06,
         0.00000000e+00]
aerr_errors_fine = [  1.91503471e-02,   1.91503471e-02,   1.91503471e-02,
         1.91503471e-02,   1.91503471e-02,   1.91503471e-02,
         1.91180102e-02,   8.06758744e-04,   4.41458490e-05,
         7.03947247e-06,   7.14602929e-06,   1.95601111e-06,
         1.14262929e-06,   2.76824135e-06,   2.12155203e-06,
         1.96862724e-06]
err_errors_fine = [  1.66309793e-02,   1.66309793e-02,   1.66309793e-02,
         1.66309793e-02,   1.66309163e-02,   1.66234431e-02,
         1.64147606e-02,   1.50676298e-02,   1.10137446e-02,
         7.27707216e-03,   2.06864462e-03,   6.11217733e-04,
         8.21253224e-05,   5.67292759e-05,   8.46845563e-06,
         4.05137217e-06]
# Shared font size for all three figures.
size = 20
# Figure 1: error in J versus refinement tolerance (log-log).
fig = figure(1,(10,8))
axes([0.11,0.23,0.85,0.72])
loglog(tols,err_errors_fine,'r',label='Error-Flagging',linewidth=2)
loglog(tols,diff_errors_fine,'b',label='Difference-Flagging',linewidth=2)
loglog(tols,aerr_errors_fine,'k',label='Adjoint-Error Flagging',linewidth=2)
loglog(tols,amag_errors_fine,'g',label='Adjoint-Magnitude Flagging',linewidth=2)
legend(bbox_to_anchor=(0.95,0), loc="lower right",bbox_transform=fig.transFigure, ncol=2,fontsize=size)
title("Tolerance vs. Error in J",fontsize=size)
xlabel("Tolerance",fontsize=size)
ylabel("Error in J",fontsize=size)
tick_params(axis='y',labelsize=size)
tick_params(axis='x',labelsize=size)
plt.axis([10**(-4), 10**(0), 5*10**(-4), 2*10**(-2)])
savefig('tolvserror_2d_ex3.png')
clf()
# Figure 2: error versus total CPU time (semilog-y).
fig2 = figure(1,(10,8))
axes([0.11,0.23,0.85,0.72])
plt.semilogy(err_times[:],err_errors_fine[:],'r',label='Error-Flagging',linewidth=2)
plt.semilogy(diff_times[:],diff_errors_fine[:],'b',label='Difference-Flagging',linewidth=2)
plt.semilogy(aerr_times[:],aerr_errors_fine[:],'k',label='Adjoint-Error Flagging',linewidth=2)
plt.semilogy(amag_times,amag_errors_fine,'g',label='Adjoint-Magnitude Flagging',linewidth=2)
plt.title('Error vs. Total CPU Time', fontsize=size)
plt.legend(loc=3,fontsize=size)
xlabel("CPU Time",fontsize=size)
ylabel("Error",fontsize=size)
plt.axis([0, 800, 5*10**(-4), 2*10**(-2)])
tick_params(axis='y',labelsize=size)
tick_params(axis='x',labelsize=size)
legend(bbox_to_anchor=(0.95,0), loc="lower right",bbox_transform=fig2.transFigure, ncol=2,fontsize=size)
plt.savefig('errvstime_2d_ex3.png')
clf()
# Figure 3: error versus regridding CPU time only (semilog-y).
fig3 = figure(1,(10,8))
axes([0.11,0.23,0.85,0.72])
plt.semilogy(err_regrid,err_errors_fine,'r',label='Error-Flagging',linewidth=2)
plt.semilogy(diff_regrid,diff_errors_fine,'b',label='Difference-Flagging',linewidth=2)
plt.semilogy(aerr_regrid,aerr_errors_fine,'k',label='Adjoint-Error Flagging',linewidth=2)
plt.semilogy(amag_regrid,amag_errors_fine,'g',label='Adjoint-Magnitude Flagging',linewidth=2)
plt.title('Error vs. Regridding CPU Time', fontsize=size)
plt.legend(loc=3,fontsize=size)
xlabel("CPU Time",fontsize=size)
ylabel("Error",fontsize=size)
plt.axis([0, 140, 5*10**(-4), 2*10**(-2)])
tick_params(axis='y',labelsize=size)
tick_params(axis='x',labelsize=size)
legend(bbox_to_anchor=(0.95,0), loc="lower right",bbox_transform=fig3.transFigure, ncol=2,fontsize=size)
plt.savefig('errvsregridtime_2d_ex3.png')
clf()
| bsd-2-clause |
Sentient07/scikit-learn | examples/mixture/plot_gmm_covariances.py | 89 | 4724 | """
===============
GMM covariances
===============
Demonstration of several covariances types for Gaussian mixture models.
See :ref:`gmm` for more information on the estimator.
Although GMM are often used for clustering, we can compare the obtained
clusters with the actual classes from the dataset. We initialize the means
of the Gaussians with the means of the classes from the training set to make
this comparison valid.
We plot predicted labels on both training and held out test data using a
variety of GMM covariance types on the iris dataset.
We compare GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
# Author: Ron Weiss <ronweiss@gmail.com>, Gael Varoquaux
# Modified by Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import StratifiedKFold
print(__doc__)
colors = ['navy', 'turquoise', 'darkorange']
def make_ellipses(gmm, ax):
    """Overlay one 2-D covariance ellipse per mixture component on `ax`.

    Whatever the fitted `covariance_type`, the component covariance is
    reduced to its leading 2x2 sub-matrix, and an ellipse aligned with its
    principal axes is drawn at the component mean, colored to match the
    scatter color of that class.
    """
    cov_type = gmm.covariance_type
    for idx, color in enumerate(colors):
        if cov_type == 'full':
            cov = gmm.covariances_[idx][:2, :2]
        elif cov_type == 'tied':
            # One covariance matrix shared by every component.
            cov = gmm.covariances_[:2, :2]
        elif cov_type == 'diag':
            cov = np.diag(gmm.covariances_[idx][:2])
        elif cov_type == 'spherical':
            cov = np.eye(gmm.means_.shape[1]) * gmm.covariances_[idx]
        eigvals, eigvecs = np.linalg.eigh(cov)
        leading = eigvecs[0] / np.linalg.norm(eigvecs[0])
        angle = np.arctan2(leading[1], leading[0])
        angle = 180 * angle / np.pi  # convert to degrees
        axis_lengths = 2. * np.sqrt(2.) * np.sqrt(eigvals)
        ell = mpl.patches.Ellipse(gmm.means_[idx, :2],
                                  axis_lengths[0], axis_lengths[1],
                                  180 + angle, color=color)
        ell.set_clip_box(ax.bbox)
        ell.set_alpha(0.5)
        ax.add_artist(ell)
iris = datasets.load_iris()

# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(n_splits=4)
# Only take the first fold.
train_index, test_index = next(iter(skf.split(iris.data, iris.target)))

X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]

n_classes = len(np.unique(y_train))

# Try GMMs using different types of covariances.
estimators = dict((cov_type, GaussianMixture(n_components=n_classes,
                   covariance_type=cov_type, max_iter=20, random_state=0))
                  for cov_type in ['spherical', 'diag', 'tied', 'full'])

n_estimators = len(estimators)

plt.figure(figsize=(3 * n_estimators // 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
                    left=.01, right=.99)

# Fit and plot one subplot per covariance type.
for index, (name, estimator) in enumerate(estimators.items()):
    # Since we have class labels for the training data, we can
    # initialize the GMM parameters in a supervised manner.
    estimator.means_init = np.array([X_train[y_train == i].mean(axis=0)
                                    for i in range(n_classes)])

    # Train the other parameters using the EM algorithm.
    estimator.fit(X_train)

    h = plt.subplot(2, n_estimators // 2, index + 1)
    make_ellipses(estimator, h)

    # Training points drawn as small dots, one color per class.
    for n, color in enumerate(colors):
        data = iris.data[iris.target == n]
        plt.scatter(data[:, 0], data[:, 1], s=0.8, color=color,
                    label=iris.target_names[n])
    # Plot the test data with crosses
    for n, color in enumerate(colors):
        data = X_test[y_test == n]
        plt.scatter(data[:, 0], data[:, 1], marker='x', color=color)

    # Annotate each panel with train/test accuracy of the fitted model.
    y_train_pred = estimator.predict(X_train)
    train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
    plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
             transform=h.transAxes)

    y_test_pred = estimator.predict(X_test)
    test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
    plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
             transform=h.transAxes)

    plt.xticks(())
    plt.yticks(())
    plt.title(name)

plt.legend(scatterpoints=1, loc='lower right', prop=dict(size=12))

plt.show()
| bsd-3-clause |
hrichstein/phys_50733 | rh_project/two_body.py | 1 | 5689 | # Restricted Three-Body Problem
import numpy as np
import matplotlib.pyplot as plt
# from scipy.constants import G
# Setting plotting parameters
from matplotlib import rc,rcParams
rc('text', usetex=True)
rc('axes', linewidth=2)
rc('font', weight='bold')
rc('font', **{'family': 'serif', 'serif':['Computer Modern']})
G = 4 * np.pi**2 # AU^3 yr^-2 M_sun^-1
# m_1 = 1 # Solar mass
# m_2 = 1 # Solar mass
# mu = m_1 * m_2 / (m_1 + m_2) # Reduced mass
# M = m_1 + m_2
# r_maj = # Length of major axis; mean of the greatest and smallest distance apart of the two stars during their orbit
# r = np.array([r_maj * cos(theta),r_maj * sin(theta)])
# r_1 = -m_2 / (m_1 + m_2) * r # Position of first star (cm frame)
# r_2 = m_1 / (m_1 + m_2) * r # Position of second star
# r_cm = r_2 - r_1
# a_r = -G * M / r_cm**3 * r
# const = np.sqrt(r_maj * G * M)
# period = np.sqrt((4 * np.pi**2 * r_maj**3) / (G * M))
# r_3 # Position of third star
# a_1 = -G * m_2 * (r_1 - r_2) / (abs(r_1 - r_2))**3
# a_2 = -G * m_1 * (r_2 - r_1) / (abs(r_2 - r_1))**3
# a_3 = -G * m_1 * (r_3 - r_1) / (abs(r_3 - r_1))**3 - /
# G * m_2 * (r_3 - r_2) / (abs(r_3 - r_2))**3
# a = # mean separation in AU
# period = np.sqrt(a**3 / (m_1 + m_2)) # years
# # masses in solar masses
# period = 2*pi/ angular acceleration
# period = 2* pi * r/v
# v = 2 * pi * r /T
# G is in Newton meter^2/kg^2
# Use f = Gmm/r^2 : r is distance between the centers
# Find expressions for acceleration
# Find initial velocity using Kepler's third law (finding period)
# obtain initial velocities and positions, then plug into runga-kutta
# find period and acceleration then analytically find a circular orbit
# for runga-kutta, need x pos, y pos, x vel, y vel
# Newton's version of Kepler's 3rd Law
# M1+M2 = A^3 / P^2
# Masses in solar, semi-major axis in AU
# 1 Msun is 1.99 * 10^30 kg
# 1 AU is 149,600,000 km
def find_vel_init(M1, M2, A):
    """Initial circular-orbit speed for a binary via Kepler's third law.

    M1 and M2 are masses in solar masses and A is the separation in AU,
    so the orbital period comes out in years and the returned speed in
    AU/yr (each star orbits the barycenter at radius A/2 here).
    """
    period = np.sqrt(A ** 3 / (M1 + M2))
    orbit_radius = A / 2
    return 2 * np.pi * orbit_radius / period
# Binary separation in AU.
A = 0.2 # AU
# Integration window [a, b] in years, split into N steps of size h.
a = 0
b = 100
N = 250
h = (b-a) / N
tpoints = np.arange(a,b,h)
# initial velocity only in one direction
vx0 = find_vel_init(1, 1, A)
#Giving one a negative velocity
# x pos, y pos, x pos, y pos, xvel, yvel, xvel, yvel
all_params = np.array([0, 0.1, 0, -0.1, vx0, 0., -vx0, 0.])
def a_r(m, r1, r2):
    """One coordinate of the gravitational acceleration on a body at r1.

    Args:
        m: mass of the attracting body (solar masses, per the G used here).
        r1: coordinate of the accelerated body (AU).
        r2: same coordinate of the attracting body (AU).

    Returns:
        Acceleration component in AU/yr^2 (negative when r1 > r2, i.e.
        always directed toward the attracting body along this axis).

    BUG FIX: the original body contained two incomplete statements
    (``dx =`` / ``dy =``), used an undefined name ``m``, and declared only
    two parameters while every call site passes three (e.g.
    ``a_r(1, x0_1, x0_2)``).  The signature now matches the callers.
    NOTE(review): callers apply this per-coordinate with a scalar
    ``abs(r1 - r2)`` rather than the vector separation -- confirm that is
    the intended approximation.
    """
    return -G * m * (r1 - r2) / abs(r1 - r2) ** 3
def rk4(params, h):
    """Advance the two-body state by one step of size h (4th-order RK scheme).

    params layout: [x1, y1, x2, y2, vx1, vy1, vx2, vy2] -- positions then
    velocities for star 1 and star 2.  Returns a new array in the same
    layout; `params` itself is not modified.

    NOTE(review): a_r is evaluated per coordinate with abs() of a scalar
    difference rather than the vector separation |r1 - r2|, and the
    "second star" accelerations (ax1_2, ay1_2, ...) are computed with the
    exact same arguments as the first star's -- so both stars receive
    identical (not opposite) accelerations at every stage.  Both points
    look suspect and should be checked against the intended equations of
    motion before trusting the trajectories.
    """
    #First star positions
    x0_1 = params[0]
    y0_1 = params[1]
    #First star velocities
    vx0_1 = params[4]
    vy0_1 = params[5]
    #Second star positions
    x0_2 = params[2]
    y0_2 = params[3]
    #Second star velocities
    vx0_2 = params[6]
    vy0_2 = params[7]
    # Stage-1 accelerations, evaluated at the current positions.
    #First star accelerations
    ax1_1 = a_r(1,x0_1,x0_2)
    ay1_1 = a_r(1,y0_1,y0_2)
    #Second star accelerations
    ax1_2 = a_r(1,x0_1,x0_2)
    ay1_2 = a_r(1,y0_1,y0_2)
    ################# STEP 1
    # Half-step positions/velocities using the stage-1 derivatives.
    #First star
    x1_1 = x0_1 + 0.5*vx0_1*h
    y1_1 = y0_1 + 0.5*vy0_1*h
    #First star
    vx1_1 = vx0_1 + 0.5*ax1_1*h
    vy1_1 = vy0_1 + 0.5*ay1_1*h
    #Second star
    x1_2 = x0_2 + 0.5*vx0_2*h
    y1_2 = y0_2 + 0.5*vy0_2*h
    vx1_2 = vx0_2 + 0.5*ax1_2*h
    vy1_2 = vy0_2 + 0.5*ay1_2*h
    # Stage-2 accelerations at the first midpoint estimate.
    #First star accelerations
    ax2_1 = a_r(1,x1_1,x1_2)
    ay2_1 = a_r(1,y1_1,y1_2)
    #Second star accelerations
    ax2_2 = a_r(1,x1_1,x1_2)
    ay2_2 = a_r(1,y1_1,y1_2)
    ################# STEP 2
    # Second half-step using the stage-2 derivatives.
    #First star
    x2_1 = x0_1 + 0.5*vx1_1*h
    y2_1 = y0_1 + 0.5*vy1_1*h
    vx2_1 = vx0_1 + 0.5*ax2_1*h
    vy2_1 = vy0_1 + 0.5*ay2_1*h
    #Second star
    x2_2 = x0_2 + 0.5*vx1_2*h
    y2_2 = y0_2 + 0.5*vy1_2*h
    vx2_2 = vx0_2 + 0.5*ax2_2*h
    vy2_2 = vy0_2 + 0.5*ay2_2*h
    # Stage-3 accelerations at the second midpoint estimate.
    #First star accelerations
    ax3_1 = a_r(1,x2_1,x2_2)
    ay3_1 = a_r(1,y2_1,y2_2)
    #Second star accelerations
    ax3_2 = a_r(1,x2_1,x2_2)
    ay3_2 = a_r(1,y2_1,y2_2)
    ################# STEP 3
    # Full-step trial state using the stage-3 derivatives.
    #First star
    x3_1 = x0_1 + vx2_1*h
    y3_1 = y0_1 + vy2_1*h
    vx3_1 = vx0_1 + ax3_1*h
    vy3_1 = vy0_1 + ay3_1*h
    #Second star
    x3_2 = x0_2 + vx2_2*h
    y3_2 = y0_2 + vy2_2*h
    vx3_2 = vx0_2 + ax3_2*h
    vy3_2 = vy0_2 + ay3_2*h
    # Stage-4 accelerations at the full-step trial positions.
    #First star accelerations
    ax4_1 = a_r(1,x3_1,x3_2)
    ay4_1 = a_r(1,y3_1,y3_2)
    #Second star accelerations
    ax4_2 = a_r(1,x3_1,x3_2)
    ay4_2 = a_r(1,y3_1,y3_2)
    # Classic RK4 weighted combination (1, 2, 2, 1)/6 of the stage slopes.
    #Final pos
    x1_f = x0_1 + h*(vx0_1 + 2*vx1_1 + 2*vx2_1 + vx3_1)/6
    x2_f = x0_2 + h*(vx0_2 + 2*vx1_2 + 2*vx2_2 + vx3_2)/6
    y1_f = y0_1 + h*(vy0_1 + 2*vy1_1 + 2*vy2_1 + vy3_1)/6
    y2_f = y0_2 + h*(vy0_2 + 2*vy1_2 + 2*vy2_2 + vy3_2)/6
    #Final vels
    vx1_f = vx0_1 + h*(ax1_1 + 2*ax2_1 + 2*ax3_1 + ax4_1)/6
    vx2_f = vx0_2 + h*(ax1_2 + 2*ax2_2 + 2*ax3_2 + ax4_2)/6
    vy1_f = vy0_1 + h*(ay1_1 + 2*ay2_1 + 2*ay3_1 + ay4_1)/6
    vy2_f = vy0_2 + h*(ay1_2 + 2*ay2_2 + 2*ay3_2 + ay4_2)/6
    # x pos, y pos, x pos, y pos, xvel, yvel, xvel, yvel
    new_params = np.array([x1_f,y1_f,x2_f,y2_f,vx1_f,vy1_f,vx2_f,vy2_f])
    return new_params
# Per-step position histories for both stars, preallocated to len(tpoints).
x1_points = [[] for tt in range(len(tpoints))]
y1_points = [[] for tt in range(len(tpoints))]
x2_points = [[] for tt in range(len(tpoints))]
y2_points = [[] for tt in range(len(tpoints))]
# March the state forward in time, recording positions before each step.
for tt in range(len(tpoints)):
    x1_points[tt] = all_params[0]
    y1_points[tt] = all_params[1]
    x2_points[tt] = all_params[2]
    y2_points[tt] = all_params[3]
    all_params = rk4(all_params, h)
# Plot the two orbital trajectories.
plt.plot(x1_points,y1_points)
plt.plot(x2_points,y2_points)
plt.show()
# a_1 = -G * m_2 * (r_1 - r_2) / (abs(r_1 - r_2))**3
# a_2 = -G * m_1 * (r_2 - r_1) / (abs(r_2 - r_1))**3
# put stars on opposite sides of zero
# given initial velocities and positions, then put into runga-kutta
# def grav_force(m1, m2, r_sep):
# """
# m in solar masses
# r in AU
# """
# force = G * m1 * m2 / r_sep**2
# return force
### Decompose acceleration equation into components
### For test cases, put planet super far out (assume massless) and then close to one star
| mit |
pair-code/lit | lit_nlp/examples/coref/datasets/winogender.py | 2 | 6390 | """Coreference version of the Winogender dataset.
Each instance has two edges, one between the pronoun and the occupation and one
between the pronoun and the participant. The pronoun is always span1.
There are 120 templates in the Winogender set, 60 coreferent with the
occupation, and 60 coreferent with the participant. Each is instantiated
six times: with and without "someone" substituting for the participant,
and with {male, female, neutral} pronouns, for a total of 720 examples.
Winogender repo: https://github.com/rudinger/winogender-schemas
Paper: Gender Bias in Coreference Resolution (Rudinger et al. 2018),
https://arxiv.org/pdf/1804.09301.pdf
"""
import enum
import os
from typing import Optional
from absl import logging
from lit_nlp.api import dataset as lit_dataset
from lit_nlp.api import dtypes as lit_dtypes
from lit_nlp.api import types as lit_types
import pandas as pd
import transformers # for file caching
EdgeLabel = lit_dtypes.EdgeLabel
DATA_ROOT = "https://raw.githubusercontent.com/rudinger/winogender-schemas/master/data/" # pylint: disable=line-too-long
def get_data(name):
  """Fetch a Winogender data file by name, returning a local cached path."""
  full_url = os.path.join(DATA_ROOT, name)
  logging.info("Winogender: retrieving data file %s", full_url)
  cached = transformers.file_utils.cached_path(full_url)
  return cached
## From gap-coreference/constants.py
class Gender(enum.Enum):
  """Grammatical gender of the pronoun used to instantiate a template.

  UNKNOWN maps to singular "they" forms (see PRONOUN_MAP below).
  """
  UNKNOWN = 0
  MASCULINE = 1
  FEMININE = 2
# Placeholder tokens used in the raw templates, one per grammatical case.
NOM = "$NOM_PRONOUN"    # nominative, e.g. "she"
POSS = "$POSS_PRONOUN"  # possessive, e.g. "her"
ACC = "$ACC_PRONOUN"    # accusative, e.g. "him"
# Surface forms substituted for each placeholder, keyed by gender.
PRONOUN_MAP = {
    Gender.FEMININE: {
        NOM: "she",
        POSS: "her",
        ACC: "her"
    },
    Gender.MASCULINE: {
        NOM: "he",
        POSS: "his",
        ACC: "him"
    },
    Gender.UNKNOWN: {
        NOM: "they",
        POSS: "their",
        ACC: "them"
    },
}
# Answer index 0 = pronoun refers to the occupation, 1 = to the participant.
ANSWER_VOCAB = ["occupation", "participant"]
# Display strings like "she/her/her", used as categorical feature values.
PRONOUNS_BY_GENDER = {k: "/".join(PRONOUN_MAP[k].values()) for k in PRONOUN_MAP}
# Based on winogender-schemas/scripts/instantiate.py, but adapted to LIT format.
def generate_instance(occupation,
                      participant,
                      answer,
                      sentence,
                      gender=Gender.UNKNOWN,
                      someone=False):
  """Generate a Winogender example from a template row."""
  tokens = sentence.split(" ")
  participant_idx = tokens.index("$PARTICIPANT")
  if someone:
    # Use the bleached NP "someone": first drop the token preceding
    # $PARTICIPANT (i.e. "the"), then re-locate the placeholder and fill it.
    tokens = tokens[:participant_idx - 1] + tokens[participant_idx:]
    participant_idx = tokens.index("$PARTICIPANT")
    tokens[participant_idx] = "Someone" if participant_idx == 0 else "someone"
  else:
    # Use the instantiated participant, e.g. "client", "patient", ...
    tokens[participant_idx] = participant
  # Locate the occupation only after the "someone" substitution above,
  # since dropping a token shifts all later indices.
  occupation_idx = tokens.index("$OCCUPATION")
  # Always holds on the regular Winogender dataset.
  assert " " not in occupation, "Occupation must be single-token."
  tokens[occupation_idx] = occupation
  # Substitute the gendered pronoun and remember its position.
  pronoun_idx = None
  resolved = []
  for position, token in enumerate(tokens):
    replacement = PRONOUN_MAP[gender].get(token, token)
    if replacement != token:
      pronoun_idx = position
    resolved.append(replacement)
  # Pronoun case (NOM, POSS, ACC), recovered from the placeholder name.
  pronoun_type = tokens[pronoun_idx][1:].replace("_PRONOUN", "")
  # Render text and fix agreement for singular "they".
  text = " ".join(resolved)
  text = text.replace("they was", "they were")
  text = text.replace("They was", "They were")
  record = {"text": text, "tokens": text.split()}
  occupation_edge = EdgeLabel(
      span1=(occupation_idx, occupation_idx + 1),
      span2=(pronoun_idx, pronoun_idx + 1),
      label=int(1 if answer == 0 else 0))
  participant_edge = EdgeLabel(
      span1=(participant_idx, participant_idx + 1),
      span2=(pronoun_idx, pronoun_idx + 1),
      label=int(1 if answer == 1 else 0))
  record["coref"] = [occupation_edge, participant_edge]
  record.update({
      "occupation": occupation,
      "participant": participant,
      "answer": ANSWER_VOCAB[answer],
      "someone": str(someone),
      "pronouns": PRONOUNS_BY_GENDER[gender],
      "pronoun_type": pronoun_type,
      "gender": gender.name,
  })
  return record
class WinogenderDataset(lit_dataset.Dataset):
  """Coreference on Winogender schemas (Rudinger et al. 2018)."""
  # These should match the args to generate_instance()
  TSV_COLUMN_NAMES = ["occupation", "participant", "answer", "sentence"]

  def __init__(self,
               templates_path: Optional[str] = None,
               occupation_stats_path: Optional[str] = None):
    """Builds all examples from the template and occupation-stats TSVs.

    Args:
      templates_path: local path to templates.tsv; downloaded if None.
      occupation_stats_path: local path to occupations-stats.tsv;
        downloaded if None.
    """
    templates_path = templates_path or get_data("templates.tsv")
    occupation_stats_path = occupation_stats_path or get_data(
        "occupations-stats.tsv")
    # Load templates and make a DataFrame.
    with open(templates_path) as fd:
      self.templates_df = pd.read_csv(
          fd, sep="\t", header=0, names=self.TSV_COLUMN_NAMES)
    # Load occupation stats, indexed by occupation for the lookup below.
    with open(occupation_stats_path) as fd:
      self.occupation_df = pd.read_csv(fd, sep="\t").set_index("occupation")
    # Make examples for each {someone} x {gender} x {template}
    self._examples = []
    for _, row in self.templates_df.iterrows():
      for someone in {False, True}:
        for gender in Gender:
          r = generate_instance(someone=someone, gender=gender, **row)
          # Fraction of women in this occupation, from the BLS stats column.
          r["pf_bls"] = (
              self.occupation_df.bls_pct_female[r["occupation"]] / 100.0)
          self._examples.append(r)

  def spec(self):
    """Declares the fields of each example for the LIT framework."""
    return {
        "text":
            lit_types.TextSegment(),
        "tokens":
            lit_types.Tokens(parent="text"),
        "coref":
            lit_types.EdgeLabels(align="tokens"),
        # Metadata fields for filtering and analysis.
        "occupation":
            lit_types.CategoryLabel(),
        "participant":
            lit_types.CategoryLabel(),
        "answer":
            lit_types.CategoryLabel(vocab=ANSWER_VOCAB),
        "someone":
            lit_types.CategoryLabel(vocab=["True", "False"]),
        "pronouns":
            lit_types.CategoryLabel(vocab=list(PRONOUNS_BY_GENDER.values())),
        "pronoun_type":
            lit_types.CategoryLabel(vocab=["NOM", "POSS", "ACC"]),
        "gender":
            lit_types.CategoryLabel(vocab=[g.name for g in Gender]),
        "pf_bls":
            lit_types.Scalar(),
    }
| apache-2.0 |
syhw/speech_embeddings | vq.py | 2 | 1899 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ------------------------------------
# file: vq.py
# date: Fri May 02 12:10 2014
# author:
# Maarten Versteegh
# github.com/mwv
# maartenversteegh AT gmail DOT com
#
# Licensed under GPLv3
# ------------------------------------
"""vq:
"""
from __future__ import division
import os.path as path
import os
import fnmatch
import cPickle as pickle
import numpy as np
from sklearn.cluster import KMeans
import time
def get_stacked_files(stackroot):
    """Yield (basename, path relative to stackroot) for every .npy file
    found anywhere under stackroot."""
    for dirpath, _, filenames in os.walk(stackroot):
        matching = fnmatch.filter(filenames, '*.npy')
        for basename in matching:
            relative = path.relpath(path.join(dirpath, basename), stackroot)
            yield basename, relative
if __name__ == '__main__':
    # Layout: stacked feature files live under <datadir>/stacked, and the
    # vector-quantized code sequences are written under <datadir>/vq.
    datadir = path.join(os.environ['HOME'], 'data', 'IFA_reformatted')
    stackdir = path.join(datadir, 'stacked')
    vqdir = path.join(datadir, 'vq')
    files = list(get_stacked_files(stackdir))
    t0 = time.time()
    print 'loading data...',
    # Concatenate all stacked feature matrices into one big array.
    # NOTE(review): repeated np.vstack re-copies the accumulated array each
    # iteration (quadratic); loading all files into a list and calling
    # np.vstack once would be linear.
    X = None
    for f, relpath in files:
        if X is None:
            X = np.load(path.join(stackdir, relpath))
        else:
            X = np.vstack((X, np.load(path.join(stackdir, relpath))))
    print 'done. Time taken: {0:.3f}s'.format(time.time() - t0)
    print X.shape
    t0 = time.time()
    print 'clustering...',
    # Vector quantization: fit a 25-entry k-means codebook on all frames.
    clf = KMeans(n_clusters=25, n_init=10, n_jobs=-1, precompute_distances=True)
    X_vq = clf.fit(X)
    print 'done. Time taken: {0:.3f}s'.format(time.time() - t0)
    del X
    # Persist the fitted quantizer with the highest pickle protocol (-1).
    with open('clf.pkl', 'wb') as fid:
        pickle.dump(clf, fid, -1)
    t0 = time.time()
    print 'predicting...',
    # Assign every frame of each file to its nearest centroid and save the
    # code sequence under vqdir/<first 4 chars of filename>/<filename>.
    for f, relpath in files:
        X = np.load(path.join(stackdir, relpath))
        vqfile = path.join(vqdir, f[:4], f)
        if not path.exists(path.dirname(vqfile)):
            os.makedirs(path.dirname(vqfile))
        np.save(vqfile, clf.predict(X))
    print 'done. Time taken: {0:.3f}s'.format(time.time() - t0)
| mit |
hollabaq86/haikuna-matata | env/lib/python2.7/site-packages/numpy/core/function_base.py | 23 | 6891 | from __future__ import division, absolute_import, print_function
__all__ = ['logspace', 'linspace']
from . import numeric as _nx
from .numeric import result_type, NaN, shares_memory, MAY_SHARE_BOUNDS, TooHardError
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
    """
    Return evenly spaced numbers over a specified interval.
    Returns `num` evenly spaced samples, calculated over the
    interval [`start`, `stop`].
    The endpoint of the interval can optionally be excluded.
    Parameters
    ----------
    start : scalar
        The starting value of the sequence.
    stop : scalar
        The end value of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced samples, so that `stop` is excluded.  Note that the step
        size changes when `endpoint` is False.
    num : int, optional
        Number of samples to generate. Default is 50. Must be non-negative.
    endpoint : bool, optional
        If True, `stop` is the last sample. Otherwise, it is not included.
        Default is True.
    retstep : bool, optional
        If True, return (`samples`, `step`), where `step` is the spacing
        between samples.
    dtype : dtype, optional
        The type of the output array.  If `dtype` is not given, infer the data
        type from the other input arguments.
        .. versionadded:: 1.9.0
    Returns
    -------
    samples : ndarray
        There are `num` equally spaced samples in the closed interval
        ``[start, stop]`` or the half-open interval ``[start, stop)``
        (depending on whether `endpoint` is True or False).
    step : float
        Only returned if `retstep` is True
        Size of spacing between samples.
    See Also
    --------
    arange : Similar to `linspace`, but uses a step size (instead of the
             number of samples).
    logspace : Samples uniformly distributed in log space.
    Examples
    --------
    >>> np.linspace(2.0, 3.0, num=5)
    array([ 2.  ,  2.25,  2.5 ,  2.75,  3.  ])
    >>> np.linspace(2.0, 3.0, num=5, endpoint=False)
    array([ 2. ,  2.2,  2.4,  2.6,  2.8])
    >>> np.linspace(2.0, 3.0, num=5, retstep=True)
    (array([ 2.  ,  2.25,  2.5 ,  2.75,  3.  ]), 0.25)
    Graphical illustration:
    >>> import matplotlib.pyplot as plt
    >>> N = 8
    >>> y = np.zeros(N)
    >>> x1 = np.linspace(0, 10, N, endpoint=True)
    >>> x2 = np.linspace(0, 10, N, endpoint=False)
    >>> plt.plot(x1, y, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.plot(x2, y + 0.5, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.ylim([-0.5, 1])
    (-0.5, 1)
    >>> plt.show()
    """
    num = int(num)
    if num < 0:
        raise ValueError("Number of samples, %s, must be non-negative." % num)
    # Number of intervals between samples: num-1 when the endpoint counts.
    div = (num - 1) if endpoint else num
    # Convert float/complex array scalars to float, gh-3504
    start = start * 1.
    stop = stop * 1.
    # Compute in the common type of the inputs, which is at least float.
    dt = result_type(start, stop, float(num))
    if dtype is None:
        dtype = dt
    y = _nx.arange(0, num, dtype=dt)
    delta = stop - start
    if num > 1:
        step = delta / div
        if step == 0:
            # Special handling for denormal numbers, gh-5437
            y /= div
            y = y * delta
        else:
            # One might be tempted to use faster, in-place multiplication here,
            # but this prevents step from overriding what class is produced,
            # and thus prevents, e.g., use of Quantities; see gh-7142.
            y = y * step
    else:
        # 0 and 1 item long sequences have an undefined step
        step = NaN
        # Multiply with delta to allow possible override of output class.
        y = y * delta
    y += start
    if endpoint and num > 1:
        # Pin the final sample exactly to `stop` despite rounding above.
        y[-1] = stop
    if retstep:
        return y.astype(dtype, copy=False), step
    else:
        return y.astype(dtype, copy=False)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
    """
    Return numbers spaced evenly on a log scale.
    In linear space, the sequence starts at ``base ** start``
    (`base` to the power of `start`) and ends with ``base ** stop``
    (see `endpoint` below).
    Parameters
    ----------
    start : float
        ``base ** start`` is the starting value of the sequence.
    stop : float
        ``base ** stop`` is the final value of the sequence, unless `endpoint`
        is False.  In that case, ``num + 1`` values are spaced over the
        interval in log-space, of which all but the last (a sequence of
        length ``num``) are returned.
    num : integer, optional
        Number of samples to generate.  Default is 50.
    endpoint : boolean, optional
        If true, `stop` is the last sample. Otherwise, it is not included.
        Default is True.
    base : float, optional
        The base of the log space. The step size between the elements in
        ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
        Default is 10.0.
    dtype : dtype
        The type of the output array.  If `dtype` is not given, infer the data
        type from the other input arguments.
    Returns
    -------
    samples : ndarray
        `num` samples, equally spaced on a log scale.
    See Also
    --------
    arange : Similar to linspace, with the step size specified instead of the
             number of samples. Note that, when used with a float endpoint, the
             endpoint may or may not be included.
    linspace : Similar to logspace, but with the samples uniformly distributed
               in linear space, instead of log space.
    Notes
    -----
    Logspace is equivalent to the code
    >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
    ... # doctest: +SKIP
    >>> power(base, y).astype(dtype)
    ... # doctest: +SKIP
    Examples
    --------
    >>> np.logspace(2.0, 3.0, num=4)
    array([  100.        ,   215.443469  ,   464.15888336,  1000.        ])
    >>> np.logspace(2.0, 3.0, num=4, endpoint=False)
    array([ 100.        ,  177.827941  ,  316.22776602,  562.34132519])
    >>> np.logspace(2.0, 3.0, num=4, base=2.0)
    array([ 4.        ,  5.0396842 ,  6.34960421,  8.        ])
    Graphical illustration:
    >>> import matplotlib.pyplot as plt
    >>> N = 10
    >>> x1 = np.logspace(0.1, 1, N, endpoint=True)
    >>> x2 = np.logspace(0.1, 1, N, endpoint=False)
    >>> y = np.zeros(N)
    >>> plt.plot(x1, y, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.plot(x2, y + 0.5, 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.ylim([-0.5, 1])
    (-0.5, 1)
    >>> plt.show()
    """
    # Sample the exponents uniformly, then raise `base` to them.
    exponents = linspace(start, stop, num=num, endpoint=endpoint)
    samples = _nx.power(base, exponents)
    if dtype is None:
        return samples
    return samples.astype(dtype)
| mit |
eryueniaobp/contest | Tianchi_License/fusai_hand.py | 1 | 4384 | # encoding=utf-8
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
def fore_holidays(fore=1):
    """Map each post-holiday date to the tag of the holiday it follows.

    Each holiday is described by (start date, length in days, tag). For a
    holiday starting on `day` and lasting `span` days, the mapped date is
    ``day + span + fore - 1`` days later, i.e. the `fore`-th day after the
    holiday ends (default 1: the first day after the holiday), so callers
    can adjust predictions for the immediate post-holiday days.

    Args:
        fore: offset (in days) past the last holiday day to mark.
            Defaults to 1, which matches the original behaviour.

    Returns:
        dict mapping 'YYYY-MM-DD' date strings to holiday tag strings.
    """
    fd = 'fd'
    spring = 'spring'
    qingming = 'qingming'
    laodong = 'laodong'
    duanwu = 'duanwu'
    zhongqiu = 'zhongqiu'
    guoqing = 'guoqing'
    # (start date, holiday length in days, tag)
    hdays = [
        ('2016-06-09', 3, duanwu),
        ('2016-09-15', 3, zhongqiu),
        ('2016-10-01', 7, guoqing),
        ('2016-12-31', 3, fd),
        ('2017-01-27', 7, spring),
        ('2017-04-02', 3, qingming),
        ('2017-04-29', 3, laodong),
        ('2017-05-28', 3, duanwu),
        ('2017-10-01', 8, guoqing),
        ('2017-12-30', 3, fd),
        ('2018-02-15', 7, spring),
    ]
    foredays = {}
    for day, span, tag in hdays:
        nday = (datetime.datetime.strptime(day, '%Y-%m-%d')
                + datetime.timedelta(days=span + fore - 1))
        foredays[nday.strftime('%Y-%m-%d')] = tag
    return foredays
def leak_check():
    """Compare a submission's per-day totals against the real daily sums
    and write the real/predicted ratio per day to leak.rate.csv."""
    # day = datetime.datetime.now().strftime('%Y%m%d')
    day = 20180304
    df = pd.read_csv('./submit/{}.csv'.format(day), header=0, sep='\t')
    aux = pd.read_csv('./train.csv',header=0)
    # Attach date metadata (ds, holiday flags) from the training table.
    df = pd.merge( df , aux[['date', 'ds' , 'holiday', 'holiday_name']] , on ='date', how='left')
    df.to_csv('./submit/{}.train.all.csv'.format(day),index=False)
    # Per-day predicted totals vs. the known real totals.
    pred_sum_df = df.groupby('ds')['cnt'].sum().reset_index()
    real_sum_df = pd.read_csv('./data/real_sum.csv',header=0)
    mdf = pd.merge(pred_sum_df, real_sum_df, on = 'ds')
    # +1 in the denominator avoids division by zero on empty days.
    mdf['rate'] = mdf['cnt_y']/(mdf['cnt_x']+1) # real/pred.
    mdf[['ds','rate']].to_csv('leak.rate.csv',header=True,index=False)
def hand_check2():
    """Rescale a submission by the per-day leak ratio, clamp holiday-period
    predictions, and write the hand-adjusted submission files."""
    # day = datetime.datetime.now().strftime('%Y%m%d')
    day = 20180303
    df = pd.read_csv('./submit/{}.csv'.format(day), header=0, sep='\t')
    aux = pd.read_csv('./train.csv',header=0)
    # Attach date metadata (ds, holiday flags) from the training table.
    df = pd.merge( df , aux[['date', 'ds' , 'holiday', 'holiday_name']] , on ='date', how='left')
    df.to_csv('./submit/{}.train.all.csv'.format(day),index=False)
    # Scale each prediction by the real/pred ratio written by leak_check().
    rate = pd.read_csv('leak.rate.csv',header=0)
    df = pd.merge(df, rate, on ='ds')
    plt.plot(df['cnt'])
    print df['cnt'].describe()
    print '--' * 8
    df['cnt'] = df['cnt'] * df['rate']
    plt.plot(df['cnt'], label='leak')
    plt.legend()
    plt.show()
    print df['cnt'].describe()
    """
    fd = 'fd'
    spring = 'spring'
    qingming = 'qingming'
    laodong = 'laodong'
    duanwu = 'duanwu'
    zhongqiu = 'zhongqiu'
    guoqing = 'guoqing'
    """
    # Predictions on named-holiday days are clamped to at most 5.
    df['cnt'] = df[['holiday_name', 'cnt']].apply(
        lambda row: min(5, row['cnt']) if row['holiday_name'] in ['fd', 'spring', 'qingming', 'laodong','duanwu','zhongqiu','guoqing'] else row['cnt'], axis=1)
    #########fore holidays
    # Inspect brand 5 on the first day after each holiday; the override
    # below is currently disabled.
    foredays = fore_holidays()
    print foredays
    print '\n\n Before foredays'
    print df['cnt'].describe()
    print df[ (df['brand'] == 5) & (df['ds'].isin( foredays.keys()))]
    #df['cnt'] = df[['ds' , 'brand', 'cnt']].apply(lambda row : 1000 if row['brand'] == 5 and row['ds'] in foredays else row['cnt'] ,axis=1 )
    print '\n\n'
    print df['cnt'].describe()
    df[['date','brand', 'cnt']].to_csv('./submit/{}.hand.reg.txt'.format(day) ,index=False, header=False, sep='\t')
    df[['date','brand', 'cnt', 'ds', 'holiday_name']].to_csv('./submit/{}.withdate.csv'.format(day) ,index=False, header=False, sep='\t')
def hand_check():
    """Clamp scores on non-regular days of a dated submission to a flat 30
    and write the result to hand.reg.csv."""
    df = pd.read_csv('20180226.withdate.False.csv',header=None , sep='\t')
    # df =df.iloc[1: ,:]
    df.columns = ['date', 'ds', 'dayofweek', 'score', 'holiday', 'holiday_name']
    print df.shape
    print df['score'].describe()
    # Keep the predicted score on regular day types; any special holiday
    # gets the constant 30 instead.
    df['score'] =df[['holiday_name','score']].apply(lambda row: row['score'] if row['holiday_name'] in ['normal', 'saturday','sunday','work'] else 30, axis=1)
    print df['score'].describe()
    print df[~df['holiday_name'].isin(['normal', 'saturday','sunday','work'])].shape
    print df.shape
    df[['date', 'score']].to_csv('hand.reg.csv', index=False, header=False, sep='\t')
if __name__ =='__main__':
    # Only the leak-ratio hand adjustment is currently enabled.
    # hand_check()
    hand_check2()
    # leak_check()
    # main()
    # infer()
| apache-2.0 |
dhruv13J/scikit-learn | examples/feature_stacker.py | 246 | 1906 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
# Load the standard iris data: 150 samples, 4 features, 3 classes.
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
# Nested parameter names address steps inside the pipeline/union via "__".
param_grid = dict(features__pca__n_components=[1, 2, 3],
                  features__univ_select__k=[1, 2],
                  svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
| bsd-3-clause |
geodynamics/burnman | examples/example_layer.py | 2 | 6460 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
example_layer
----------------
This example script is building on example_beginner.py.
In this case we use the layer Class. Instead of computing properties at
pressures defined by the seismic PREM model (as is done in many examples),
we compute properties at pressures self-consistent to the layer.
Layer can be used to evaluate geophysical parameter, such as
the Bullen parameter or the Brunt_vasala frequency.
Through the 'modified_adiabat' temperature setting it allows for inclusions of
thermal boundary layers.
Layers can also be used to build an entire planet (see example_build_planet.py)
*Uses:*
* :doc:`mineral_database`
* :class:`burnman.layer.Layer`
* :class:`burnman.composite.Composite`
* :class:`burnman.seismic.PREM`
*Demonstrates:*
* creating basic layer
* calculating thermoelastic properties with self-consistent pressures
* seismic comparison
"""
from __future__ import absolute_import
# Here we import standard python modules that are required for
# usage of BurnMan.
import numpy as np
import matplotlib.pyplot as plt
import burnman_path # adds the local burnman directory to the path
# Here we import the relevant modules from BurnMan.
import burnman
from burnman import minerals
import warnings
assert burnman_path # silence pyflakes warning
if __name__ == "__main__":
    # This is the first actual work done in this example. We define
    # composite object and name it "rock": 80% perovskite, 20% periclase.
    rock = burnman.Composite([minerals.SLB_2011.mg_perovskite(),
                              minerals.SLB_2011.periclase()],
                             [0.8, 0.2])
    # Here we create and load the PREM seismic velocity model, which will be
    # used for comparison with the seismic velocities of the "rock" composite
    seismic_model = burnman.seismic.PREM()
    # We create an array of 20 depths at which we want to evaluate PREM,
    # and then query the seismic model for the pressure, density,
    # P wave speed, S wave speed, and bulk sound velocity at those depths
    depths = np.linspace(2890e3, 670e3, 20)
    # NOTE(review): the name `eval` shadows the Python builtin.
    with warnings.catch_warnings(record=True) as w:
        eval = seismic_model.evaluate(['pressure', 'gravity', 'density',
                                       'v_p', 'v_s', 'bullen'], depths)
        pressure, gravity, seis_rho, seis_vp, seis_vs, seis_bullen = eval
        print(w[-1].message)
    # Here we define the lower mantle as a Layer(). The layer needs various
    # parameters to set a depth array and radius array.
    lower_mantle = burnman.Layer(name='Lower Mantle', radii=6371.e3-depths)
    # Here we set the composition of the layer as the above defined 'rock'.
    lower_mantle.set_material(rock)
    # Now we set the temperature mode of the layer.
    # Here we use an adiabatic temperature and set the temperature at the
    # top of the layer
    lower_mantle.set_temperature_mode(temperature_mode='adiabatic',
                                      temperature_top=1900.)
    # Alternatively, we choose a user-defined temperature, given by the
    # Brown & Shankland geotherm
    # lower_mantle.set_temperature_mode(temperature_mode ='user_defined',
    # temperatures =burnman.geotherm.brown_shankland(depths))
    # And we set a self-consistent pressure. The pressure at the top of the
    # layer and gravity at the bottom of the layer are given by the PREM.
    lower_mantle.set_pressure_mode(pressure_mode='self-consistent',
                                   pressure_top=pressure[-1],
                                   gravity_bottom=gravity[0])
    # Alternatively, we set a user-defined pressure given by PREM
    # lower_mantle.set_pressure_mode(pressure_mode='user-defined',
    # pressures = pressure, gravity_bottom=gravity[0])
    lower_mantle.make()
    # All the work is done, now we can plot various properties!
    # 2x2 grid: wave speeds, density, Bullen parameter, geotherm.
    fig = plt.figure(figsize=(10, 6))
    ax = [fig.add_subplot(2, 2, i) for i in range(1, 5)]
    # First, we plot the p-wave speed verses the PREM p-wave speed
    ax[0].plot(lower_mantle.pressures / 1.e9, lower_mantle.v_p / 1.e3,
               color='r', linestyle='-', marker='o', markerfacecolor='r',
               markersize=4, label='V$_P$ (computed)')
    ax[0].plot(pressure / 1.e9, seis_vp / 1.e3, color='r',
               linestyle='--', marker='o', markerfacecolor='w', markersize=4,
               label='V$_P$ (reference)')
    # Next, we plot the s-wave speed verses the PREM s-wave speed
    ax[0].plot(lower_mantle.pressures / 1.e9, lower_mantle.v_s / 1.e3,
               color='b', linestyle='-', marker='o', markerfacecolor='b',
               markersize=4, label='V$_S$ (computed)')
    ax[0].plot(pressure / 1.e9, seis_vs / 1.e3, color='b', linestyle='--',
               marker='o', markerfacecolor='w', markersize=4,
               label='V$_S$ (reference)')
    ax[0].set_ylabel("Wave speeds (km/s)")
    # Next, we plot the density versus the PREM density
    ax[1].plot(lower_mantle.pressures / 1.e9, lower_mantle.density / 1.e3,
               color='g', linestyle='-', marker='o', markerfacecolor='g',
               markersize=4, label='computed')
    ax[1].plot(pressure / 1.e9, seis_rho / 1.e3, color='g',
               linestyle='--', marker='o', markerfacecolor='w',
               markersize=4, label='reference')
    ax[1].set_ylabel("Density (g/cm$^3$)")
    # And the Bullen parameter
    ax[2].plot(lower_mantle.pressures / 1e9, lower_mantle.bullen,
               color='k', linestyle='-', marker='o', markerfacecolor='k',
               markersize=4, label='computed')
    ax[2].plot(pressure / 1.e9, seis_bullen, color='k',
               linestyle='--', marker='o', markerfacecolor='w',
               markersize=4, label='reference')
    ax[2].set_ylabel("Bullen parameter")
    # Finally, we plot the used geotherm
    ax[3].plot(lower_mantle.pressures / 1e9, lower_mantle.temperatures,
               color='k', linestyle='-', marker='o', markerfacecolor='k',
               markersize=4, label='used geotherm')
    ax[3].set_ylabel("Temperature (K)")
    # Shared x-axis formatting and legends for all four panels.
    for i in range(4):
        ax[i].set_xlabel("Pressure (GPa)")
        ax[i].set_xlim(min(pressure) / 1.e9, max(pressure) / 1.e9)
        ax[i].legend(loc='best')
    # At long last, we show the results! We are done!
    plt.show()
| gpl-2.0 |
LiZoRN/lizorn.github.io | talks/nbzj-impress/code/txt/PacificRimSpider.py | 3 | 42651 | # _*_ coding: utf-8 _*_
__author__ = 'lizorn'
__date__ = '2018/4/5 19:56'
from urllib import request
from urllib.error import URLError, HTTPError
from bs4 import BeautifulSoup as bs
import re
import jieba # 分词包
import pandas as pd
import numpy #numpy计算包
import matplotlib.pyplot as plt
import matplotlib
from wordcloud import WordCloud #词云包
# headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
# cookies = {'cookie':'bid=0Hwjvc-4OnE; ll="118173"; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1522457407%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DfrKwcZRSimGHLMvXj6iGkVFOXpPB1-x2KXgG3ytcgjHGTaXmDbel3nM5yObAEvcR%26wd%3D%26eqid%3D85cb540e00026f95000000045abedb3d%22%5D; _pk_ses.100001.4cf6=*; __utma=30149280.1673909815.1515314150.1521467190.1522457407.4; __utmc=30149280; __utmz=30149280.1522457407.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utma=223695111.1124617317.1522457407.1522457407.1522457407.1; __utmb=223695111.0.10.1522457407; __utmc=223695111; __utmz=223695111.1522457407.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __yadk_uid=13Q68v4czDmbhs7EwXEQH4ZeJDrf4Z0E; _vwo_uuid_v2=D223470BC8673F2EA458950B595558C7B|c34cbc6386491154b19d1664fe47b0d6; __utmt_t1=1; __utmt=1; ps=y; ue="valeera@foxmail.com"; dbcl2="140103872:B8C7nqlvWXk"; ck=6RJM; _pk_id.100001.4cf6=98fcd272a4c63ce7.1522457407.1.1522460095.1522457407.; __utmb=30149280.36.8.1522460095181; push_noty_num=0; push_doumail_num=0; ap=1; RT=s=1522460157064&r=https%3A%2F%2Fmovie.douban.com%2Fsubject%2F20435622%2Fcomments%3Fstart%3D260%26limit%3D20%26sort%3Dnew_score%26status%3DP%26percent_type%3D'}
# 1. Data collection: scrape Douban short comments for this movie, 20 per page.
comment_list = []
# NOTE(review): the loop intends to page through ~30027 comments via the
# `start` parameter, but the bound int(30027 / 20) + 1 (= 1502) combined
# with step 20 stops far earlier than 30027 — confirm the intended range.
for i in range(0, int(30027 / 20) + 1, 20):
    url = 'https://movie.douban.com/subject/20435622/comments?start=%s&limit=20&sort=new_score&status=P&percent_type=' % i
    try:
        resp = request.urlopen(url)
    except HTTPError as e:
        # Best-effort scrape: stop paging at the first HTTP error.
        break
    html = resp.read().decode('utf-8')
    soup = bs(html, "html.parser")
    # Each comment is a <div class="comment">; its first <p> holds the
    # review text (its .string is None for non-plain-text nodes).
    comment_div_lits = soup.find_all('div', class_='comment')
    for item in comment_div_lits:
        if item.find_all('p')[0].string is not None:
            comment_list.append(item.find_all('p')[0].string)
# 2. Data cleaning: keep only the Chinese text from the scraped comments.
# Build the corpus with str.join instead of repeated `+=` concatenation,
# which was O(n^2) in the total text size.
comments = ''.join(str(c).strip() for c in comment_list)
# Keep only runs of CJK Unified Ideographs (drops punctuation/Latin/digits).
pattern = re.compile(r'[\u4e00-\u9fa5]+')
filterdata = pattern.findall(comments)
cleaned_comments = ''.join(filterdata)
#
# cleaned_comments = '影片放到中段的的时候景甜突然发现自己并不是反派于是妆容一下子就变淡了衣着也变得朴素了她放下身段从底层做起由一开始的霸道总裁变成了最后的机械电焊工剧情其实比好就是机甲打怪兽还多了机甲打机甲开菊兽合体等内容还有剧情反转大景甜第一次在合拍片中秀出了存在感中国元素多到泛滥啊说中国特供也不为过难怪外媒会酸但有句话怎么说来着不服憋着我决定重看下第一部确定下自己当初为什么如此痴迷那部电影大段的吧啦吧啦以及看不清的乱打这部到底在做什么全片毫无记忆点而上部我现在还记得集装箱如板砖一样狂拍开菊兽最可恨的是这部完全不燃了已沦为平庸好莱坞大片我要看大机甲打怪兽我要听第一部的不是来看你拖拖拉拉乱七八糟拍一堆文戏不是来看你蛇皮走位蹦来蹦去打不过就头脑简单一头撞死不是来看你五年之后特效反向进步十年我看是开倒车多放会儿说不定还能涨一星不知道除了主题曲和块多钱的特效还剩下些什么太开心大甜甜尴尬本尬想演个霸道女王风结果活脱脱一个乡镇女企业家悉尼和东京做错了什么尤其是悉尼天天见的街道风景太熟悉导致有点观感不真实东京那一仗太爽了四个机器人都好赞而且又开发出了新技术最喜欢女主自己造的跟屎壳郎一样的小机甲圆滚滚的太可爱灵活从剧情到打斗场面都透露着一股浓浓的廉价山寨片质感后半部分干脆拍成了向超凡战队看齐的青少年科幻片怎么鲜艳怎么来用充斥着无用说教台词的废戏和冷笑话填满分钟在其衬托下去年的变形金刚甚至都成了佳作怪兽和机甲都丑出新高度只有每次出场都换一套衣服的景甜是美的嗯一部百分百的好莱坞制作故事上延续了前作的世界观但这部续作在合作模式形象塑造故事创作和宇宙观设定上都远远超过了前作真的我应该看的是部盗版环太平洋山寨货即视感到爆炸连都变难听了而且讲真我竟然他们讲的中文都听不懂得看英文字幕台词尬到不行甜甜求求你在国内发展吧咱们别出去丢人了陀螺回来拍第三部吧最后我要给变形金刚道歉我错了我不应该说你难看的看到景甜一出来心里一咯噔不过最后看完觉得比我意料中好看太多对比第一部比奥斯卡更能体现陀螺的水平更荒谬的应该是那个黑人男主吧他到底是什么来头为什么哪里都有他为什么星球大战和这部都要选那个黑人啊长得很帅吗身材很棒吗演技很好吗下面很厉害吗全程尴尬无聊的剧情泡沫化的叙事为了大场面而大场面这无疑又是一部爆米花式的机器人大战怪兽的电影在电影院好几次都想上厕所所以这是一部充满尿点的电影为啥还是景甜大小姐就不能换个看着这电影不就是看特效吗智商党们麻烦去看悬疑片不然对不起你们的智商打得超级爽的最后景甜拯救世界中国万岁整部片质感很男主真的太丑了好赶客景甜好尴尬衣服换了一套又一套不知道她在做还是拍电影果然有景甜的都是大烂片希望她不要再拍戏了好喜欢第一部但是知道是景甜拍第二部就很失望了但还是去看了好浪费我的感情本片又名变形金刚之如来神掌大战合体哥斯拉怪兽编剧继承了女一必须父母双亡靠捡破烂然后拯救世界的套路想问问编剧难道美帝人民都是天才吗捡个垃圾就可以徒手建高达你把念过书读过的人情何以堪反派必须最后秒变纸老虎还要放句狠话我一定还会回来的大甜甜毁再接再厉吧我的妈呀景甜到底什么背景把菊地凛子写死了就是为了扩张景甜宇宙吧果然资本家的钱就是不一样啊庞大而密集的中国元素虽不至于太过尴尬但整体已经毫无上一部的暗黑美学感没有精神内核的环太平洋和多年前的奥特曼系列竟然有了异曲同工之处况且奥特曼好歹也是大几十年前的东西了你说你丢不丢人嘛活在被景甜拯救的世界里我不如去死我们的美人真剑佑在本片中给人留下的印象就只有一个大粗脖子真的看不出环太平洋的续集没看过环太平洋的还以为是变形金刚系列呢大甜甜的国际梦想秀代表着中国打怪兽哟哟切克闹你问我在哪里我根本就不知道什么剧情都不重要我甜即将要征服大宇宙国人钱好赚负分别人的续集都是恨不得把前作的优点无限放大到恶心这部倒好优秀气质一点不剩新人物建立不起来还把仅存的旧角色浪费干净连配乐主旋律都只出现了一次而且位置很奇怪无疑是年度最假续集景甜和也基本没交集比想象中好太多了弥补了第一部故事编排上面的问题并且创作了更鲜明的人物和更复杂的故事虽然失去了第一部的气势但是依然娱乐性很高东京大战依然是值回票价看完了本片还讽刺景甜的人真的就是傻逼了这是一部有景甜都不忍心减分的电影燃炸了多一星情怀少了无论在水平还是情趣上都非常的陀螺续作不再是那种黑夜中湿哒哒充满机油迷醉味道的调子更亮的场景和更灵活的机甲带来的是更为塑料感的观影体验当然对萝卜爱好者来说依然是不能错过的一大年度爽片毕竟出现和钢大木的世纪大合影这种事情就够高潮很久了点映看完哭着出影院不是有机器人就能向隔壁变形金刚比烂的好吗比大做为第一部的脑残粉完全不能忍受强势插入中英文混杂拯救世界后大汗淋漓的某星抢去本该属于机甲和怪兽的亮相时刻就像大招如来神掌时死死抓住甩不掉的小强被陀螺冷落也不无道理除了前作的主题曲续作几乎找
不到亮点整部片子的质感很你知道有一套从天而降的掌法吗我只想知道日本做错了什么总是让他们一而再再而三碰见怪兽以及大面积居住地被摧毁菊地凛子做错了什么让她那么早便当还有景甜到底关系为何那么硬怎么每部打怪兽的国际戏都有她参演怪兽心里我到底做错了什么刚组装好成为巨型神受结果神威都没显就又要去见列祖列宗其实这是国际版奥特曼吧真得是尬这么好的一副牌竟然就这样打烂了最后的决战还不如第一部的开场第一部硬伤也有所以就不废话多打这一部废话这个多啊剧情空洞特效三星景甜出戏大家慎看景甜算我求你为什么光天化日就会魅力全无呢是技术不达标还是清风水上漂恐怕还是整体基调视觉氛围甚至是机甲恋兽情怀均不达标的综合原因我至今都忘不了菊地凛子撑伞在青衣基地出场的画面至于影迷津津乐道的级片其实续集这种级口味爆米花才是吧菊地凛子就这么成为森麻子了贾秀琰帅气脑洞大开牛逼的都藏在正片里了良心中国元素真多景甜的戏份堪比女二张晋蓝盈盈惊喜基本不太违和演员太用力时不时出戏一邵氏工业之所以出二五仔主要是党建缺位没有落实党管原则二不想上京的怪物不是好怪物具备了一个好莱坞爆米花大片该有的良好品相刺激的打斗和好笑的桥段安排在白天大场面也尽量满足了普通影迷的期待我知道你喜欢暗黑系故事按照手册写有反骨有绝望有反击这时候槽点来了机甲们在被几乎灭门后即刻被修复而面对大怪兽的终极武器竟是如来神掌年月观影比变形金刚好看最后居然出来个组合金刚比电影版大力神的效果好看的时候一直祈祷下一个镜头出现的是怪兽而不是景甜因为景甜的人设实在是太讨厌了尤其不爽的是一般来说她这种角色应该被当成幕后黑手干掉结果竟然是队友这就是来自中国的资本让人讨厌但无法拒绝已经完全没有了机甲和怪兽的独特美学魅力变成两个物体之间的争斗毫无人类智慧的无脑爽片最后的大怪兽看起来分明就是荒原狼嘛整部都很赞只有最后一招设置的太蠢了扣掉一分剧情分特效分预告应该是全片打斗高潮了我就看你环大西洋怎么拍博耶加小东木大甜甜简直就是温格麾下的扎卡伊沃比威尔希尔称之为你行你上三巨头没争议吧一人一星不过分吧哎可喜欢第一部了当年怒打五星来着凭空想象的科技也是值得夸奖了景甜这次演绎的比任何一次好莱坞合作都自然值得夸奖电影打斗怪兽可圈可点胆子大的话剧情特效都值得去影院一看披着科技元素和伪装的特摄片本质上依旧是奥特曼打怪兽不过爽快就行了这种片子除了视觉效果和激烈打斗之外其他并不重要看完觉得票价不亏全程无尿点当然科技宅可能不满意景大小姐这次发挥真的没败人品再这片子国内票房铁定超过美帝本土一部无人物无集中戏剧冲突无丰富镜头只有尴尬笑点的爆米花的正确打开方式便是玩味其中的政治所指放了大半部国际斗争威胁论烟幕弹之后开始煞有介事地反思赛博格化最终背离第一部原旨反而显得前半部更像重点戏里戏外景甜都是标准好莱坞视角中国人可一出来还是燃得想哭怎么办再不济也不至于两星吧真是生活水平提高了惯出豆瓣上越来越多的事儿豆瓣评分被这些事儿毁的越来越没参考价值陀螺良心监制细节狗血却简单粗暴不要因为景甜飞扬跋扈乱耍酷流浪者飞天砸毁就瞎黑机甲怪兽都有升级黑哥豪爽小萝莉叛逆干练菊池悲壮败笔是怪兽脑子入侵疯子科学家操纵机甲开启虫洞但是打机甲怪兽小密集群怪兽与怪兽合体最过瘾牛日本决战花样多打得狠至少硬碰硬不敷衍比之前预想的要好保留了陀螺的原创构架构思机甲和怪兽融合的点子是胖子的遗产有访谈为证是对世界观的补完而非续貂但视觉风格变了少了首部艺术色彩质感更加动漫化但燃点还是必须有的正片星情怀别再吐槽中国元素了没有中国资金这片根本没得拍但跑步机那块还是笑了中规中矩全靠铁拳砸脸撑满全场三星半真的很喜欢大荧幕上看机甲打怪兽这部不能和陀螺的相提并论纯粹娱乐来讲还是不错的简化人物关系和剧情内涵仅突出了机甲战斗和其他强续命的独立日一样求生欲很强但也没丢掉便当习俗略欣慰彩蛋有独立日的影子呀我还挺喜欢波耶加且更爱甜甜甜甜比心心景甜环太分并没有爆烂机甲打机甲那两场戏都可圈可点主题上和这两天中美贸易大战撞车啊只是换成了科技制衡你中文太烂了回家练练再和我说话景甜依然最大笑点就地枪毙还有吊绳溜冰哈哈哈哈另外我好像在东京看到了京东这太难判断了从左向右读还是从右向左读都对无限怀念四年半前的那个暑假看完第一部时的激动还能忆起恨不能马上看到续集的畅想年月唯有靠一小段原不至幻灭到零不如重温原声更过瘾将近一半的冗长尴尬文戏铺垫所为何事宣扬团队精神与家人概念近乎无聊事实证明讲不好中文无法拯救世界颤抖吧歪异果星仁其实环太平洋就是成年人的奥特曼为什么环的评价各方面都不如环因为换导演了吉尔莫德尔托罗他为了水形物语推掉了环结果大家都知道了水形物语成了奥斯卡大赢家最佳影片最佳导演系列电影除非第一部口碑票房双扑街否则不能轻易换导演不知道说了些什么只记依偎在谁的肩膀分钟啥第一次出现主题曲的时间恐龙战队续集啥这才是这部电影的真名字景甜啊真正的女主角还行比变形金刚系列
强一丢丢既然剧情不行就应该把第一部的主题曲不要钱一样循环播放呀导演请你听听群众的呼声怪兽请暂停攻击一下中美双方围绕机甲要不要征收的关税展开了激烈的讨论毕竟调查征税门类里有工业机器人项如果你抱着看一部爆米花大片以外的期待买票那真的就是你自己的问题了但是即便是带着正确的心态入场环还是会令你失望这部电影里没有你从未见过的特效场面也没有让你感到激动的故事尽量客观说环太平洋的剧情仅有的看点基本全在怪兽方包括先驱对人类大脑长达十年的入侵还有开菊兽合体勉强可以算作小高潮但是黑人主演是真的不行人设演技台词一样拿得出手的都没有作战策略只会一起上还不如景格格最后几分钟跑步机的戏拉好感真子便当完全不能忍第一部之所以有死忠不是因为怪兽不是因为机器人对撞和大场面是因为机甲崇拜在看之前我就说虽然这个班底就注定是烂片但只要给我三分钟驾驶机甲的临场感我就满足了结果一秒钟也没有说是变形金刚也没人反对全程面无表情看完像玩具被抢走后委屈想哭唯一的亮点是换装之后的景甜哈哈哈外星人都学不会中文话说回头无脑机甲爽片的要素都在可拍三部的内容一部齐活部分内容还有点儿感打戏之外还在东京卖了情怀没什么好抱怨的在整体的中二气氛烘托下连景甜也变得好好看呢不多说了我要去打猎啦三傻闹东京然后来了拆迁队接着三傻合体了虽然不是的概念但无人机暴走真是分分秒最后拆东京还暗示了第三东京地下城的存在一口一个太穿越了虽然也有很强烈的青少年科幻倾向但比要成年一点最后的拆东京就像是某些变形金刚哥斯拉元素的重组六分逗我呢機甲打戲不錯劇情不會太無聊然後我必須說自從看過章子怡在柯洛弗的表演之後我對景甜的接受度上升哎呀原來是變形金剛阿幹嘛叫自己環太平洋愛的人是不會喜歡的不是只要機器人怪獸打打架就好耶之所以經典是因為懂得並實現了日系機甲動畫與特攝的精華看這些長大的人真的會熱血沸騰而在換掉導演的續集蕩然無存了支持国产支持黑人支持景甜五星第一部的情怀全失因为第一部的我才给它一星景甜就不说了好莱坞那么多有演技的黑人男演员为什么星球大战和这部都要选那个一点观众缘都没有的蠢蛋不要以为白天的戏多真实感就有所上升屁跟奥特曼打小怪兽似的机甲和怪兽都没有阴影的什么鬼中国特供人傻钱多速来如果说环太平洋是虎虎虎的话那么环太平洋就是空天猎整部电影的感觉就像电梯里放的那首变调的一样那细腰的机甲头上长个角不就是那谁了吗那明艳的橘红色不就是那谁吗那插入栓如果做成细圆柱体不就是那什么了吗我看到东京地下逃生电梯的配色怪兽来袭的字时都要哭了我给五星一公司参与了一点投资二把大中国说的很三中国演员品牌都有足的戏份然后说电影本身看的首映杜比影厅送了海报开头大哥单口相声啊中间也很乱特别不连贯结尾更莫名其妙一下死了虽然多了机甲机甲但走变形金刚的套路太严重了本身三星水平中国到底投资了多少钱还邵氏请了张晋都不让人打一场和怪兽总是要去日本街市跑一场的真嗣身世的黑人这么壮硕那明日香在哪里最后那招是无敌如来神掌海外变种吗你记不记得有一套从天而降的掌法这故事根本让人燃不起来你拿第一部的逼我我也燃不起来啊卡司的演技让人捉急小个女主用力过猛小东木压根面无表情这俩人平均一下刚好令人意外的是景甜相比之下竟然还过得去话说得亏有大甜甜了要不然真心更没眼看我期待了两三年你给我看黑人小哥的青春期孩子的打架京东挂在上海的广告各国人的刻板印象怪兽爬山机甲坠落以及景田小姐的脸色陀螺导演当年的第一部让人眼前一亮这么多年被我们津津乐道我怀着最低期待值前来但大部分时间让人如坐针毡随处是尴尬的台词莫名其妙的黑化乱七八糟的打斗拯救世界的鸡汤随意乱晒中文台词的翻译腔也让人抓狂但这一切都在最后的如来神掌面前相形见绌相比之下当年陀螺简直是拍了部杰作阿这就是好导演与坏导演的差距假如菊地凛子的热血还能再爆发一下假如那栋怪兽骨架边的海滨豪宅不仅仅只是功能性过场假如最后那招大绝杀别搞得那么像功夫或许我还能多喜欢一点可惜拿了奥斯卡的陀螺目测已经彻底放弃该系列我虽然想看点不需要用脑的电影但是也不能这么侮辱我的智商呀没有第一部精彩但是还好没有玩脱剧情特效打斗戏还是能看的景甜宇宙第三部也是最好看的一部战斗燃爆特效一流场面宏大剧情热血说像奥特曼的朋友你确定你真的看过奥特曼吗电影五星景甜扣半星四星半推荐四舍五入算五星气质上太像变形金刚独立日安德游戏景甜这个角色真是一言难尽啊谁说没有违和感的前作受到的好评很依赖于那种沉重粗糙的打击感机械感机甲每挥一次拳都超带感麻子高喊母语刀斩怪兽简直爆燃啊这部续作基本是反前作而行之拜托没必要再拍一部变形金刚啊什么环太平洋纯粹就是变形金刚基本是按一个剧本大纲填充起来的标准流水线产物德尔托罗之前所构筑的庞大世界观未能有丝毫拓展甚至还萎缩了不少灌注的趣味感也消失殆尽没了陀螺来了景甜无论是故事设定还是特效动作场面几乎都在全面倒退就连结尾也似乎是照搬功夫还记得那招从天而降的掌法吗一点都不燃没有厚重的金属感
没有了巨型机甲的压迫感前一部的诸多爽点没有得到延续唯一的一点兴奋感还是响起前作背景音乐的时候景甜拯救世界怪兽灭地球机甲打怪兽英雄驾机甲景甜救英雄所以就是景甜救地球景甜救人类景甜救世界颤抖吧怪兽颤抖吧人类身为昭和系特摄粉较之从头爽到尾的第一部这部看得几乎毫无感觉估计德胖看了也不会有多大感觉估计卖拷贝看了会很有感觉估计导演压根儿就没搞明白第一部的成功是因为什么感觉做再多烂片的心理预设也还是没料到能烂到底掉光作为导演处女作也不强求有人家奥斯卡导演在美学风格趣味上万分之一的追求所以根本没在这期待但好歹把目标观众群设在中学生啊这个繁杂冗长靠各式初级编剧技巧勉强达到糊弄小学生的金酸霉级空洞剧本是要作什么妖前一小时几乎废的电影里一共出现了三个次元地球怪兽的次元景甜的次元死于重力势能转化的动能加热能虽然没能达到预期有些遗憾但在屏上看巨大怪兽和机甲场面和效果还是很不错的几场打戏依旧震撼人心可惜熟悉的背景音乐响起配的画面着实糟糕新的配乐到爆故事有所增强但这种片要故事就跑偏了景甜依旧是电影界的演技还给她脸那么多特写景甜真的承包了我所有的笑点电影很真各种山寨廉价气息各个方面都远不如第一部这应该是超凡战队而不是环太平洋再说说景大小姐一个国内基本没什么票房号召力口碑也差的女星演技也永远停留在各种姿态的自恋中但是不但国内各大导演各大明星甘愿做绿叶而且无休止的赖上好莱坞这黑幕潜规则也太张扬了剧情较之前面的有所进步景甜在影片中也有存在感但是大场面的堆砌让人产生审美疲劳还行吧总体不如第一部特效不错打斗场面再多一些就好了看的不过瘾想给个分片子不算难看编剧确实有花心思比其他一些爆米花大片好点结尾的解决方案太粗暴当然远远比不上第一部了看特效就够值回票价了续作特别想要证明自己通过弱化标志性的主题曲杀死菊地凛子可以不爱但不要伤害景甜的译制腔台词等方式来努力的切断与前作的相同点但这正是我们特别不想看到的改变我们想要看的是各个不同国家特色的猎人机甲驾驶员在城市里与浴血厮杀而不是一条无聊又无趣的阴谋故事线星没有了上一部宏大而令人振奋的配乐看片的时候感觉好平淡还有景甜太容易让人出戏比起陀螺的第一部逊色了不少起码第一部还是有些神秘的黑暗风格这一部完全是色彩鲜艳的各种铠甲增加的机甲对打还算是有新意反派这智商是统治不了地球的我大景甜是要统治地球了最后竟然还致敬了星爷的功夫大量的中国投资和中国元素真的很多这完全是一部中国主导的中国制造没有第一部有诚意了整部电影可以用差劲说了看完好想当怪兽噢好莱坞科幻大片里有东方面孔而且还是很有感觉的这次比较新鲜尤其剧情比较紧凑特效逼真的看的过程中有被吓到了特效加一分景甜加一分剩余实际两分還可以吧覺得動作場面比上一集還多配樂也不錯依爽片看很值了还可以把超过预期就是文戏时间拉得那么长却没有把几个机甲训练员的性格描述清楚也没有讲他们哪个人训练哪个机甲我记得里面大家是不能随意驾驶任意一辆机甲的而且那么大一个军事基地又十年过去了应该有一批成熟的机甲员才对啊为什么非要让还有个月才完成训练的学员拯救世界呢无趣既看不到实体化的动漫风也看不到迷影恶趣味就是一无脑大片没变态金刚那么傻可也好不到哪儿去除了最后的一场东京大战之外没什么像样的打戏怎么看怎么都该是大反派的邵氏公司却像某甜的妆容一样永远伟光正简直就像梦里进了只苍蝇一样烦人景甜再次拯救了世界剧情还可以吧最让人感到尴尬的是说中文的时候真的很没有气势啊导演小时候肯定没少看奥特曼同时也没少受变形金刚的影响这点并非臆测因为全片都挺杂糅的加一点赛博朋克来一点废土美学有点怪兽文化再致敬下陀螺最值得一提的是融合生命的设计不过喜欢拍白天的巨幕导演总是值得夸赞的景甜真的蛮适合拍这种高贵冷艳的无表情角色大就这么撞一下就死了史诗级烂片并衬托出第一部的伟大相比于有些找失望没有糟糕的地方也没有精彩的地方或许美片就图这点热闹特效堆砌而成的好莱坞大片景甜比想象中有存在感比差远了还没开始就结束了可是高潮迭起令人窒息麻子上部那么艰难活下来居然就这么憋屈被发了盒饭我真想拍死这智障导演上部男主也不知去哪了打怪兽时间短的不行结局敷衍期待了几年就拍成这样失望看得热血沸腾差点鸡冻地哭出来的第一部怎么到了第二部会这样震惊东京街头惊现三头怪兽奥特曼为什么迟迟不肯出现究竟是人性的丧失还是道德的沦丧欢迎走进今天的环太平洋电影剧情超级简单但毫无燃点跟第一部不在一个水平看完就是内心异常平静无法忽略大甜甜的存在简直是女主角般的存在跪拜告辞好久没在电影院看这么难看的电影了瞧瞧看还是纸老虎在背后捣鬼冰天雪地跪求第一部人马回归还不错比想象中好老是黑大甜甜这次老实讲挺好的英语也有进步有希望成为口语一线水平不知道为什么看到这种电影里的中国人我怎么感觉自己跟到了动物园看到猴子一样心情激动看得很爽为此片的评分打抱不平多给一星变形金刚的机甲看得审美疲劳了环太平洋的巨型机甲看起来还是很震撼他的
笨重不灵活相对于变形金刚是加分项神经元结合的设定其实可以深挖提升内涵可惜了机甲嘛大就对了越大越帅特别是久违的响起来后简直不要太帅裹脚布文戏最具魅力的菊地凛子登场不到十分钟就领了盒饭然后是景甜阿姨带着几位小鲜肉挑大梁超凡战队的即视感首部将打斗场景安排在太平洋的雨夜真是明智之举这部把战场移到大都市亮堂堂的白天是要向变形金刚靠拢了可特效都没人家做得有质感口碑扑街合理你还记得有招从天而降的掌法么不对这是天马流星拳吧哈哈哈不过精日份子真的是可恨至极除了不像前作真的像好多影视作品打起来像变形金刚后面像进了城像哥斯拉整体又仿佛和独立日颇有激情连下集预告都像他们对迷影有种误解好像把各种机甲揍怪兽拼起来就是环太平洋了少了陀螺是不行对了为什么怪兽没开啊一个不错的剧本被稀烂的节奏粗糙而毫无厚重感的特效以及磕了大麻一般的疲软配乐拖累的乏味无力游戏般的机甲设计和场景酷炫十足却极度缺乏前作的细节和冲击力总的来讲只能归结于导演对节奏和分镜的把控差距太大顺便虽然景小姐的演出没那么糟糕但一边说中文一边说英文真的很尴尬啊我去你记不记得有一招从天而降的掌法莫非是那失传已久的如来神掌还不错机甲很帅最开始出场的拳击手感觉还挺帅的没想到和部队注册的机甲一对比像个玩具不过最后的胜利也少不了拳击手的相助机甲打机甲机甲打怪兽挺过瘾的额外给电影里的中国元素一颗星这种大科幻里有中国演员还说着中国话感觉还是很不错的太乱来了糊里糊涂毫无章法尴尬的文戏弱鸡的打斗屎一般的剧情还行吧景甜没那么尴尬了星文戏弱智打戏不爽当台词说你们父母是谁不重要时镜头给到了赵雅芝儿子本片还有伊斯特伍德的儿子千叶真一的儿子以及甜甜景甜扮相百变但开口就变国产剧终极怪兽死的窝囊没啥必杀技就是血厚几个小幽默小反转算亮点第三部要想在中国大卖只有一招复活暴风赤红法国抢先美国全球首映吐槽怪我咯但是确实和大锅炖的变一样烂得不相上下我记得环还是年前和小虎爸爸一起看的超级燃景甜姐姐光环让我想撕屏满屏的尬演青少年的确适合打入小学生消费群看得够爽就行了别无他求啊真的有点舍不得无论如何也想再见到你请答应我一定要再出现好嘛是的就是你已经分手的华人之光大甜甜还有辐射和用完啦下一次抄什么呢景甜并不是环太平洋最烂的存在还我第一部主题曲菊地凛子给景甜做配角东京惊现京东植入开菊兽三位一体如来神掌从天一击毙命你确定我看的不是环大西洋各方面不如第一部啊好怀念看第一部的夏天不过当爆米花也不难看毕竟影院里面看机甲片的机会也不多了五年了这个系列就这么结束了同组机甲战士可以训练跳镜子舞同步率百分百景甜霸道女总裁下基层完成社会主义改造上一集犹记得菊地凛子雨中撑伞等直升机芦田爱菜废墟奔跑等救星续集刚看完就失忆作为环太平洋的续作雷霆再起其实深知前作的短板力图在剧情上构建更为充沛的张力但实际上脸谱化的人物和空洞乏味的台词使耗费大量时间所做的剧情铺垫几乎成为了无用之功而在前作中那股昔日的赛博朋克风在这部续作里亦荡然无存感觉和第一部比差太远了不是演员的问题是剧本的问题最后送死的那个机甲完全是为了送死而送死啊还有想让新队员登场没必要非得弄死老队员吧失望改成低幼向了吧不成功简直烂到昏昏欲睡这剧本写的这景甜演的怎么能用一个烂字就形容得全真正的狗尾续貂略拖前半小时没怎么看也能跟上节奏不过打戏还是非常燃的激动较上部还是陀螺执导好一点剧情有进步新加的中国元素也并没有想象的那么尴尬大甜甜不适合走高冷路线寄生虫好像饕餮打一二星的以后别看机甲片了没必要环太平洋雷霆再起还算是有些干货的至少挨过一个小时的无聊会迎来半小时的酣畅一战只是矛盾有点多上一部是机甲斗怪兽这一部却成了怪兽开机甲那么牛逼的怪物机甲一撞就死对东方异域极度迷恋却仍难逃演员短命毁个片甲不留的好莱坞式定律这样就能讨好中国观众了要不是景甜的换装秀和东京街头的京东广告这么无聊的东西怎么可能看得下去啊和变形金刚独立日一个套路大棚电影剧情单薄逻辑不通就他妈拍来骗中国人钱的变形金刚奥特曼真的很不好看了比起差远了然而也并不是很好看哎一定要说又什么可以的大概是第一部到这部还在用的吧不用迟疑没第一部好就是了我觉得陀螺自己对这电影都是拒绝的当然要是有记者问他他肯定不会说出来等了五年意外觉得还不错男主尬演机甲浮夸缺少质感有的情节没展开但是整体故事讲的流畅节奏也得当情节有反转和惊喜怪兽特效不错以及大甜甜总算没那么出戏值七分吧如果你是变形金刚的粉丝你可能会喜欢本片无论是特效场面人物塑造都很类似变形金刚系列产品只不过机器人更大只而已如果你是环太平洋第一部的粉丝你会失望的怪兽的戏份还不如景甜多仅有怪兽出场的最后几分钟才让我觉得算有些欣慰星半决战富士山下景甜拯救世界刚开场最喜欢的麻子就跪了同观影小朋友的妈妈甚是惋惜小朋友却说她不是日本人么日本人不都是坏人么小朋友妈妈竟无言以对满场的亚洲面孔证明老外爱看变形金刚爱看怪兽电影可就是不爱看机器人打怪兽所以还有续集的
话也别假惺惺找黑哥做主角了直接扶正大甜甜多好一路给你从长城打到骷髅岛再打到环太平洋最后打去外太空完美换了导演到了这第二部只能说各方面十分凑过整体勉强及格另外不吹不黑景甜不仅是电影里的关键角色而且表现居然相当可以比金刚骷髅岛里可有可无的面瘫路人进步了十万个长城堪称本片最大惊喜还行吧作为无脑爆米花电影我觉得可以的萝莉拯救世界是必须的了大甜甜存在度提高不少中国基地中国将军中国军火商三星可以有加一星孩子说好喜欢军刀掏钱人物动机太牵强附会了逻辑漏洞大到几乎无法自圆其说最后的大战简直潦草不堪以及从星战开始就无法忍受的男主的颜值直接重映行吗咱别拍了一场砸了很多钱但就是不起来的趴人物一个也立不起来比景甜还要没有观众缘好厉害竟然能上我只想安静的看变形金刚打小怪兽结果三分之二时间都在整那些无脑又蠢到爆的剧情结尾也很无语就跟一那样安静的从头打到尾不好吗生气又名景甜的换装游戏哈哈哈今天晚上看的剧情还行感觉那个权将军好惨啊就这样领了盒饭特效好看话说大甜甜知道不知道她刚出场的口红色号不适合她这片差不多了被黑的有点惨不知道是不是因为有景甜没有太多的亮点但是机甲战斗的戏份还是挺多的相比黑豹古墓是实打实的爆米花电影了没有的重金属感但看的绝对爽没那么不堪大甜甜演的还不错建议观看后半段机器人打小怪兽还是很热血的一个怪兽不够再来一个两个不够三个总该够了吧但是情怀不是这么卖的何况环太还没有到可以卖情怀的地步不要计较剧情漏洞小东木和之间毫无火花小女主比不上芦田爱菜中英台词切换生硬等等等要时时刻刻保护大甜甜关键时刻还要靠大甜甜一记神助攻着实惊艳如来神掌的点子很妙怪兽形态和作战场面全面提升怪物生化机甲是亮点东京之战很燃亚德里亚霍纳是颜值担当几个大特写相当养眼了这次的机甲最爱复仇黑曜石喜欢这类型的电影热血故事情节什么的也挺好的可是评分不高是怎么回事再次感叹人家的特效真是棒编剧一定重温了吧量产机暴走很有当年号机的感觉嘛军刀雅典娜赛高不至于那么难看吧这不就是小时候看的奥特曼吗为啥成人还看这种低幼片为啥大家的理想总是拯救世界为啥我的男神张晋这么快就领盒饭了为啥直男喜欢看这种片陪男票看得我一脑子问号除了团战那几秒有点精彩外其他真的好一般最后我出戏到如来神掌了中国元素根本融合不进去一度感觉是在看国产片这批的机甲战士的人物形象没一个能立起来包括主角照理菊子的死应该能激发人物爆发但依然吊儿郎当到最后牛逼人设完全体现不出来小女主也无感基本上每个人都是打酱油的景田的角色更好笑全程令人出戏喜欢的朋友们答应我别看好么明明时长是标准的分钟就是给人一种内容不够的感觉多分钟的时候强化之前的还没登场整部结构显得头重脚轻前期一直在刻画女二号的成长看番位景甜女一然而女二的总体戏份又不多莫名其妙的编排本月最烂啦啦啦期待这部电影好久了好喜欢啊也好期待啊真的很棒希望你都去看下在第一部的版剧情前面加了一个剧场版的剧情看到那些无人机甲的时候瞬间想到量产机当然编剧还是花了心思做反转的新机甲新怪兽都很好看怪兽合体很棒打得也很爽但是机甲的动作都过于流畅失去了第一部真实的机械的笨重和凝滞感最后怀念一下天国的真人版怪兽摧毁城市的时候街道竟看不到一具死尸说明人类和怪兽还是可以和平共处的第一部我可是给了五星的啊看第二部这德性还不如去看奥特曼景甜张晋什么的真是太尬了中国资本能不能干点儿好事儿操纵机甲战士也是挺累的在里面跑啊踢的怪兽想用血液与富士山的稀有元素混合有点儿意思和第一辑的风格甚至是故事都已经相去甚远这次的便当发得相当不开心不过作为一部独立无脑的爆米花片还是有打有特效算是热闹景甜小姐没有阻碍观感但是看多她生活真人秀的样子就会发觉这个御姐形象是如此不合适最开心的是我的博士加戏了一大堆说教台词和尴尬玩笑勉强凑齐分钟就算了就放一下而且大战时间很短怪兽死得也很不可思议最开始就感觉陀螺不执导多半扑街果然缺乏第一部的那种燃传奇怪兽宇宙第五弹及景甜大战怪兽第三弹中国演员中国元素的大量植入让我们更加看到了好莱坞有多需要中国市场如派拉蒙老板所说没有中国市场好莱坞很可能就活不下去了还有就是景甜拯救了世界对就是她特效三颗星除开一点文戏外就是机甲打机甲和机甲打怪兽还是打的比较刺激的场面很大剧情比较弱智就为了打起来随便编的大甜甜浓妆的时候比较噶演暴发户企业家的节奏淡妆机械师的时候还挺好的男主就是星球大战的丑黑人女主的小机器人还挺好玩的绝对水平有三星但碍于前作太过耀眼相比之下本作是剧作美学配乐甚至动作场面的全方位溃败景甜仿佛是为了证明社会主义的优越性而存在的超人大小机体捉迷藏悉尼壮烈道别富士山团战万法朝宗如来佛掌虽然远没第一部的厚重质感赶鸭子囫囵吞枣的烂片节奏和灾难般的景甜但怪兽机甲化多功能组合往又进了一步以极低的期望去看感觉烂得还算彻底和开心导演你记得环大西洋吗中规中矩的剧情打怪兽还是很燃的比第一部好多了无视中国演员七分嘻嘻珍惜这个景甜吧换跑道了没几部好
看了女文工团花穿越进各路好莱坞大片的的故事本身就很科幻以及雄霸的儿子比白娘子的儿子更俊美一些动作片打就完事了要是不看片名还以为你卖拷贝又拍了一部变形金刚虽然剧情稍微精彩一点点但依然全程是尿点不知道哪里该尬笑一下或者尬哭一下伊斯特伍德这儿子演技可以和大甜甜拼一拼好在大甜甜还是挺美的老美请别再拍中国特供片来骗钱了谢谢长不大的男孩这片适时描写了中美关系军事基地和邵氏的关系很像传奇和万达当初的关系一开始有层精神交战的意思美国对东方崛起的经济侵略的惧外心理值得玩味后来怪兽控制机甲如果能挑拨离间让双方开战坐收渔翁之利会比现在更有趣更现在还是流俗了不过导演本人还是深受华语武侠片和日本剑戟片的影响华裔阵容加一星景甜表现超乎预期加一星拳击手的短腿萌加一星最后一星因为青岛啤酒国外很多影评竟然说比第一部有提升真的是无语第一部陀螺个人风格影响下避免的很多大片通病在第二部基本全部都有漫威后大片的通病人物过多故事线过于繁琐没有必要的反转平庸的动作戏和糟糕的节奏等等感觉主创真的没有理解第一部的好可惜了在最后时刻她硬是趴在男女主角背上上了天零点场摄影和不如整部下来和剧情契合度不高前半部分比较无聊铺垫太多后半部分的战斗更像是机体武器展示中国元素多到爆炸总的来说这部电影向我们宣告环太平洋系列以后要继续圈钱啦因为它真的太承上启下了很难做为一部好的完整的电影来欣赏阿玛拉平时训练怎么都连接不好决战一下就进入状态了与特种部队独立日的续集一样先请上部老人领完便当然后让毛头小子们拯救世界很平庸几乎毫无亮点尤其丧失了机甲的厚重和质感搞成这样跟变形金刚有毛区别啊何况还没汽车人拍的好看连让人抖腿的都变奏的不燃了过分了啊大甜甜怒刷存在感还是有点尬张晋倒还是辣么帅整体蛮中国订制的这么对比来看陀螺拿奥斯卡果然是实至名归的如果要拍请让他导甜婊假双眼皮看得我尴尬的不行御台场为什么会有京东的广告为什么这么燃的只出现了一次这音乐真的差评看看景甜再看看凛子女王乡镇女企业家还我环太平洋原班人马景甜的植入还好还算自然这一部的打斗戏份没有上一部多没那么多钱可以烧了吧自动驾驶一出事这个又来高级黑一把怪不得陀螺不接这个本子和前作一比少了灵魂奥特曼打怪兽开头镜头晕的我闭着眼睛差点睡着后面剧情以为正义方会出现什么新的机器人结果啥都没出现最后我老公说来了个如来神掌撞死了简直不要太敷衍大甜甜表现不错终于在片中起了很大作用不然我要打星即使有大家都讨厌的景甜评分这么低也不科学啊跟翔一样的独立日和变形金刚相比这部真算得上爆米花中的良心作品了'
# Tokenize the cleaned review text into individual words.
# NOTE(review): this fragment depends on `cleaned_comments`, `jieba`, `pd`,
# `numpy`, `matplotlib`, `plt` and `WordCloud` being defined/imported earlier
# in the script (not visible in this chunk) -- verify before running alone.
segment = jieba.lcut(cleaned_comments)
words_df = pd.DataFrame({'segment': segment})
# Remove common high-frequency (stop) words.
stopwords = pd.read_csv("chineseStopWords.txt", index_col=False, quoting=3, sep="\t", names=['stopword'], encoding='utf-8')#quoting=3 is csv.QUOTE_NONE: never treat quote chars specially
words_df = words_df[~words_df.segment.isin(stopwords.stopword)]
# Word-frequency counting.
# NOTE(review): passing a dict to Series.agg to rename the result was
# deprecated in pandas 0.20 and removed in later releases; on modern pandas
# this line raises -- TODO confirm the targeted pandas version.
words_stat = words_df.groupby(by=['segment'])['segment'].agg({"计数":numpy.size})
words_stat = words_stat.reset_index().sort_values(by=["计数"], ascending=False)
print(words_stat.head())
# Render a word cloud of the 1000 most frequent words.
matplotlib.rcParams['figure.figsize'] = (10.0, 5.0)
wordcloud = WordCloud(font_path="simhei.ttf", background_color="white", max_font_size=80) # font face, background colour and max font size
word_frequence = {x[0]: x[1] for x in words_stat.head(1000).values}
# word_frequence_list = []
# for key in word_frequence:
#     temp = (key, word_frequence[key])
#     word_frequence_list.append(temp)
# print(word_frequence_list)
wordcloud = wordcloud.fit_words(word_frequence)
plt.imshow(wordcloud)
plt.show() | mit |
jesseerdmann/audiobonsai | weekly_sampler.py | 2 | 8309 | from audiobonsai import wsgi, settings
from datetime import datetime
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
import pandas as pd
from pprint import pprint
from sausage_grinder.models import Artist, ReleaseSet
from spotify_helper.models import SpotifyUser
from spotipy import SpotifyException
from spotify_helper.helpers import get_user_conn
def attr_score(val, factor):
    """Bucket *val* by integer division with *factor* and square the bucket.

    The squaring gives a quadratic emphasis to larger values: doubling the
    bucket index quadruples the score.
    """
    bucket = val // factor
    return bucket * bucket
def stat_score(df, in_col, out_col, multiplier=1):
    """Add a bucketed, squared score column *out_col* derived from *in_col*.

    The column is shifted so its minimum is 0, bucketed with a width of
    1/25th of the observed range, squared (the same formula as
    ``attr_score``) and optionally scaled by *multiplier*.  A
    ``<in_col>_temp`` helper column is also left on the frame, matching the
    original behaviour.  Returns the mutated DataFrame.
    """
    in_min = df[in_col].min()
    in_max = df[in_col].max()
    in_col_temp = in_col + "_temp"
    df.loc[:, in_col_temp] = df[in_col].apply(lambda x: x - in_min)
    # BUGFIX: when the column's range is below 25 (including a constant
    # column) the old ``(in_max - in_min) // 25`` factor was 0 and the score
    # computation raised ZeroDivisionError; clamp the bucket width to >= 1.
    factor = max((in_max - in_min) // 25, 1)
    # Inlined attr_score: (value // factor) ** 2, scaled by multiplier.
    df.loc[:, out_col] = df[in_col_temp].apply(
        lambda x: ((x // factor) ** 2) * multiplier)
    return df
def build_artists_dict(week):
    """Collect per-artist stats for *week* into a dict keyed by Spotify URI.

    Artists with no release that week are skipped (with a console note), as
    are releases of type 'single'.  Outlier percentage-change fields are
    clamped in place on the Artist objects.

    NOTE(review): the clamps mutate the ORM objects but nothing here calls
    ``save()``, so presumably the changes are not persisted -- confirm
    against callers.
    """
    artists = Artist.objects.filter(weeks=week)
    artists_dict = {}
    for artist in artists:
        release = artist.week_release(week)
        if release is None:
            print('No release found for {} in week {}'.format(artist, week))
            continue
        if release.release_type == 'single':
            continue
        # Artists with almost no followers on release day produce huge,
        # meaningless percentage jumps; clamp the pct to the raw change,
        # capped at 100.
        if artist.release_day_foll <= 100 and artist.followers_change_pct_from_release >= 100:
            artist.followers_change_pct_from_release = min(artist.followers_change_from_release, 100)
        # Same idea for popularity: clamp the pct to 10x the raw change,
        # capped at 100, when the release-day popularity was tiny.
        if artist.release_day_pop <= 10 and artist.pop_change_pct_from_release >= 100:
            artist.pop_change_pct_from_release = min(artist.pop_change_from_release*10, 100)
        # One flat record per artist; downstream code turns this dict into
        # a DataFrame (see build_artists_df).
        artists_dict[artist.spotify_uri] = {
            'obj': artist,
            'name': artist.name,
            'pop': artist.popularity,
            'pop_change': artist.pop_change_from_release,
            'pop_change_pct': artist.pop_change_pct_from_release,
            'foll': artist.followers,
            'foll_change': artist.followers_change_from_release,
            'foll_change_pct': artist.followers_change_pct_from_release,
            'release_day_foll': artist.release_day_foll,
            'release_day_pop': artist.release_day_pop,
            'release': release
        }
    return artists_dict
def build_artists_df(week):
    """Build a DataFrame of per-artist stats for *week*.

    Each raw metric gets a bucketed score column via ``stat_score``, and
    'final_score' holds the plain sum of the six per-metric scores.
    """
    df = pd.DataFrame.from_dict(build_artists_dict(week), orient='index')
    score_columns = [
        ('pop', 'pop_score'),
        ('pop_change', 'pop_change_score'),
        ('pop_change_pct', 'pop_change_pct_score'),
        ('foll', 'foll_score'),
        ('foll_change', 'foll_change_score'),
        ('foll_change_pct', 'foll_change_score_pct'),
    ]
    for raw_col, score_col in score_columns:
        df = stat_score(df, raw_col, score_col)
    df['final_score'] = sum(df[score_col] for _, score_col in score_columns)
    return df
if __name__ == '__main__':
    # The most recent release week is first after the descending sort.
    weeks = ReleaseSet.objects.all().order_by('-week_date')
    week = weeks[0]
    # Score every qualifying artist for the week, keep the best-scoring
    # artist per release, and bucket releases into 10 equal-width bins by
    # release-day popularity.
    artists_df = build_artists_df(week)
    artists_df = artists_df.sort_values(by='final_score', ascending=False)
    artists_df = artists_df.drop_duplicates(subset='release', keep='first')
    artists_df['category'] = pd.cut(artists_df['release_day_pop'], 10)
    # Retained for the (currently disabled) playlist-creation path below.
    playlist_name = 'Fresh Cuts: {}'.format(week.week_date.strftime('%b %d, %Y'))
    user = User.objects.get(username=settings.SPOTIFY_USERNAME)
    spotify_user = SpotifyUser.objects.get(user=user)
    track_list = []
    sp = get_user_conn(spotify_user, '127.0.0.1:8000')
    category_num = 1
    # Take up to 20 top-scoring releases from each popularity bucket and
    # pick one representative track per release.
    for category in sorted(artists_df['category'].unique()):
        category_df = artists_df[artists_df['category'] == category]
        print('\nCategory {:d}'.format(category_num))
        print('{}: Min {:10d}, Max {:10d}, Count {:10d}'.format(category, category_df['release_day_pop'].min(), category_df['release_day_pop'].max(), len(category_df)))
        category_df = category_df.head(20)
        for release in category_df['release'].values:
            try:
                album_dets = sp.album(release.spotify_uri)
            except OSError:
                # BUGFIX: this previously caught
                # requests.exceptions.ConnectionError, but `requests` is
                # never imported in this module, so entering the handler
                # raised NameError.  requests' ConnectionError subclasses
                # OSError (IOError), so OSError keeps the intended
                # skip-on-network-failure behaviour.
                continue
            print('#{:03d} {:6s}: {}'.format(len(track_list)+1, release.release_type, release))
            if album_dets['type'] == 'single':
                track_list.append(album_dets['tracks']['items'][0]['uri'])
            else:
                # Among the first five tracks, pick the one with the median
                # duration as the release's representative cut.
                track_dict = {}
                for track in album_dets['tracks']['items'][:5]:
                    if track['duration_ms'] not in track_dict.keys():
                        track_dict[track['duration_ms']] = []
                    track_dict[track['duration_ms']].append(track['uri'])
                track_times = sorted(list(track_dict.keys()))
                median_time_key = track_times[int(len(track_times)/2)]
                track_list.append(track_dict[median_time_key][0])
        category_num += 1
    #playlist = sp.user_playlist_create(user, playlist_name)
    sausage_grinder_playlist = 'spotify:user:audiobonsai:playlist:6z8m6hjBXxClAZt3oYONCa'
    # Spotify accepts at most 100 tracks per call: replace the playlist
    # contents with the first batch, then append the remaining batches.
    batch_size = 100
    offset = 0
    # BUGFIX: initialise so the final pprint cannot raise NameError when
    # track_list is empty (previously the name was only bound in the loop).
    playlist_tracks = None
    while offset < len(track_list):
        if offset == 0:
            playlist_tracks = sp.user_playlist_replace_tracks(user, sausage_grinder_playlist, track_list[offset:offset + batch_size])
        else:
            playlist_tracks = sp.user_playlist_add_tracks(user, sausage_grinder_playlist, track_list[offset:offset + batch_size])
        offset += batch_size
    pprint(playlist_tracks)
| apache-2.0 |
Windy-Ground/scikit-learn | benchmarks/bench_covertype.py | 120 | 7381 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
    """Load the covertype data, then cache and memmap the train/test split.

    Parameters
    ----------
    dtype : dtype for the feature matrix.
    order : 'C' or 'F' memory layout for the feature matrix.
    random_state : seed used to shuffle the dataset before splitting.

    Returns
    -------
    X_train, X_test, y_train, y_test : standardized split where the binary
    target is 1 for every cover type other than class 1 (spruce/fir).
    """
    ######################################################################
    ## Load dataset
    print("Loading dataset...")
    data = fetch_covtype(download_if_missing=True, shuffle=True,
                         random_state=random_state)
    X = check_array(data['data'], dtype=dtype, order=order)
    # BUGFIX: np.int was a deprecated alias of the builtin int and was
    # removed in NumPy 1.24; use the builtin directly (identical dtype).
    y = (data['target'] != 1).astype(int)
    ## Create train-test split (as [Joachims, 2006])
    print("Creating train-test split...")
    n_train = 522911
    X_train = X[:n_train]
    y_train = y[:n_train]
    X_test = X[n_train:]
    y_test = y[n_train:]
    ## Standardize first 10 features (the numerical ones); the remaining
    ## 44 are binary indicators and are left untouched (mean 0, std 1).
    mean = X_train.mean(axis=0)
    std = X_train.std(axis=0)
    mean[10:] = 0.0
    std[10:] = 1.0
    X_train = (X_train - mean) / std
    X_test = (X_test - mean) / std
    return X_train, X_test, y_train, y_test
# Candidate models for the benchmark, keyed by the names accepted by the
# --classifiers command-line flag.  random_state and n_jobs are injected at
# runtime by the __main__ block below.
ESTIMATORS = {
    'GBRT': GradientBoostingClassifier(n_estimators=250),
    'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
    'RandomForest': RandomForestClassifier(n_estimators=20),
    'CART': DecisionTreeClassifier(min_samples_split=5),
    'SGD': SGDClassifier(alpha=0.001, n_iter=2),
    'GaussianNB': GaussianNB(),
    # NOTE(review): loss="l2" is the legacy spelling (squared hinge) used by
    # old scikit-learn releases; modern releases expect "squared_hinge".
    'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
                           tol=1e-3),
    'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
if __name__ == "__main__":
    # Command-line interface: which classifiers to run, parallelism, data
    # memory layout, and the shared random seed.
    parser = argparse.ArgumentParser()
    parser.add_argument('--classifiers', nargs="+",
                        choices=ESTIMATORS, type=str,
                        default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
                        help="list of classifiers to benchmark.")
    parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
                        help="Number of concurrently running workers for "
                             "models that support parallelism.")
    parser.add_argument('--order', nargs="?", default="C", type=str,
                        choices=["F", "C"],
                        help="Allow to choose between fortran and C ordered "
                             "data")
    parser.add_argument('--random-seed', nargs="?", default=13, type=int,
                        help="Common seed used by random number generator.")
    args = vars(parser.parse_args())
    print(__doc__)
    # load_data is memoized and memory-mapped (see the Memory wrapper above).
    X_train, X_test, y_train, y_test = load_data(
        order=args["order"], random_state=args["random_seed"])
    print("")
    print("Dataset statistics:")
    print("===================")
    print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
    print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
    print("%s %s" % ("data type:".ljust(25), X_train.dtype))
    print("%s %d (pos=%d, neg=%d, size=%dMB)"
          % ("number of train samples:".ljust(25),
             X_train.shape[0], np.sum(y_train == 1),
             np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
    print("%s %d (pos=%d, neg=%d, size=%dMB)"
          % ("number of test samples:".ljust(25),
             X_test.shape[0], np.sum(y_test == 1),
             np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
    print()
    print("Training Classifiers")
    print("====================")
    error, train_time, test_time = {}, {}, {}
    for name in sorted(args["classifiers"]):
        print("Training %s ... " % name, end="")
        estimator = ESTIMATORS[name]
        estimator_params = estimator.get_params()
        # Propagate the shared seed into every *random_state-like parameter
        # the estimator exposes so runs are reproducible.
        estimator.set_params(**{p: args["random_seed"]
                                for p in estimator_params
                                if p.endswith("random_state")})
        if "n_jobs" in estimator_params:
            estimator.set_params(n_jobs=args["n_jobs"])
        # Time fitting and prediction separately; error is 0/1 loss.
        time_start = time()
        estimator.fit(X_train, y_train)
        train_time[name] = time() - time_start
        time_start = time()
        y_pred = estimator.predict(X_test)
        test_time[name] = time() - time_start
        error[name] = zero_one_loss(y_test, y_pred)
        print("done")
    print()
    print("Classification performance:")
    print("===========================")
    print("%s %s %s %s"
          % ("Classifier ", "train-time", "test-time", "error-rate"))
    print("-" * 44)
    # Report in increasing order of error rate.
    for name in sorted(args["classifiers"], key=error.get):
        print("%s %s %s %s" % (name.ljust(12),
                               ("%.4fs" % train_time[name]).center(10),
                               ("%.4fs" % test_time[name]).center(10),
                               ("%.4f" % error[name]).center(10)))
    print()
| bsd-3-clause |
eepalms/gem5-newcache | util/stats/output.py | 90 | 7981 | # Copyright (c) 2005-2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from chart import ChartOptions
class StatOutput(ChartOptions):
def __init__(self, jobfile, info, stat=None):
super(StatOutput, self).__init__()
self.jobfile = jobfile
self.stat = stat
self.invert = False
self.info = info
def display(self, name, printmode = 'G'):
import info
if printmode == 'G':
valformat = '%g'
elif printmode != 'F' and value > 1e6:
valformat = '%0.5e'
else:
valformat = '%f'
for job in self.jobfile.jobs():
value = self.info.get(job, self.stat)
if value is None:
return
if not isinstance(value, list):
value = [ value ]
if self.invert:
for i,val in enumerate(value):
if val != 0.0:
value[i] = 1 / val
valstring = ', '.join([ valformat % val for val in value ])
print '%-50s %s' % (job.name + ':', valstring)
def graph(self, name, graphdir, proxy=None):
from os.path import expanduser, isdir, join as joinpath
from barchart import BarChart
from matplotlib.numerix import Float, array, zeros
import os, re, urllib
from jobfile import crossproduct
confgroups = self.jobfile.groups()
ngroups = len(confgroups)
skiplist = [ False ] * ngroups
groupopts = []
baropts = []
groups = []
for i,group in enumerate(confgroups):
if group.flags.graph_group:
groupopts.append(group.subopts())
skiplist[i] = True
elif group.flags.graph_bars:
baropts.append(group.subopts())
skiplist[i] = True
else:
groups.append(group)
has_group = bool(groupopts)
if has_group:
groupopts = [ group for group in crossproduct(groupopts) ]
else:
groupopts = [ None ]
if baropts:
baropts = [ bar for bar in crossproduct(baropts) ]
else:
raise AttributeError, 'No group selected for graph bars'
directory = expanduser(graphdir)
if not isdir(directory):
os.mkdir(directory)
html = file(joinpath(directory, '%s.html' % name), 'w')
print >>html, '<html>'
print >>html, '<title>Graphs for %s</title>' % name
print >>html, '<body>'
html.flush()
for options in self.jobfile.options(groups):
chart = BarChart(self)
data = [ [ None ] * len(baropts) for i in xrange(len(groupopts)) ]
enabled = False
stacked = 0
for g,gopt in enumerate(groupopts):
for b,bopt in enumerate(baropts):
if gopt is None:
gopt = []
job = self.jobfile.job(options + gopt + bopt)
if not job:
continue
if proxy:
import db
proxy.dict['system'] = self.info[job.system]
val = self.info.get(job, self.stat)
if val is None:
print 'stat "%s" for job "%s" not found' % \
(self.stat, job)
if isinstance(val, (list, tuple)):
if len(val) == 1:
val = val[0]
else:
stacked = len(val)
data[g][b] = val
if stacked == 0:
for i in xrange(len(groupopts)):
for j in xrange(len(baropts)):
if data[i][j] is None:
data[i][j] = 0.0
else:
for i in xrange(len(groupopts)):
for j in xrange(len(baropts)):
val = data[i][j]
if val is None:
data[i][j] = [ 0.0 ] * stacked
elif len(val) != stacked:
raise ValueError, "some stats stacked, some not"
data = array(data)
if data.sum() == 0:
continue
dim = len(data.shape)
x = data.shape[0]
xkeep = [ i for i in xrange(x) if data[i].sum() != 0 ]
y = data.shape[1]
ykeep = [ i for i in xrange(y) if data[:,i].sum() != 0 ]
data = data.take(xkeep, axis=0)
data = data.take(ykeep, axis=1)
if not has_group:
data = data.take([ 0 ], axis=0)
chart.data = data
bopts = [ baropts[i] for i in ykeep ]
bdescs = [ ' '.join([o.desc for o in opt]) for opt in bopts]
if has_group:
gopts = [ groupopts[i] for i in xkeep ]
gdescs = [ ' '.join([o.desc for o in opt]) for opt in gopts]
if chart.legend is None:
if stacked:
try:
chart.legend = self.info.rcategories
except:
chart.legend = [ str(i) for i in xrange(stacked) ]
else:
chart.legend = bdescs
if chart.xticks is None:
if has_group:
chart.xticks = gdescs
else:
chart.xticks = []
chart.graph()
names = [ opt.name for opt in options ]
descs = [ opt.desc for opt in options ]
if names[0] == 'run':
names = names[1:]
descs = descs[1:]
basename = '%s-%s' % (name, ':'.join(names))
desc = ' '.join(descs)
pngname = '%s.png' % basename
psname = '%s.eps' % re.sub(':', '-', basename)
epsname = '%s.ps' % re.sub(':', '-', basename)
chart.savefig(joinpath(directory, pngname))
chart.savefig(joinpath(directory, epsname))
chart.savefig(joinpath(directory, psname))
html_name = urllib.quote(pngname)
print >>html, '''%s<br><img src="%s"><br>''' % (desc, html_name)
html.flush()
print >>html, '</body>'
print >>html, '</html>'
html.close()
| bsd-3-clause |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/io/parser/test_network.py | 4 | 8535 | # -*- coding: utf-8 -*-
"""
Tests parsers ability to read and parse non-local files
and hence require a network connection to be read.
"""
import os
import pytest
import moto
import pandas.util.testing as tm
from pandas import DataFrame
from pandas.io.parsers import read_csv, read_table
from pandas.compat import BytesIO
@pytest.fixture(scope='module')
def tips_file():
    """Absolute path of the bundled tips.csv test data file."""
    data_dir = tm.get_data_path()
    return os.path.join(data_dir, 'tips.csv')
@pytest.fixture(scope='module')
def salaries_table():
    """DataFrame parsed from the bundled salaries.csv test data file."""
    salaries_path = os.path.join(tm.get_data_path(), 'salaries.csv')
    return read_table(salaries_path)
@pytest.fixture(scope='module')
def s3_resource(tips_file):
    # Fixture: a moto-mocked S3 connection pre-populated with the tips data
    # (plain, .gz and .bz2) in a public bucket ('pandas-test') and a private
    # one ('cant_get_it').  The mock is started before any boto3 resource is
    # created so all S3 traffic is intercepted, and stopped on teardown.
    pytest.importorskip('s3fs')
    moto.mock_s3().start()
    test_s3_files = [
        ('tips.csv', tips_file),
        ('tips.csv.gz', tips_file + '.gz'),
        ('tips.csv.bz2', tips_file + '.bz2'),
    ]
    def add_tips_files(bucket_name):
        # Upload the plain and compressed tips files into *bucket_name*.
        for s3_key, file_name in test_s3_files:
            with open(file_name, 'rb') as f:
                conn.Bucket(bucket_name).put_object(
                    Key=s3_key,
                    Body=f)
    boto3 = pytest.importorskip('boto3')
    # see gh-16135
    bucket = 'pandas-test'
    conn = boto3.resource("s3", region_name="us-east-1")
    conn.create_bucket(Bucket=bucket)
    add_tips_files(bucket)
    conn.create_bucket(Bucket='cant_get_it', ACL='private')
    add_tips_files('cant_get_it')
    yield conn
    # Teardown: runs after all tests using the fixture have finished.
    moto.mock_s3().stop()
# Parametrized shim: runs check_compressed_urls over every combination of
# compression codec, engine, and explicit-vs-inferred compression mode.
@pytest.mark.network
@pytest.mark.parametrize(
    "compression,extension",
    [('gzip', '.gz'), ('bz2', '.bz2'), ('zip', '.zip'),
     pytest.param('xz', '.xz',
                  marks=pytest.mark.skipif(not tm._check_if_lzma(),
                                           reason='need backports.lzma '
                                                  'to run'))])
@pytest.mark.parametrize('mode', ['explicit', 'infer'])
@pytest.mark.parametrize('engine', ['python', 'c'])
def test_compressed_urls(salaries_table, compression, extension, mode, engine):
    check_compressed_urls(salaries_table, compression, extension, mode, engine)
@tm.network
def check_compressed_urls(salaries_table, compression, extension, mode,
                          engine):
    # test reading compressed urls with various engines and
    # extension inference
    base_url = ('https://github.com/pandas-dev/pandas/raw/master/'
                'pandas/tests/io/parser/data/salaries.csv')
    url = base_url + extension
    # In 'infer' mode we pass the mode string itself so read_table deduces
    # the codec from the URL's file extension.
    if mode != 'explicit':
        compression = mode
    url_table = read_table(url, compression=compression, engine=engine)
    tm.assert_frame_equal(url_table, salaries_table)
class TestS3(object):
    @tm.network
    def test_parse_public_s3_bucket(self):
        """Read tips.csv (plain and compressed) from real public S3 and
        check it matches the local copy byte-for-byte."""
        pytest.importorskip('s3fs')
        # more of an integration test due to the not-public contents portion
        # can probably mock this though.
        for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
            df = read_csv('s3://pandas-test/tips.csv' +
                          ext, compression=comp)
            assert isinstance(df, DataFrame)
            assert not df.empty
            tm.assert_frame_equal(read_csv(
                tm.get_data_path('tips.csv')), df)
        # Read public file from bucket with not-public contents
        df = read_csv('s3://cant_get_it/tips.csv')
        assert isinstance(df, DataFrame)
        assert not df.empty
        tm.assert_frame_equal(read_csv(tm.get_data_path('tips.csv')), df)
    def test_parse_public_s3n_bucket(self, s3_resource):
        """The legacy Hadoop-style 's3n://' scheme reads the mocked bucket."""
        # Read from AWS s3 as "s3n" URL
        df = read_csv('s3n://pandas-test/tips.csv', nrows=10)
        assert isinstance(df, DataFrame)
        assert not df.empty
        tm.assert_frame_equal(read_csv(
            tm.get_data_path('tips.csv')).iloc[:10], df)
    def test_parse_public_s3a_bucket(self, s3_resource):
        """The Hadoop-style 's3a://' scheme reads the mocked bucket."""
        # Read from AWS s3 as "s3a" URL
        df = read_csv('s3a://pandas-test/tips.csv', nrows=10)
        assert isinstance(df, DataFrame)
        assert not df.empty
        tm.assert_frame_equal(read_csv(
            tm.get_data_path('tips.csv')).iloc[:10], df)
    def test_parse_public_s3_bucket_nrows(self, s3_resource):
        """nrows= limits rows correctly for each compression variant."""
        for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
            df = read_csv('s3://pandas-test/tips.csv' +
                          ext, nrows=10, compression=comp)
            assert isinstance(df, DataFrame)
            assert not df.empty
            tm.assert_frame_equal(read_csv(
                tm.get_data_path('tips.csv')).iloc[:10], df)
    def test_parse_public_s3_bucket_chunked(self, s3_resource):
        """Chunked reads from S3 yield the same rows as local slices
        (C engine)."""
        # Read with a chunksize
        chunksize = 5
        local_tips = read_csv(tm.get_data_path('tips.csv'))
        for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
            df_reader = read_csv('s3://pandas-test/tips.csv' + ext,
                                 chunksize=chunksize, compression=comp)
            assert df_reader.chunksize == chunksize
            for i_chunk in [0, 1, 2]:
                # Read a couple of chunks and make sure we see them
                # properly.
                df = df_reader.get_chunk()
                assert isinstance(df, DataFrame)
                assert not df.empty
                true_df = local_tips.iloc[
                    chunksize * i_chunk: chunksize * (i_chunk + 1)]
                tm.assert_frame_equal(true_df, df)
    def test_parse_public_s3_bucket_chunked_python(self, s3_resource):
        """Same chunked-read check as above, but forcing the Python engine."""
        # Read with a chunksize using the Python parser
        chunksize = 5
        local_tips = read_csv(tm.get_data_path('tips.csv'))
        for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
            df_reader = read_csv('s3://pandas-test/tips.csv' + ext,
                                 chunksize=chunksize, compression=comp,
                                 engine='python')
            assert df_reader.chunksize == chunksize
            for i_chunk in [0, 1, 2]:
                # Read a couple of chunks and make sure we see them properly.
                df = df_reader.get_chunk()
                assert isinstance(df, DataFrame)
                assert not df.empty
                # Chunk i must equal rows [5*i, 5*(i+1)) of the local file.
                true_df = local_tips.iloc[
                    chunksize * i_chunk: chunksize * (i_chunk + 1)]
                tm.assert_frame_equal(true_df, df)
def test_parse_public_s3_bucket_python(self, s3_resource):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python',
compression=comp)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')), df)
def test_infer_s3_compression(self, s3_resource):
for ext in ['', '.gz', '.bz2']:
df = read_csv('s3://pandas-test/tips.csv' + ext,
engine='python', compression='infer')
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')), df)
def test_parse_public_s3_bucket_nrows_python(self, s3_resource):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python',
nrows=10, compression=comp)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
    def test_s3_fails(self, s3_resource):
        """Missing keys and unauthorized buckets both surface as IOError."""
        with pytest.raises(IOError):
            read_csv('s3://nyqpug/asdf.csv')
        # Receive a permission error when trying to read a private bucket.
        # It's irrelevant here that this isn't actually a table.
        with pytest.raises(IOError):
            read_csv('s3://cant_get_it/')
    def test_read_csv_handles_boto_s3_object(self,
                                             s3_resource,
                                             tips_file):
        """read_csv accepts the body of a boto3 S3 object wrapped in BytesIO."""
        # see gh-16135
        s3_object = s3_resource.meta.client.get_object(
            Bucket='pandas-test',
            Key='tips.csv')
        result = read_csv(BytesIO(s3_object["Body"].read()), encoding='utf8')
        assert isinstance(result, DataFrame)
        assert not result.empty
        expected = read_csv(tips_file)
        tm.assert_frame_equal(result, expected)
| apache-2.0 |
marcoviero/simstack | run_simstack_cmd_line.py | 1 | 10390 | #!/usr/bin/env python
# Standard modules
import pdb
import os
import os.path
import sys
import shutil
import time
import logging
import importlib
import numpy as np
import pandas as pd
import cPickle as pickle
from astropy.wcs import WCS
# Modules within this package
import parameters
from skymaps import Skymaps
from bincatalogs import Field_catalogs
from utils import circle_mask
from utils import dist_idl
from utils import gauss_kern
from utils import pad_and_smooth_psf
from utils import shift_twod
from utils import smooth_psf
from lmfit import Parameters, minimize, fit_report
from simstack import stack_libraries_in_layers
from simstack import stack_libraries_in_layers_w_background
from simstack import is_true
from bootstrap import Bootstrap
def main():
    """Command-line driver for the simstack pipeline (Python 2).

    Usage: run_simstack_cmd_line.py <parameter_file>

    Reads the maps and catalog named in the parameter file, bins the
    catalog by redshift/mass (and optionally bootstraps it), stacks each
    slice simultaneously, and pickles the resulting flux densities.
    """
    # Set up logging
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(message)s',
        datefmt='%Y-%d-%m %I:%M:%S %p')
    # Get parameters from the provided parameter file
    param_file_path = sys.argv[1]
    params = parameters.get_params(param_file_path)
    # Catalog column names for redshift, mass, RA and Dec.
    zkey = params['zkey']
    mkey = params['mkey']
    rkey = params['ra_key']
    dkey = params['dec_key']
    t0 = time.time()
    if params['bins']['bin_in_lookback_time'] == True:
        z_pref = 'lookt'
    else:
        z_pref = 'z'
    # Stack in Slices or ALL AT ONCE Choice made here
    if params['bins']['stack_all_z_at_once'] == True: n_slices = 1
    else: n_slices = len(params['bins']['z_nodes']) - 1
    #Save Parameter file in folder
    save_paramfile(params)
    for i in range(n_slices):
        if params['bins']['stack_all_z_at_once'] == True:
            # j=None tells the binning helpers to use all z_nodes at once.
            j = None
            stacked_flux_density_key = 'all_'+z_pref
        else:
            j = i
            if params['bins']['bin_in_lookback_time'] == True:
                stacked_flux_density_key = '{:.2f}'.format(params['bins']['t_nodes'][j])+'-'+'{:.2f}'.format(params['bins']['t_nodes'][j+1])
            else:
                # NOTE(review): this branch also indexes t_nodes even though
                # n_slices above is derived from z_nodes -- presumably z_nodes
                # was intended here; confirm against parameters.get_params.
                stacked_flux_density_key = str(params['bins']['t_nodes'][j])+'-'+str(params['bins']['t_nodes'][j+1])
        print stacked_flux_density_key
        # From parameter file read maps, psfs, cats, and divide them into bins
        sky_library = get_maps(params)
        cats = get_catalogs(params)
        if params['bootstrap'] == True:
            pcat = Bootstrap(cats.table)
        # Bootstrap Loop Starts here
        for iboot in np.arange(params['number_of_boots'])+params['boot0']:
            #stacked_flux_densities = {}
            if params['bootstrap'] == True:
                print 'Running ' +str(int(iboot))+' of '+ str(int(params['boot0'])) +'-'+ str(int(params['boot0']+params['number_of_boots']-1)) + ' bootstraps'
                # Perturb the catalog and re-bin it for this bootstrap draw.
                pcat.perturb_catalog(perturb_z = params['perturb_z'])
                bootcat = Field_catalogs(pcat.pseudo_cat,zkey=zkey,mkey=mkey,rkey=rkey,dkey=dkey)
                binned_ra_dec = get_bin_radec(params, bootcat, single_slice = j)
                if params['save_bin_ids'] == False:
                    bin_ids = None
                else:
                    bin_ids = get_bin_ids(params, bootcat, single_slice = j)
                out_file_path = params['io']['output_folder']+'/bootstrapped_fluxes/'+params['io']['shortname']
                out_file_suffix = '_'+stacked_flux_density_key+'_boot_'+str(int(iboot))
            else:
                binned_ra_dec = get_bin_radec(params, cats, single_slice = j)
                if params['save_bin_ids'] == False:
                    bin_ids = None
                else:
                    bin_ids = get_bin_ids(params, cats, single_slice = j)
                out_file_path = params['io']['output_folder'] + '/simstack_fluxes/' + params['io']['shortname']
                out_file_suffix = '_'+stacked_flux_density_key
            # Do simultaneous stacking
            if params['float_background'] == True:
                stacked_flux_densities = stack_libraries_in_layers_w_background(sky_library,binned_ra_dec)
            else:
                stacked_flux_densities = stack_libraries_in_layers(sky_library,binned_ra_dec)
            save_stacked_fluxes(stacked_flux_densities,params, out_file_path,out_file_suffix, IDs=bin_ids)
            #pdb.set_trace()
    # Summarize timing
    t1 = time.time()
    tpass = t1-t0
    logging.info("Done!")
    logging.info("")
    logging.info("Total time : {:.4f} minutes\n".format(tpass/60.))
def get_maps(params):
    '''
    Build a dictionary of Skymaps objects, one entry per map named in
    params['library_keys'], with wavelength and PSF FWHM attached.
    '''
    maps = {}
    for key in params['library_keys']:
        fwhm = params['psfs'][key + '_fwhm']
        sky = Skymaps(params['map_files'][key],
                      params['noise_files'][key],
                      fwhm,
                      color_correction=params['color_correction'][key],
                      beam_area=params['psfs'][key + '_beam_area'])
        sky.add_wavelength(params['wavelength'][key])
        sky.add_fwhm(fwhm)
        maps[key] = sky
    return maps
def get_catalogs(params):
    """Read the source catalog from disk and wrap it in a Field_catalogs."""
    path = params['catalogs']['catalog_path'] + params['catalogs']['catalog_file']
    tbl = pd.read_table(path, sep=',')
    tbl['ID'] = range(len(tbl))
    # Ensure an 'sfg' column exists; fall back to 'CLASS' when present.
    if 'sfg' not in tbl.keys() and 'CLASS' in tbl.keys():
        tbl['sfg'] = tbl['CLASS']
    return Field_catalogs(tbl,
                          zkey=params['zkey'],
                          mkey=params['mkey'],
                          rkey=params['ra_key'],
                          dkey=params['dec_key'])
def get_bin_ids(params, cats, single_slice = None):
    """Bin the catalog and return the per-bin source IDs.

    Dispatches on params['galaxy_splitting_scheme'] ('sf-qt', '5pops',
    '4pops', 'uvj' or 'general').  When single_slice is given, only the
    corresponding pair of z_nodes is used; otherwise all nodes are.
    """
    if single_slice == None:
        z_nodes = params['bins']['z_nodes']
    else:
        # Two consecutive nodes bound the requested redshift slice.
        z_nodes = params['bins']['z_nodes'][single_slice:single_slice+2]
    m_nodes = params['bins']['m_nodes']
    if params['galaxy_splitting_scheme'] == 'sf-qt':
        cats.separate_sf_qt()
        cats.get_sf_qt_mass_redshift_bins(z_nodes,m_nodes)
        bin_ids = cats.id_z_ms
    elif params['galaxy_splitting_scheme'] == '5pops':
        Fcut = params['cuts']['fcut']
        MIPS24_cut = params['cuts']['mips24_cut']
        cats.separate_5pops(Fcut=Fcut,MIPS24_cut=MIPS24_cut)
        cats.get_5pops_mass_redshift_bins(z_nodes,m_nodes)
        bin_ids = cats.id_z_ms_5pop
    elif params['galaxy_splitting_scheme'] == '4pops':
        Fcut = params['cuts']['fcut']
        age_cut = params['cuts']['age_cut']
        cats.separate_4pops(Fcut=Fcut,age_cut=age_cut)
        cats.get_4pops_mass_redshift_bins(z_nodes,m_nodes)
        bin_ids = cats.id_z_ms_4pop
    elif params['galaxy_splitting_scheme'] == 'uvj':
        c_nodes = params['populations']['c_nodes']
        c_names = params['populations']['pop_names']
        # Distance in rest-frame UVJ color space from the bluest corner.
        cats.table['UVJ']=np.sqrt((cats.table['rf_U_V'] - np.min(cats.table['rf_U_V']))**2 + (cats.table['rf_V_J']-np.min(cats.table['rf_V_J'])) ** 2)
        cats.separate_uvj_pops(c_nodes)
        cats.get_mass_redshift_uvj_bins(z_nodes,m_nodes,c_names)
        bin_ids = cats.id_z_ms_pop
    elif params['galaxy_splitting_scheme'] == 'general':
        cuts_dict = params['populations']
        cats.separate_pops_by_name(cuts_dict)
        cats.get_subpop_ids(z_nodes, m_nodes, cuts_dict)
        bin_ids = cats.subpop_ids
    # NOTE(review): an unrecognized scheme falls through and raises
    # NameError on bin_ids -- presumably schemes are validated upstream.
    return bin_ids
def get_bin_radec(params, cats, single_slice = None):
    """Bin the catalog and return the per-bin (RA, Dec) positions.

    Mirrors get_bin_ids (same dispatch on galaxy_splitting_scheme) but
    returns sky positions via cats.subset_positions instead of IDs.
    """
    if single_slice == None:
        z_nodes = params['bins']['z_nodes']
    else:
        # Two consecutive nodes bound the requested redshift slice.
        z_nodes = params['bins']['z_nodes'][single_slice:single_slice+2]
    m_nodes = params['bins']['m_nodes']
    if params['galaxy_splitting_scheme'] == 'sf-qt':
        cats.separate_sf_qt()
        cats.get_sf_qt_mass_redshift_bins(z_nodes,m_nodes)
        binned_ra_dec = cats.subset_positions(cats.id_z_ms)
    elif params['galaxy_splitting_scheme'] == '5pops':
        Fcut = params['cuts']['fcut']
        MIPS24_cut = params['cuts']['mips24_cut']
        cats.separate_5pops(Fcut=Fcut,MIPS24_cut=MIPS24_cut)
        cats.get_5pops_mass_redshift_bins(z_nodes,m_nodes)
        binned_ra_dec = cats.subset_positions(cats.id_z_ms_5pop)
    elif params['galaxy_splitting_scheme'] == '4pops':
        Fcut = params['cuts']['fcut']
        age_cut = params['cuts']['age_cut']
        cats.separate_4pops(Fcut=Fcut,age_cut=age_cut)
        cats.get_4pops_mass_redshift_bins(z_nodes,m_nodes)
        binned_ra_dec = cats.subset_positions(cats.id_z_ms_4pop)
    elif params['galaxy_splitting_scheme'] == 'uvj':
        c_nodes = params['populations']['c_nodes']
        c_names = params['populations']['pop_names']
        # Distance in rest-frame UVJ color space from the bluest corner.
        cats.table['UVJ']=np.sqrt((cats.table['rf_U_V'] - np.min(cats.table['rf_U_V']))**2 + (cats.table['rf_V_J']-np.min(cats.table['rf_V_J'])) ** 2)
        cats.separate_uvj_pops(c_nodes)
        cats.get_mass_redshift_uvj_bins(z_nodes,m_nodes,c_names)
        binned_ra_dec = cats.subset_positions(cats.id_z_ms_pop)
    elif params['galaxy_splitting_scheme'] == 'general':
        cuts_dict = params['populations']
        cats.separate_pops_by_name(cuts_dict)
        cats.get_subpop_ids(z_nodes, m_nodes, cuts_dict)
        binned_ra_dec = cats.subset_positions(cats.subpop_ids)
    print z_nodes
    return binned_ra_dec
def save_stacked_fluxes(stacked_fluxes, params, out_file_path, out_file_suffix, IDs=None):
fpath = "%s/%s_%s%s.p" % (out_file_path, params['io']['flux_densities_filename'],params['io']['shortname'],out_file_suffix)
print 'pickling to '+fpath
if not os.path.exists(out_file_path): os.makedirs(out_file_path)
if IDs == None:
pickle.dump( stacked_fluxes, open( fpath, "wb" )) #, protocol=2 )
else:
pickle.dump( [IDs, stacked_fluxes], open( fpath, "wb" )) #, protocol=2 )
def save_paramfile(params):
fp_in = params['io']['param_file_path']
if params['bootstrap'] == True:
outdir = params['io']['output_folder']+'/bootstrapped_fluxes/'+params['io']['shortname']
else:
outdir = params['io']['output_folder']+'/simstack_fluxes/'+params['io']['shortname']
print 'writing parameter file to '+outdir
if not os.path.exists(outdir): os.makedirs(outdir)
fname = os.path.basename(fp_in)
fp_out = os.path.join(outdir, fname)
logging.info("Copying parameter file...")
logging.info(" FROM : {}".format(fp_in))
logging.info(" TO : {}".format(fp_out))
logging.info("")
shutil.copyfile(fp_in, fp_out)
# Run the pipeline when executed as a script; importing only logs a notice.
if __name__=="__main__":
    main()
else:
    # NOTE(review): the message refers to `mapit` but this file is
    # run_simstack_cmd_line.py -- presumably a copy-paste leftover.
    logging.info("Note: `mapit` module not being run as main executable.")
| mit |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/tests/test_resample.py | 3 | 126730 | # pylint: disable=E1101
from warnings import catch_warnings
from datetime import datetime, timedelta
from functools import partial
import pytest
import numpy as np
import pandas as pd
import pandas.tseries.offsets as offsets
import pandas.util.testing as tm
from pandas import (Series, DataFrame, Panel, Index, isnull,
notnull, Timestamp)
from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame
from pandas.compat import range, lrange, zip, product, OrderedDict
from pandas.core.base import SpecificationError
from pandas.errors import UnsupportedFunctionCall
from pandas.core.groupby import DataError
from pandas.tseries.frequencies import MONTHS, DAYS
from pandas.tseries.frequencies import to_offset
from pandas.core.indexes.datetimes import date_range
from pandas.tseries.offsets import Minute, BDay
from pandas.core.indexes.period import period_range, PeriodIndex, Period
from pandas.core.resample import (DatetimeIndex, TimeGrouper,
DatetimeIndexResampler)
from pandas.core.indexes.timedeltas import timedelta_range, TimedeltaIndex
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, assert_index_equal)
from pandas._libs.period import IncompatibleFrequency
# Shared business-day offset used by tests below.
bday = BDay()
# The various methods we support
# Aggregations that reduce within each bin (downsampling)...
downsample_methods = ['min', 'max', 'first', 'last', 'sum', 'mean', 'sem',
                      'median', 'prod', 'var', 'ohlc']
# ...methods that also make sense when upsampling...
upsample_methods = ['count', 'size']
# ...and methods defined only on Series resamplers.
series_methods = ['nunique']
resample_methods = downsample_methods + upsample_methods + series_methods
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
def _simple_pts(start, end, freq='D'):
rng = period_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestResampleAPI(object):
    """Tests for the .resample(...) fluent API and its 0.18 deprecations."""
    def setup_method(self, method):
        # Nine days of minutes -> 12961 rows; frame shares the series twice.
        dti = DatetimeIndex(start=datetime(2005, 1, 1),
                            end=datetime(2005, 1, 10), freq='Min')
        self.series = Series(np.random.rand(len(dti)), dti)
        self.frame = DataFrame(
            {'A': self.series, 'B': self.series, 'C': np.arange(len(dti))})
    def test_str(self):
        """repr of a Resampler exposes its grouping parameters."""
        r = self.series.resample('H')
        assert ('DatetimeIndexResampler [freq=<Hour>, axis=0, closed=left, '
                'label=left, convention=start, base=0]' in str(r))
    def test_api(self):
        """Hourly mean over 9 days of minutes yields 217 rows for both types."""
        r = self.series.resample('H')
        result = r.mean()
        assert isinstance(result, Series)
        assert len(result) == 217
        r = self.series.to_frame().resample('H')
        result = r.mean()
        assert isinstance(result, DataFrame)
        assert len(result) == 217
    def test_api_changes_v018(self):
        """The 0.18 deprecations: how=/fill_method= kwargs warn, setters raise."""
        # change from .resample(....., how=...)
        # to .resample(......).how()
        r = self.series.resample('H')
        assert isinstance(r, DatetimeIndexResampler)
        for how in ['sum', 'mean', 'prod', 'min', 'max', 'var', 'std']:
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                result = self.series.resample('H', how=how)
                expected = getattr(self.series.resample('H'), how)()
                tm.assert_series_equal(result, expected)
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = self.series.resample('H', how='ohlc')
            expected = self.series.resample('H').ohlc()
            tm.assert_frame_equal(result, expected)
        # compat for pandas-like methods
        for how in ['sort_values', 'isnull']:
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                getattr(r, how)()
        # invalids as these can be setting operations
        r = self.series.resample('H')
        pytest.raises(ValueError, lambda: r.iloc[0])
        pytest.raises(ValueError, lambda: r.iat[0])
        pytest.raises(ValueError, lambda: r.loc[0])
        pytest.raises(ValueError, lambda: r.loc[
            Timestamp('2013-01-01 00:00:00', offset='H')])
        pytest.raises(ValueError, lambda: r.at[
            Timestamp('2013-01-01 00:00:00', offset='H')])
        def f():
            r[0] = 5
        pytest.raises(ValueError, f)
        # str/repr
        r = self.series.resample('H')
        with tm.assert_produces_warning(None):
            str(r)
        with tm.assert_produces_warning(None):
            repr(r)
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            tm.assert_numpy_array_equal(np.array(r), np.array(r.mean()))
        # masquerade as Series/DataFrame as needed for API compat
        assert isinstance(self.series.resample('H'), ABCSeries)
        assert not isinstance(self.frame.resample('H'), ABCSeries)
        assert not isinstance(self.series.resample('H'), ABCDataFrame)
        assert isinstance(self.frame.resample('H'), ABCDataFrame)
        # bin numeric ops
        for op in ['__add__', '__mul__', '__truediv__', '__div__', '__sub__']:
            if getattr(self.series, op, None) is None:
                continue
            r = self.series.resample('H')
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                assert isinstance(getattr(r, op)(2), pd.Series)
        # unary numeric ops
        for op in ['__pos__', '__neg__', '__abs__', '__inv__']:
            if getattr(self.series, op, None) is None:
                continue
            r = self.series.resample('H')
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                assert isinstance(getattr(r, op)(), pd.Series)
        # comparison ops
        for op in ['__lt__', '__le__', '__gt__', '__ge__', '__eq__', '__ne__']:
            r = self.series.resample('H')
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                assert isinstance(getattr(r, op)(2), pd.Series)
        # IPython introspection shouldn't trigger warning GH 13618
        for op in ['_repr_json', '_repr_latex',
                   '_ipython_canary_method_should_not_exist_']:
            r = self.series.resample('H')
            with tm.assert_produces_warning(None):
                getattr(r, op, None)
        # getitem compat
        df = self.series.to_frame('foo')
        # same as prior versions for DataFrame
        pytest.raises(KeyError, lambda: df.resample('H')[0])
        # compat for Series
        # but we cannot be sure that we need a warning here
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = self.series.resample('H')[0]
            expected = self.series.resample('H').mean()[0]
            assert result == expected
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = self.series.resample('H')['2005-01-09 23:00:00']
            expected = self.series.resample('H').mean()['2005-01-09 23:00:00']
            assert result == expected
    def test_groupby_resample_api(self):
        """groupby(...).resample(...) applies per-group without warnings."""
        # GH 12448
        # .groupby(...).resample(...) hitting warnings
        # when appropriate
        df = DataFrame({'date': pd.date_range(start='2016-01-01',
                                              periods=4,
                                              freq='W'),
                        'group': [1, 1, 2, 2],
                        'val': [5, 6, 7, 8]}).set_index('date')
        # replication step
        i = pd.date_range('2016-01-03', periods=8).tolist() + \
            pd.date_range('2016-01-17', periods=8).tolist()
        index = pd.MultiIndex.from_arrays([[1] * 8 + [2] * 8, i],
                                          names=['group', 'date'])
        expected = DataFrame({'val': [5] * 7 + [6] + [7] * 7 + [8]},
                             index=index)
        result = df.groupby('group').apply(
            lambda x: x.resample('1D').ffill())[['val']]
        assert_frame_equal(result, expected)
    def test_groupby_resample_on_api(self):
        """resample(on=...) inside a groupby matches set_index + resample."""
        # GH 15021
        # .groupby(...).resample(on=...) results in an unexpected
        # keyword warning.
        df = pd.DataFrame({'key': ['A', 'B'] * 5,
                           'dates': pd.date_range('2016-01-01', periods=10),
                           'values': np.random.randn(10)})
        expected = df.set_index('dates').groupby('key').resample('D').mean()
        result = df.groupby('key').resample('D', on='dates').mean()
        assert_frame_equal(result, expected)
    def test_plot_api(self):
        """Deprecated .resample(..., how=).plot() still returns a valid plot."""
        tm._skip_if_no_mpl()
        # .resample(....).plot(...)
        # hitting warnings
        # GH 12448
        s = Series(np.random.randn(60),
                   index=date_range('2016-01-01', periods=60, freq='1min'))
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = s.resample('15min').plot()
        tm.assert_is_valid_plot_return_object(result)
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = s.resample('15min', how='sum').plot()
        tm.assert_is_valid_plot_return_object(result)
    def test_getitem(self):
        """Column selection on a frame Resampler narrows _selected_obj."""
        r = self.frame.resample('H')
        tm.assert_index_equal(r._selected_obj.columns, self.frame.columns)
        r = self.frame.resample('H')['B']
        assert r._selected_obj.name == self.frame.columns[1]
        # technically this is allowed
        r = self.frame.resample('H')['A', 'B']
        tm.assert_index_equal(r._selected_obj.columns,
                              self.frame.columns[[0, 1]])
        # NOTE(review): this repeats the selection above verbatim --
        # presumably one of the two was meant to use a list selector.
        r = self.frame.resample('H')['A', 'B']
        tm.assert_index_equal(r._selected_obj.columns,
                              self.frame.columns[[0, 1]])
    def test_select_bad_cols(self):
        """Selecting missing columns raises KeyError naming only the bad ones."""
        g = self.frame.resample('H')
        pytest.raises(KeyError, g.__getitem__, ['D'])
        pytest.raises(KeyError, g.__getitem__, ['A', 'D'])
        with tm.assert_raises_regex(KeyError, '^[^A]+$'):
            # A should not be referenced as a bad column...
            # will have to rethink regex if you change message!
            g[['A', 'D']]
    def test_attribute_access(self):
        """Attribute access selects columns; unknown attributes raise."""
        r = self.frame.resample('H')
        tm.assert_series_equal(r.A.sum(), r['A'].sum())
        # getting
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            pytest.raises(AttributeError, lambda: r.F)
        # setting
        def f():
            r.F = 'bah'
        pytest.raises(ValueError, f)
    def test_api_compat_before_use(self):
        """Grouper attributes are usable both before and after aggregation."""
        # make sure that we are setting the binner
        # on these attributes
        for attr in ['groups', 'ngroups', 'indices']:
            rng = pd.date_range('1/1/2012', periods=100, freq='S')
            ts = pd.Series(np.arange(len(rng)), index=rng)
            rs = ts.resample('30s')
            # before use
            getattr(rs, attr)
            # after grouper is initialized is ok
            rs.mean()
            getattr(rs, attr)
    def tests_skip_nuisance(self):
        """Non-numeric ('nuisance') columns are dropped from sum()."""
        df = self.frame
        df['D'] = 'foo'
        r = df.resample('H')
        result = r[['A', 'B']].sum()
        expected = pd.concat([r.A.sum(), r.B.sum()], axis=1)
        assert_frame_equal(result, expected)
        expected = r[['A', 'B', 'C']].sum()
        result = r.sum()
        assert_frame_equal(result, expected)
    def test_downsample_but_actually_upsampling(self):
        """asfreq on a coarser freq is a reindex, not an aggregation."""
        # this is reindex / asfreq
        rng = pd.date_range('1/1/2012', periods=100, freq='S')
        ts = pd.Series(np.arange(len(rng), dtype='int64'), index=rng)
        result = ts.resample('20s').asfreq()
        expected = Series([0, 20, 40, 60, 80],
                          index=pd.date_range('2012-01-01 00:00:00',
                                              freq='20s',
                                              periods=5))
        assert_series_equal(result, expected)
    def test_combined_up_downsampling_of_irregular(self):
        """Deprecated how=/fill_method= combo equals .mean().ffill()."""
        # since we are really doing an operation like this
        # ts2.resample('2s').mean().ffill()
        # preserve these semantics
        rng = pd.date_range('1/1/2012', periods=100, freq='S')
        ts = pd.Series(np.arange(len(rng)), index=rng)
        ts2 = ts.iloc[[0, 1, 2, 3, 5, 7, 11, 15, 16, 25, 30]]
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = ts2.resample('2s', how='mean', fill_method='ffill')
        expected = ts2.resample('2s').mean().ffill()
        assert_series_equal(result, expected)
    def test_transform(self):
        """Resampler.transform matches groupby(Grouper).transform."""
        r = self.series.resample('20min')
        expected = self.series.groupby(
            pd.Grouper(freq='20min')).transform('mean')
        result = r.transform('mean')
        assert_series_equal(result, expected)
    def test_fillna(self):
        """fillna(method=...) on an upsample equals ffill/bfill; values raise."""
        # need to upsample here
        rng = pd.date_range('1/1/2012', periods=10, freq='2S')
        ts = pd.Series(np.arange(len(rng), dtype='int64'), index=rng)
        r = ts.resample('s')
        expected = r.ffill()
        result = r.fillna(method='ffill')
        assert_series_equal(result, expected)
        expected = r.bfill()
        result = r.fillna(method='bfill')
        assert_series_equal(result, expected)
        with pytest.raises(ValueError):
            r.fillna(0)
    def test_apply_without_aggregation(self):
        """apply(identity) round-trips the original series."""
        # both resample and groupby should work w/o aggregation
        r = self.series.resample('20min')
        g = self.series.groupby(pd.Grouper(freq='20min'))
        for t in [g, r]:
            result = t.apply(lambda x: x)
            assert_series_equal(result, self.series)
    def test_agg_consistency(self):
        """agg with renaming dict is consistent with and without selection."""
        # make sure that we are consistent across
        # similar aggregations with and w/o selection list
        df = DataFrame(np.random.randn(1000, 3),
                       index=pd.date_range('1/1/2012', freq='S', periods=1000),
                       columns=['A', 'B', 'C'])
        r = df.resample('3T')
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            expected = r[['A', 'B', 'C']].agg({'r1': 'mean', 'r2': 'sum'})
            result = r.agg({'r1': 'mean', 'r2': 'sum'})
        assert_frame_equal(result, expected)
    # TODO: once GH 14008 is fixed, move these tests into
    # `Base` test class
    def test_agg(self):
        """aggregate with lists/dicts behaves the same across all four APIs."""
        # test with all three Resampler apis and TimeGrouper
        np.random.seed(1234)
        index = date_range(datetime(2005, 1, 1),
                           datetime(2005, 1, 10), freq='D')
        index.name = 'date'
        df = pd.DataFrame(np.random.rand(10, 2),
                          columns=list('AB'),
                          index=index)
        df_col = df.reset_index()
        df_mult = df_col.copy()
        df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index],
                                                  names=['index', 'date'])
        r = df.resample('2D')
        cases = [
            r,
            df_col.resample('2D', on='date'),
            df_mult.resample('2D', level='date'),
            df.groupby(pd.Grouper(freq='2D'))
        ]
        a_mean = r['A'].mean()
        a_std = r['A'].std()
        a_sum = r['A'].sum()
        b_mean = r['B'].mean()
        b_std = r['B'].std()
        b_sum = r['B'].sum()
        expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1)
        expected.columns = pd.MultiIndex.from_product([['A', 'B'],
                                                       ['mean', 'std']])
        for t in cases:
            result = t.aggregate([np.mean, np.std])
            assert_frame_equal(result, expected)
        expected = pd.concat([a_mean, b_std], axis=1)
        for t in cases:
            result = t.aggregate({'A': np.mean,
                                  'B': np.std})
            assert_frame_equal(result, expected, check_like=True)
        expected = pd.concat([a_mean, a_std], axis=1)
        expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
                                                      ('A', 'std')])
        for t in cases:
            result = t.aggregate({'A': ['mean', 'std']})
            assert_frame_equal(result, expected)
        expected = pd.concat([a_mean, a_sum], axis=1)
        expected.columns = ['mean', 'sum']
        for t in cases:
            result = t['A'].aggregate(['mean', 'sum'])
            assert_frame_equal(result, expected)
        expected = pd.concat([a_mean, a_sum], axis=1)
        expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
                                                      ('A', 'sum')])
        for t in cases:
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}})
            assert_frame_equal(result, expected, check_like=True)
        expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1)
        expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
                                                      ('A', 'sum'),
                                                      ('B', 'mean2'),
                                                      ('B', 'sum2')])
        for t in cases:
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'},
                                      'B': {'mean2': 'mean', 'sum2': 'sum'}})
            assert_frame_equal(result, expected, check_like=True)
        expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1)
        expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
                                                      ('A', 'std'),
                                                      ('B', 'mean'),
                                                      ('B', 'std')])
        for t in cases:
            result = t.aggregate({'A': ['mean', 'std'],
                                  'B': ['mean', 'std']})
            assert_frame_equal(result, expected, check_like=True)
        expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1)
        # NOTE(review): this final `expected` is built but never compared to
        # anything -- presumably the matching aggregation/assert was dropped.
        expected.columns = pd.MultiIndex.from_tuples([('r1', 'A', 'mean'),
                                                      ('r1', 'A', 'sum'),
                                                      ('r2', 'B', 'mean'),
                                                      ('r2', 'B', 'sum')])
    def test_agg_misc(self):
        """Miscellaneous agg forms: lambdas, renamers, mixed hows, errors."""
        # test with all three Resampler apis and TimeGrouper
        np.random.seed(1234)
        index = date_range(datetime(2005, 1, 1),
                           datetime(2005, 1, 10), freq='D')
        index.name = 'date'
        df = pd.DataFrame(np.random.rand(10, 2),
                          columns=list('AB'),
                          index=index)
        df_col = df.reset_index()
        df_mult = df_col.copy()
        df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index],
                                                  names=['index', 'date'])
        r = df.resample('2D')
        cases = [
            r,
            df_col.resample('2D', on='date'),
            df_mult.resample('2D', level='date'),
            df.groupby(pd.Grouper(freq='2D'))
        ]
        # passed lambda
        for t in cases:
            result = t.agg({'A': np.sum,
                            'B': lambda x: np.std(x, ddof=1)})
            rcustom = t['B'].apply(lambda x: np.std(x, ddof=1))
            expected = pd.concat([r['A'].sum(), rcustom], axis=1)
            assert_frame_equal(result, expected, check_like=True)
        # agg with renamers
        expected = pd.concat([t['A'].sum(),
                              t['B'].sum(),
                              t['A'].mean(),
                              t['B'].mean()],
                             axis=1)
        expected.columns = pd.MultiIndex.from_tuples([('result1', 'A'),
                                                      ('result1', 'B'),
                                                      ('result2', 'A'),
                                                      ('result2', 'B')])
        for t in cases:
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                result = t[['A', 'B']].agg(OrderedDict([('result1', np.sum),
                                                        ('result2', np.mean)]))
            assert_frame_equal(result, expected, check_like=True)
        # agg with different hows
        expected = pd.concat([t['A'].sum(),
                              t['A'].std(),
                              t['B'].mean(),
                              t['B'].std()],
                             axis=1)
        expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'),
                                                      ('A', 'std'),
                                                      ('B', 'mean'),
                                                      ('B', 'std')])
        for t in cases:
            result = t.agg(OrderedDict([('A', ['sum', 'std']),
                                        ('B', ['mean', 'std'])]))
            assert_frame_equal(result, expected, check_like=True)
        # equivalent of using a selection list / or not
        for t in cases:
            result = t[['A', 'B']].agg({'A': ['sum', 'std'],
                                        'B': ['mean', 'std']})
            assert_frame_equal(result, expected, check_like=True)
        # series like aggs
        for t in cases:
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                result = t['A'].agg({'A': ['sum', 'std']})
            expected = pd.concat([t['A'].sum(),
                                  t['A'].std()],
                                 axis=1)
            expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'),
                                                          ('A', 'std')])
            assert_frame_equal(result, expected, check_like=True)
            expected = pd.concat([t['A'].agg(['sum', 'std']),
                                  t['A'].agg(['mean', 'std'])],
                                 axis=1)
            expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'),
                                                          ('A', 'std'),
                                                          ('B', 'mean'),
                                                          ('B', 'std')])
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                result = t['A'].agg({'A': ['sum', 'std'],
                                     'B': ['mean', 'std']})
            assert_frame_equal(result, expected, check_like=True)
        # errors
        # invalid names in the agg specification
        for t in cases:
            def f():
                with tm.assert_produces_warning(FutureWarning,
                                                check_stacklevel=False):
                    t[['A']].agg({'A': ['sum', 'std'],
                                  'B': ['mean', 'std']})
            pytest.raises(SpecificationError, f)
    def test_agg_nested_dicts(self):
        """Nested renaming dicts: plain nesting raises; keyed nesting warns."""
        np.random.seed(1234)
        index = date_range(datetime(2005, 1, 1),
                           datetime(2005, 1, 10), freq='D')
        index.name = 'date'
        df = pd.DataFrame(np.random.rand(10, 2),
                          columns=list('AB'),
                          index=index)
        df_col = df.reset_index()
        df_mult = df_col.copy()
        df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index],
                                                  names=['index', 'date'])
        r = df.resample('2D')
        cases = [
            r,
            df_col.resample('2D', on='date'),
            df_mult.resample('2D', level='date'),
            df.groupby(pd.Grouper(freq='2D'))
        ]
        for t in cases:
            def f():
                t.aggregate({'r1': {'A': ['mean', 'sum']},
                             'r2': {'B': ['mean', 'sum']}})
            pytest.raises(ValueError, f)
        for t in cases:
            expected = pd.concat([t['A'].mean(), t['A'].std(), t['B'].mean(),
                                  t['B'].std()], axis=1)
            expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), (
                'ra', 'std'), ('rb', 'mean'), ('rb', 'std')])
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                result = t[['A', 'B']].agg({'A': {'ra': ['mean', 'std']},
                                            'B': {'rb': ['mean', 'std']}})
            assert_frame_equal(result, expected, check_like=True)
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                result = t.agg({'A': {'ra': ['mean', 'std']},
                                'B': {'rb': ['mean', 'std']}})
            assert_frame_equal(result, expected, check_like=True)
    def test_selection_api_validation(self):
        """on=/level= selection kwargs are validated before resampling."""
        # GH 13500
        index = date_range(datetime(2005, 1, 1),
                           datetime(2005, 1, 10), freq='D')
        df = pd.DataFrame({'date': index,
                           'a': np.arange(len(index), dtype=np.int64)},
                          index=pd.MultiIndex.from_arrays([
                              np.arange(len(index), dtype=np.int64),
                              index], names=['v', 'd']))
        df_exp = pd.DataFrame({'a': np.arange(len(index), dtype=np.int64)},
                              index=index)
        # non DatetimeIndex
        with pytest.raises(TypeError):
            df.resample('2D', level='v')
        with pytest.raises(ValueError):
            df.resample('2D', on='date', level='d')
        with pytest.raises(TypeError):
            df.resample('2D', on=['a', 'date'])
        with pytest.raises(KeyError):
            df.resample('2D', level=['a', 'date'])
        # upsampling not allowed
        with pytest.raises(ValueError):
            df.resample('2D', level='d').asfreq()
        with pytest.raises(ValueError):
            df.resample('2D', on='date').asfreq()
        exp = df_exp.resample('2D').sum()
        exp.index.name = 'date'
        assert_frame_equal(exp, df.resample('2D', on='date').sum())
        exp.index.name = 'd'
        assert_frame_equal(exp, df.resample('2D', level='d').sum())
class Base(object):
"""
base class for resampling testing, calling
.create_series() generates a series of each index type
"""
    def create_index(self, *args, **kwargs):
        """ return the _index_factory created using the args, kwargs """
        # _index_factory is supplied by subclasses and returns an index
        # constructor (e.g. date_range / period_range style callables).
        factory = self._index_factory()
        return factory(*args, **kwargs)
    def test_asfreq_downsample(self):
        """asfreq to a coarser frequency keeps every other row, for S and DF."""
        s = self.create_series()
        result = s.resample('2D').asfreq()
        expected = s.reindex(s.index.take(np.arange(0, len(s.index), 2)))
        expected.index.freq = to_offset('2D')
        assert_series_equal(result, expected)
        frame = s.to_frame('value')
        result = frame.resample('2D').asfreq()
        expected = frame.reindex(
            frame.index.take(np.arange(0, len(frame.index), 2)))
        expected.index.freq = to_offset('2D')
        assert_frame_equal(result, expected)
    def test_asfreq_upsample(self):
        """asfreq to a finer frequency is equivalent to a plain reindex."""
        s = self.create_series()
        result = s.resample('1H').asfreq()
        new_index = self.create_index(s.index[0], s.index[-1], freq='1H')
        expected = s.reindex(new_index)
        assert_series_equal(result, expected)
        frame = s.to_frame('value')
        result = frame.resample('1H').asfreq()
        new_index = self.create_index(frame.index[0],
                                      frame.index[-1], freq='1H')
        expected = frame.reindex(new_index)
        assert_frame_equal(result, expected)
def test_asfreq_fill_value(self):
# test for fill value during resampling, issue 3715
s = self.create_series()
result = s.resample('1H').asfreq()
new_index = self.create_index(s.index[0], s.index[-1], freq='1H')
expected = s.reindex(new_index)
assert_series_equal(result, expected)
frame = s.to_frame('value')
frame.iloc[1] = None
result = frame.resample('1H').asfreq(fill_value=4.0)
new_index = self.create_index(frame.index[0],
frame.index[-1], freq='1H')
expected = frame.reindex(new_index, fill_value=4.0)
assert_frame_equal(result, expected)
def test_resample_interpolate(self):
# # 12925
df = self.create_series().to_frame('value')
assert_frame_equal(
df.resample('1T').asfreq().interpolate(),
df.resample('1T').interpolate())
def test_raises_on_non_datetimelike_index(self):
# this is a non datetimelike index
xp = DataFrame()
pytest.raises(TypeError, lambda: xp.resample('A').mean())
def test_resample_empty_series(self):
# GH12771 & GH12868
s = self.create_series()[:0]
for freq in ['M', 'D', 'H']:
# need to test for ohlc from GH13083
methods = [method for method in resample_methods
if method != 'ohlc']
for method in methods:
result = getattr(s.resample(freq), method)()
expected = s.copy()
expected.index = s.index._shallow_copy(freq=freq)
assert_index_equal(result.index, expected.index)
assert result.index.freq == expected.index.freq
assert_series_equal(result, expected, check_dtype=False)
def test_resample_empty_dataframe(self):
# GH13212
index = self.create_series().index[:0]
f = DataFrame(index=index)
for freq in ['M', 'D', 'H']:
# count retains dimensions too
methods = downsample_methods + ['count']
for method in methods:
result = getattr(f.resample(freq), method)()
expected = f.copy()
expected.index = f.index._shallow_copy(freq=freq)
assert_index_equal(result.index, expected.index)
assert result.index.freq == expected.index.freq
assert_frame_equal(result, expected, check_dtype=False)
# test size for GH13212 (currently stays as df)
def test_resample_empty_dtypes(self):
# Empty series were sometimes causing a segfault (for the functions
# with Cython bounds-checking disabled) or an IndexError. We just run
# them to ensure they no longer do. (GH #10228)
for index in tm.all_timeseries_index_generator(0):
for dtype in (np.float, np.int, np.object, 'datetime64[ns]'):
for how in downsample_methods + upsample_methods:
empty_series = pd.Series([], index, dtype)
try:
getattr(empty_series.resample('d'), how)()
except DataError:
# Ignore these since some combinations are invalid
# (ex: doing mean with dtype of np.object)
pass
def test_resample_loffset_arg_type(self):
# GH 13218, 15002
df = self.create_series().to_frame('value')
expected_means = [df.values[i:i + 2].mean()
for i in range(0, len(df.values), 2)]
expected_index = self.create_index(df.index[0],
periods=len(df.index) / 2,
freq='2D')
# loffset coreces PeriodIndex to DateTimeIndex
if isinstance(expected_index, PeriodIndex):
expected_index = expected_index.to_timestamp()
expected_index += timedelta(hours=2)
expected = DataFrame({'value': expected_means}, index=expected_index)
for arg in ['mean', {'value': 'mean'}, ['mean']]:
result_agg = df.resample('2D', loffset='2H').agg(arg)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result_how = df.resample('2D', how=arg, loffset='2H')
if isinstance(arg, list):
expected.columns = pd.MultiIndex.from_tuples([('value',
'mean')])
# GH 13022, 7687 - TODO: fix resample w/ TimedeltaIndex
if isinstance(expected.index, TimedeltaIndex):
with pytest.raises(AssertionError):
assert_frame_equal(result_agg, expected)
assert_frame_equal(result_how, expected)
else:
assert_frame_equal(result_agg, expected)
assert_frame_equal(result_how, expected)
class TestDatetimeIndex(Base):
_index_factory = lambda x: date_range
def setup_method(self, method):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
def create_series(self):
i = date_range(datetime(2005, 1, 1),
datetime(2005, 1, 10), freq='D')
return Series(np.arange(len(i)), index=i, name='dti')
    def test_custom_grouper(self):
        """Group a constant minutely series with a 5-minute TimeGrouper:
        cython aggregations run, group counts are sane, and agg(np.sum)
        matches a hand-built expectation."""
        dti = DatetimeIndex(freq='Min', start=datetime(2005, 1, 1),
                            end=datetime(2005, 1, 10))
        s = Series(np.array([1] * len(dti)), index=dti, dtype='int64')
        b = TimeGrouper(Minute(5))
        g = s.groupby(b)
        # check all cython functions work
        funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
        for f in funcs:
            g._cython_agg_general(f)
        b = TimeGrouper(Minute(5), closed='right', label='right')
        g = s.groupby(b)
        # check all cython functions work
        funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
        for f in funcs:
            g._cython_agg_general(f)
        assert g.ngroups == 2593
        assert notnull(g.mean()).all()
        # construct expected val
        arr = [1] + [5] * 2592
        idx = dti[0:-1:5]
        idx = idx.append(dti[-1:])
        expect = Series(arr, index=idx)
        # GH2763 - return in put dtype if we can
        result = g.agg(np.sum)
        assert_series_equal(result, expect)
        df = DataFrame(np.random.rand(len(dti), 10),
                       index=dti, dtype='float64')
        r = df.groupby(b).agg(np.sum)
        assert len(r.columns) == 10
        assert len(r.index) == 2593
    def test_resample_basic(self):
        """5-minute downsampling of a minutely series for both closed sides;
        also checks resample('5Min').last() against an explicit TimeGrouper
        groupby on self.series."""
        rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
                         name='index')
        s = Series(np.random.randn(14), index=rng)
        result = s.resample('5min', closed='right', label='right').mean()
        exp_idx = date_range('1/1/2000', periods=4, freq='5min', name='index')
        expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
                          index=exp_idx)
        assert_series_equal(result, expected)
        assert result.index.name == 'index'
        result = s.resample('5min', closed='left', label='right').mean()
        exp_idx = date_range('1/1/2000 00:05', periods=3, freq='5min',
                             name='index')
        expected = Series([s[:5].mean(), s[5:10].mean(),
                           s[10:].mean()], index=exp_idx)
        assert_series_equal(result, expected)
        s = self.series
        result = s.resample('5Min').last()
        grouper = TimeGrouper(Minute(5), closed='left', label='left')
        expect = s.groupby(grouper).agg(lambda x: x[-1])
        assert_series_equal(result, expect)
    def test_resample_how(self):
        """Every downsample method on a 5-minute right-closed resample must
        match the equivalent plain groupby aggregation (ohlc is compared
        specially as a 4-column frame)."""
        rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
                         name='index')
        s = Series(np.random.randn(14), index=rng)
        grouplist = np.ones_like(s)
        grouplist[0] = 0
        grouplist[1:6] = 1
        grouplist[6:11] = 2
        grouplist[11:] = 3
        args = downsample_methods
        def _ohlc(group):
            # manual open/high/low/close for one bucket
            if isnull(group).all():
                return np.repeat(np.nan, 4)
            return [group[0], group.max(), group.min(), group[-1]]
        inds = date_range('1/1/2000', periods=4, freq='5min', name='index')
        for arg in args:
            if arg == 'ohlc':
                func = _ohlc
            else:
                func = arg
            try:
                result = getattr(s.resample(
                    '5min', closed='right', label='right'), arg)()
                expected = s.groupby(grouplist).agg(func)
                assert result.index.name == 'index'
                if arg == 'ohlc':
                    expected = DataFrame(expected.values.tolist())
                    expected.columns = ['open', 'high', 'low', 'close']
                    expected.index = Index(inds, name='index')
                    assert_frame_equal(result, expected)
                else:
                    expected.index = inds
                    assert_series_equal(result, expected)
            except BaseException as exc:
                # tag the failing method onto the exception for easier triage
                exc.args += ('how=%s' % arg,)
                raise
    def test_numpy_compat(self):
        """see gh-12811: numpy-style positional/axis arguments passed to
        resample reductions must raise UnsupportedFunctionCall."""
        s = Series([1, 2, 3, 4, 5], index=date_range(
            '20130101', periods=5, freq='s'))
        r = s.resample('2s')
        msg = "numpy operations are not valid with resample"
        for func in ('min', 'max', 'sum', 'prod',
                     'mean', 'var', 'std'):
            tm.assert_raises_regex(UnsupportedFunctionCall, msg,
                                   getattr(r, func),
                                   func, 1, 2, 3)
            tm.assert_raises_regex(UnsupportedFunctionCall, msg,
                                   getattr(r, func), axis=1)
    def test_resample_how_callables(self):
        """GH 7929: plain functions, lambdas, partials and callable objects
        must all be accepted by .apply and give identical results."""
        data = np.arange(5, dtype=np.int64)
        ind = pd.DatetimeIndex(start='2014-01-01', periods=len(data), freq='d')
        df = pd.DataFrame({"A": data, "B": data}, index=ind)
        def fn(x, a=1):
            return str(type(x))
        class fn_class:
            def __call__(self, x):
                return str(type(x))
        df_standard = df.resample("M").apply(fn)
        df_lambda = df.resample("M").apply(lambda x: str(type(x)))
        df_partial = df.resample("M").apply(partial(fn))
        df_partial2 = df.resample("M").apply(partial(fn, a=2))
        df_class = df.resample("M").apply(fn_class())
        assert_frame_equal(df_standard, df_lambda)
        assert_frame_equal(df_standard, df_partial)
        assert_frame_equal(df_standard, df_partial2)
        assert_frame_equal(df_standard, df_class)
def test_resample_with_timedeltas(self):
expected = DataFrame({'A': np.arange(1480)})
expected = expected.groupby(expected.index // 30).sum()
expected.index = pd.timedelta_range('0 days', freq='30T', periods=50)
df = DataFrame({'A': np.arange(1480)}, index=pd.to_timedelta(
np.arange(1480), unit='T'))
result = df.resample('30T').sum()
assert_frame_equal(result, expected)
s = df['A']
result = s.resample('30T').sum()
assert_series_equal(result, expected['A'])
def test_resample_single_period_timedelta(self):
s = Series(list(range(5)), index=pd.timedelta_range(
'1 day', freq='s', periods=5))
result = s.resample('2s').sum()
expected = Series([1, 5, 4], index=pd.timedelta_range(
'1 day', freq='2s', periods=3))
assert_series_equal(result, expected)
def test_resample_timedelta_idempotency(self):
# GH 12072
index = pd.timedelta_range('0', periods=9, freq='10L')
series = pd.Series(range(9), index=index)
result = series.resample('10L').mean()
expected = series
assert_series_equal(result, expected)
    def test_resample_rounding(self):
        """GH 8371: sub-second timestamps must bucket correctly under
        odd (6/7/11/13/17 second) resample frequencies."""
        data = """date,time,value
11-08-2014,00:00:01.093,1
11-08-2014,00:00:02.159,1
11-08-2014,00:00:02.667,1
11-08-2014,00:00:03.175,1
11-08-2014,00:00:07.058,1
11-08-2014,00:00:07.362,1
11-08-2014,00:00:08.324,1
11-08-2014,00:00:08.830,1
11-08-2014,00:00:08.982,1
11-08-2014,00:00:09.815,1
11-08-2014,00:00:10.540,1
11-08-2014,00:00:11.061,1
11-08-2014,00:00:11.617,1
11-08-2014,00:00:13.607,1
11-08-2014,00:00:14.535,1
11-08-2014,00:00:15.525,1
11-08-2014,00:00:17.960,1
11-08-2014,00:00:20.674,1
11-08-2014,00:00:21.191,1"""
        from pandas.compat import StringIO
        df = pd.read_csv(StringIO(data), parse_dates={'timestamp': [
            'date', 'time']}, index_col='timestamp')
        df.index.name = None
        result = df.resample('6s').sum()
        expected = DataFrame({'value': [
            4, 9, 4, 2
        ]}, index=date_range('2014-11-08', freq='6s', periods=4))
        assert_frame_equal(result, expected)
        result = df.resample('7s').sum()
        expected = DataFrame({'value': [
            4, 10, 4, 1
        ]}, index=date_range('2014-11-08', freq='7s', periods=4))
        assert_frame_equal(result, expected)
        result = df.resample('11s').sum()
        expected = DataFrame({'value': [
            11, 8
        ]}, index=date_range('2014-11-08', freq='11s', periods=2))
        assert_frame_equal(result, expected)
        result = df.resample('13s').sum()
        expected = DataFrame({'value': [
            13, 6
        ]}, index=date_range('2014-11-08', freq='13s', periods=2))
        assert_frame_equal(result, expected)
        result = df.resample('17s').sum()
        expected = DataFrame({'value': [
            16, 3
        ]}, index=date_range('2014-11-08', freq='17s', periods=2))
        assert_frame_equal(result, expected)
    def test_resample_basic_from_daily(self):
        """Downsample a daily series to each weekly anchor and to business
        days; .last() must pick the final observation of each period."""
        # from daily
        dti = DatetimeIndex(start=datetime(2005, 1, 1),
                            end=datetime(2005, 1, 10), freq='D', name='index')
        s = Series(np.random.rand(len(dti)), dti)
        # to weekly
        result = s.resample('w-sun').last()
        assert len(result) == 3
        assert (result.index.dayofweek == [6, 6, 6]).all()
        assert result.iloc[0] == s['1/2/2005']
        assert result.iloc[1] == s['1/9/2005']
        assert result.iloc[2] == s.iloc[-1]
        result = s.resample('W-MON').last()
        assert len(result) == 2
        assert (result.index.dayofweek == [0, 0]).all()
        assert result.iloc[0] == s['1/3/2005']
        assert result.iloc[1] == s['1/10/2005']
        result = s.resample('W-TUE').last()
        assert len(result) == 2
        assert (result.index.dayofweek == [1, 1]).all()
        assert result.iloc[0] == s['1/4/2005']
        assert result.iloc[1] == s['1/10/2005']
        result = s.resample('W-WED').last()
        assert len(result) == 2
        assert (result.index.dayofweek == [2, 2]).all()
        assert result.iloc[0] == s['1/5/2005']
        assert result.iloc[1] == s['1/10/2005']
        result = s.resample('W-THU').last()
        assert len(result) == 2
        assert (result.index.dayofweek == [3, 3]).all()
        assert result.iloc[0] == s['1/6/2005']
        assert result.iloc[1] == s['1/10/2005']
        result = s.resample('W-FRI').last()
        assert len(result) == 2
        assert (result.index.dayofweek == [4, 4]).all()
        assert result.iloc[0] == s['1/7/2005']
        assert result.iloc[1] == s['1/10/2005']
        # to biz day
        result = s.resample('B').last()
        assert len(result) == 7
        assert (result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all()
        assert result.iloc[0] == s['1/2/2005']
        assert result.iloc[1] == s['1/3/2005']
        assert result.iloc[5] == s['1/9/2005']
        assert result.index.name == 'index'
def test_resample_upsampling_picked_but_not_correct(self):
# Test for issue #3020
dates = date_range('01-Jan-2014', '05-Jan-2014', freq='D')
series = Series(1, index=dates)
result = series.resample('D').mean()
assert result.index[0] == dates[0]
# GH 5955
# incorrect deciding to upsample when the axis frequency matches the
# resample frequency
import datetime
s = Series(np.arange(1., 6), index=[datetime.datetime(
1975, 1, i, 12, 0) for i in range(1, 6)])
expected = Series(np.arange(1., 6), index=date_range(
'19750101', periods=5, freq='D'))
result = s.resample('D').count()
assert_series_equal(result, Series(1, index=expected.index))
result1 = s.resample('D').sum()
result2 = s.resample('D').mean()
assert_series_equal(result1, expected)
assert_series_equal(result2, expected)
    def test_resample_frame_basic(self):
        """DataFrame resampling: cython aggs run under a monthly TimeGrouper,
        and frame-level means match per-column means; period kind is a
        smoke test."""
        df = tm.makeTimeDataFrame()
        b = TimeGrouper('M')
        g = df.groupby(b)
        # check all cython functions work
        funcs = ['add', 'mean', 'prod', 'min', 'max', 'var']
        for f in funcs:
            g._cython_agg_general(f)
        result = df.resample('A').mean()
        assert_series_equal(result['A'], df['A'].resample('A').mean())
        result = df.resample('M').mean()
        assert_series_equal(result['A'], df['A'].resample('M').mean())
        df.resample('M', kind='period').mean()
        df.resample('W-WED', kind='period').mean()
    def test_resample_loffset(self):
        """loffset (as timedelta, string, or offset) shifts the result labels
        by the given amount without changing the values."""
        rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min')
        s = Series(np.random.randn(14), index=rng)
        result = s.resample('5min', closed='right', label='right',
                            loffset=timedelta(minutes=1)).mean()
        idx = date_range('1/1/2000', periods=4, freq='5min')
        expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
                          index=idx + timedelta(minutes=1))
        assert_series_equal(result, expected)
        expected = s.resample(
            '5min', closed='right', label='right',
            loffset='1min').mean()
        assert_series_equal(result, expected)
        expected = s.resample(
            '5min', closed='right', label='right',
            loffset=Minute(1)).mean()
        assert_series_equal(result, expected)
        assert result.index.freq == Minute(5)
        # from daily
        dti = DatetimeIndex(start=datetime(2005, 1, 1),
                            end=datetime(2005, 1, 10), freq='D')
        ser = Series(np.random.rand(len(dti)), dti)
        # to weekly
        result = ser.resample('w-sun').last()
        expected = ser.resample('w-sun', loffset=-bday).last()
        assert result.index[0] - bday == expected.index[0]
    def test_resample_loffset_count(self):
        """GH 12725: loffset must also apply to count() and size()."""
        start_time = '1/1/2000 00:00:00'
        rng = date_range(start_time, periods=100, freq='S')
        ts = Series(np.random.randn(len(rng)), index=rng)
        result = ts.resample('10S', loffset='1s').count()
        expected_index = (
            date_range(start_time, periods=10, freq='10S') +
            timedelta(seconds=1)
        )
        expected = pd.Series(10, index=expected_index)
        assert_series_equal(result, expected)
        # Same issue should apply to .size() since it goes through
        # same code path
        result = ts.resample('10S', loffset='1s').size()
        assert_series_equal(result, expected)
    def test_resample_upsample(self):
        """Upsample a daily series to minutes with pad(): endpoints and the
        index name must be preserved."""
        # from daily
        dti = DatetimeIndex(start=datetime(2005, 1, 1),
                            end=datetime(2005, 1, 10), freq='D', name='index')
        s = Series(np.random.rand(len(dti)), dti)
        # to minutely, by padding
        result = s.resample('Min').pad()
        assert len(result) == 12961
        assert result[0] == s[0]
        assert result[-1] == s[-1]
        assert result.index.name == 'index'
def test_resample_how_method(self):
# GH9915
s = pd.Series([11, 22],
index=[Timestamp('2015-03-31 21:48:52.672000'),
Timestamp('2015-03-31 21:49:52.739000')])
expected = pd.Series([11, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 22],
index=[Timestamp('2015-03-31 21:48:50'),
Timestamp('2015-03-31 21:49:00'),
Timestamp('2015-03-31 21:49:10'),
Timestamp('2015-03-31 21:49:20'),
Timestamp('2015-03-31 21:49:30'),
Timestamp('2015-03-31 21:49:40'),
Timestamp('2015-03-31 21:49:50')])
assert_series_equal(s.resample("10S").mean(), expected)
    def test_resample_extra_index_point(self):
        """GH 9756: business-month resample of a business-day frame must not
        emit an extra trailing index point."""
        index = DatetimeIndex(start='20150101', end='20150331', freq='BM')
        expected = DataFrame({'A': Series([21, 41, 63], index=index)})
        index = DatetimeIndex(start='20150101', end='20150331', freq='B')
        df = DataFrame(
            {'A': Series(range(len(index)), index=index)}, dtype='int64')
        result = df.resample('BM').last()
        assert_frame_equal(result, expected)
def test_upsample_with_limit(self):
rng = date_range('1/1/2000', periods=3, freq='5t')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('t').ffill(limit=2)
expected = ts.reindex(result.index, method='ffill', limit=2)
assert_series_equal(result, expected)
    def test_resample_ohlc(self):
        """5-minute ohlc of self.series: shape and per-bucket open/high/low/
        close values are checked against manual slices."""
        s = self.series
        grouper = TimeGrouper(Minute(5))
        expect = s.groupby(grouper).agg(lambda x: x[-1])
        result = s.resample('5Min').ohlc()
        assert len(result) == len(expect)
        assert len(result.columns) == 4
        xs = result.iloc[-2]
        assert xs['open'] == s[-6]
        assert xs['high'] == s[-6:-1].max()
        assert xs['low'] == s[-6:-1].min()
        assert xs['close'] == s[-2]
        xs = result.iloc[0]
        assert xs['open'] == s[0]
        assert xs['high'] == s[:5].max()
        assert xs['low'] == s[:5].min()
        assert xs['close'] == s[4]
def test_resample_ohlc_result(self):
# GH 12332
index = pd.date_range('1-1-2000', '2-15-2000', freq='h')
index = index.union(pd.date_range('4-15-2000', '5-15-2000', freq='h'))
s = Series(range(len(index)), index=index)
a = s.loc[:'4-15-2000'].resample('30T').ohlc()
assert isinstance(a, DataFrame)
b = s.loc[:'4-14-2000'].resample('30T').ohlc()
assert isinstance(b, DataFrame)
# GH12348
# raising on odd period
rng = date_range('2013-12-30', '2014-01-07')
index = rng.drop([Timestamp('2014-01-01'),
Timestamp('2013-12-31'),
Timestamp('2014-01-04'),
Timestamp('2014-01-05')])
df = DataFrame(data=np.arange(len(index)), index=index)
result = df.resample('B').mean()
expected = df.reindex(index=date_range(rng[0], rng[-1], freq='B'))
assert_frame_equal(result, expected)
    def test_resample_ohlc_dataframe(self):
        """DataFrame.ohlc: per-column results concatenate under a column
        MultiIndex, also when the input already has MultiIndex columns."""
        df = (
            pd.DataFrame({
                'PRICE': {
                    Timestamp('2011-01-06 10:59:05', tz=None): 24990,
                    Timestamp('2011-01-06 12:43:33', tz=None): 25499,
                    Timestamp('2011-01-06 12:54:09', tz=None): 25499},
                'VOLUME': {
                    Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
                    Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
                    Timestamp('2011-01-06 12:54:09', tz=None): 100000000}})
        ).reindex_axis(['VOLUME', 'PRICE'], axis=1)
        res = df.resample('H').ohlc()
        exp = pd.concat([df['VOLUME'].resample('H').ohlc(),
                         df['PRICE'].resample('H').ohlc()],
                        axis=1,
                        keys=['VOLUME', 'PRICE'])
        assert_frame_equal(exp, res)
        df.columns = [['a', 'b'], ['c', 'd']]
        res = df.resample('H').ohlc()
        exp.columns = pd.MultiIndex.from_tuples([
            ('a', 'c', 'open'), ('a', 'c', 'high'), ('a', 'c', 'low'),
            ('a', 'c', 'close'), ('b', 'd', 'open'), ('b', 'd', 'high'),
            ('b', 'd', 'low'), ('b', 'd', 'close')])
        assert_frame_equal(exp, res)
        # dupe columns fail atm
        # df.columns = ['PRICE', 'PRICE']
    def test_resample_dup_index(self):
        """GH 4812: resampling along axis=1 with duplicate row labels must
        match a quarterly column groupby."""
        # dup columns with resample raising
        df = DataFrame(np.random.randn(4, 12), index=[2000, 2000, 2000, 2000],
                       columns=[Period(year=2000, month=i + 1, freq='M')
                                for i in range(12)])
        df.iloc[3, :] = np.nan
        result = df.resample('Q', axis=1).mean()
        expected = df.groupby(lambda x: int((x.month - 1) / 3), axis=1).mean()
        expected.columns = [
            Period(year=2000, quarter=i + 1, freq='Q') for i in range(4)]
        assert_frame_equal(result, expected)
    def test_resample_reresample(self):
        """Resampling the output of a previous resample (B -> 8H) keeps a
        proper DateOffset frequency."""
        dti = DatetimeIndex(start=datetime(2005, 1, 1),
                            end=datetime(2005, 1, 10), freq='D')
        s = Series(np.random.rand(len(dti)), dti)
        bs = s.resample('B', closed='right', label='right').mean()
        result = bs.resample('8H').mean()
        assert len(result) == 22
        assert isinstance(result.index.freq, offsets.DateOffset)
        assert result.index.freq == offsets.Hour(8)
def test_resample_timestamp_to_period(self):
ts = _simple_ts('1/1/1990', '1/1/2000')
result = ts.resample('A-DEC', kind='period').mean()
expected = ts.resample('A-DEC').mean()
expected.index = period_range('1990', '2000', freq='a-dec')
assert_series_equal(result, expected)
result = ts.resample('A-JUN', kind='period').mean()
expected = ts.resample('A-JUN').mean()
expected.index = period_range('1990', '2000', freq='a-jun')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period').mean()
expected = ts.resample('M').mean()
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
result = ts.resample('M', kind='period').mean()
expected = ts.resample('M').mean()
expected.index = period_range('1990-01', '2000-01', freq='M')
assert_series_equal(result, expected)
def test_ohlc_5min(self):
def _ohlc(group):
if isnull(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
rng = date_range('1/1/2000 00:00:00', '1/1/2000 5:59:50', freq='10s')
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('5min', closed='right',
label='right').ohlc()
assert (resampled.loc['1/1/2000 00:00'] == ts[0]).all()
exp = _ohlc(ts[1:31])
assert (resampled.loc['1/1/2000 00:05'] == exp).all()
exp = _ohlc(ts['1/1/2000 5:55:01':])
assert (resampled.loc['1/1/2000 6:00:00'] == exp).all()
    def test_downsample_non_unique(self):
        """A monthly resample over a non-unique (repeated) index equals a
        plain groupby on the month."""
        rng = date_range('1/1/2000', '2/29/2000')
        rng2 = rng.repeat(5).values
        ts = Series(np.random.randn(len(rng2)), index=rng2)
        result = ts.resample('M').mean()
        expected = ts.groupby(lambda x: x.month).mean()
        assert len(result) == 2
        assert_almost_equal(result[0], expected[1])
        assert_almost_equal(result[1], expected[2])
    def test_asfreq_non_unique(self):
        """GH #1077: asfreq on a non-unique index must raise."""
        rng = date_range('1/1/2000', '2/29/2000')
        rng2 = rng.repeat(2).values
        ts = Series(np.random.randn(len(rng2)), index=rng2)
        pytest.raises(Exception, ts.asfreq, 'B')
    def test_resample_axis1(self):
        """Resampling along axis=1 equals transposing, resampling, and
        transposing back."""
        rng = date_range('1/1/2000', '2/29/2000')
        df = DataFrame(np.random.randn(3, len(rng)), columns=rng,
                       index=['a', 'b', 'c'])
        result = df.resample('M', axis=1).mean()
        expected = df.T.resample('M').mean().T
        tm.assert_frame_equal(result, expected)
    def test_resample_panel(self):
        """Panel resampling along the major (and swapped minor) axis equals
        item-by-item DataFrame resampling."""
        rng = date_range('1/1/2000', '6/30/2000')
        n = len(rng)
        with catch_warnings(record=True):
            panel = Panel(np.random.randn(3, n, 5),
                          items=['one', 'two', 'three'],
                          major_axis=rng,
                          minor_axis=['a', 'b', 'c', 'd', 'e'])
            result = panel.resample('M', axis=1).mean()
            def p_apply(panel, f):
                # apply f per item and rebuild the Panel
                result = {}
                for item in panel.items:
                    result[item] = f(panel[item])
                return Panel(result, items=panel.items)
            expected = p_apply(panel, lambda x: x.resample('M').mean())
            tm.assert_panel_equal(result, expected)
            panel2 = panel.swapaxes(1, 2)
            result = panel2.resample('M', axis=2).mean()
            expected = p_apply(panel2,
                               lambda x: x.resample('M', axis=1).mean())
            tm.assert_panel_equal(result, expected)
    def test_resample_panel_numpy(self):
        """Panel resample .apply with an axis-aware mean matches the built-in
        .mean() on both axis layouts."""
        rng = date_range('1/1/2000', '6/30/2000')
        n = len(rng)
        with catch_warnings(record=True):
            panel = Panel(np.random.randn(3, n, 5),
                          items=['one', 'two', 'three'],
                          major_axis=rng,
                          minor_axis=['a', 'b', 'c', 'd', 'e'])
            result = panel.resample('M', axis=1).apply(lambda x: x.mean(1))
            expected = panel.resample('M', axis=1).mean()
            tm.assert_panel_equal(result, expected)
            panel = panel.swapaxes(1, 2)
            result = panel.resample('M', axis=2).apply(lambda x: x.mean(2))
            expected = panel.resample('M', axis=2).mean()
            tm.assert_panel_equal(result, expected)
def test_resample_anchored_ticks(self):
# If a fixed delta (5 minute, 4 hour) evenly divides a day, we should
# "anchor" the origin at midnight so we get regular intervals rather
# than starting from the first timestamp which might start in the
# middle of a desired interval
rng = date_range('1/1/2000 04:00:00', periods=86400, freq='s')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
freqs = ['t', '5t', '15t', '30t', '4h', '12h']
for freq in freqs:
result = ts[2:].resample(freq, closed='left', label='left').mean()
expected = ts.resample(freq, closed='left', label='left').mean()
assert_series_equal(result, expected)
    def test_resample_single_group(self):
        """A monthly sum equals apply(sum) even when everything lands in one
        group; GH 3849 checks np.std over a single daily bucket."""
        mysum = lambda x: x.sum()
        rng = date_range('2000-1-1', '2000-2-10', freq='D')
        ts = Series(np.random.randn(len(rng)), index=rng)
        assert_series_equal(ts.resample('M').sum(),
                            ts.resample('M').apply(mysum))
        rng = date_range('2000-1-1', '2000-1-10', freq='D')
        ts = Series(np.random.randn(len(rng)), index=rng)
        assert_series_equal(ts.resample('M').sum(),
                            ts.resample('M').apply(mysum))
        # GH 3849
        s = Series([30.1, 31.6], index=[Timestamp('20070915 15:30:00'),
                                        Timestamp('20070915 15:40:00')])
        expected = Series([0.75], index=[Timestamp('20070915')])
        result = s.resample('D').apply(lambda x: np.std(x))
        assert_series_equal(result, expected)
    def test_resample_base(self):
        """base=2 shifts the 5-minute bin origin two minutes earlier."""
        rng = date_range('1/1/2000 00:00:00', '1/1/2000 02:00', freq='s')
        ts = Series(np.random.randn(len(rng)), index=rng)
        resampled = ts.resample('5min', base=2).mean()
        exp_rng = date_range('12/31/1999 23:57:00', '1/1/2000 01:57',
                             freq='5min')
        tm.assert_index_equal(resampled.index, exp_rng)
    def test_resample_base_with_timedeltaindex(self):
        """GH 10530: base must also shift the bin origin on a
        TimedeltaIndex."""
        rng = timedelta_range(start='0s', periods=25, freq='s')
        ts = Series(np.random.randn(len(rng)), index=rng)
        with_base = ts.resample('2s', base=5).mean()
        without_base = ts.resample('2s').mean()
        exp_without_base = timedelta_range(start='0s', end='25s', freq='2s')
        exp_with_base = timedelta_range(start='5s', end='29s', freq='2s')
        tm.assert_index_equal(without_base.index, exp_without_base)
        tm.assert_index_equal(with_base.index, exp_with_base)
    def test_resample_categorical_data_with_timedeltaindex(self):
        """GH #12169: categorical columns survive agg over a
        TimedeltaIndex."""
        df = DataFrame({'Group_obj': 'A'},
                       index=pd.to_timedelta(list(range(20)), unit='s'))
        df['Group'] = df['Group_obj'].astype('category')
        result = df.resample('10s').agg(lambda x: (x.value_counts().index[0]))
        expected = DataFrame({'Group_obj': ['A', 'A'],
                              'Group': ['A', 'A']},
                             index=pd.to_timedelta([0, 10], unit='s'))
        expected = expected.reindex_axis(['Group_obj', 'Group'], 1)
        tm.assert_frame_equal(result, expected)
def test_resample_daily_anchored(self):
rng = date_range('1/1/2000 0:00:00', periods=10000, freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
result = ts[2:].resample('D', closed='left', label='left').mean()
expected = ts.resample('D', closed='left', label='left').mean()
assert_series_equal(result, expected)
def test_resample_to_period_monthly_buglet(self):
# GH #1259
rng = date_range('1/1/2000', '12/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample('M', kind='period').mean()
exp_index = period_range('Jan-2000', 'Dec-2000', freq='M')
tm.assert_index_equal(result.index, exp_index)
def test_period_with_agg(self):
# aggregate a period resampler with a lambda
s2 = pd.Series(np.random.randint(0, 5, 50),
index=pd.period_range('2012-01-01',
freq='H',
periods=50),
dtype='float64')
expected = s2.to_timestamp().resample('D').mean().to_period()
result = s2.resample('D').agg(lambda x: x.mean())
assert_series_equal(result, expected)
    def test_resample_segfault(self):
        """GH 8573: groupby + resample on this shape segfaulted in older
        versions; result must equal the groupby-apply formulation."""
        all_wins_and_wagers = [
            (1, datetime(2013, 10, 1, 16, 20), 1, 0),
            (2, datetime(2013, 10, 1, 16, 10), 1, 0),
            (2, datetime(2013, 10, 1, 18, 15), 1, 0),
            (2, datetime(2013, 10, 1, 16, 10, 31), 1, 0)]
        df = pd.DataFrame.from_records(all_wins_and_wagers,
                                       columns=("ID", "timestamp", "A", "B")
                                       ).set_index("timestamp")
        result = df.groupby("ID").resample("5min").sum()
        expected = df.groupby("ID").apply(lambda x: x.resample("5min").sum())
        assert_frame_equal(result, expected)
def test_resample_dtype_preservation(self):
# GH 12202
# validation tests for dtype preservation
df = DataFrame({'date': pd.date_range(start='2016-01-01',
periods=4, freq='W'),
'group': [1, 1, 2, 2],
'val': Series([5, 6, 7, 8],
dtype='int32')}
).set_index('date')
result = df.resample('1D').ffill()
assert result.val.dtype == np.int32
result = df.groupby('group').resample('1D').ffill()
assert result.val.dtype == np.int32
def test_weekly_resample_buglet(self):
# #1327
rng = date_range('1/1/2000', freq='B', periods=20)
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample('W').mean()
expected = ts.resample('W-SUN').mean()
assert_series_equal(resampled, expected)
def test_monthly_resample_error(self):
# #1451
dates = date_range('4/16/2012 20:00', periods=5000, freq='h')
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample('M')
    def test_nanosecond_resample_error(self):
        """GH 12307: values must not fall after the last bin when resampling
        with a Nano offset."""
        start = 1443707890427
        exp_start = 1443707890400
        indx = pd.date_range(
            start=pd.to_datetime(start),
            periods=10,
            freq='100n'
        )
        ts = pd.Series(range(len(indx)), index=indx)
        r = ts.resample(pd.tseries.offsets.Nano(100))
        result = r.agg('mean')
        exp_indx = pd.date_range(
            start=pd.to_datetime(exp_start),
            periods=10,
            freq='100n'
        )
        exp = pd.Series(range(len(exp_indx)), index=exp_indx)
        assert_series_equal(result, exp)
    def test_resample_anchored_intraday(self):
        """#1471, #1458: intraday data resampled to M/Q must match the
        period-kind result stamped at period end (closed='left' via a
        one-day tshift)."""
        rng = date_range('1/1/2012', '4/1/2012', freq='100min')
        df = DataFrame(rng.month, index=rng)
        result = df.resample('M').mean()
        expected = df.resample(
            'M', kind='period').mean().to_timestamp(how='end')
        tm.assert_frame_equal(result, expected)
        result = df.resample('M', closed='left').mean()
        exp = df.tshift(1, freq='D').resample('M', kind='period').mean()
        exp = exp.to_timestamp(how='end')
        tm.assert_frame_equal(result, exp)
        rng = date_range('1/1/2012', '4/1/2012', freq='100min')
        df = DataFrame(rng.month, index=rng)
        result = df.resample('Q').mean()
        expected = df.resample(
            'Q', kind='period').mean().to_timestamp(how='end')
        tm.assert_frame_equal(result, expected)
        result = df.resample('Q', closed='left').mean()
        expected = df.tshift(1, freq='D').resample('Q', kind='period',
                                                   closed='left').mean()
        expected = expected.to_timestamp(how='end')
        tm.assert_frame_equal(result, expected)
        ts = _simple_ts('2012-04-29 23:00', '2012-04-30 5:00', freq='h')
        resampled = ts.resample('M').mean()
        assert len(resampled) == 1
def test_resample_anchored_monthstart(self):
ts = _simple_ts('1/1/2000', '12/31/2002')
freqs = ['MS', 'BMS', 'QS-MAR', 'AS-DEC', 'AS-JUN']
for freq in freqs:
ts.resample(freq).mean()
def test_resample_anchored_multiday(self):
# When resampling a range spanning multiple days, ensure that the
# start date gets used to determine the offset. Fixes issue where
# a one day period is not a multiple of the frequency.
#
# See: https://github.com/pandas-dev/pandas/issues/8683
index = pd.date_range(
'2014-10-14 23:06:23.206', periods=3, freq='400L'
) | pd.date_range(
'2014-10-15 23:00:00', periods=2, freq='2200L')
s = pd.Series(np.random.randn(5), index=index)
# Ensure left closing works
result = s.resample('2200L').mean()
assert result.index[-1] == pd.Timestamp('2014-10-15 23:00:02.000')
# Ensure right closing works
result = s.resample('2200L', label='right').mean()
assert result.index[-1] == pd.Timestamp('2014-10-15 23:00:04.200')
    def test_corner_cases(self):
        """Miscellaneous coverage: right-closed/left-labelled bins, length-0
        period resampling, and a single-period monthly result."""
        rng = date_range('1/1/2000', periods=12, freq='t')
        ts = Series(np.random.randn(len(rng)), index=rng)
        result = ts.resample('5t', closed='right', label='left').mean()
        ex_index = date_range('1999-12-31 23:55', periods=4, freq='5t')
        tm.assert_index_equal(result.index, ex_index)
        len0pts = _simple_pts('2007-01', '2010-05', freq='M')[:0]
        # it works
        result = len0pts.resample('A-DEC').mean()
        assert len(result) == 0
        # resample to periods
        ts = _simple_ts('2000-04-28', '2000-04-30 11:00', freq='h')
        result = ts.resample('M', kind='period').mean()
        assert len(result) == 1
        assert result.index[0] == Period('2000-04', freq='M')
def test_anchored_lowercase_buglet(self):
dates = date_range('4/16/2012 20:00', periods=50000, freq='s')
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample('d').mean()
def test_upsample_apply_functions(self):
    """aggregate() with a list of funcs on an upsample returns a DataFrame (GH 1596)."""
    # #1596
    rng = pd.date_range('2012-06-12', periods=4, freq='h')

    ts = Series(np.random.randn(len(rng)), index=rng)
    result = ts.resample('20min').aggregate(['mean', 'sum'])
    assert isinstance(result, DataFrame)
def test_resample_not_monotonic(self):
rng = pd.date_range('2012-06-12', periods=200, freq='h')
ts = Series(np.random.randn(len(rng)), index=rng)
ts = ts.take(np.random.permutation(len(ts)))
result = ts.resample('D').sum()
exp = ts.sort_index().resample('D').sum()
assert_series_equal(result, exp)
def test_resample_median_bug_1688(self):
    """apply/median on a tiny frame match asfreq for several dtypes (GH 1688)."""
    for dtype in ['int64', 'int32', 'float64', 'float32']:
        df = DataFrame([1, 2], index=[datetime(2012, 1, 1, 0, 0, 0),
                                      datetime(2012, 1, 1, 0, 5, 0)],
                       dtype=dtype)

        result = df.resample("T").apply(lambda x: x.mean())
        exp = df.asfreq('T')
        tm.assert_frame_equal(result, exp)

        result = df.resample("T").median()
        exp = df.asfreq('T')
        tm.assert_frame_equal(result, exp)
def test_how_lambda_functions(self):
    """Lambdas work as resample aggregators: singly, in lists, and in dicts."""
    ts = _simple_ts('1/1/2000', '4/1/2000')

    result = ts.resample('M').apply(lambda x: x.mean())
    exp = ts.resample('M').mean()
    tm.assert_series_equal(result, exp)

    foo_exp = ts.resample('M').mean()
    foo_exp.name = 'foo'
    bar_exp = ts.resample('M').std()
    bar_exp.name = 'bar'

    # list of lambdas -> DataFrame with one column per function
    result = ts.resample('M').apply(
        [lambda x: x.mean(), lambda x: x.std(ddof=1)])
    result.columns = ['foo', 'bar']
    tm.assert_series_equal(result['foo'], foo_exp)
    tm.assert_series_equal(result['bar'], bar_exp)

    # this is a MI Series, so comparing the names of the results
    # doesn't make sense
    result = ts.resample('M').aggregate({'foo': lambda x: x.mean(),
                                         'bar': lambda x: x.std(ddof=1)})
    tm.assert_series_equal(result['foo'], foo_exp, check_names=False)
    tm.assert_series_equal(result['bar'], bar_exp, check_names=False)
def test_resample_unequal_times(self):
    """Annual resample works when the end timestamp's hour precedes the start's (GH 1772)."""
    # #1772
    start = datetime(1999, 3, 1, 5)
    # end hour is less than start
    end = datetime(2012, 7, 31, 4)
    bad_ind = date_range(start, end, freq="30min")

    df = DataFrame({'close': 1}, index=bad_ind)

    # it works!
    df.resample('AS').sum()
def test_resample_consistency(self):
    """Upsampling via bfill agrees with reindex/reindex_like under a limit (GH 6418)."""
    # GH 6418
    # resample with bfill / limit / reindex consistency

    i30 = pd.date_range('2002-02-02', periods=4, freq='30T')
    s = pd.Series(np.arange(4.), index=i30)
    s[2] = np.NaN

    # Upsample by factor 3 with reindex() and resample() methods:
    i10 = pd.date_range(i30[0], i30[-1], freq='10T')

    s10 = s.reindex(index=i10, method='bfill')
    s10_2 = s.reindex(index=i10, method='bfill', limit=2)
    rl = s.reindex_like(s10, method='bfill', limit=2)
    r10_2 = s.resample('10Min').bfill(limit=2)
    r10 = s.resample('10Min').bfill()

    # s10_2, r10, r10_2, rl should all be equal
    assert_series_equal(s10_2, r10)
    assert_series_equal(s10_2, r10_2)
    assert_series_equal(s10_2, rl)
def test_resample_timegrouper(self):
    """resample on a datetime index with NaT matches Grouper counts (GH 7227)."""
    # GH 7227
    dates1 = [datetime(2014, 10, 1), datetime(2014, 9, 3),
              datetime(2014, 11, 5), datetime(2014, 9, 5),
              datetime(2014, 10, 8), datetime(2014, 7, 15)]

    # variants with NaT interleaved / surrounding the valid dates
    dates2 = dates1[:2] + [pd.NaT] + dates1[2:4] + [pd.NaT] + dates1[4:]
    dates3 = [pd.NaT] + dates1 + [pd.NaT]

    for dates in [dates1, dates2, dates3]:
        df = DataFrame(dict(A=dates, B=np.arange(len(dates))))
        result = df.set_index('A').resample('M').count()
        exp_idx = pd.DatetimeIndex(['2014-07-31', '2014-08-31',
                                    '2014-09-30',
                                    '2014-10-31', '2014-11-30'],
                                   freq='M', name='A')
        expected = DataFrame({'B': [1, 0, 2, 2, 1]}, index=exp_idx)
        assert_frame_equal(result, expected)

        result = df.groupby(pd.Grouper(freq='M', key='A')).count()
        assert_frame_equal(result, expected)

        # two value columns: both get the same per-bin counts
        df = DataFrame(dict(A=dates, B=np.arange(len(dates)), C=np.arange(
            len(dates))))
        result = df.set_index('A').resample('M').count()
        expected = DataFrame({'B': [1, 0, 2, 2, 1], 'C': [1, 0, 2, 2, 1]},
                             index=exp_idx, columns=['B', 'C'])
        assert_frame_equal(result, expected)

        result = df.groupby(pd.Grouper(freq='M', key='A')).count()
        assert_frame_equal(result, expected)
def test_resample_nunique(self):
    """Resampler.nunique matches the equivalent groupby paths (GH 12352)."""
    # GH 12352
    df = DataFrame({
        'ID': {pd.Timestamp('2015-06-05 00:00:00'): '0010100903',
               pd.Timestamp('2015-06-08 00:00:00'): '0010150847'},
        'DATE': {pd.Timestamp('2015-06-05 00:00:00'): '2015-06-05',
                 pd.Timestamp('2015-06-08 00:00:00'): '2015-06-08'}})
    r = df.resample('D')
    g = df.groupby(pd.Grouper(freq='D'))
    expected = df.groupby(pd.TimeGrouper('D')).ID.apply(lambda x:
                                                        x.nunique())
    assert expected.name == 'ID'

    for t in [r, g]:
        # BUG FIX: iterate via the loop variable `t` -- the original used
        # `r` here, so the groupby path `g` was never actually exercised.
        result = t.ID.nunique()
        assert_series_equal(result, expected)

    result = df.ID.resample('D').nunique()
    assert_series_equal(result, expected)

    result = df.ID.groupby(pd.Grouper(freq='D')).nunique()
    assert_series_equal(result, expected)
def test_resample_nunique_with_date_gap(self):
    """count/nunique/agg agree when the index has a large date gap (GH 13453)."""
    # GH 13453
    index = pd.date_range('1-1-2000', '2-15-2000', freq='h')
    index2 = pd.date_range('4-15-2000', '5-15-2000', freq='h')
    index3 = index.append(index2)
    s = pd.Series(range(len(index3)), index=index3, dtype='int64')
    r = s.resample('M')

    # Since all elements are unique, these should all be the same
    results = [
        r.count(),
        r.nunique(),
        r.agg(pd.Series.nunique),
        r.agg('nunique')
    ]

    assert_series_equal(results[0], results[1])
    assert_series_equal(results[0], results[2])
    assert_series_equal(results[0], results[3])
def test_resample_group_info(self):  # GH10914
    """nunique over random bins matches a hand-rolled bincount computation."""
    for n, k in product((10000, 100000), (10, 100, 1000)):
        dr = date_range(start='2015-08-27', periods=n // 10, freq='T')
        ts = Series(np.random.randint(0, n // k, n).astype('int64'),
                    index=np.random.choice(dr, n))

        left = ts.resample('30T').nunique()
        ix = date_range(start=ts.index.min(), end=ts.index.max(),
                        freq='30T')

        vals = ts.values
        bins = np.searchsorted(ix.values, ts.index, side='right')

        # sort by (bin, value) so that each distinct (bin, value) pair
        # is marked exactly once by the mask below
        sorter = np.lexsort((vals, bins))
        vals, bins = vals[sorter], bins[sorter]

        # mask is True where either the value or the bin changes
        mask = np.r_[True, vals[1:] != vals[:-1]]
        mask |= np.r_[True, bins[1:] != bins[:-1]]

        arr = np.bincount(bins[mask] - 1,
                          minlength=len(ix)).astype('int64', copy=False)
        right = Series(arr, index=ix)

        assert_series_equal(left, right)
def test_resample_size(self):
    """size() over random bins matches a hand-rolled bincount computation."""
    n = 10000
    dr = date_range('2015-09-19', periods=n, freq='T')
    ts = Series(np.random.randn(n), index=np.random.choice(dr, n))

    left = ts.resample('7T').size()
    ix = date_range(start=left.index.min(), end=ts.index.max(), freq='7T')

    # searchsorted(side='right') assigns each timestamp to its bin
    bins = np.searchsorted(ix.values, ts.index.values, side='right')
    val = np.bincount(bins, minlength=len(ix) + 1)[1:].astype('int64',
                                                              copy=False)

    right = Series(val, index=ix)
    assert_series_equal(left, right)
def test_resample_across_dst(self):
    """Resampling across a DST transition labels both bins correctly (GH 14682)."""
    # The test resamples a DatetimeIndex with values before and after a
    # DST change
    # Issue: 14682

    # The DatetimeIndex we will start with
    # (note that DST happens at 03:00+02:00 -> 02:00+01:00)
    # 2016-10-30 02:23:00+02:00, 2016-10-30 02:23:00+01:00
    df1 = DataFrame([1477786980, 1477790580], columns=['ts'])
    dti1 = DatetimeIndex(pd.to_datetime(df1.ts, unit='s')
                         .dt.tz_localize('UTC')
                         .dt.tz_convert('Europe/Madrid'))

    # The expected DatetimeIndex after resampling.
    # 2016-10-30 02:00:00+02:00, 2016-10-30 02:00:00+01:00
    df2 = DataFrame([1477785600, 1477789200], columns=['ts'])
    dti2 = DatetimeIndex(pd.to_datetime(df2.ts, unit='s')
                         .dt.tz_localize('UTC')
                         .dt.tz_convert('Europe/Madrid'))
    df = DataFrame([5, 5], index=dti1)

    result = df.resample(rule='H').sum()
    expected = DataFrame([5, 5], index=dti2)

    assert_frame_equal(result, expected)
def test_resample_dst_anchor(self):
    """Anchored (D/MS/W-MON/2W-MON/2MS) resampling across DST transitions (GH 5172)."""
    # 5172
    dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz='US/Eastern')
    df = DataFrame([5], index=dti)
    assert_frame_equal(df.resample(rule='D').sum(),
                       DataFrame([5], index=df.index.normalize()))
    df.resample(rule='MS').sum()
    assert_frame_equal(
        df.resample(rule='MS').sum(),
        DataFrame([5], index=DatetimeIndex([datetime(2012, 11, 1)],
                                           tz='US/Eastern')))

    # 30-minute data spanning the October DST change in Europe/Paris;
    # the expected min/max/count tables below are hand-computed.
    dti = date_range('2013-09-30', '2013-11-02', freq='30Min',
                     tz='Europe/Paris')
    values = range(dti.size)
    df = DataFrame({"a": values,
                    "b": values,
                    "c": values}, index=dti, dtype='int64')
    how = {"a": "min", "b": "max", "c": "count"}

    assert_frame_equal(
        df.resample("W-MON").agg(how)[["a", "b", "c"]],
        DataFrame({"a": [0, 48, 384, 720, 1056, 1394],
                   "b": [47, 383, 719, 1055, 1393, 1586],
                   "c": [48, 336, 336, 336, 338, 193]},
                  index=date_range('9/30/2013', '11/4/2013',
                                   freq='W-MON', tz='Europe/Paris')),
        'W-MON Frequency')

    assert_frame_equal(
        df.resample("2W-MON").agg(how)[["a", "b", "c"]],
        DataFrame({"a": [0, 48, 720, 1394],
                   "b": [47, 719, 1393, 1586],
                   "c": [48, 672, 674, 193]},
                  index=date_range('9/30/2013', '11/11/2013',
                                   freq='2W-MON', tz='Europe/Paris')),
        '2W-MON Frequency')

    assert_frame_equal(
        df.resample("MS").agg(how)[["a", "b", "c"]],
        DataFrame({"a": [0, 48, 1538],
                   "b": [47, 1537, 1586],
                   "c": [48, 1490, 49]},
                  index=date_range('9/1/2013', '11/1/2013',
                                   freq='MS', tz='Europe/Paris')),
        'MS Frequency')

    assert_frame_equal(
        df.resample("2MS").agg(how)[["a", "b", "c"]],
        DataFrame({"a": [0, 1538],
                   "b": [1537, 1586],
                   "c": [1538, 49]},
                  index=date_range('9/1/2013', '11/1/2013',
                                   freq='2MS', tz='Europe/Paris')),
        '2MS Frequency')

    # daily slice around the DST day: 10/27 has 50 half-hours
    df_daily = df['10/26/2013':'10/29/2013']
    assert_frame_equal(
        df_daily.resample("D").agg({"a": "min", "b": "max", "c": "count"})
        [["a", "b", "c"]],
        DataFrame({"a": [1248, 1296, 1346, 1394],
                   "b": [1295, 1345, 1393, 1441],
                   "c": [48, 50, 48, 48]},
                  index=date_range('10/26/2013', '10/29/2013',
                                   freq='D', tz='Europe/Paris')),
        'D Frequency')
def test_resample_with_nat(self):
    """Rows indexed at NaT are dropped before binning (GH 13020)."""
    # GH 13020
    index = DatetimeIndex([pd.NaT,
                           '1970-01-01 00:00:00',
                           pd.NaT,
                           '1970-01-01 00:00:01',
                           '1970-01-01 00:00:02'])
    frame = DataFrame([2, 3, 5, 7, 11], index=index)

    index_1s = DatetimeIndex(['1970-01-01 00:00:00',
                              '1970-01-01 00:00:01',
                              '1970-01-01 00:00:02'])
    frame_1s = DataFrame([3, 7, 11], index=index_1s)
    assert_frame_equal(frame.resample('1s').mean(), frame_1s)

    index_2s = DatetimeIndex(['1970-01-01 00:00:00',
                              '1970-01-01 00:00:02'])
    frame_2s = DataFrame([5, 11], index=index_2s)
    assert_frame_equal(frame.resample('2s').mean(), frame_2s)

    index_3s = DatetimeIndex(['1970-01-01 00:00:00'])
    frame_3s = DataFrame([7], index=index_3s)
    assert_frame_equal(frame.resample('3s').mean(), frame_3s)

    assert_frame_equal(frame.resample('60s').mean(), frame_3s)
def test_resample_timedelta_values(self):
    """timedelta64 dtype survives NaT insertion during resampling (GH 13119)."""
    # GH 13119
    # check that timedelta dtype is preserved when NaT values are
    # introduced by the resampling

    times = timedelta_range('1 day', '4 day', freq='4D')
    df = DataFrame({'time': times}, index=times)

    times2 = timedelta_range('1 day', '4 day', freq='2D')
    exp = Series(times2, index=times2, name='time')
    exp.iloc[1] = pd.NaT

    res = df.resample('2D').first()['time']
    tm.assert_series_equal(res, exp)
    res = df['time'].resample('2D').first()
    tm.assert_series_equal(res, exp)
def test_resample_datetime_values(self):
    """datetime64 dtype survives NaT insertion during resampling (GH 13119)."""
    # GH 13119
    # check that datetime dtype is preserved when NaT values are
    # introduced by the resampling

    dates = [datetime(2016, 1, 15), datetime(2016, 1, 19)]
    df = DataFrame({'timestamp': dates}, index=dates)

    exp = Series([datetime(2016, 1, 15), pd.NaT, datetime(2016, 1, 19)],
                 index=date_range('2016-01-15', periods=3, freq='2D'),
                 name='timestamp')

    res = df.resample('2D').first()['timestamp']
    tm.assert_series_equal(res, exp)
    res = df['timestamp'].resample('2D').first()
    tm.assert_series_equal(res, exp)
class TestPeriodIndex(Base):
    # Resample tests exercising PeriodIndex-backed series and frames.
    _index_factory = lambda x: period_range

    def create_series(self):
        """Return a small daily-period Series named 'pi' for the Base tests."""
        i = period_range(datetime(2005, 1, 1),
                         datetime(2005, 1, 10), freq='D')

        return Series(np.arange(len(i)), index=i, name='pi')
def test_asfreq_downsample(self):
    """2D asfreq on a period series/frame (currently returns timestamps; GH 12884)."""
    # series
    s = self.create_series()
    expected = s.reindex(s.index.take(np.arange(0, len(s.index), 2)))
    expected.index = expected.index.to_timestamp()
    expected.index.freq = to_offset('2D')

    # this is a bug, this *should* return a PeriodIndex
    # directly
    # GH 12884
    result = s.resample('2D').asfreq()
    assert_series_equal(result, expected)

    # frame
    frame = s.to_frame('value')
    expected = frame.reindex(
        frame.index.take(np.arange(0, len(frame.index), 2)))
    expected.index = expected.index.to_timestamp()
    expected.index.freq = to_offset('2D')
    result = frame.resample('2D').asfreq()
    assert_frame_equal(result, expected)
def test_asfreq_upsample(self):
    """Hourly asfreq upsample of a period series/frame (GH 12884)."""
    # this is a bug, this *should* return a PeriodIndex
    # directly
    # GH 12884
    s = self.create_series()
    new_index = date_range(s.index[0].to_timestamp(how='start'),
                           (s.index[-1] + 1).to_timestamp(how='start'),
                           freq='1H',
                           closed='left')
    expected = s.to_timestamp().reindex(new_index).to_period()
    result = s.resample('1H').asfreq()
    assert_series_equal(result, expected)

    frame = s.to_frame('value')
    new_index = date_range(frame.index[0].to_timestamp(how='start'),
                           (frame.index[-1] + 1).to_timestamp(how='start'),
                           freq='1H',
                           closed='left')
    expected = frame.to_timestamp().reindex(new_index).to_period()
    result = frame.resample('1H').asfreq()
    assert_frame_equal(result, expected)
def test_asfreq_fill_value(self):
    """asfreq(fill_value=...) fills holes introduced by upsampling (GH 3715)."""
    # test for fill value during resampling, issue 3715
    s = self.create_series()
    new_index = date_range(s.index[0].to_timestamp(how='start'),
                           (s.index[-1]).to_timestamp(how='start'),
                           freq='1H')
    expected = s.to_timestamp().reindex(new_index, fill_value=4.0)
    result = s.resample('1H', kind='timestamp').asfreq(fill_value=4.0)
    assert_series_equal(result, expected)

    frame = s.to_frame('value')
    new_index = date_range(frame.index[0].to_timestamp(how='start'),
                           (frame.index[-1]).to_timestamp(how='start'),
                           freq='1H')
    expected = frame.to_timestamp().reindex(new_index, fill_value=3.0)
    result = frame.resample('1H', kind='timestamp').asfreq(fill_value=3.0)
    assert_frame_equal(result, expected)
def test_selection(self):
    """on=/level= selection is not implemented for PeriodIndex (GH 14008)."""
    index = self.create_series().index
    # This is a bug, these should be implemented
    # GH 14008
    df = pd.DataFrame({'date': index,
                       'a': np.arange(len(index), dtype=np.int64)},
                      index=pd.MultiIndex.from_arrays([
                          np.arange(len(index), dtype=np.int64),
                          index], names=['v', 'd']))

    with pytest.raises(NotImplementedError):
        df.resample('2D', on='date')

    with pytest.raises(NotImplementedError):
        df.resample('2D', level='d')
# The twelve methods below enumerate annual upsampling over
# target freq {D, B, M} x convention {start, end} x fill {ffill, bfill}.
def test_annual_upsample_D_s_f(self):
    self._check_annual_upsample_cases('D', 'start', 'ffill')

def test_annual_upsample_D_e_f(self):
    self._check_annual_upsample_cases('D', 'end', 'ffill')

def test_annual_upsample_D_s_b(self):
    self._check_annual_upsample_cases('D', 'start', 'bfill')

def test_annual_upsample_D_e_b(self):
    self._check_annual_upsample_cases('D', 'end', 'bfill')

def test_annual_upsample_B_s_f(self):
    self._check_annual_upsample_cases('B', 'start', 'ffill')

def test_annual_upsample_B_e_f(self):
    self._check_annual_upsample_cases('B', 'end', 'ffill')

def test_annual_upsample_B_s_b(self):
    self._check_annual_upsample_cases('B', 'start', 'bfill')

def test_annual_upsample_B_e_b(self):
    self._check_annual_upsample_cases('B', 'end', 'bfill')

def test_annual_upsample_M_s_f(self):
    self._check_annual_upsample_cases('M', 'start', 'ffill')

def test_annual_upsample_M_e_f(self):
    self._check_annual_upsample_cases('M', 'end', 'ffill')

def test_annual_upsample_M_s_b(self):
    self._check_annual_upsample_cases('M', 'start', 'bfill')

def test_annual_upsample_M_e_b(self):
    self._check_annual_upsample_cases('M', 'end', 'bfill')
def _check_annual_upsample_cases(self, targ, conv, meth, end='12/31/1991'):
    """Upsample an annual series for every anchor month and compare
    against the equivalent to_timestamp/asfreq round trip."""
    for month in MONTHS:
        ts = _simple_pts('1/1/1990', end, freq='A-%s' % month)

        result = getattr(ts.resample(targ, convention=conv), meth)()
        expected = result.to_timestamp(targ, how=conv)
        expected = expected.asfreq(targ, meth).to_period()
        assert_series_equal(result, expected)
def test_basic_downsample(self):
    """Monthly -> annual downsample equals a by-year groupby mean."""
    ts = _simple_pts('1/1/1990', '6/30/1995', freq='M')
    result = ts.resample('a-dec').mean()

    expected = ts.groupby(ts.index.year).mean()
    expected.index = period_range('1/1/1990', '6/30/1995', freq='a-dec')
    assert_series_equal(result, expected)

    # this is ok
    assert_series_equal(ts.resample('a-dec').mean(), result)
    assert_series_equal(ts.resample('a').mean(), result)
def test_not_subperiod(self):
    """Resampling between non-nesting period freqs raises ValueError."""
    # These are incompatible period rules for resampling
    ts = _simple_pts('1/1/1990', '6/30/1995', freq='w-wed')
    pytest.raises(ValueError, lambda: ts.resample('a-dec').mean())
    pytest.raises(ValueError, lambda: ts.resample('q-mar').mean())
    pytest.raises(ValueError, lambda: ts.resample('M').mean())
    pytest.raises(ValueError, lambda: ts.resample('w-thu').mean())
def test_basic_upsample(self):
    """Annual -> daily ffill upsample matches the asfreq round trip."""
    ts = _simple_pts('1/1/1990', '6/30/1995', freq='M')
    result = ts.resample('a-dec').mean()

    resampled = result.resample('D', convention='end').ffill()

    expected = result.to_timestamp('D', how='end')
    expected = expected.asfreq('D', 'ffill').to_period()

    assert_series_equal(resampled, expected)
def test_upsample_with_limit(self):
    """ffill(limit=...) during upsample matches reindex with the same limit."""
    rng = period_range('1/1/2000', periods=5, freq='A')
    ts = Series(np.random.randn(len(rng)), rng)

    result = ts.resample('M', convention='end').ffill(limit=2)
    expected = ts.asfreq('M').reindex(result.index, method='ffill',
                                      limit=2)
    assert_series_equal(result, expected)
def test_annual_upsample(self):
    """Annual -> daily/monthly ffill for frames and series."""
    ts = _simple_pts('1/1/1990', '12/31/1995', freq='A-DEC')
    df = DataFrame({'a': ts})
    rdf = df.resample('D').ffill()
    exp = df['a'].resample('D').ffill()
    assert_series_equal(rdf['a'], exp)

    rng = period_range('2000', '2003', freq='A-DEC')
    ts = Series([1, 2, 3, 4], index=rng)

    result = ts.resample('M').ffill()
    ex_index = period_range('2000-01', '2003-12', freq='M')

    expected = ts.asfreq('M', how='start').reindex(ex_index,
                                                   method='ffill')
    assert_series_equal(result, expected)
def test_quarterly_upsample(self):
    """Quarterly -> D/B/M ffill for every anchor month and convention."""
    targets = ['D', 'B', 'M']

    for month in MONTHS:
        ts = _simple_pts('1/1/1990', '12/31/1995', freq='Q-%s' % month)

        for targ, conv in product(targets, ['start', 'end']):
            result = ts.resample(targ, convention=conv).ffill()
            expected = result.to_timestamp(targ, how=conv)
            expected = expected.asfreq(targ, 'ffill').to_period()
            assert_series_equal(result, expected)
def test_monthly_upsample(self):
    """Monthly -> D/B ffill for both conventions."""
    targets = ['D', 'B']

    ts = _simple_pts('1/1/1990', '12/31/1995', freq='M')

    for targ, conv in product(targets, ['start', 'end']):
        result = ts.resample(targ, convention=conv).ffill()
        expected = result.to_timestamp(targ, how=conv)
        expected = expected.asfreq(targ, 'ffill').to_period()
        assert_series_equal(result, expected)
def test_resample_basic(self):
    """Second -> minute means with kind='period', with NaNs present (GH 3609)."""
    # GH3609
    s = Series(range(100), index=date_range(
        '20130101', freq='s', periods=100, name='idx'), dtype='float')
    s[10:30] = np.nan
    index = PeriodIndex([
        Period('2013-01-01 00:00', 'T'),
        Period('2013-01-01 00:01', 'T')], name='idx')
    expected = Series([34.5, 79.5], index=index)
    result = s.to_period().resample('T', kind='period').mean()
    assert_series_equal(result, expected)
    result2 = s.resample('T', kind='period').mean()
    assert_series_equal(result2, expected)
def test_resample_count(self):
    """count() over a daily PeriodIndex yields per-month lengths (GH 12774)."""
    # GH12774
    series = pd.Series(1, index=pd.period_range(start='2000',
                                                periods=100))
    result = series.resample('M').count()

    expected_index = pd.period_range(start='2000', freq='M', periods=4)
    expected = pd.Series([31, 29, 31, 9], index=expected_index)

    assert_series_equal(result, expected)
def test_resample_same_freq(self):
    """Resampling to the series' own frequency is an identity (GH 12770)."""
    # GH12770
    series = pd.Series(range(3), index=pd.period_range(
        start='2000', periods=3, freq='M'))
    expected = series

    for method in resample_methods:
        result = getattr(series.resample('M'), method)()
        assert_series_equal(result, expected)
def test_resample_incompat_freq(self):
    """Monthly -> weekly period resample raises IncompatibleFrequency."""
    with pytest.raises(IncompatibleFrequency):
        pd.Series(range(3), index=pd.period_range(
            start='2000', periods=3, freq='M')).resample('W').mean()
def test_with_local_timezone_pytz(self):
    """Period resample of a pytz-converted series shifts the day back (GH 5430)."""
    # GH5430
    tm._skip_if_no_pytz()
    import pytz

    local_timezone = pytz.timezone('America/Los_Angeles')

    start = datetime(year=2013, month=11, day=1, hour=0, minute=0,
                     tzinfo=pytz.utc)
    # 1 day later
    end = datetime(year=2013, month=11, day=2, hour=0, minute=0,
                   tzinfo=pytz.utc)

    index = pd.date_range(start, end, freq='H')

    series = pd.Series(1, index=index)
    series = series.tz_convert(local_timezone)
    result = series.resample('D', kind='period').mean()

    # Create the expected series
    # Index is moved back a day with the timezone conversion from UTC to
    # Pacific
    expected_index = (pd.period_range(start=start, end=end, freq='D') - 1)
    expected = pd.Series(1, index=expected_index)
    assert_series_equal(result, expected)
def test_with_local_timezone_dateutil(self):
    """Same as the pytz variant, using a dateutil timezone (GH 5430)."""
    # GH5430
    tm._skip_if_no_dateutil()
    import dateutil

    local_timezone = 'dateutil/America/Los_Angeles'

    start = datetime(year=2013, month=11, day=1, hour=0, minute=0,
                     tzinfo=dateutil.tz.tzutc())
    # 1 day later
    end = datetime(year=2013, month=11, day=2, hour=0, minute=0,
                   tzinfo=dateutil.tz.tzutc())

    index = pd.date_range(start, end, freq='H', name='idx')

    series = pd.Series(1, index=index)
    series = series.tz_convert(local_timezone)
    result = series.resample('D', kind='period').mean()

    # Create the expected series
    # Index is moved back a day with the timezone conversion from UTC to
    # Pacific
    expected_index = (pd.period_range(start=start, end=end, freq='D',
                                      name='idx') - 1)
    expected = pd.Series(1, index=expected_index)
    assert_series_equal(result, expected)
def test_fill_method_and_how_upsample(self):
    """ffill followed by last() round-trips to the plain ffill result (GH 2073)."""
    # GH2073
    s = Series(np.arange(9, dtype='int64'),
               index=date_range('2010-01-01', periods=9, freq='Q'))
    last = s.resample('M').ffill()
    both = s.resample('M').ffill().resample('M').last().astype('int64')
    assert_series_equal(last, both)
def test_weekly_upsample(self):
    """Weekly -> D/B ffill for every anchor day and convention."""
    targets = ['D', 'B']

    for day in DAYS:
        ts = _simple_pts('1/1/1990', '12/31/1995', freq='W-%s' % day)

        for targ, conv in product(targets, ['start', 'end']):
            result = ts.resample(targ, convention=conv).ffill()
            expected = result.to_timestamp(targ, how=conv)
            expected = expected.asfreq(targ, 'ffill').to_period()
            assert_series_equal(result, expected)
def test_resample_to_timestamps(self):
    """kind='timestamp' matches resampling the to_timestamp view."""
    ts = _simple_pts('1/1/1990', '12/31/1995', freq='M')

    result = ts.resample('A-DEC', kind='timestamp').mean()
    expected = ts.to_timestamp(how='end').resample('A-DEC').mean()
    assert_series_equal(result, expected)
def test_resample_to_quarterly(self):
    """Annual -> quarterly ffill for every anchor month, plus a
    conforming-but-different anchor (A-JUN -> Q-MAR)."""
    for month in MONTHS:
        ts = _simple_pts('1990', '1992', freq='A-%s' % month)
        quar_ts = ts.resample('Q-%s' % month).ffill()

        stamps = ts.to_timestamp('D', how='start')
        qdates = period_range(ts.index[0].asfreq('D', 'start'),
                              ts.index[-1].asfreq('D', 'end'),
                              freq='Q-%s' % month)

        expected = stamps.reindex(qdates.to_timestamp('D', 's'),
                                  method='ffill')
        expected.index = qdates

        assert_series_equal(quar_ts, expected)

    # conforms, but different month
    ts = _simple_pts('1990', '1992', freq='A-JUN')

    for how in ['start', 'end']:
        result = ts.resample('Q-MAR', convention=how).ffill()
        expected = ts.asfreq('Q-MAR', how=how)
        expected = expected.reindex(result.index, method='ffill')

        # .to_timestamp('D')
        # expected = expected.resample('Q-MAR').ffill()

        assert_series_equal(result, expected)
def test_resample_fill_missing(self):
    """ffill fills the gap years in a sparse annual PeriodIndex."""
    rng = PeriodIndex([2000, 2005, 2007, 2009], freq='A')

    s = Series(np.random.randn(4), index=rng)

    stamps = s.to_timestamp()
    filled = s.resample('A').ffill()
    expected = stamps.resample('A').ffill().to_period('A')
    assert_series_equal(filled, expected)
def test_cant_fill_missing_dups(self):
    """ffill over duplicate periods raises."""
    rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq='A')
    s = Series(np.random.randn(5), index=rng)
    pytest.raises(Exception, lambda: s.resample('A').ffill())
def test_resample_5minute(self):
    """Minutely -> 5-minute period mean matches the timestamp equivalent."""
    rng = period_range('1/1/2000', '1/5/2000', freq='T')
    ts = Series(np.random.randn(len(rng)), index=rng)

    result = ts.resample('5min').mean()
    expected = ts.to_timestamp().resample('5min').mean()
    assert_series_equal(result, expected)
def test_upsample_daily_business_daily(self):
    """Business-daily -> daily and daily -> hourly asfreq upsampling."""
    ts = _simple_pts('1/1/2000', '2/1/2000', freq='B')

    result = ts.resample('D').asfreq()
    expected = ts.asfreq('D').reindex(period_range('1/3/2000', '2/1/2000'))
    assert_series_equal(result, expected)

    ts = _simple_pts('1/1/2000', '2/1/2000')
    result = ts.resample('H', convention='s').asfreq()
    exp_rng = period_range('1/1/2000', '2/1/2000 23:00', freq='H')
    expected = ts.asfreq('H', how='s').reindex(exp_rng)
    assert_series_equal(result, expected)
def test_resample_irregular_sparse(self):
    """apply(len) on a truncated subset agrees with the full-series bins."""
    dr = date_range(start='1/1/2012', freq='5min', periods=1000)
    s = Series(np.array(100), index=dr)
    # subset the data.
    subset = s[:'2012-01-04 06:55']

    result = subset.resample('10min').apply(len)
    expected = s.resample('10min').apply(len).loc[result.index]
    assert_series_equal(result, expected)
def test_resample_weekly_all_na(self):
    """W-WED -> W-THU asfreq is all-NaN until forward-filled."""
    rng = date_range('1/1/2000', periods=10, freq='W-WED')
    ts = Series(np.random.randn(len(rng)), index=rng)

    result = ts.resample('W-THU').asfreq()

    assert result.isnull().all()

    result = ts.resample('W-THU').asfreq().ffill()[:-1]
    expected = ts.asfreq('W-THU').ffill()
    assert_series_equal(result, expected)
def test_resample_tz_localized(self):
    """Resampling tz-aware data: weekly/daily means, period output,
    and a per-column agg over a tz-localized range (GH 2245, GH 6397)."""
    dr = date_range(start='2012-4-13', end='2012-5-1')
    ts = Series(lrange(len(dr)), dr)

    ts_utc = ts.tz_localize('UTC')
    ts_local = ts_utc.tz_convert('America/Los_Angeles')

    result = ts_local.resample('W').mean()

    # strip tz, resample, then re-localize: must match the tz-aware path
    ts_local_naive = ts_local.copy()
    ts_local_naive.index = [x.replace(tzinfo=None)
                            for x in ts_local_naive.index.to_pydatetime()]

    exp = ts_local_naive.resample(
        'W').mean().tz_localize('America/Los_Angeles')

    assert_series_equal(result, exp)

    # it works
    result = ts_local.resample('D').mean()

    # #2245
    idx = date_range('2001-09-20 15:59', '2001-09-20 16:00', freq='T',
                     tz='Australia/Sydney')
    s = Series([1, 2], index=idx)

    result = s.resample('D', closed='right', label='right').mean()
    ex_index = date_range('2001-09-21', periods=1, freq='D',
                          tz='Australia/Sydney')
    expected = Series([1.5], index=ex_index)

    assert_series_equal(result, expected)

    # for good measure
    result = s.resample('D', kind='period').mean()
    ex_index = period_range('2001-09-20', periods=1, freq='D')
    expected = Series([1.5], index=ex_index)
    assert_series_equal(result, expected)

    # GH 6397
    # comparing an offset that doesn't propagate tz's
    rng = date_range('1/1/2011', periods=20000, freq='H')
    rng = rng.tz_localize('EST')
    ts = DataFrame(index=rng)
    ts['first'] = np.random.randn(len(rng))
    ts['second'] = np.cumsum(np.random.randn(len(rng)))
    expected = DataFrame(
        {
            'first': ts.resample('A').sum()['first'],
            'second': ts.resample('A').mean()['second']},
        columns=['first', 'second'])
    result = ts.resample(
        'A').agg({'first': np.sum,
                  'second': np.mean}).reindex(columns=['first', 'second'])
    assert_frame_equal(result, expected)
def test_closed_left_corner(self):
    """closed='left' binning with a leading NaN matches dropping it (GH 1465)."""
    # #1465
    s = Series(np.random.randn(21),
               index=date_range(start='1/1/2012 9:30',
                                freq='1min', periods=21))
    s[0] = np.nan

    result = s.resample('10min', closed='left', label='right').mean()
    exp = s[1:].resample('10min', closed='left', label='right').mean()
    assert_series_equal(result, exp)

    result = s.resample('10min', closed='left', label='left').mean()
    exp = s[1:].resample('10min', closed='left', label='left').mean()

    ex_index = date_range(start='1/1/2012 9:30', freq='10min', periods=3)

    tm.assert_index_equal(result.index, ex_index)
    assert_series_equal(result, exp)
def test_quarterly_resampling(self):
    """Quarterly -> annual mean matches the timestamp round trip."""
    rng = period_range('2000Q1', periods=10, freq='Q-DEC')
    ts = Series(np.arange(10), index=rng)

    result = ts.resample('A').mean()
    exp = ts.to_timestamp().resample('A').mean().to_period()
    assert_series_equal(result, exp)
def test_resample_weekly_bug_1726(self):
    """Weekly first() with closed/label='left' must not raise (GH 1726)."""
    # 8/6/12 is a Monday
    ind = DatetimeIndex(start="8/6/2012", end="8/26/2012", freq="D")
    n = len(ind)
    data = [[x] * 5 for x in range(n)]
    df = DataFrame(data, columns=['open', 'high', 'low', 'close', 'vol'],
                   index=ind)

    # it works!
    df.resample('W-MON', closed='left', label='left').first()
def test_resample_bms_2752(self):
    """BMS resample starts on the first business day of the month (GH 2753)."""
    # GH2753
    foo = pd.Series(index=pd.bdate_range('20000101', '20000201'))
    res1 = foo.resample("BMS").mean()
    res2 = foo.resample("BMS").mean().resample("B").mean()
    # Jan 1 2000 was a Saturday; first business day is Mon Jan 3
    assert res1.index[0] == Timestamp('20000103')
    assert res1.index[0] == res2.index[0]
# def test_monthly_convention_span(self):
# rng = period_range('2000-01', periods=3, freq='M')
# ts = Series(np.arange(3), index=rng)
# # hacky way to get same thing
# exp_index = period_range('2000-01-01', '2000-03-31', freq='D')
# expected = ts.asfreq('D', how='end').reindex(exp_index)
# expected = expected.fillna(method='bfill')
# result = ts.resample('D', convention='span').mean()
# assert_series_equal(result, expected)
def test_default_right_closed_label(self):
    """End-anchored target freqs default to closed='right', label='right'."""
    # NOTE: `end_freq` holds the source freqs, `end_types` the
    # end-anchored target freqs being tested.
    end_freq = ['D', 'Q', 'M', 'D']
    end_types = ['M', 'A', 'Q', 'W']
    for from_freq, to_freq in zip(end_freq, end_types):
        idx = DatetimeIndex(start='8/15/2012', periods=100, freq=from_freq)
        df = DataFrame(np.random.randn(len(idx), 2), idx)

        resampled = df.resample(to_freq).mean()
        assert_frame_equal(resampled, df.resample(to_freq, closed='right',
                                                  label='right').mean())
def test_default_left_closed_label(self):
    """Start-anchored/intraday target freqs default to closed='left', label='left'."""
    others = ['MS', 'AS', 'QS', 'D', 'H']
    others_freq = ['D', 'Q', 'M', 'H', 'T']

    for from_freq, to_freq in zip(others_freq, others):
        idx = DatetimeIndex(start='8/15/2012', periods=100, freq=from_freq)
        df = DataFrame(np.random.randn(len(idx), 2), idx)

        resampled = df.resample(to_freq).mean()
        assert_frame_equal(resampled, df.resample(to_freq, closed='left',
                                                  label='left').mean())
def test_all_values_single_bin(self):
    """A full year collapsing into one annual bin equals the plain mean (GH 2070)."""
    # 2070
    index = period_range(start="2012-01-01", end="2012-12-31", freq="M")
    s = Series(np.random.randn(len(index)), index=index)

    result = s.resample("A").mean()
    tm.assert_almost_equal(result[0], s.mean())
def test_evenly_divisible_with_no_extra_bins(self):
    """No spurious trailing bin when the span divides the freq evenly (GH 4076)."""
    # 4076
    # when the frequency is evenly divisible, sometimes extra bins

    df = DataFrame(np.random.randn(9, 3),
                   index=date_range('2000-1-1', periods=9))
    result = df.resample('5D').mean()
    expected = pd.concat(
        [df.iloc[0:5].mean(), df.iloc[5:].mean()], axis=1).T
    expected.index = [Timestamp('2000-1-1'), Timestamp('2000-1-6')]
    assert_frame_equal(result, expected)

    # 28 days of data per key -> exactly four 7D bins, no extras
    index = date_range(start='2001-5-4', periods=28)
    df = DataFrame(
        [{'REST_KEY': 1, 'DLY_TRN_QT': 80, 'DLY_SLS_AMT': 90,
          'COOP_DLY_TRN_QT': 30, 'COOP_DLY_SLS_AMT': 20}] * 28 +
        [{'REST_KEY': 2, 'DLY_TRN_QT': 70, 'DLY_SLS_AMT': 10,
          'COOP_DLY_TRN_QT': 50, 'COOP_DLY_SLS_AMT': 20}] * 28,
        index=index.append(index)).sort_index()

    index = date_range('2001-5-4', periods=4, freq='7D')
    expected = DataFrame(
        [{'REST_KEY': 14, 'DLY_TRN_QT': 14, 'DLY_SLS_AMT': 14,
          'COOP_DLY_TRN_QT': 14, 'COOP_DLY_SLS_AMT': 14}] * 4,
        index=index)
    result = df.resample('7D').count()
    assert_frame_equal(result, expected)

    expected = DataFrame(
        [{'REST_KEY': 21, 'DLY_TRN_QT': 1050, 'DLY_SLS_AMT': 700,
          'COOP_DLY_TRN_QT': 560, 'COOP_DLY_SLS_AMT': 280}] * 4,
        index=index)
    result = df.resample('7D').sum()
    assert_frame_equal(result, expected)
class TestTimedeltaIndex(Base):
    # Resample tests exercising TimedeltaIndex-backed series and frames.
    _index_factory = lambda x: timedelta_range

    def create_series(self):
        """Return a small daily-timedelta Series named 'tdi' for the Base tests."""
        i = timedelta_range('1 day',
                            '10 day', freq='D')

        return Series(np.arange(len(i)), index=i, name='tdi')
def test_asfreq_bug(self):
import datetime as dt
df = DataFrame(data=[1, 3],
index=[dt.timedelta(), dt.timedelta(minutes=3)])
result = df.resample('1T').asfreq()
expected = DataFrame(data=[1, np.nan, np.nan, 3],
index=timedelta_range('0 day',
periods=4,
freq='1T'))
assert_frame_equal(result, expected)
class TestResamplerGrouper(object):
    """Tests for resample chained off a groupby."""

    def setup_method(self, method):
        # 40 one-second rows partitioned into three 'A' groups of 20/12/8
        self.frame = DataFrame({'A': [1] * 20 + [2] * 12 + [3] * 8,
                                'B': np.arange(40)},
                               index=date_range('1/1/2000',
                                                freq='s',
                                                periods=40))
def test_back_compat_v180(self):
    """Deprecated how=/fill_method= kwargs still work, with a FutureWarning."""
    df = self.frame
    for how in ['sum', 'mean', 'prod', 'min', 'max', 'var', 'std']:
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = df.groupby('A').resample('4s', how=how)
            expected = getattr(df.groupby('A').resample('4s'), how)()
            assert_frame_equal(result, expected)

    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        result = df.groupby('A').resample('4s', how='mean',
                                          fill_method='ffill')
        expected = df.groupby('A').resample('4s').mean().ffill()
        assert_frame_equal(result, expected)
def test_deferred_with_groupby(self):
    """Deferred resample ops after groupby match a groupby-apply (GH 12486)."""
    # GH 12486
    # support deferred resample ops with groupby
    data = [['2010-01-01', 'A', 2], ['2010-01-02', 'A', 3],
            ['2010-01-05', 'A', 8], ['2010-01-10', 'A', 7],
            ['2010-01-13', 'A', 3], ['2010-01-01', 'B', 5],
            ['2010-01-03', 'B', 2], ['2010-01-04', 'B', 1],
            ['2010-01-11', 'B', 7], ['2010-01-14', 'B', 3]]

    df = DataFrame(data, columns=['date', 'id', 'score'])
    df.date = pd.to_datetime(df.date)
    f = lambda x: x.set_index('date').resample('D').asfreq()
    expected = df.groupby('id').apply(f)
    result = df.set_index('date').groupby('id').resample('D').asfreq()
    assert_frame_equal(result, expected)

    df = DataFrame({'date': pd.date_range(start='2016-01-01',
                                          periods=4,
                                          freq='W'),
                    'group': [1, 1, 2, 2],
                    'val': [5, 6, 7, 8]}).set_index('date')

    f = lambda x: x.resample('1D').ffill()
    expected = df.groupby('group').apply(f)
    result = df.groupby('group').resample('1D').ffill()
    assert_frame_equal(result, expected)
def test_getitem(self):
g = self.frame.groupby('A')
expected = g.B.apply(lambda x: x.resample('2s').mean())
result = g.resample('2s').B.mean()
assert_series_equal(result, expected)
result = g.B.resample('2s').mean()
assert_series_equal(result, expected)
result = g.resample('2s').mean().B
assert_series_equal(result, expected)
def test_getitem_multiple(self):
# GH 13174
# multiple calls after selection causing an issue with aliasing
data = [{'id': 1, 'buyer': 'A'}, {'id': 2, 'buyer': 'B'}]
df = pd.DataFrame(data, index=pd.date_range('2016-01-01', periods=2))
r = df.groupby('id').resample('1D')
result = r['buyer'].count()
expected = pd.Series([1, 1],
index=pd.MultiIndex.from_tuples(
[(1, pd.Timestamp('2016-01-01')),
(2, pd.Timestamp('2016-01-02'))],
names=['id', None]),
name='buyer')
assert_series_equal(result, expected)
result = r['buyer'].count()
assert_series_equal(result, expected)
def test_methods(self):
g = self.frame.groupby('A')
r = g.resample('2s')
for f in ['first', 'last', 'median', 'sem', 'sum', 'mean',
'min', 'max']:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample('2s'), f)())
assert_frame_equal(result, expected)
for f in ['size']:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample('2s'), f)())
assert_series_equal(result, expected)
for f in ['count']:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample('2s'), f)())
assert_frame_equal(result, expected)
# series only
for f in ['nunique']:
result = getattr(r.B, f)()
expected = g.B.apply(lambda x: getattr(x.resample('2s'), f)())
assert_series_equal(result, expected)
for f in ['backfill', 'ffill', 'asfreq']:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample('2s'), f)())
assert_frame_equal(result, expected)
result = r.ohlc()
expected = g.apply(lambda x: x.resample('2s').ohlc())
assert_frame_equal(result, expected)
for f in ['std', 'var']:
result = getattr(r, f)(ddof=1)
expected = g.apply(lambda x: getattr(x.resample('2s'), f)(ddof=1))
assert_frame_equal(result, expected)
def test_apply(self):
g = self.frame.groupby('A')
r = g.resample('2s')
# reduction
expected = g.resample('2s').sum()
def f(x):
return x.resample('2s').sum()
result = r.apply(f)
assert_frame_equal(result, expected)
def f(x):
return x.resample('2s').apply(lambda y: y.sum())
result = g.apply(f)
assert_frame_equal(result, expected)
def test_resample_groupby_with_label(self):
# GH 13235
index = date_range('2000-01-01', freq='2D', periods=5)
df = DataFrame(index=index,
data={'col0': [0, 0, 1, 1, 2], 'col1': [1, 1, 1, 1, 1]}
)
result = df.groupby('col0').resample('1W', label='left').sum()
mi = [np.array([0, 0, 1, 2]),
pd.to_datetime(np.array(['1999-12-26', '2000-01-02',
'2000-01-02', '2000-01-02'])
)
]
mindex = pd.MultiIndex.from_arrays(mi, names=['col0', None])
expected = DataFrame(data={'col0': [0, 0, 2, 2], 'col1': [1, 1, 2, 1]},
index=mindex
)
assert_frame_equal(result, expected)
def test_consistency_with_window(self):
# consistent return values with window
df = self.frame
expected = pd.Int64Index([1, 2, 3], name='A')
result = df.groupby('A').resample('2s').mean()
assert result.index.nlevels == 2
tm.assert_index_equal(result.index.levels[0], expected)
result = df.groupby('A').rolling(20).mean()
assert result.index.nlevels == 2
tm.assert_index_equal(result.index.levels[0], expected)
def test_median_duplicate_columns(self):
# GH 14233
df = pd.DataFrame(np.random.randn(20, 3),
columns=list('aaa'),
index=pd.date_range('2012-01-01',
periods=20, freq='s'))
df2 = df.copy()
df2.columns = ['a', 'b', 'c']
expected = df2.resample('5s').median()
result = df.resample('5s').median()
expected.columns = result.columns
assert_frame_equal(result, expected)
class TestTimeGrouper(object):
    """Tests for ``TimeGrouper``: grouping by a calendar frequency must be
    equivalent to a plain groupby over the corresponding key."""

    def setup_method(self, method):
        # 1000 daily observations starting 2000-01-01.
        self.ts = Series(np.random.randn(1000),
                         index=date_range('1/1/2000', periods=1000))

    def test_apply(self):
        grouper = TimeGrouper('A', label='right', closed='right')

        grouped = self.ts.groupby(grouper)

        # top-3 values per group
        f = lambda x: x.sort_values()[-3:]

        applied = grouped.apply(f)
        expected = self.ts.groupby(lambda x: x.year).apply(f)

        # Drop the (differently typed) group level before comparing.
        applied.index = applied.index.droplevel(0)
        expected.index = expected.index.droplevel(0)
        assert_series_equal(applied, expected)

    def test_count(self):
        # NaN every third value; count() must skip them the same way the
        # plain year-groupby does.
        self.ts[::3] = np.nan

        expected = self.ts.groupby(lambda x: x.year).count()

        grouper = TimeGrouper('A', label='right', closed='right')
        result = self.ts.groupby(grouper).count()
        expected.index = result.index
        assert_series_equal(result, expected)

        result = self.ts.resample('A').count()
        expected.index = result.index
        assert_series_equal(result, expected)

    def test_numpy_reduction(self):
        result = self.ts.resample('A', closed='right').prod()

        expected = self.ts.groupby(lambda x: x.year).agg(np.prod)
        expected.index = result.index

        assert_series_equal(result, expected)

    def test_apply_iteration(self):
        # #2300
        N = 1000
        ind = pd.date_range(start="2000-01-01", freq="D", periods=N)
        df = DataFrame({'open': 1, 'close': 2}, index=ind)
        tg = TimeGrouper('M')

        _, grouper, _ = tg._get_grouper(df)

        # Errors
        grouped = df.groupby(grouper, group_keys=False)
        f = lambda df: df['close'] / df['open']

        # it works!
        result = grouped.apply(f)
        tm.assert_index_equal(result.index, df.index)

    def test_panel_aggregation(self):
        # Panel is deprecated, hence the catch_warnings guard.
        ind = pd.date_range('1/1/2000', periods=100)
        data = np.random.randn(2, len(ind), 4)

        with catch_warnings(record=True):
            wp = Panel(data, items=['Item1', 'Item2'], major_axis=ind,
                       minor_axis=['A', 'B', 'C', 'D'])

            tg = TimeGrouper('M', axis=1)
            _, grouper, _ = tg._get_grouper(wp)
            bingrouped = wp.groupby(grouper)
            binagg = bingrouped.mean()

            def f(x):
                assert (isinstance(x, Panel))
                return x.mean(1)

            result = bingrouped.agg(f)
            tm.assert_panel_equal(result, binagg)

    def test_fails_on_no_datetime_index(self):
        # TimeGrouper requires a datetime-like axis; any other index type
        # must raise a TypeError naming the offending index class.
        index_names = ('Int64Index', 'Index', 'Float64Index', 'MultiIndex')
        index_funcs = (tm.makeIntIndex,
                       tm.makeUnicodeIndex, tm.makeFloatIndex,
                       lambda m: tm.makeCustomIndex(m, 2))
        n = 2
        for name, func in zip(index_names, index_funcs):
            index = func(n)
            df = DataFrame({'a': np.random.randn(n)}, index=index)
            with tm.assert_raises_regex(TypeError,
                                        "Only valid with "
                                        "DatetimeIndex, TimedeltaIndex "
                                        "or PeriodIndex, but got an "
                                        "instance of %r" % name):
                df.groupby(TimeGrouper('D'))

        # PeriodIndex gives a specific error message
        df = DataFrame({'a': np.random.randn(n)}, index=tm.makePeriodIndex(n))
        with tm.assert_raises_regex(TypeError,
                                    "axis must be a DatetimeIndex, but "
                                    "got an instance of 'PeriodIndex'"):
            df.groupby(TimeGrouper('D'))

    def test_aaa_group_order(self):
        # GH 12840
        # check TimeGrouper perform stable sorts
        n = 20
        data = np.random.randn(n, 4)
        df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
        # 5 distinct daily keys repeated 4 times -> each group is every
        # 5th row, in original order.
        df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2),
                     datetime(2013, 1, 3), datetime(2013, 1, 4),
                     datetime(2013, 1, 5)] * 4
        grouped = df.groupby(TimeGrouper(key='key', freq='D'))
        tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 1)),
                              df[::5])
        tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 2)),
                              df[1::5])
        tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 3)),
                              df[2::5])
        tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 4)),
                              df[3::5])
        tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 5)),
                              df[4::5])

    def test_aggregate_normal(self):
        # check TimeGrouper's aggregation is identical as normal groupby
        n = 20
        data = np.random.randn(n, 4)
        normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
        normal_df['key'] = [1, 2, 3, 4, 5] * 4

        dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
        dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2),
                        datetime(2013, 1, 3), datetime(2013, 1, 4),
                        datetime(2013, 1, 5)] * 4

        normal_grouped = normal_df.groupby('key')
        dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))

        for func in ['min', 'max', 'prod', 'var', 'std', 'mean']:
            expected = getattr(normal_grouped, func)()
            dt_result = getattr(dt_grouped, func)()
            # Only the index type differs: ints vs. the matching dates.
            expected.index = date_range(start='2013-01-01', freq='D',
                                        periods=5, name='key')
            assert_frame_equal(expected, dt_result)

        for func in ['count', 'sum']:
            expected = getattr(normal_grouped, func)()
            expected.index = date_range(start='2013-01-01', freq='D',
                                        periods=5, name='key')
            dt_result = getattr(dt_grouped, func)()
            assert_frame_equal(expected, dt_result)

        # GH 7453
        for func in ['size']:
            expected = getattr(normal_grouped, func)()
            expected.index = date_range(start='2013-01-01', freq='D',
                                        periods=5, name='key')
            dt_result = getattr(dt_grouped, func)()
            assert_series_equal(expected, dt_result)

        # GH 7453
        for func in ['first', 'last']:
            expected = getattr(normal_grouped, func)()
            expected.index = date_range(start='2013-01-01', freq='D',
                                        periods=5, name='key')
            dt_result = getattr(dt_grouped, func)()
            assert_frame_equal(expected, dt_result)

        # if TimeGrouper is used included, 'nth' doesn't work yet

        """
        for func in ['nth']:
            expected = getattr(normal_grouped, func)(3)
            expected.index = date_range(start='2013-01-01',
                                        freq='D', periods=5, name='key')
            dt_result = getattr(dt_grouped, func)(3)
            assert_frame_equal(expected, dt_result)
        """

    def test_aggregate_with_nat(self):
        # check TimeGrouper's aggregation is identical as normal groupby
        # Rows whose key is NaT/NaN are dropped by both groupers; the missing
        # bin must come back as all-NaN (or zero counts/size) after padding.
        n = 20
        data = np.random.randn(n, 4).astype('int64')
        normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
        normal_df['key'] = [1, 2, np.nan, 4, 5] * 4

        dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
        dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT,
                        datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4

        normal_grouped = normal_df.groupby('key')
        dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))

        for func in ['min', 'max', 'sum', 'prod']:
            normal_result = getattr(normal_grouped, func)()
            dt_result = getattr(dt_grouped, func)()
            pad = DataFrame([[np.nan, np.nan, np.nan, np.nan]], index=[3],
                            columns=['A', 'B', 'C', 'D'])
            expected = normal_result.append(pad)
            expected = expected.sort_index()
            expected.index = date_range(start='2013-01-01', freq='D',
                                        periods=5, name='key')
            assert_frame_equal(expected, dt_result)

        for func in ['count']:
            normal_result = getattr(normal_grouped, func)()
            pad = DataFrame([[0, 0, 0, 0]], index=[3],
                            columns=['A', 'B', 'C', 'D'])
            expected = normal_result.append(pad)
            expected = expected.sort_index()
            expected.index = date_range(start='2013-01-01', freq='D',
                                        periods=5, name='key')
            dt_result = getattr(dt_grouped, func)()
            assert_frame_equal(expected, dt_result)

        for func in ['size']:
            normal_result = getattr(normal_grouped, func)()
            pad = Series([0], index=[3])
            expected = normal_result.append(pad)
            expected = expected.sort_index()
            expected.index = date_range(start='2013-01-01', freq='D',
                                        periods=5, name='key')
            dt_result = getattr(dt_grouped, func)()
            assert_series_equal(expected, dt_result)

        # GH 9925
        assert dt_result.index.name == 'key'

        # if NaT is included, 'var', 'std', 'mean', 'first','last'
        # and 'nth' doesn't work yet
| agpl-3.0 |
mrshu/scikit-learn | examples/svm/plot_svm_regression.py | 5 | 1430 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynominial and RBF
kernels.
"""
# NOTE(review): this example targets Python 2 (``print`` statement) and the
# legacy ``pylab`` interface; ``pl.hold`` has since been removed from
# matplotlib — confirm the target environment before running.
print __doc__

###############################################################################
# Generate sample data: 40 sorted points in [0, 5) with a sine target.
import numpy as np

X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()

###############################################################################
# Add noise to targets: perturb every 5th target (40/5 = 8 values) with
# uniform noise in [-1.5, 1.5).
y[::5] += 3 * (0.5 - np.random.rand(8))

###############################################################################
# Fit regression model: one SVR per kernel, then predict on the training data.
from sklearn.svm import SVR

svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)

###############################################################################
# look at the results
import pylab as pl
pl.scatter(X, y, c='k', label='data')
pl.hold('on')
pl.plot(X, y_rbf, c='g', label='RBF model')
pl.plot(X, y_lin, c='r', label='Linear model')
pl.plot(X, y_poly, c='b', label='Polynomial model')
pl.xlabel('data')
pl.ylabel('target')
pl.title('Support Vector Regression')
pl.legend()
pl.show()
| bsd-3-clause |
zorroblue/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 15 | 5780 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import (
assert_equal, assert_false, assert_true, assert_array_equal, assert_raises,
assert_warns, assert_warns_message, assert_no_warnings)
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import (
_equal_similarities_and_preferences
)
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
# Shared fixture: 60 2-D samples drawn around three well-separated centers
# (shifted by +10 so similarities are clearly negative), reused by the
# affinity-propagation tests below.
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
                  cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
    # Affinity Propagation algorithm
    # Compute similarities as negative squared Euclidean distances.
    S = -euclidean_distances(X, squared=True)
    preference = np.median(S) * 10
    # Compute Affinity Propagation via the function API.
    cluster_centers_indices, labels = affinity_propagation(
        S, preference=preference)

    n_clusters_ = len(cluster_centers_indices)

    assert_equal(n_clusters, n_clusters_)

    # The estimator API on a precomputed affinity must agree with the
    # estimator run directly on the raw points.
    af = AffinityPropagation(preference=preference, affinity="precomputed")
    labels_precomputed = af.fit(S).labels_

    af = AffinityPropagation(preference=preference, verbose=True)
    labels = af.fit(X).labels_

    assert_array_equal(labels, labels_precomputed)

    cluster_centers_indices = af.cluster_centers_indices_

    n_clusters_ = len(cluster_centers_indices)
    assert_equal(np.unique(labels).size, n_clusters_)
    assert_equal(n_clusters, n_clusters_)

    # Test also with no copy
    _, labels_no_copy = affinity_propagation(S, preference=preference,
                                             copy=False)
    assert_array_equal(labels, labels_no_copy)

    # Test input validation: non-square S, out-of-range damping,
    # unknown affinity string.
    assert_raises(ValueError, affinity_propagation, S[:, :-1])
    assert_raises(ValueError, affinity_propagation, S, damping=0)
    af = AffinityPropagation(affinity="unknown")
    assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
    """fit_predict and a subsequent predict on the same data must agree."""
    model = AffinityPropagation(affinity="euclidean")
    fit_labels = model.fit_predict(X)
    assert_array_equal(fit_labels, model.predict(X))
def test_affinity_propagation_predict_error():
    """predict raises ValueError when unfitted or when affinity is precomputed."""
    # Calling predict before fit must fail.
    unfitted = AffinityPropagation(affinity="euclidean")
    assert_raises(ValueError, unfitted.predict, X)

    # Predict is unsupported for a precomputed affinity matrix.
    gram = np.dot(X, X.T)
    precomputed = AffinityPropagation(affinity="precomputed")
    precomputed.fit(gram)
    assert_raises(ValueError, precomputed.predict, X)
def test_affinity_propagation_fit_non_convergence():
    """A non-converged fit yields no centers and labels all samples noise (-1)."""
    samples = np.array([[0, 0], [1, 1], [-2, -2]])

    # A single iteration cannot converge, so a ConvergenceWarning is due.
    model = AffinityPropagation(preference=-10, max_iter=1)
    assert_warns(ConvergenceWarning, model.fit, samples)

    assert_array_equal(model.cluster_centers_, np.empty((0, 2)))
    assert_array_equal(model.labels_, np.array([-1, -1, -1]))
def test_affinity_propagation_equal_mutual_similarities():
    # Two mutually equidistant points: the preference alone decides the
    # outcome, and a "mutually equal" UserWarning is expected.
    X = np.array([[-1, 1], [1, -1]])
    S = -euclidean_distances(X, squared=True)

    # setting preference > similarity
    cluster_center_indices, labels = assert_warns_message(
        UserWarning, "mutually equal", affinity_propagation, S, preference=0)

    # expect every sample to become an exemplar
    assert_array_equal([0, 1], cluster_center_indices)
    assert_array_equal([0, 1], labels)

    # setting preference < similarity
    cluster_center_indices, labels = assert_warns_message(
        UserWarning, "mutually equal", affinity_propagation, S, preference=-10)

    # expect one cluster, with arbitrary (first) sample as exemplar
    assert_array_equal([0], cluster_center_indices)
    assert_array_equal([0, 0], labels)

    # setting different preferences breaks the tie deterministically
    cluster_center_indices, labels = assert_no_warnings(
        affinity_propagation, S, preference=[-20, -10])

    # expect one cluster, with highest-preference sample as exemplar
    assert_array_equal([1], cluster_center_indices)
    assert_array_equal([0, 0], labels)
def test_affinity_propagation_predict_non_convergence():
    """After a non-converged fit, predict flags every new sample as noise (-1)."""
    train = np.array([[0, 0], [1, 1], [-2, -2]])

    # One iteration cannot converge, so no cluster centers exist.
    model = AffinityPropagation(preference=-10, max_iter=1).fit(train)

    new_points = np.array([[2, 2], [3, 3], [4, 4]])
    assert_array_equal(model.predict(new_points), np.array([-1, -1, -1]))
def test_equal_similarities_and_preferences():
    """_equal_similarities_and_preferences is True only when both the pairwise
    similarities and the preferences are all equal."""
    # Unequal distances: never "equal", whatever preference is supplied.
    points = np.array([[0, 0], [1, 1], [-2, -2]])
    sim = -euclidean_distances(points, squared=True)
    for pref in (np.array(0), np.array([0, 0]), np.array([0, 1])):
        assert_false(_equal_similarities_and_preferences(sim, pref))

    # Equal distances between the two samples.
    sim = -euclidean_distances(np.array([[0, 0], [1, 1]]), squared=True)

    # Different preferences still fail ...
    assert_false(_equal_similarities_and_preferences(sim, np.array([0, 1])))

    # ... while uniform or scalar preferences pass.
    assert_true(_equal_similarities_and_preferences(sim, np.array([0, 0])))
    assert_true(_equal_similarities_and_preferences(sim, np.array(0)))
| bsd-3-clause |
waynenilsen/statsmodels | examples/python/formulas.py | 33 | 4968 |
## Formulas: Fitting models using R-style formulas
# Since version 0.5.0, ``statsmodels`` allows users to fit statistical models using R-style formulas. Internally, ``statsmodels`` uses the [patsy](http://patsy.readthedocs.org/) package to convert formulas and data to the matrices that are used in model fitting. The formula framework is quite powerful; this tutorial only scratches the surface. A full description of the formula language can be found in the ``patsy`` docs:
#
# * [Patsy formula language description](http://patsy.readthedocs.org/)
#
# ## Loading modules and functions
from __future__ import print_function
import numpy as np
import statsmodels.api as sm


##### Import convention

# You can import explicitly from statsmodels.formula.api
from statsmodels.formula.api import ols

# Alternatively, you can just use the `formula` namespace of the main `statsmodels.api`.
sm.formula.ols

# Or you can use the following convention
import statsmodels.formula.api as smf

# These names are just a convenient way to get access to each model's `from_formula` classmethod. See, for instance
sm.OLS.from_formula

# All of the lower case models accept ``formula`` and ``data`` arguments, whereas upper case ones take ``endog`` and ``exog`` design matrices. ``formula`` accepts a string which describes the model in terms of a ``patsy`` formula. ``data`` takes a [pandas](http://pandas.pydata.org/) data frame or any other data structure that defines a ``__getitem__`` for variable names like a structured array or a dictionary of variables.
#
# ``dir(sm.formula)`` will print a list of available models.
#
# Formula-compatible models have the following generic call signature: ``(formula, data, subset=None, *args, **kwargs)``
#
# ## OLS regression using formulas
#
# To begin, we fit the linear model described on the [Getting Started](gettingstarted.html) page. Download the data, subset columns, and list-wise delete to remove missing observations:
dta = sm.datasets.get_rdataset("Guerry", "HistData", cache=True)
df = dta.data[['Lottery', 'Literacy', 'Wealth', 'Region']].dropna()
df.head()

# Fit the model:
mod = ols(formula='Lottery ~ Literacy + Wealth + Region', data=df)
res = mod.fit()
print(res.summary())

# ## Categorical variables
#
# Looking at the summary printed above, notice that ``patsy`` determined that elements of *Region* were text strings, so it treated *Region* as a categorical variable. `patsy`'s default is also to include an intercept, so we automatically dropped one of the *Region* categories.
#
# If *Region* had been an integer variable that we wanted to treat explicitly as categorical, we could have done so by using the ``C()`` operator:
res = ols(formula='Lottery ~ Literacy + Wealth + C(Region)', data=df).fit()
print(res.params)

# Patsy's mode advanced features for categorical variables are discussed in: [Patsy: Contrast Coding Systems for categorical variables](contrasts.html)

# ## Operators
#
# We have already seen that "~" separates the left-hand side of the model from the right-hand side, and that "+" adds new columns to the design matrix.
#
# ### Removing variables
#
# The "-" sign can be used to remove columns/variables. For instance, we can remove the intercept from a model by:
res = ols(formula='Lottery ~ Literacy + Wealth + C(Region) -1 ', data=df).fit()
print(res.params)

# ### Multiplicative interactions
#
# ":" adds a new column to the design matrix with the interaction of the other two columns. "*" will also include the individual columns that were multiplied together:
res1 = ols(formula='Lottery ~ Literacy : Wealth - 1', data=df).fit()
res2 = ols(formula='Lottery ~ Literacy * Wealth - 1', data=df).fit()
print(res1.params, '\n')
print(res2.params)

# Many other things are possible with operators. Please consult the [patsy docs](https://patsy.readthedocs.org/en/latest/formulas.html) to learn more.

# ## Functions
#
# You can apply vectorized functions to the variables in your model:
# (fixed: ``statsmodels.api`` exposes no ``ols``; the formula-based entry
# point lives in ``statsmodels.formula.api``, imported above as ``smf``)
res = smf.ols(formula='Lottery ~ np.log(Literacy)', data=df).fit()
print(res.params)

# Define a custom function:
def log_plus_1(x):
    return np.log(x) + 1.

res = smf.ols(formula='Lottery ~ log_plus_1(Literacy)', data=df).fit()
print(res.params)

# Any function that is in the calling namespace is available to the formula.

# ## Using formulas with models that do not (yet) support them
#
# Even if a given `statsmodels` function does not support formulas, you can still use `patsy`'s formula language to produce design matrices. Those matrices
# can then be fed to the fitting function as `endog` and `exog` arguments.
#
# To generate ``numpy`` arrays:
import patsy
f = 'Lottery ~ Literacy * Wealth'
y, X = patsy.dmatrices(f, df, return_type='dataframe')
print(y[:5])
print(X[:5])

# To generate pandas data frames:
f = 'Lottery ~ Literacy * Wealth'
y, X = patsy.dmatrices(f, df, return_type='dataframe')
print(y[:5])
print(X[:5])

print(sm.OLS(y, X).fit().summary())
| bsd-3-clause |
volpino/Yeps-EURAC | tools/plotting/plotter.py | 4 | 2247 | #!/usr/bin/env python
# python histogram input_file output_file column bins
import sys, os
import matplotlib; matplotlib.use('Agg')
from pylab import *
assert sys.version_info[:2] >= ( 2, 4 )
def stop_err(msg):
    """Write *msg* to stderr and terminate the process."""
    sys.stderr.write(msg)
    raise SystemExit()
if __name__ == '__main__':
    # parse the arguments: script, input_file, column, bins-or-style,
    # output_file, mode ('hist' or anything else for a scatterplot)
    if len(sys.argv) != 6:
        stop_err('Usage: python histogram.py input_file column bins output_file style')
        sys.exit()

    mode = sys.argv[5]
    HIST = mode == 'hist'
    try:
        # 1-based column index; float() first so "3.0" is accepted too.
        col = int(float(sys.argv[2]))
        if HIST:
            bin = int(float(sys.argv[3]))
        else:
            # hack, this parameter is the plotting style for scatter plots
            if sys.argv[3] == 'P':
                style = 'o'
            elif sys.argv[3] == 'LP':
                style = 'o-'
            else:
                style = '-'
    except:
        # NOTE(review): argv[4] is the output path, not a numeric input —
        # this message probably meant argv[2]/argv[3]; verify before fixing.
        msg = 'Parameter were not numbers %s, %s' % (sys.argv[3], sys.argv[4])
        stop_err(msg)

    # validate arguments
    inp_file = sys.argv[1]
    out_file = sys.argv[4]

    if HIST:
        print "Histogram on column %s (%s bins)" % (col, bin)
    else:
        print "Scatterplot on column %s" % (col)

    # convert to 0-based index for splitting lines below
    xcol = col - 1

    # read the file, skipping blank lines and '#' comments
    values = []
    try:
        count = 0
        for line in file(inp_file):
            count += 1
            line = line.strip()
            if line and line[0] != '#':
                values.append(float(line.split()[xcol]))
    except Exception, e:
        stop_err('%s' % e)
        # NOTE(review): unreachable — stop_err() exits on the previous line.
        stop_err("Non numerical data at line %d, column %d" % (count, col))

    # plot the data
    if HIST:
        n, bins, patches = hist(values, bins=bin, normed=0)
    else:
        plot(values, style)

    xlabel('values')
    ylabel('counts')

    if HIST:
        title('Histogram of values over column %s (%s bins)' % (col, len(bins)))
    else:
        title('Scatterplot over column %s' % col)

    grid(True)

    # the plotter detects types by file extension
    png_out = out_file + '.png'  # force it to png
    savefig(png_out)

    # shuffle it back and clean up: copy the png bytes to the requested
    # output path, then delete the temporary .png file.
    data = file(png_out, 'rb').read()
    fp = open(out_file, 'wb')
    fp.write(data)
    fp.close()
    os.remove(png_out)
| mit |
mengxn/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py | 72 | 12865 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
# Feature dimensionalities of the Boston housing and iris datasets used below.
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
  """Input fn returning the Boston housing data as constant tensors.

  Args:
    num_epochs: if set, the feature tensor stops after that many passes
      (via ``limit_epochs``); ``None`` means unlimited.

  Returns:
    A ``(features, labels)`` pair reshaped to ``[-1, 13]`` and ``[-1, 1]``.
  """
  boston = base.load_boston()
  features = input_lib.limit_epochs(
      array_ops.reshape(
          constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
      num_epochs=num_epochs)
  labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
  return features, labels
def boston_input_fn_with_queue(num_epochs=None):
  """Like ``boston_input_fn``, but also registers a minimal queue runner.

  The dummy FIFOQueue runner lets tests exercise the queue-runner start/stop
  code paths without doing any real queueing work.
  """
  features, labels = boston_input_fn(num_epochs=num_epochs)

  # Create a minimal queue runner.
  fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
  queue_runner = queue_runner_impl.QueueRunner(fake_queue,
                                               [constant_op.constant(0)])
  queue_runner_impl.add_queue_runner(queue_runner)

  return features, labels
def iris_input_fn():
  """Input fn returning the iris data as ``(features, labels)`` tensors."""
  dataset = base.load_iris()
  feature_tensor = array_ops.reshape(
      constant_op.constant(dataset.data), [-1, _IRIS_INPUT_DIM])
  label_tensor = array_ops.reshape(constant_op.constant(dataset.target), [-1])
  return feature_tensor, label_tensor
def iris_input_fn_labels_dict():
  """Same as ``iris_input_fn``, but wraps the label tensor in a dict
  keyed by 'labels'."""
  dataset = base.load_iris()
  feature_tensor = array_ops.reshape(
      constant_op.constant(dataset.data), [-1, _IRIS_INPUT_DIM])
  return feature_tensor, {
      'labels': array_ops.reshape(constant_op.constant(dataset.target), [-1])
  }
def boston_eval_fn():
  """Eval input fn: the full Boston dataset duplicated along the batch axis.

  Returns:
    A ``(features, labels)`` pair with ``2 * n_examples`` rows each.
  """
  boston = base.load_boston()
  n_examples = len(boston.target)
  features = array_ops.reshape(
      constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
  labels = array_ops.reshape(
      constant_op.constant(boston.target), [n_examples, 1])
  # Concatenate the data with itself so the eval set is twice as large.
  return array_ops.concat([features, features], 0), array_ops.concat(
      [labels, labels], 0)
def extract(data, key):
  """Return ``data[key]`` when *data* is a dict (asserting the key exists);
  otherwise return *data* unchanged."""
  if not isinstance(data, dict):
    return data
  assert key in data
  return data[key]
def linear_model_params_fn(features, labels, mode, params):
  """Model fn for linear regression whose learning rate comes from `params`.

  Args:
    features: feature tensor, or a dict holding it under the 'input' key.
    labels: label tensor, or a dict holding it under the 'labels' key.
    mode: one of the three `ModeKeys` values (asserted below).
    params: dict providing 'learning_rate' for the Adagrad optimizer.

  Returns:
    A `(prediction, loss, train_op)` tuple.
  """
  features = extract(features, 'input')
  labels = extract(labels, 'labels')

  assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                  model_fn.ModeKeys.INFER)
  prediction, loss = (models.linear_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss,
      variables.get_global_step(),
      optimizer='Adagrad',
      learning_rate=params['learning_rate'])
  return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
  """Model fn for linear regression with a fixed Adagrad learning rate of 0.1.

  Accepts either plain tensors or dicts keyed by 'input'/'labels'.
  Returns a `(prediction, loss, train_op)` tuple.
  """
  features = extract(features, 'input')
  labels = extract(labels, 'labels')
  assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                  model_fn.ModeKeys.INFER)
  if isinstance(features, dict):
    # Unpack the single remaining {name: tensor} entry.
    (_, features), = features.items()
  prediction, loss = (models.linear_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
  """Same as linear_model_fn, but returns `ModelFnOps`."""
  assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                  model_fn.ModeKeys.INFER)
  prediction, loss = (models.linear_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  # Wrap the three values in the structured ModelFnOps container instead of
  # returning a bare tuple.
  return model_fn.ModelFnOps(
      mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
  """Model fn for 3-class logistic regression; takes no `mode` argument.

  Returns:
    A `(predictions, loss, train_op)` tuple where predictions is a dict with
    the argmax 'class' and the raw probability tensor 'prob'.
  """
  features = extract(features, 'input')
  labels = extract(labels, 'labels')
  # One-hot encode the integer labels over the 3 classes.
  labels = array_ops.one_hot(labels, 3, 1, 0)
  prediction, loss = (models.logistic_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return {
      'class': math_ops.argmax(prediction, 1),
      'prob': prediction
  }, loss, train_op
# Newline-delimited word lists used as vocabulary-file fixtures in tests.
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorInputTest(test.TestCase):
def testContinueTrainingDictionaryInput(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=50)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores2['MSE'], scores['MSE'])
predictions = np.array(list(est2.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions,
float64_target['labels'])
self.assertAllClose(other_score, scores['MSE'])
def testBostonAll(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=100)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(scores['MSE'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testBostonAllDictionaryInput(self):
boston = base.load_boston()
est = estimator.Estimator(model_fn=linear_model_fn)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=100)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
    """End-to-end SKCompat classification flow on Iris.

    Fits, scores with a named accuracy metric, and checks that the
    'prob' and 'class' prediction outputs are mutually consistent.
    """
    iris = base.load_iris()
    est = estimator.SKCompat(
        estimator.Estimator(model_fn=logistic_model_no_mode_fn))
    est.fit(iris.data, iris.target, steps=100)
    scores = est.score(
        x=iris.data,
        y=iris.target,
        metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
    predictions = est.predict(x=iris.data)
    predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
    # One probability row per example.
    self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
    # Restricting outputs=['class'] must not change the predicted classes.
    self.assertAllClose(predictions['class'], predictions_class)
    # The class output is the argmax over the probability output.
    self.assertAllClose(
        predictions['class'], np.argmax(
            predictions['prob'], axis=1))
    # Streamed accuracy must match sklearn's offline accuracy.
    other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
    self.assertAllClose(scores['accuracy'], other_score)
    self.assertTrue('global_step' in scores)
    self.assertEqual(100, scores['global_step'])
def testIrisAllDictionaryInput(self):
    """End-to-end classification flow on Iris with dict-valued x and y."""
    iris = base.load_iris()
    est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    iris_data = {'input': iris.data}
    iris_target = {'labels': iris.target}
    est.fit(iris_data, iris_target, steps=100)
    scores = est.evaluate(
        x=iris_data,
        y=iris_target,
        metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
    predictions = list(est.predict(x=iris_data))
    predictions_class = list(est.predict(x=iris_data, outputs=['class']))
    # One prediction dict per example.
    self.assertEqual(len(predictions), iris.target.shape[0])
    classes_batch = np.array([p['class'] for p in predictions])
    # Restricting outputs=['class'] must not change the predicted classes.
    self.assertAllClose(classes_batch,
                        np.array([p['class'] for p in predictions_class]))
    # The class output is the argmax over the probability output.
    self.assertAllClose(
        classes_batch,
        np.argmax(
            np.array([p['prob'] for p in predictions]), axis=1))
    # Streamed accuracy must match sklearn's offline accuracy.
    other_score = _sklearn.accuracy_score(iris.target, classes_batch)
    self.assertAllClose(other_score, scores['accuracy'])
    self.assertTrue('global_step' in scores)
    self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
    """Train and evaluate through an input_fn, then predict from arrays."""
    dataset = base.load_iris()
    classifier = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    classifier.fit(input_fn=iris_input_fn, steps=100)
    _ = classifier.evaluate(input_fn=iris_input_fn, steps=1)
    # One prediction per example is expected.
    outputs = list(classifier.predict(x=dataset.data))
    self.assertEqual(len(outputs), dataset.target.shape[0])
def testIrisInputFnLabelsDict(self):
    """evaluate() with dict labels: MetricSpec selects the prediction
    and label entries by key."""
    iris = base.load_iris()
    est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
    _ = est.evaluate(
        input_fn=iris_input_fn_labels_dict,
        steps=1,
        metrics={
            'accuracy':
                metric_spec.MetricSpec(
                    # Wire the metric to the 'class' prediction and the
                    # 'labels' entry of the labels dict.
                    metric_fn=metric_ops.streaming_accuracy,
                    prediction_key='class',
                    label_key='labels')
        })
    predictions = list(est.predict(x=iris.data))
    self.assertEqual(len(predictions), iris.target.shape[0])
def testTrainInputFn(self):
    """Smoke test: one train step and one eval step via input_fns."""
    model = estimator.Estimator(model_fn=linear_model_fn)
    model.fit(input_fn=boston_input_fn, steps=1)
    _ = model.evaluate(input_fn=boston_eval_fn, steps=1)
def testPredictInputFn(self):
    """predict() with a single-epoch input_fn yields one output per row."""
    model = estimator.Estimator(model_fn=linear_model_fn)
    dataset = base.load_boston()
    model.fit(input_fn=boston_input_fn, steps=1)
    # Bound to one epoch so predict() terminates on its own.
    one_epoch_fn = functools.partial(boston_input_fn, num_epochs=1)
    outputs = list(model.predict(input_fn=one_epoch_fn))
    self.assertEqual(len(outputs), dataset.target.shape[0])
def testPredictInputFnWithQueue(self):
    """predict() through a queued input_fn: two epochs double the output."""
    model = estimator.Estimator(model_fn=linear_model_fn)
    dataset = base.load_boston()
    model.fit(input_fn=boston_input_fn, steps=1)
    two_epoch_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
    outputs = list(model.predict(input_fn=two_epoch_fn))
    # Two passes over the data means twice as many predictions.
    self.assertEqual(len(outputs), dataset.target.shape[0] * 2)
def testPredictConstInputFn(self):
    """predict() with an input_fn built from constant tensors."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    boston = base.load_boston()
    est.fit(input_fn=boston_input_fn, steps=1)

    def input_fn():
        # Constant features/labels, reshaped to the (batch, dim) layout
        # expected by the linear model.
        features = array_ops.reshape(
            constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
        labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
        return features, labels

    output = list(est.predict(input_fn=input_fn))
    self.assertEqual(len(output), boston.target.shape[0])
# Allow running this test module directly.
if __name__ == '__main__':
    test.main()
| apache-2.0 |
kernc/scikit-learn | sklearn/tests/test_base.py | 45 | 7049 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
    """Minimal estimator with one scalar and one arbitrary-valued parameter."""
    def __init__(self, l1=0, empty=None):
        self.l1 = l1
        self.empty = empty
class K(BaseEstimator):
    """Estimator with two plain parameters; used as a nested component."""
    def __init__(self, c=None, d=None):
        self.c = c
        self.d = d
class T(BaseEstimator):
    """Estimator whose parameters may themselves be estimators (e.g. K)."""
    def __init__(self, a=None, b=None):
        self.a = a
        self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
    """Estimator whose ``b`` parameter is deprecated in favour of ``a``.

    Passing ``b`` stores its value in ``a`` and emits a DeprecationWarning;
    reading the ``b`` property is likewise marked deprecated.
    """
    def __init__(self, a=None, b=None):
        self.a = a
        if b is not None:
            # Bug fix: the original merely *instantiated* a
            # DeprecationWarning without raising or emitting it, so callers
            # never saw the deprecation. Emit it through the warnings
            # machinery instead.
            import warnings
            warnings.warn("b is deprecated and renamed 'a'",
                          DeprecationWarning)
            self.a = b

    @property
    @deprecated("Parameter 'b' is deprecated and renamed to 'a'")
    def b(self):
        # NOTE(review): self._b is never assigned anywhere in this class,
        # so reading .b raises AttributeError after the deprecation fires.
        return self._b
class Buggy(BaseEstimator):
    " A buggy estimator that does not set its parameters right. "
    def __init__(self, a=None):
        # Deliberately ignores ``a`` so that clone() detects the mismatch.
        self.a = 1
class NoEstimator(object):
    """Object with fit/predict but no BaseEstimator API (no get_params)."""
    def __init__(self):
        pass

    def fit(self, X=None, y=None):
        return self

    def predict(self, X=None):
        return None
class VargEstimator(BaseEstimator):
    """Sklearn estimators shouldn't have vargs."""
    def __init__(self, *vargs):
        # *vargs makes the signature unintrospectable, so clone() must fail.
        pass
#############################################################################
# The tests
def test_clone():
    # clone() must return a distinct object carrying identical parameters,
    # including array-valued ones.
    from sklearn.feature_selection import SelectFpr, f_classif

    original = SelectFpr(f_classif, alpha=0.1)
    duplicate = clone(original)
    assert_true(original is not duplicate)
    assert_equal(original.get_params(), duplicate.get_params())

    original = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
    duplicate = clone(original)
    assert_true(original is not duplicate)
def test_clone_2():
    # Attributes attached after construction must not survive cloning:
    # clone() copies constructor parameters only.
    from sklearn.feature_selection import SelectFpr, f_classif

    fitted = SelectFpr(f_classif, alpha=0.1)
    fitted.own_attribute = "test"
    copied = clone(fitted)
    assert_false(hasattr(copied, "own_attribute"))
def test_clone_buggy():
    # Estimators that mangle or hide their constructor parameters must make
    # clone() fail loudly rather than silently produce a broken copy.
    buggy = Buggy()
    buggy.a = 2
    assert_raises(RuntimeError, clone, buggy)

    assert_raises(TypeError, clone, NoEstimator())
    assert_raises(RuntimeError, clone, VargEstimator())
def test_clone_empty_array():
    # Regression test: cloning must cope with empty dense arrays and with
    # sparse-matrix parameters.
    dense = MyEstimator(empty=np.array([]))
    dense_copy = clone(dense)
    assert_array_equal(dense.empty, dense_copy.empty)

    sparse = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
    sparse_copy = clone(sparse)
    assert_array_equal(sparse.empty.data, sparse_copy.empty.data)
def test_clone_nan():
    # Regression test: an np.nan parameter is shared (same object), not
    # copied, by clone().
    source = MyEstimator(empty=np.nan)
    duplicate = clone(source)
    assert_true(source.empty is duplicate.empty)
def test_repr():
    # Smoke test the repr of the base estimator.
    my_estimator = MyEstimator()
    repr(my_estimator)
    # Nested estimators render recursively with keyword formatting.
    test = T(K(), K())
    assert_equal(
        repr(test),
        "T(a=K(c=None, d=None), b=K(c=None, d=None))"
    )
    # Very long parameter values are elided to a fixed-length repr.
    some_est = T(a=["long_params"] * 1000)
    assert_equal(len(repr(some_est)), 415)
def test_str():
    # str() of a bare estimator must not raise.
    str(MyEstimator())
def test_get_params():
    # Nested estimator parameters use the '<name>__<param>' convention and
    # are exposed only when deep=True.
    test = T(K(), K())

    assert_true('a__d' in test.get_params(deep=True))
    assert_true('a__d' not in test.get_params(deep=False))

    test.set_params(a__d=2)
    assert_true(test.a.d == 2)
    # Setting an unknown nested parameter must raise.
    assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
    # The deprecated 'b' attribute must be hidden from get_params() in
    # every mode (default, deep and shallow), while 'a' stays visible.
    est = DeprecatedAttributeEstimator(a=1)

    for params in (est.get_params(),
                   est.get_params(deep=True),
                   est.get_params(deep=False)):
        assert_true('a' in params)
        assert_true('b' not in params)
def test_is_classifier():
    # is_classifier must see through meta-estimators (grid search,
    # pipelines, and their combination) to the wrapped estimator.
    svc = SVC()
    assert_true(is_classifier(svc))
    assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
    assert_true(is_classifier(Pipeline([('svc', svc)])))
    assert_true(is_classifier(Pipeline([('svc_cv',
                              GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
    # test nested estimator parameter setting
    clf = Pipeline([("svc", SVC())])
    # non-existing parameter in svc
    assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
    # non-existing parameter of pipeline
    assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
    # we don't currently catch if the things in pipeline are estimators
    # bad_pipeline = Pipeline([("bad", NoEstimator())])
    # assert_raises(AttributeError, bad_pipeline.set_params,
    #               bad__stupid_param=True)
def test_score_sample_weight():
    """score() must honour sample_weight for both mixin families."""
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.tree import DecisionTreeRegressor
    from sklearn import datasets

    rng = np.random.RandomState(0)

    # test both ClassifierMixin and RegressorMixin
    estimators = [DecisionTreeClassifier(max_depth=2),
                  DecisionTreeRegressor(max_depth=2)]
    sets = [datasets.load_iris(),
            datasets.load_boston()]

    for est, ds in zip(estimators, sets):
        est.fit(ds.data, ds.target)
        # generate random sample weights
        sample_weight = rng.randint(1, 10, size=len(ds.target))
        # check that the score with and without sample weights are different
        assert_not_equal(est.score(ds.data, ds.target),
                         est.score(ds.data, ds.target,
                                   sample_weight=sample_weight),
                         msg="Unweighted and weighted scores "
                             "are unexpectedly equal")
| bsd-3-clause |
mdesco/dipy | doc/examples/snr_in_cc.py | 1 | 6475 | """
=============================================
SNR estimation for Diffusion-Weighted Images
=============================================
Computing the Signal-to-Noise-Ratio (SNR) of DW images is still an open question,
as SNR depends on the white matter structure of interest as well as
the gradient direction corresponding to each DWI.
In classical MRI, SNR can be defined as the ratio of the mean
of the signal divided by the standard deviation of the
underlying Gaussian noise, that is SNR = mean(signal) / std(noise).
The noise standard deviation can be
computed from the background in any of the DW images. How do we compute
the mean of the signal, and what signal?
The strategy here is to compute a 'worst-case' SNR for DWI. Several white matter
structures such as the corpus callosum (CC), corticospinal tract (CST), or
the superior longitudinal fasciculus (SLF) can be easily identified from
the colored-FA (cfa) map. In this example, we will use voxels from the CC,
which have the characteristic of being highly RED in the cfa map since they are mainly oriented in
the left-right direction. We know that the DW image
closest to the x-direction will be the one with the most attenuated diffusion signal.
This is the strategy adopted in several recent papers (see [1]_ and [2]_). It gives a good
indication of the quality of the DWI data.
First, we compute the tensor model in a brain mask (see the DTI example for more explanation).
"""
from __future__ import division, print_function

import nibabel as nib
import numpy as np

from dipy.data import fetch_stanford_hardi, read_stanford_hardi
from dipy.segment.mask import median_otsu
from dipy.reconst.dti import TensorModel

# Download (if needed) and load the Stanford HARDI dataset.
fetch_stanford_hardi()
img, gtab = read_stanford_hardi()
data = img.get_data()
affine = img.get_affine()

print('Computing brain mask...')
b0_mask, mask = median_otsu(data)

print('Computing tensors...')
tenmodel = TensorModel(gtab)
tensorfit = tenmodel.fit(data, mask=mask)

"""Next, we set our red-green-blue thresholds to (0.6, 1) in the x axis
and (0, 0.1) in the y and z axes respectively.
These values work well in practice to isolate the very RED voxels of the cfa map.

Then, as assurance, we want just RED voxels in the CC (there could be
noisy red voxels around the brain mask and we don't want those). Unless the brain
acquisition was badly aligned, the CC is always close to the mid-sagittal slice.

The following lines perform these two operations and then saves the computed mask.
"""

print('Computing worst-case/best-case SNR using the corpus callosum...')
from dipy.segment.mask import segment_from_cfa
from dipy.segment.mask import bounding_box

# (red_min, red_max, green_min, green_max, blue_min, blue_max) in the cfa.
threshold = (0.6, 1, 0, 0.1, 0, 0.1)
CC_box = np.zeros_like(data[..., 0])

# Restrict the search to the central half of the brain bounding box, where
# the corpus callosum lies.
mins, maxs = bounding_box(mask)
mins = np.array(mins)
maxs = np.array(maxs)
diff = (maxs - mins) // 4
bounds_min = mins + diff
bounds_max = maxs - diff

CC_box[bounds_min[0]:bounds_max[0],
       bounds_min[1]:bounds_max[1],
       bounds_min[2]:bounds_max[2]] = 1

mask_cc_part, cfa = segment_from_cfa(tensorfit, CC_box,
                                     threshold, return_cfa=True)

cfa_img = nib.Nifti1Image((cfa*255).astype(np.uint8), affine)
mask_cc_part_img = nib.Nifti1Image(mask_cc_part.astype(np.uint8), affine)
nib.save(mask_cc_part_img, 'mask_CC_part.nii.gz')

import matplotlib.pyplot as plt

# Mid-sagittal-ish slice index used for the illustration figure.
region = 40
fig = plt.figure('Corpus callosum segmentation')

plt.subplot(1, 2, 1)
plt.title("Corpus callosum (CC)")
plt.axis('off')
red = cfa[..., 0]
plt.imshow(np.rot90(red[region, ...]))

plt.subplot(1, 2, 2)
plt.title("CC mask used for SNR computation")
plt.axis('off')
plt.imshow(np.rot90(mask_cc_part[region, ...]))
fig.savefig("CC_segmentation.png", bbox_inches='tight')

"""
.. figure:: CC_segmentation.png
   :align: center

"""

"""Now that we are happy with our crude CC mask that selected voxels in the x-direction,
we can use all the voxels to estimate the mean signal in this region.

"""

# Mean signal per DW volume over the CC voxels.
mean_signal = np.mean(data[mask_cc_part], axis=0)

"""Now, we need a good background estimation. We will re-use the brain mask
computed before and invert it to catch the outside of the brain. This could
also be determined manually with a ROI in the background.
[Warning: Certain MR manufacturers mask out the outside of the brain with 0's.
One thus has to be careful how the noise ROI is defined].
"""

from scipy.ndimage.morphology import binary_dilation

# Dilate the brain mask, blank out one half along the last axis (keeps the
# noise ROI away from ghosting artefacts), then invert to get background.
mask_noise = binary_dilation(mask, iterations=10)
mask_noise[..., :mask_noise.shape[-1]//2] = 1
mask_noise = ~mask_noise
mask_noise_img = nib.Nifti1Image(mask_noise.astype(np.uint8), affine)
nib.save(mask_noise_img, 'mask_noise.nii.gz')

noise_std = np.std(data[mask_noise, :])

"""We can now compute the SNR for each DWI. For example, report SNR
for DW images with gradient direction that lies the closest to
the X, Y and Z axes.
"""

# Exclude null bvecs from the search
idx = np.sum(gtab.bvecs, axis=-1) == 0
gtab.bvecs[idx] = np.inf

# Nearest gradient direction (Euclidean) to each coordinate axis.
axis_X = np.argmin(np.sum((gtab.bvecs-np.array([1, 0, 0]))**2, axis=-1))
axis_Y = np.argmin(np.sum((gtab.bvecs-np.array([0, 1, 0]))**2, axis=-1))
axis_Z = np.argmin(np.sum((gtab.bvecs-np.array([0, 0, 1]))**2, axis=-1))

for direction in [0, axis_X, axis_Y, axis_Z]:
    SNR = mean_signal[direction]/noise_std
    if direction == 0:
        print("SNR for the b=0 image is :", SNR)
    else:
        print("SNR for direction", direction, " ", gtab.bvecs[direction], "is :", SNR)

"""SNR for the b=0 image is : ''42.0695455758''"""
"""SNR for direction 58 [ 0.98875 0.1177 -0.09229] is : ''5.46995373635''"""
"""SNR for direction 57 [-0.05039 0.99871 0.0054406] is : ''23.9329492871''"""
"""SNR for direction 126 [-0.11825 -0.039925 0.99218 ] is : ''23.9965694823''"""

"""
Since the CC is aligned with the X axis, the lowest SNR is for that gradient
direction. In comparison, the DW images in
the perpendical Y and Z axes have a high SNR. The b0 still exhibits the highest SNR,
since there is no signal attenuation.

Hence, we can say the Stanford diffusion
data has a 'worst-case' SNR of approximately 5, a
'best-case' SNR of approximately 24, and a SNR of 42 on the b0 image.
"""

"""
References:

.. [1] Descoteaux, M., Deriche, R., Le Bihan, D., Mangin, J.-F., and Poupon, C.
   Multiple q-shell diffusion propagator imaging.
   Medical image analysis, 15(4), 603, 2011.

.. [2] Jones, D. K., Knosche, T. R., & Turner, R.
   White Matter Integrity, Fiber Count, and Other Fallacies: The Dos and Don'ts of Diffusion MRI.
   NeuroImage, 73, 239, 2013.

"""
| bsd-3-clause |
sdoran35/hate-to-hugs | venv/lib/python3.6/site-packages/nltk/parse/dependencygraph.py | 5 | 31002 | # Natural Language Toolkit: Dependency Grammars
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Jason Narad <jason.narad@gmail.com>
# Steven Bird <stevenbird1@gmail.com> (modifications)
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
#
"""
Tools for reading and writing dependency trees.
The input is assumed to be in Malt-TAB format
(http://stp.lingfil.uu.se/~nivre/research/MaltXML.html).
"""
from __future__ import print_function, unicode_literals
from collections import defaultdict
from itertools import chain
from pprint import pformat
import subprocess
import warnings
from nltk.tree import Tree
from nltk.compat import python_2_unicode_compatible, string_types
#################################################################
# DependencyGraph Class
#################################################################
@python_2_unicode_compatible
class DependencyGraph(object):
    """
    A container for the nodes and labelled edges of a dependency structure.
    """

    def __init__(self, tree_str=None, cell_extractor=None, zero_based=False, cell_separator=None, top_relation_label='ROOT'):
        """Dependency graph.

        We place a dummy `TOP` node with the index 0, since the root node is
        often assigned 0 as its head. This also means that the indexing of the
        nodes corresponds directly to the Malt-TAB format, which starts at 1.

        If zero-based is True, then Malt-TAB-like input with node numbers
        starting at 0 and the root node assigned -1 (as produced by, e.g.,
        zpar).

        :param str cell_separator: the cell separator. If not provided, cells
            are split by whitespace.

        :param str top_relation_label: the label by which the top relation is
            identified, for example, `ROOT`, `null` or `TOP`.
        """
        # Accessing a missing address creates an empty placeholder node, so
        # arcs can be recorded before the node's own row has been parsed.
        self.nodes = defaultdict(lambda: {'address': None,
                                          'word': None,
                                          'lemma': None,
                                          'ctag': None,
                                          'tag': None,
                                          'feats': None,
                                          'head': None,
                                          'deps': defaultdict(list),
                                          'rel': None,
                                          })

        # The artificial TOP node that dominates the real root.
        self.nodes[0].update(
            {
                'ctag': 'TOP',
                'tag': 'TOP',
                'address': 0,
            }
        )

        self.root = None

        if tree_str:
            self._parse(
                tree_str,
                cell_extractor=cell_extractor,
                zero_based=zero_based,
                cell_separator=cell_separator,
                top_relation_label=top_relation_label,
            )

    def remove_by_address(self, address):
        """
        Removes the node with the given address.  References
        to this node in others will still exist.
        """
        del self.nodes[address]

    def redirect_arcs(self, originals, redirect):
        """
        Redirects arcs to any of the nodes in the originals list
        to the redirect node address.
        """
        # NOTE(review): assumes node['deps'] is a flat list of addresses
        # (as built by hand in the demos), not the rel->addresses dict that
        # _parse produces — verify before using on parsed graphs.
        for node in self.nodes.values():
            new_deps = []
            for dep in node['deps']:
                if dep in originals:
                    new_deps.append(redirect)
                else:
                    new_deps.append(dep)
            node['deps'] = new_deps

    def add_arc(self, head_address, mod_address):
        """
        Adds an arc from the node specified by head_address to the
        node specified by the mod address.
        """
        # The arc is filed under the dependent's relation label.
        relation = self.nodes[mod_address]['rel']
        self.nodes[head_address]['deps'].setdefault(relation, [])
        self.nodes[head_address]['deps'][relation].append(mod_address)

        #self.nodes[head_address]['deps'].append(mod_address)

    def connect_graph(self):
        """
        Fully connects all non-root nodes.  All nodes are set to be dependents
        of the root node.
        """
        for node1 in self.nodes.values():
            for node2 in self.nodes.values():
                if node1['address'] != node2['address'] and node2['rel'] != 'TOP':
                    relation = node2['rel']
                    node1['deps'].setdefault(relation, [])
                    node1['deps'][relation].append(node2['address'])

                    #node1['deps'].append(node2['address'])

    def get_by_address(self, node_address):
        """Return the node with the given address."""
        return self.nodes[node_address]

    def contains_address(self, node_address):
        """
        Returns true if the graph contains a node with the given node
        address, false otherwise.
        """
        return node_address in self.nodes

    def to_dot(self):
        """Return a dot representation suitable for using with Graphviz.

        >>> dg = DependencyGraph(
        ...     'John N 2\\n'
        ...     'loves V 0\\n'
        ...     'Mary N 2'
        ... )
        >>> print(dg.to_dot())
        digraph G{
        edge [dir=forward]
        node [shape=plaintext]
        <BLANKLINE>
        0 [label="0 (None)"]
        0 -> 2 [label="ROOT"]
        1 [label="1 (John)"]
        2 [label="2 (loves)"]
        2 -> 1 [label=""]
        2 -> 3 [label=""]
        3 [label="3 (Mary)"]
        }

        """
        # Start the digraph specification
        s = 'digraph G{\n'
        s += 'edge [dir=forward]\n'
        s += 'node [shape=plaintext]\n'

        # Draw the remaining nodes
        for node in sorted(self.nodes.values(), key=lambda v: v['address']):
            s += '\n%s [label="%s (%s)"]' % (node['address'], node['address'], node['word'])
            for rel, deps in node['deps'].items():
                for dep in deps:
                    if rel is not None:
                        s += '\n%s -> %s [label="%s"]' % (node['address'], dep, rel)
                    else:
                        s += '\n%s -> %s ' % (node['address'], dep)
        s += "\n}"

        return s

    def _repr_svg_(self):
        """Show SVG representation of the transducer (IPython magic).

        >>> dg = DependencyGraph(
        ...     'John N 2\\n'
        ...     'loves V 0\\n'
        ...     'Mary N 2'
        ... )
        >>> dg._repr_svg_().split('\\n')[0]
        '<?xml version="1.0" encoding="UTF-8" standalone="no"?>'

        """
        dot_string = self.to_dot()

        try:
            # Pipe the dot source through the Graphviz ``dot`` binary.
            process = subprocess.Popen(
                ['dot', '-Tsvg'],
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                universal_newlines=True,
            )
        except OSError:
            raise Exception('Cannot find the dot binary from Graphviz package')
        out, err = process.communicate(dot_string)
        if err:
            raise Exception(
                'Cannot create svg representation by running dot from string: {}'
                ''.format(dot_string))
        return out

    def __str__(self):
        return pformat(self.nodes)

    def __repr__(self):
        return "<DependencyGraph with {0} nodes>".format(len(self.nodes))

    @staticmethod
    def load(filename, zero_based=False, cell_separator=None, top_relation_label='ROOT'):
        """
        :param filename: a name of a file in Malt-TAB format
        :param zero_based: nodes in the input file are numbered starting from 0
            rather than 1 (as produced by, e.g., zpar)
        :param str cell_separator: the cell separator. If not provided, cells
            are split by whitespace.
        :param str top_relation_label: the label by which the top relation is
            identified, for example, `ROOT`, `null` or `TOP`.

        :return: a list of DependencyGraphs
        """
        with open(filename) as infile:
            return [
                DependencyGraph(
                    tree_str,
                    zero_based=zero_based,
                    cell_separator=cell_separator,
                    top_relation_label=top_relation_label,
                )
                # Sentences are separated by blank lines.
                for tree_str in infile.read().split('\n\n')
            ]

    def left_children(self, node_index):
        """
        Returns the number of left children under the node specified
        by the given address.
        """
        children = chain.from_iterable(self.nodes[node_index]['deps'].values())
        index = self.nodes[node_index]['address']
        return sum(1 for c in children if c < index)

    def right_children(self, node_index):
        """
        Returns the number of right children under the node specified
        by the given address.
        """
        children = chain.from_iterable(self.nodes[node_index]['deps'].values())
        index = self.nodes[node_index]['address']
        return sum(1 for c in children if c > index)

    def add_node(self, node):
        """Add *node* unless a node with its address is already present."""
        if not self.contains_address(node['address']):
            self.nodes[node['address']].update(node)

    def _parse(self, input_, cell_extractor=None, zero_based=False, cell_separator=None, top_relation_label='ROOT'):
        """Parse a sentence.

        :param extractor: a function that given a tuple of cells returns a
            7-tuple, where the values are ``word, lemma, ctag, tag, feats, head,
            rel``.

        :param str cell_separator: the cell separator. If not provided, cells
            are split by whitespace.

        :param str top_relation_label: the label by which the top relation is
            identified, for example, `ROOT`, `null` or `TOP`.
        """
        # Built-in extractors for the common column layouts; each returns
        # (index, word, lemma, ctag, tag, feats, head, rel).

        def extract_3_cells(cells, index):
            word, tag, head = cells
            return index, word, word, tag, tag, '', head, ''

        def extract_4_cells(cells, index):
            word, tag, head, rel = cells
            return index, word, word, tag, tag, '', head, rel

        def extract_7_cells(cells, index):
            line_index, word, lemma, tag, _, head, rel = cells
            try:
                index = int(line_index)
            except ValueError:
                # index can't be parsed as an integer, use default
                pass
            return index, word, lemma, tag, tag, '', head, rel

        def extract_10_cells(cells, index):
            line_index, word, lemma, ctag, tag, feats, head, rel, _, _ = cells
            try:
                index = int(line_index)
            except ValueError:
                # index can't be parsed as an integer, use default
                pass
            return index, word, lemma, ctag, tag, feats, head, rel

        extractors = {
            3: extract_3_cells,
            4: extract_4_cells,
            7: extract_7_cells,
            10: extract_10_cells,
        }

        if isinstance(input_, string_types):
            input_ = (line for line in input_.split('\n'))

        lines = (l.rstrip() for l in input_)
        lines = (l for l in lines if l)

        cell_number = None
        for index, line in enumerate(lines, start=1):
            cells = line.split(cell_separator)
            # All rows of one sentence must have the same number of columns.
            if cell_number is None:
                cell_number = len(cells)
            else:
                assert cell_number == len(cells)

            if cell_extractor is None:
                try:
                    cell_extractor = extractors[cell_number]
                except KeyError:
                    raise ValueError(
                        'Number of tab-delimited fields ({0}) not supported by '
                        'CoNLL(10) or Malt-Tab(4) format'.format(cell_number)
                    )

            try:
                index, word, lemma, ctag, tag, feats, head, rel = cell_extractor(cells, index)
            except (TypeError, ValueError):
                # cell_extractor doesn't take 2 arguments or doesn't return 8
                # values; assume the cell_extractor is an older external
                # extractor and doesn't accept or return an index.
                word, lemma, ctag, tag, feats, head, rel = cell_extractor(cells)

            if head == '_':
                continue

            head = int(head)
            if zero_based:
                head += 1

            self.nodes[index].update(
                {
                    'address': index,
                    'word': word,
                    'lemma': lemma,
                    'ctag': ctag,
                    'tag': tag,
                    'feats': feats,
                    'head': head,
                    'rel': rel,
                }
            )

            # Make sure that the fake root node has labeled dependencies.
            if (cell_number == 3) and (head == 0):
                rel = top_relation_label
            self.nodes[head]['deps'][rel].append(index)

        if self.nodes[0]['deps'][top_relation_label]:
            root_address = self.nodes[0]['deps'][top_relation_label][0]
            self.root = self.nodes[root_address]
            self.top_relation_label = top_relation_label
        else:
            warnings.warn(
                "The graph doesn't contain a node "
                "that depends on the root element."
            )

    def _word(self, node, filter=True):
        # NOTE(review): the ``filter`` branch is a no-op — both paths return
        # ``w`` unchanged; presumably punctuation filtering was intended.
        w = node['word']
        if filter:
            if w != ',':
                return w
        return w

    def _tree(self, i):
        """ Turn dependency graphs into NLTK trees.

        :param int i: index of a node
        :return: either a word (if the indexed node is a leaf) or a ``Tree``.
        """
        node = self.get_by_address(i)
        word = node['word']
        deps = sorted(chain.from_iterable(node['deps'].values()))

        if deps:
            return Tree(word, [self._tree(dep) for dep in deps])
        else:
            return word

    def tree(self):
        """
        Starting with the ``root`` node, build a dependency tree using the NLTK
        ``Tree`` constructor. Dependency labels are omitted.
        """
        node = self.root

        word = node['word']
        deps = sorted(chain.from_iterable(node['deps'].values()))
        return Tree(word, [self._tree(dep) for dep in deps])

    def triples(self, node=None):
        """
        Extract dependency triples of the form:
        ((head word, head tag), rel, (dep word, dep tag))
        """
        if not node:
            node = self.root

        head = (node['word'], node['ctag'])
        # Depth-first, in address order.
        for i in sorted(chain.from_iterable(node['deps'].values())):
            dep = self.get_by_address(i)
            yield (head, dep['rel'], (dep['word'], dep['ctag']))
            for triple in self.triples(node=dep):
                yield triple

    def _hd(self, i):
        """Return the head address of node *i*, or None."""
        # NOTE(review): self.nodes is a defaultdict, so IndexError is never
        # raised here; a missing address yields a placeholder with head=None.
        try:
            return self.nodes[i]['head']
        except IndexError:
            return None

    def _rel(self, i):
        """Return the relation label of node *i*, or None."""
        try:
            return self.nodes[i]['rel']
        except IndexError:
            return None

    # what's the return type?  Boolean or list?
    def contains_cycle(self):
        """Check whether there are cycles.

        >>> dg = DependencyGraph(treebank_data)
        >>> dg.contains_cycle()
        False

        >>> cyclic_dg = DependencyGraph()
        >>> top = {'word': None, 'deps': [1], 'rel': 'TOP', 'address': 0}
        >>> child1 = {'word': None, 'deps': [2], 'rel': 'NTOP', 'address': 1}
        >>> child2 = {'word': None, 'deps': [4], 'rel': 'NTOP', 'address': 2}
        >>> child3 = {'word': None, 'deps': [1], 'rel': 'NTOP', 'address': 3}
        >>> child4 = {'word': None, 'deps': [3], 'rel': 'NTOP', 'address': 4}
        >>> cyclic_dg.nodes = {
        ...     0: top,
        ...     1: child1,
        ...     2: child2,
        ...     3: child3,
        ...     4: child4,
        ... }
        >>> cyclic_dg.root = top

        >>> cyclic_dg.contains_cycle()
        [3, 1, 2, 4]

        """
        # Transitive closure over arcs: a pair (i, i) signals a cycle.
        # NOTE(review): ``for dep in node['deps']`` iterates list elements
        # for the hand-built graphs above, but relation *labels* for graphs
        # built by _parse (deps is a dict of lists) — verify intended.
        distances = {}

        for node in self.nodes.values():
            for dep in node['deps']:
                key = tuple([node['address'], dep])
                distances[key] = 1

        for _ in self.nodes:
            new_entries = {}

            for pair1 in distances:
                for pair2 in distances:
                    if pair1[1] == pair2[0]:
                        key = tuple([pair1[0], pair2[1]])
                        new_entries[key] = distances[pair1] + distances[pair2]

            for pair in new_entries:
                distances[pair] = new_entries[pair]
                if pair[0] == pair[1]:
                    path = self.get_cycle_path(self.get_by_address(pair[0]), pair[0])
                    return path

        return False  # return []?

    def get_cycle_path(self, curr_node, goal_node_index):
        """Return the list of addresses on a path from *curr_node* back to
        *goal_node_index*, or [] if no such path exists."""
        for dep in curr_node['deps']:
            if dep == goal_node_index:
                return [curr_node['address']]
        for dep in curr_node['deps']:
            path = self.get_cycle_path(self.get_by_address(dep), goal_node_index)
            if len(path) > 0:
                path.insert(0, curr_node['address'])
                return path
        return []

    def to_conll(self, style):
        """
        The dependency graph in CoNLL format.

        :param style: the style to use for the format (3, 4, 10 columns)
        :type style: int
        :rtype: str
        """
        if style == 3:
            template = '{word}\t{tag}\t{head}\n'
        elif style == 4:
            template = '{word}\t{tag}\t{head}\t{rel}\n'
        elif style == 10:
            template = '{i}\t{word}\t{lemma}\t{ctag}\t{tag}\t{feats}\t{head}\t{rel}\t_\t_\n'
        else:
            raise ValueError(
                'Number of tab-delimited fields ({0}) not supported by '
                'CoNLL(10) or Malt-Tab(4) format'.format(style)
            )

        # The artificial TOP node is not part of the CoNLL output.
        return ''.join(template.format(i=i, **node) for i, node in sorted(self.nodes.items()) if node['tag'] != 'TOP')

    def nx_graph(self):
        """Convert the data in a ``nodelist`` into a networkx labeled directed graph."""
        import networkx

        nx_nodelist = list(range(1, len(self.nodes)))
        nx_edgelist = [
            (n, self._hd(n), self._rel(n))
            for n in nx_nodelist if self._hd(n)
        ]
        self.nx_labels = {}
        for n in nx_nodelist:
            self.nx_labels[n] = self.nodes[n]['word']

        g = networkx.MultiDiGraph()
        g.add_nodes_from(nx_nodelist)
        g.add_edges_from(nx_edgelist)

        return g
class DependencyGraphError(Exception):
    """Dependency graph exception."""
def demo():
    """Run every dependency-graph demonstration in sequence."""
    malt_demo()
    conll_demo()
    conll_file_demo()
    cycle_finding_demo()
def malt_demo(nx=False):
    """
    A demonstration of the result of reading a dependency
    version of the first sentence of the Penn Treebank.
    """
    dg = DependencyGraph("""Pierre NNP 2 NMOD
Vinken NNP 8 SUB
, , 2 P
61 CD 5 NMOD
years NNS 6 AMOD
old JJ 2 NMOD
, , 2 P
will MD 0 ROOT
join VB 8 VC
the DT 11 NMOD
board NN 9 OBJ
as IN 9 VMOD
a DT 15 NMOD
nonexecutive JJ 15 NMOD
director NN 12 PMOD
Nov. NNP 9 VMOD
29 CD 16 NMOD
. . 9 VMOD
""")
    tree = dg.tree()
    tree.pprint()
    if nx:
        # currently doesn't work
        import networkx
        from matplotlib import pylab

        g = dg.nx_graph()
        g.info()
        pos = networkx.spring_layout(g, dim=1)
        networkx.draw_networkx_nodes(g, pos, node_size=50)
        # networkx.draw_networkx_edges(g, pos, edge_color='k', width=8)
        networkx.draw_networkx_labels(g, pos, dg.nx_labels)
        pylab.xticks([])
        pylab.yticks([])
        pylab.savefig('tree.png')
        pylab.show()
def conll_demo():
    """
    A demonstration of how to read a string representation of
    a CoNLL format dependency tree.
    """
    dg = DependencyGraph(conll_data1)
    tree = dg.tree()
    tree.pprint()
    print(dg)
    # Round-trip the graph back out in 4-column Malt-TAB format.
    print(dg.to_conll(4))
def conll_file_demo():
    """Read several CoNLL sentences from one string, pretty-print each tree."""
    print('Mass conll_read demo...')
    # Sentences are separated by blank lines; skip empty fragments.
    graphs = [
        DependencyGraph(entry)
        for entry in conll_data2.split('\n\n')
        if entry
    ]
    for graph in graphs:
        parsed = graph.tree()
        print('\n')
        parsed.pprint()
def cycle_finding_demo():
    """Demonstrate cycle detection on acyclic and cyclic dependency graphs."""
    acyclic = DependencyGraph(treebank_data)
    print(acyclic.contains_cycle())
    cyclic = DependencyGraph()
    # Addresses 1 -> 2 -> 4 -> 3 -> 1 form a cycle below the TOP node.
    hand_built_nodes = [
        {'word': None, 'deps': [1], 'rel': 'TOP', 'address': 0},
        {'word': None, 'deps': [2], 'rel': 'NTOP', 'address': 1},
        {'word': None, 'deps': [4], 'rel': 'NTOP', 'address': 2},
        {'word': None, 'deps': [1], 'rel': 'NTOP', 'address': 3},
        {'word': None, 'deps': [3], 'rel': 'NTOP', 'address': 4},
    ]
    for node in hand_built_nodes:
        cyclic.add_node(node)
    print(cyclic.contains_cycle())
# First sentence of the Penn Treebank in Malt-Tab format
# (word, POS tag, head address, relation), used by the demos above.
treebank_data = """Pierre NNP 2 NMOD
Vinken NNP 8 SUB
, , 2 P
61 CD 5 NMOD
years NNS 6 AMOD
old JJ 2 NMOD
, , 2 P
will MD 0 ROOT
join VB 8 VC
the DT 11 NMOD
board NN 9 OBJ
as IN 9 VMOD
a DT 15 NMOD
nonexecutive JJ 15 NMOD
director NN 12 PMOD
Nov. NNP 9 VMOD
29 CD 16 NMOD
. . 9 VMOD
"""
# A single Dutch sentence in 10-column CoNLL format, used by conll_demo().
conll_data1 = """
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 met met Prep Prep voor 8 mod _ _
4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
5 moeder moeder N N soort|ev|neut 3 obj1 _ _
6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
7 gaan ga V V hulp|inf 6 vc _ _
8 winkelen winkel V V intrans|inf 11 cnj _ _
9 , , Punc Punc komma 8 punct _ _
10 zwemmen zwem V V intrans|inf 11 cnj _ _
11 of of Conj Conj neven 7 vc _ _
12 terrassen terras N N soort|mv|neut 11 cnj _ _
13 . . Punc Punc punt 12 punct _ _
"""
# Several Dutch sentences in CoNLL format, separated by blank lines;
# used by conll_file_demo().
conll_data2 = """1 Cathy Cathy N N eigen|ev|neut 2 su _ _
2 zag zie V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 hen hen Pron Pron per|3|mv|datofacc 2 obj1 _ _
4 wild wild Adj Adj attr|stell|onverv 5 mod _ _
5 zwaaien zwaai N N soort|mv|neut 2 vc _ _
6 . . Punc Punc punt 5 punct _ _
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 met met Prep Prep voor 8 mod _ _
4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
5 moeder moeder N N soort|ev|neut 3 obj1 _ _
6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
7 gaan ga V V hulp|inf 6 vc _ _
8 winkelen winkel V V intrans|inf 11 cnj _ _
9 , , Punc Punc komma 8 punct _ _
10 zwemmen zwem V V intrans|inf 11 cnj _ _
11 of of Conj Conj neven 7 vc _ _
12 terrassen terras N N soort|mv|neut 11 cnj _ _
13 . . Punc Punc punt 12 punct _ _
1 Dat dat Pron Pron aanw|neut|attr 2 det _ _
2 werkwoord werkwoord N N soort|ev|neut 6 obj1 _ _
3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
4 ze ze Pron Pron per|3|evofmv|nom 6 su _ _
5 zelf zelf Pron Pron aanw|neut|attr|wzelf 3 predm _ _
6 uitgevonden vind V V trans|verldw|onverv 3 vc _ _
7 . . Punc Punc punt 6 punct _ _
1 Het het Pron Pron onbep|neut|zelfst 2 su _ _
2 hoorde hoor V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 bij bij Prep Prep voor 2 ld _ _
4 de de Art Art bep|zijdofmv|neut 6 det _ _
5 warme warm Adj Adj attr|stell|vervneut 6 mod _ _
6 zomerdag zomerdag N N soort|ev|neut 3 obj1 _ _
7 die die Pron Pron betr|neut|zelfst 6 mod _ _
8 ze ze Pron Pron per|3|evofmv|nom 12 su _ _
9 ginds ginds Adv Adv gew|aanw 12 mod _ _
10 achter achter Adv Adv gew|geenfunc|stell|onverv 12 svp _ _
11 had heb V V hulp|ovt|1of2of3|ev 7 body _ _
12 gelaten laat V V trans|verldw|onverv 11 vc _ _
13 . . Punc Punc punt 12 punct _ _
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 hadden heb V V trans|ovt|1of2of3|mv 0 ROOT _ _
3 languit languit Adv Adv gew|geenfunc|stell|onverv 11 mod _ _
4 naast naast Prep Prep voor 11 mod _ _
5 elkaar elkaar Pron Pron rec|neut 4 obj1 _ _
6 op op Prep Prep voor 11 ld _ _
7 de de Art Art bep|zijdofmv|neut 8 det _ _
8 strandstoelen strandstoel N N soort|mv|neut 6 obj1 _ _
9 kunnen kan V V hulp|inf 2 vc _ _
10 gaan ga V V hulp|inf 9 vc _ _
11 liggen lig V V intrans|inf 10 vc _ _
12 . . Punc Punc punt 11 punct _ _
1 Zij zij Pron Pron per|3|evofmv|nom 2 su _ _
2 zou zal V V hulp|ovt|1of2of3|ev 7 cnj _ _
3 mams mams N N soort|ev|neut 4 det _ _
4 rug rug N N soort|ev|neut 5 obj1 _ _
5 ingewreven wrijf V V trans|verldw|onverv 6 vc _ _
6 hebben heb V V hulp|inf 2 vc _ _
7 en en Conj Conj neven 0 ROOT _ _
8 mam mam V V trans|ovt|1of2of3|ev 7 cnj _ _
9 de de Art Art bep|zijdofmv|neut 10 det _ _
10 hare hare Pron Pron bez|3|ev|neut|attr 8 obj1 _ _
11 . . Punc Punc punt 10 punct _ _
1 Of of Conj Conj onder|metfin 0 ROOT _ _
2 ze ze Pron Pron per|3|evofmv|nom 3 su _ _
3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
4 gewoon gewoon Adj Adj adv|stell|onverv 10 mod _ _
5 met met Prep Prep voor 10 mod _ _
6 haar haar Pron Pron bez|3|ev|neut|attr 7 det _ _
7 vriendinnen vriendin N N soort|mv|neut 5 obj1 _ _
8 rond rond Adv Adv deelv 10 svp _ _
9 kunnen kan V V hulp|inf 3 vc _ _
10 slenteren slenter V V intrans|inf 9 vc _ _
11 in in Prep Prep voor 10 mod _ _
12 de de Art Art bep|zijdofmv|neut 13 det _ _
13 buurt buurt N N soort|ev|neut 11 obj1 _ _
14 van van Prep Prep voor 13 mod _ _
15 Trafalgar_Square Trafalgar_Square MWU N_N eigen|ev|neut_eigen|ev|neut 14 obj1 _ _
16 . . Punc Punc punt 15 punct _ _
"""
if __name__ == '__main__':
    demo()
| mit |
untom/scikit-learn | sklearn/ensemble/gradient_boosting.py | 126 | 65552 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length, deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit, bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import PresortBestSplitter
from ..tree._tree import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
    """An estimator predicting the alpha-quantile of the training targets.

    Used as the default ``init`` estimator for quantile- and LAD-style
    losses.

    Parameters
    ----------
    alpha : float, default=0.9
        The quantile to predict; must lie strictly inside (0, 1).
    """
    def __init__(self, alpha=0.9):
        if not 0 < alpha < 1.0:
            raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
        self.alpha = alpha

    def fit(self, X, y, sample_weight=None):
        """Compute the (optionally weighted) alpha-quantile of ``y``.

        Returns ``self``, following the scikit-learn estimator convention
        (the original returned ``None``; callers that ignored the return
        value are unaffected).
        """
        if sample_weight is None:
            self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
        else:
            self.quantile = _weighted_percentile(y, sample_weight,
                                                 self.alpha * 100.0)
        return self

    def predict(self, X):
        """Return an (n_samples, 1) array filled with the fitted quantile."""
        check_is_fitted(self, 'quantile')
        y = np.empty((X.shape[0], 1), dtype=np.float64)
        y.fill(self.quantile)
        return y
class MeanEstimator(BaseEstimator):
    """Predict the (weighted) mean of the training targets for every sample."""

    def fit(self, X, y, sample_weight=None):
        """Store the mean of ``y``, weighted by ``sample_weight`` if given."""
        if sample_weight is None:
            self.mean = np.mean(y)
        else:
            self.mean = np.average(y, weights=sample_weight)

    def predict(self, X):
        """Return an (n_samples, 1) array filled with the stored mean."""
        check_is_fitted(self, 'mean')
        predictions = np.empty((X.shape[0], 1), dtype=np.float64)
        predictions.fill(self.mean)
        return predictions
class LogOddsEstimator(BaseEstimator):
    """Predict the (scaled) log odds ratio of a binary target."""

    # Multiplier applied to the log odds; subclasses may override it.
    scale = 1.0

    def fit(self, X, y, sample_weight=None):
        """Estimate the prior log odds from ``y`` (labels encoded as 0/1)."""
        # pre-cond: pos, neg are encoded as 1, 0
        if sample_weight is None:
            n_pos = np.sum(y)
            n_neg = y.shape[0] - n_pos
        else:
            n_pos = np.sum(sample_weight * y)
            n_neg = np.sum(sample_weight * (1 - y))
        if n_neg == 0 or n_pos == 0:
            raise ValueError('y contains non binary labels.')
        self.prior = self.scale * np.log(n_pos / n_neg)

    def predict(self, X):
        """Return an (n_samples, 1) array filled with the fitted prior."""
        check_is_fitted(self, 'prior')
        out = np.empty((X.shape[0], 1), dtype=np.float64)
        out.fill(self.prior)
        return out
class ScaledLogOddsEstimator(LogOddsEstimator):
    """Log odds estimator whose output is halved, as used by exponential loss."""
    scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
    """Predict the empirical class distribution of the training data."""

    def fit(self, X, y, sample_weight=None):
        """Estimate per-class prior probabilities from (weighted) counts."""
        if sample_weight is None:
            sample_weight = np.ones_like(y, dtype=np.float64)
        counts = bincount(y, weights=sample_weight)
        self.priors = counts / counts.sum()

    def predict(self, X):
        """Return an (n_samples, n_classes) array of the fitted priors."""
        check_is_fitted(self, 'priors')
        out = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
        out[:] = self.priors
        return out
class ZeroEstimator(BaseEstimator):
    """Predict zero for every sample."""

    def fit(self, X, y, sample_weight=None):
        """Record how many output columns predict() must produce."""
        if np.issubdtype(y.dtype, int):
            # classification: one column per class (one for binary problems)
            self.n_classes = np.unique(y).shape[0]
            if self.n_classes == 2:
                self.n_classes = 1
        else:
            # regression: a single output column
            self.n_classes = 1

    def predict(self, X):
        """Return an all-zero (n_samples, n_classes) array."""
        check_is_fitted(self, 'n_classes')
        return np.zeros((X.shape[0], self.n_classes), dtype=np.float64)
class LossFunction(six.with_metaclass(ABCMeta, object)):
    """Abstract base class for various loss functions.

    Attributes
    ----------
    K : int
        The number of regression trees to be induced;
        1 for regression and binary classification;
        ``n_classes`` for multi-class classification.
    """
    # True in subclasses that fit one tree per class at each stage.
    is_multi_class = False

    def __init__(self, n_classes):
        self.K = n_classes

    def init_estimator(self):
        """Default ``init`` estimator for loss function. """
        raise NotImplementedError()

    @abstractmethod
    def __call__(self, y, pred, sample_weight=None):
        """Compute the loss of prediction ``pred`` and ``y``. """

    @abstractmethod
    def negative_gradient(self, y, y_pred, **kargs):
        """Compute the negative gradient.

        Parameters
        ---------
        y : np.ndarray, shape=(n,)
            The target labels.
        y_pred : np.ndarray, shape=(n,):
            The predictions.
        """

    def update_terminal_regions(self, tree, X, y, residual, y_pred,
                                sample_weight, sample_mask,
                                learning_rate=1.0, k=0):
        """Update the terminal regions (=leaves) of the given tree and
        updates the current predictions of the model. Traverses tree
        and invokes template method `_update_terminal_region`.

        Parameters
        ----------
        tree : tree.Tree
            The tree object.
        X : ndarray, shape=(n, m)
            The data array.
        y : ndarray, shape=(n,)
            The target labels.
        residual : ndarray, shape=(n,)
            The residuals (usually the negative gradient).
        y_pred : ndarray, shape=(n,)
            The predictions.
        sample_weight : ndarray, shape=(n,)
            The weight of each sample.
        sample_mask : ndarray, shape=(n,)
            The sample mask to be used.
        learning_rate : float, default=0.1
            learning rate shrinks the contribution of each tree by
             ``learning_rate``.
        k : int, default 0
            The index of the estimator being updated.
        """
        # compute leaf for each sample in ``X``.
        terminal_regions = tree.apply(X)
        # mask all which are not in sample mask.
        masked_terminal_regions = terminal_regions.copy()
        masked_terminal_regions[~sample_mask] = -1
        # update each leaf (= perform line search)
        for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
            self._update_terminal_region(tree, masked_terminal_regions,
                                         leaf, X, y, residual,
                                         y_pred[:, k], sample_weight)
        # update predictions (both in-bag and out-of-bag)
        # NOTE: column ``k`` of ``y_pred`` is modified in place.
        y_pred[:, k] += (learning_rate
                         * tree.value[:, 0, 0].take(terminal_regions, axis=0))

    @abstractmethod
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
    """Base class for regression loss functions. """

    def __init__(self, n_classes):
        # Regression fits exactly one tree per boosting stage.
        if n_classes != 1:
            raise ValueError("``n_classes`` must be 1 for regression but "
                             "was %r" % n_classes)
        super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
    """Least squares (LS) loss.

    LS needs no per-leaf re-estimation: the tree's mean predictions
    are already the line-search optimum.
    """

    def init_estimator(self):
        return MeanEstimator()

    def __call__(self, y, pred, sample_weight=None):
        """Mean (optionally weighted) squared error."""
        squared_errors = (y - pred.ravel()) ** 2.0
        if sample_weight is None:
            return np.mean(squared_errors)
        return (1.0 / sample_weight.sum() *
                np.sum(sample_weight * squared_errors))

    def negative_gradient(self, y, pred, **kargs):
        """Plain residuals ``y - pred``."""
        return y - pred.ravel()

    def update_terminal_regions(self, tree, X, y, residual, y_pred,
                                sample_weight, sample_mask,
                                learning_rate=1.0, k=0):
        """Skip the leaf line search; just fold the tree's predictions
        into column ``k`` of ``y_pred`` (modified in place).
        """
        y_pred[:, k] += learning_rate * tree.predict(X).ravel()

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        # Intentionally a no-op: LS leaves need no update.
        pass
class LeastAbsoluteError(RegressionLossFunction):
    """Least absolute deviation (LAD) loss for robust regression."""

    def init_estimator(self):
        return QuantileEstimator(alpha=0.5)

    def __call__(self, y, pred, sample_weight=None):
        """Mean (optionally weighted) absolute error."""
        abs_errors = np.abs(y - pred.ravel())
        if sample_weight is None:
            return abs_errors.mean()
        return (1.0 / sample_weight.sum() *
                np.sum(sample_weight * abs_errors))

    def negative_gradient(self, y, pred, **kargs):
        """Sign of the residual: +1.0 where ``y - pred > 0``, else -1.0."""
        flat_pred = pred.ravel()
        return 2.0 * (y - flat_pred > 0.0) - 1.0

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Set each leaf to the weighted median of its residuals."""
        region = np.where(terminal_regions == leaf)[0]
        region_weight = sample_weight.take(region, axis=0)
        diff = y.take(region, axis=0) - pred.take(region, axis=0)
        tree.value[leaf, 0, 0] = _weighted_percentile(diff, region_weight,
                                                      percentile=50)
class HuberLossFunction(RegressionLossFunction):
    """Huber loss function for robust regression.

    M-Regression proposed in Friedman 2001.

    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
    """

    def __init__(self, n_classes, alpha=0.9):
        super(HuberLossFunction, self).__init__(n_classes)
        # alpha is the |residual| quantile separating the quadratic regime
        # from the linear one.
        self.alpha = alpha
        # gamma caches the threshold computed by negative_gradient() so that
        # __call__ and _update_terminal_region reuse the same value;
        # negative_gradient() must run first during fitting.
        self.gamma = None

    def init_estimator(self):
        return QuantileEstimator(alpha=0.5)

    def __call__(self, y, pred, sample_weight=None):
        pred = pred.ravel()
        diff = y - pred
        gamma = self.gamma
        if gamma is None:
            # No cached threshold yet: derive it from these residuals.
            if sample_weight is None:
                gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
            else:
                gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
        gamma_mask = np.abs(diff) <= gamma
        if sample_weight is None:
            # Quadratic inside the threshold, linear outside.
            sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
            lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
            loss = (sq_loss + lin_loss) / y.shape[0]
        else:
            sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
            lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
                              (np.abs(diff[~gamma_mask]) - gamma / 2.0))
            loss = (sq_loss + lin_loss) / sample_weight.sum()
        return loss

    def negative_gradient(self, y, pred, sample_weight=None, **kargs):
        pred = pred.ravel()
        diff = y - pred
        if sample_weight is None:
            gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
        else:
            gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
        gamma_mask = np.abs(diff) <= gamma
        residual = np.zeros((y.shape[0],), dtype=np.float64)
        # Identity inside the threshold, clipped to +/- gamma outside.
        residual[gamma_mask] = diff[gamma_mask]
        residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
        self.gamma = gamma
        return residual

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        terminal_region = np.where(terminal_regions == leaf)[0]
        sample_weight = sample_weight.take(terminal_region, axis=0)
        gamma = self.gamma
        diff = (y.take(terminal_region, axis=0)
                - pred.take(terminal_region, axis=0))
        # One-step leaf estimate: median plus the mean of deviations
        # clipped to gamma (Friedman 2001).
        median = _weighted_percentile(diff, sample_weight, percentile=50)
        diff_minus_median = diff - median
        tree.value[leaf, 0] = median + np.mean(
            np.sign(diff_minus_median) *
            np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
    """Loss function for quantile regression.

    Quantile regression allows to estimate the percentiles
    of the conditional distribution of the target.

    Parameters
    ----------
    n_classes : int
        Must be 1 (regression).
    alpha : float, default=0.9
        The quantile to estimate; must lie strictly inside (0, 1).
    """
    def __init__(self, n_classes, alpha=0.9):
        super(QuantileLossFunction, self).__init__(n_classes)
        # Validate explicitly instead of ``assert``: asserts are stripped
        # under ``python -O``, and ValueError matches QuantileEstimator.
        if not 0 < alpha < 1.0:
            raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
        self.alpha = alpha
        self.percentile = alpha * 100.0

    def init_estimator(self):
        return QuantileEstimator(self.alpha)

    def __call__(self, y, pred, sample_weight=None):
        """Average pinball loss.

        Pinball loss is ``alpha * (y - pred)`` where ``y > pred`` and
        ``(1 - alpha) * (pred - y)`` elsewhere, so it is non-negative.
        """
        pred = pred.ravel()
        diff = y - pred
        alpha = self.alpha
        mask = y > pred
        # BUGFIX: the under-prediction term must be subtracted (diff is
        # negative there); the original added it, which could make the
        # reported loss negative.
        if sample_weight is None:
            loss = (alpha * diff[mask].sum() -
                    (1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
        else:
            loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
                    (1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
                    sample_weight.sum())
        return loss

    def negative_gradient(self, y, pred, **kargs):
        """``alpha`` where ``y > pred``, ``-(1 - alpha)`` elsewhere."""
        alpha = self.alpha
        pred = pred.ravel()
        mask = y > pred
        return (alpha * mask) - ((1.0 - alpha) * ~mask)

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Set each leaf to the weighted alpha-percentile of its residuals."""
        terminal_region = np.where(terminal_regions == leaf)[0]
        diff = (y.take(terminal_region, axis=0)
                - pred.take(terminal_region, axis=0))
        sample_weight = sample_weight.take(terminal_region, axis=0)
        val = _weighted_percentile(diff, sample_weight, self.percentile)
        tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
    """Base class for classification loss functions. """

    def _score_to_proba(self, score):
        """Template method to convert scores to probabilities.

        Subclasses that do not support class probabilities keep this
        default implementation, which raises ``TypeError``.
        """
        raise TypeError('%s does not support predict_proba' % type(self).__name__)

    @abstractmethod
    def _score_to_decision(self, score):
        """Template method to convert scores to decisions.

        Returns int arrays.
        """
class BinomialDeviance(ClassificationLossFunction):
    """Binomial deviance loss for binary classification.

    Binary classification is a special case: a single tree (rather than
    ``n_classes`` trees) is fit per stage.
    """

    def __init__(self, n_classes):
        if n_classes != 2:
            raise ValueError("{0:s} requires 2 classes.".format(
                self.__class__.__name__))
        # we only need to fit one tree for binary clf.
        super(BinomialDeviance, self).__init__(1)

    def init_estimator(self):
        return LogOddsEstimator()

    def __call__(self, y, pred, sample_weight=None):
        """Compute the deviance (= 2 * negative log-likelihood). """
        # logaddexp(0, v) == log(1.0 + exp(v))
        pred = pred.ravel()
        per_sample = (y * pred) - np.logaddexp(0.0, pred)
        if sample_weight is None:
            return -2.0 * np.mean(per_sample)
        return (-2.0 / sample_weight.sum() *
                np.sum(sample_weight * per_sample))

    def negative_gradient(self, y, pred, **kargs):
        """Residual ``y - sigmoid(pred)``."""
        return y - expit(pred.ravel())

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Make a single Newton-Raphson step.

        our node estimate is given by:

            sum(w * (y - prob)) / sum(w * prob * (1 - prob))

        we take advantage that: y - prob = residual
        """
        region = np.where(terminal_regions == leaf)[0]
        leaf_residual = residual.take(region, axis=0)
        leaf_y = y.take(region, axis=0)
        leaf_weight = sample_weight.take(region, axis=0)
        numerator = np.sum(leaf_weight * leaf_residual)
        denominator = np.sum(leaf_weight * (leaf_y - leaf_residual) *
                             (1 - leaf_y + leaf_residual))
        # Guard against a pure leaf where the Hessian vanishes.
        if denominator == 0.0:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator

    def _score_to_proba(self, score):
        proba = np.ones((score.shape[0], 2), dtype=np.float64)
        proba[:, 1] = expit(score.ravel())
        proba[:, 0] -= proba[:, 1]
        return proba

    def _score_to_decision(self, score):
        return np.argmax(self._score_to_proba(score), axis=1)
class MultinomialDeviance(ClassificationLossFunction):
    """Multinomial deviance loss function for multi-class classification.

    For multi-class classification we need to fit ``n_classes`` trees at
    each stage.
    """

    is_multi_class = True

    def __init__(self, n_classes):
        if n_classes < 3:
            raise ValueError("{0:s} requires more than 2 classes.".format(
                self.__class__.__name__))
        super(MultinomialDeviance, self).__init__(n_classes)

    def init_estimator(self):
        return PriorProbabilityEstimator()

    def __call__(self, y, pred, sample_weight=None):
        """Sum of (weighted) per-sample multinomial deviances.

        Parameters
        ----------
        y : array, shape (n,)
            Integer class labels in ``[0, K)``.
        pred : array, shape (n, K)
            Per-class scores.
        sample_weight : array, shape (n,) or None
        """
        # create one-hot label encoding
        Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
        for k in range(self.K):
            Y[:, k] = y == k
        # Per-sample deviance: minus the true-class score plus the
        # log-sum-exp of all class scores.
        sample_deviance = (-1 * (Y * pred).sum(axis=1) +
                           logsumexp(pred, axis=1))
        if sample_weight is None:
            return np.sum(sample_deviance)
        # BUGFIX: weight the whole per-sample deviance. The original
        # multiplied only the true-class term by ``sample_weight`` and
        # left the logsumexp term unweighted.
        return np.sum(sample_weight * sample_deviance)

    def negative_gradient(self, y, pred, k=0, **kwargs):
        """Compute negative gradient for the ``k``-th class. """
        return y - np.nan_to_num(np.exp(pred[:, k] -
                                        logsumexp(pred, axis=1)))

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Make a single Newton-Raphson step. """
        terminal_region = np.where(terminal_regions == leaf)[0]
        residual = residual.take(terminal_region, axis=0)
        y = y.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region, axis=0)
        numerator = np.sum(sample_weight * residual)
        # (K-1)/K factor from the multi-class Newton step (Friedman 2001).
        numerator *= (self.K - 1) / self.K
        denominator = np.sum(sample_weight * (y - residual) *
                             (1.0 - y + residual))
        # Guard against a pure leaf where the Hessian vanishes.
        if denominator == 0.0:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator

    def _score_to_proba(self, score):
        """Softmax over class scores (with nan_to_num for extreme scores)."""
        return np.nan_to_num(
            np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))

    def _score_to_decision(self, score):
        proba = self._score_to_proba(score)
        return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
    """Exponential loss for binary classification (the AdaBoost loss).

    References
    ----------
    Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
    """

    def __init__(self, n_classes):
        if n_classes != 2:
            raise ValueError("{0:s} requires 2 classes.".format(
                self.__class__.__name__))
        # we only need to fit one tree for binary clf.
        super(ExponentialLoss, self).__init__(1)

    def init_estimator(self):
        return ScaledLogOddsEstimator()

    def __call__(self, y, pred, sample_weight=None):
        """Mean (optionally weighted) ``exp(-(2y - 1) * pred)``."""
        pred = pred.ravel()
        if sample_weight is None:
            return np.mean(np.exp(-(2. * y - 1.) * pred))
        return (1.0 / sample_weight.sum() *
                np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))

    def negative_gradient(self, y, pred, **kargs):
        """Negative gradient of the exponential loss w.r.t. ``pred``."""
        signed_y = -(2. * y - 1.)
        return signed_y * np.exp(signed_y * pred.ravel())

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Line search: weighted AdaBoost step for this leaf."""
        region = np.where(terminal_regions == leaf)[0]
        leaf_pred = pred.take(region, axis=0)
        leaf_y = y.take(region, axis=0)
        leaf_weight = sample_weight.take(region, axis=0)
        y_signed = 2. * leaf_y - 1.
        exp_term = np.exp(-y_signed * leaf_pred)
        numerator = np.sum(y_signed * leaf_weight * exp_term)
        denominator = np.sum(leaf_weight * exp_term)
        # Guard against a pure leaf with a vanishing denominator.
        if denominator == 0.0:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator

    def _score_to_proba(self, score):
        proba = np.ones((score.shape[0], 2), dtype=np.float64)
        proba[:, 1] = expit(2.0 * score.ravel())
        proba[:, 0] -= proba[:, 1]
        return proba

    def _score_to_decision(self, score):
        return (score.ravel() >= 0.0).astype(np.int)
# Registry mapping the ``loss`` parameter string to a loss-function class.
# 'deviance' maps to None because the concrete class (binomial vs.
# multinomial) is chosen at fit time from the number of classes.
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
                  'lad': LeastAbsoluteError,
                  'huber': HuberLossFunction,
                  'quantile': QuantileLossFunction,
                  'deviance': None,  # for both, multinomial and binomial
                  'exponential': ExponentialLoss,
                  }
# Registry of string-selectable ``init`` estimators.
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
    """Reports verbose output to stdout.

    If ``verbose==1`` output is printed once in a while (when iteration mod
    verbose_mod is zero).; if larger than 1 then output is printed for
    each update.
    """

    def __init__(self, verbose):
        self.verbose = verbose

    def init(self, est, begin_at_stage=0):
        """Print the table header and reset timing/frequency state."""
        # header fields and line format str
        header_fields = ['Iter', 'Train Loss']
        verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
        # do oob? (out-of-bag column only makes sense with subsampling)
        if est.subsample < 1:
            header_fields.append('OOB Improve')
            verbose_fmt.append('{oob_impr:>16.4f}')
        header_fields.append('Remaining Time')
        verbose_fmt.append('{remaining_time:>16s}')
        # print the header line
        print(('%10s ' + '%16s ' *
               (len(header_fields) - 1)) % tuple(header_fields))
        self.verbose_fmt = ' '.join(verbose_fmt)
        # plot verbose info each time i % verbose_mod == 0
        self.verbose_mod = 1
        self.start_time = time()
        self.begin_at_stage = begin_at_stage

    def update(self, j, est):
        """Update reporter with new iteration. """
        do_oob = est.subsample < 1
        # we need to take into account if we fit additional estimators.
        i = j - self.begin_at_stage  # iteration relative to the start iter
        if (i + 1) % self.verbose_mod == 0:
            oob_impr = est.oob_improvement_[j] if do_oob else 0
            # Linear extrapolation of elapsed time over remaining stages.
            remaining_time = ((est.n_estimators - (j + 1)) *
                              (time() - self.start_time) / float(i + 1))
            if remaining_time > 60:
                remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
            else:
                remaining_time = '{0:.2f}s'.format(remaining_time)
            print(self.verbose_fmt.format(iter=j + 1,
                                          train_score=est.train_score_[j],
                                          oob_impr=oob_impr,
                                          remaining_time=remaining_time))
            if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
                # adjust verbose frequency (powers of 10)
                self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
             min_samples_leaf, min_weight_fraction_leaf,
             max_depth, init, subsample, max_features,
             random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
             warm_start=False):
    """Store the boosting hyper-parameters; concrete subclasses call this."""
    self.loss = loss
    self.learning_rate = learning_rate
    self.n_estimators = n_estimators
    self.min_samples_split = min_samples_split
    self.min_samples_leaf = min_samples_leaf
    self.min_weight_fraction_leaf = min_weight_fraction_leaf
    self.max_depth = max_depth
    self.init = init
    self.subsample = subsample
    self.max_features = max_features
    self.random_state = random_state
    self.alpha = alpha
    self.verbose = verbose
    self.max_leaf_nodes = max_leaf_nodes
    self.warm_start = warm_start
    # No stages fitted yet; fit() resizes this (n_estimators, K) array.
    self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
               criterion, splitter, random_state):
    """Fit another stage of ``n_classes_`` trees to the boosting model. """
    assert sample_mask.dtype == np.bool
    loss = self.loss_
    original_y = y
    # One regression tree per class; K == 1 except for multinomial deviance.
    for k in range(loss.K):
        if loss.is_multi_class:
            # Binary "is class k" target for the k-th tree of this stage.
            y = np.array(original_y == k, dtype=np.float64)
        residual = loss.negative_gradient(y, y_pred, k=k,
                                          sample_weight=sample_weight)
        # induce regression tree on residuals
        tree = DecisionTreeRegressor(
            criterion=criterion,
            splitter=splitter,
            max_depth=self.max_depth,
            min_samples_split=self.min_samples_split,
            min_samples_leaf=self.min_samples_leaf,
            min_weight_fraction_leaf=self.min_weight_fraction_leaf,
            max_features=self.max_features,
            max_leaf_nodes=self.max_leaf_nodes,
            random_state=random_state)
        if self.subsample < 1.0:
            # no inplace multiplication!
            # Zero out the weight of out-of-bag samples for this stage.
            sample_weight = sample_weight * sample_mask.astype(np.float64)
        tree.fit(X, residual, sample_weight=sample_weight,
                 check_input=False)
        # update tree leaves (line search); also updates y_pred in place.
        loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
                                     sample_weight, sample_mask,
                                     self.learning_rate, k=k)
        # add tree to ensemble
        self.estimators_[i, k] = tree
    return y_pred
def _check_params(self):
    """Check validity of parameters and raise ValueError if not valid. """
    if self.n_estimators <= 0:
        raise ValueError("n_estimators must be greater than 0 but "
                         "was %r" % self.n_estimators)
    if self.learning_rate <= 0.0:
        raise ValueError("learning_rate must be greater than 0 but "
                         "was %r" % self.learning_rate)
    # The loss must be both globally known and supported by this
    # (classifier/regressor) subclass.
    if (self.loss not in self._SUPPORTED_LOSS
            or self.loss not in LOSS_FUNCTIONS):
        raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
    if self.loss == 'deviance':
        # 'deviance' resolves to binomial or multinomial depending on the
        # number of classes observed at fit time.
        loss_class = (MultinomialDeviance
                      if len(self.classes_) > 2
                      else BinomialDeviance)
    else:
        loss_class = LOSS_FUNCTIONS[self.loss]
    if self.loss in ('huber', 'quantile'):
        self.loss_ = loss_class(self.n_classes_, self.alpha)
    else:
        self.loss_ = loss_class(self.n_classes_)
    if not (0.0 < self.subsample <= 1.0):
        raise ValueError("subsample must be in (0,1] but "
                         "was %r" % self.subsample)
    if self.init is not None:
        if isinstance(self.init, six.string_types):
            if self.init not in INIT_ESTIMATORS:
                raise ValueError('init="%s" is not supported' % self.init)
        else:
            # A custom init estimator must expose the fit/predict API.
            if (not hasattr(self.init, 'fit')
                    or not hasattr(self.init, 'predict')):
                raise ValueError("init=%r must be valid BaseEstimator "
                                 "and support both fit and "
                                 "predict" % self.init)
    if not (0.0 < self.alpha < 1.0):
        raise ValueError("alpha must be in (0.0, 1.0) but "
                         "was %r" % self.alpha)
    # Resolve max_features to a concrete integer stored in max_features_.
    if isinstance(self.max_features, six.string_types):
        if self.max_features == "auto":
            # if is_classification
            if self.n_classes_ > 1:
                max_features = max(1, int(np.sqrt(self.n_features)))
            else:
                # is regression
                max_features = self.n_features
        elif self.max_features == "sqrt":
            max_features = max(1, int(np.sqrt(self.n_features)))
        elif self.max_features == "log2":
            max_features = max(1, int(np.log2(self.n_features)))
        else:
            raise ValueError("Invalid value for max_features: %r. "
                             "Allowed string values are 'auto', 'sqrt' "
                             "or 'log2'." % self.max_features)
    elif self.max_features is None:
        max_features = self.n_features
    elif isinstance(self.max_features, (numbers.Integral, np.integer)):
        max_features = self.max_features
    else:  # float
        # Interpreted as a fraction of n_features.
        if 0. < self.max_features <= 1.:
            max_features = max(int(self.max_features * self.n_features), 1)
        else:
            raise ValueError("max_features must be in (0, n_features]")
    self.max_features_ = max_features
def _init_state(self):
    """Allocate the arrays that hold the model state before fitting."""
    # Resolve the ``init`` estimator: default from the loss, a registered
    # string alias, or a user-supplied estimator object.
    init = self.init
    if init is None:
        self.init_ = self.loss_.init_estimator()
    elif isinstance(init, six.string_types):
        self.init_ = INIT_ESTIMATORS[init]()
    else:
        self.init_ = init
    self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
                                dtype=np.object)
    self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
    # Out-of-bag improvement is only tracked when subsampling is active.
    if self.subsample < 1.0:
        self.oob_improvement_ = np.zeros((self.n_estimators),
                                         dtype=np.float64)
def _clear_state(self):
    """Drop all fitted state so the next fit() starts from scratch."""
    if hasattr(self, 'estimators_'):
        self.estimators_ = np.empty((0, 0), dtype=np.object)
    # These attributes only exist after a successful fit.
    for attr in ('train_score_', 'oob_improvement_', 'init_'):
        if hasattr(self, attr):
            delattr(self, attr)
def _resize_state(self):
    """Add additional ``n_estimators`` entries to all attributes.

    Used by warm starts: grows ``estimators_``, ``train_score_`` and
    (when subsampling) ``oob_improvement_`` to ``self.n_estimators``.

    Raises
    ------
    ValueError
        If ``self.n_estimators`` is smaller than the number of stages
        already fitted (shrinking is not supported).
    """
    # self.n_estimators is the number of additional est to fit
    total_n_estimators = self.n_estimators
    if total_n_estimators < self.estimators_.shape[0]:
        # BUGFIX: the original formatted ``self.estimators_[0]`` (a row of
        # tree objects) with %d, which raised TypeError instead of the
        # intended message; report the fitted stage count instead.
        raise ValueError('resize with smaller n_estimators %d < %d' %
                         (total_n_estimators, self.estimators_.shape[0]))
    self.estimators_.resize((total_n_estimators, self.loss_.K))
    self.train_score_.resize(total_n_estimators)
    if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
        # if do oob resize arrays or create new if not available
        if hasattr(self, 'oob_improvement_'):
            self.oob_improvement_.resize(total_n_estimators)
        else:
            self.oob_improvement_ = np.zeros((total_n_estimators,),
                                             dtype=np.float64)
def _is_initialized(self):
    """Return True when at least one boosting stage has been allocated."""
    estimators = getattr(self, 'estimators_', [])
    return len(estimators) > 0
def fit(self, X, y, sample_weight=None, monitor=None):
    """Fit the gradient boosting model.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples
        and n_features is the number of features.

    y : array-like, shape = [n_samples]
        Target values (integers in classification, real numbers in
        regression)
        For classification, labels must correspond to classes.

    sample_weight : array-like, shape = [n_samples] or None
        Sample weights. If None, then samples are equally weighted. Splits
        that would create child nodes with net zero or negative weight are
        ignored while searching for a split in each node. In the case of
        classification, splits are also ignored if they would result in any
        single class carrying a negative weight in either child node.

    monitor : callable, optional
        The monitor is called after each iteration with the current
        iteration, a reference to the estimator and the local variables of
        ``_fit_stages`` as keyword arguments ``callable(i, self,
        locals())``. If the callable returns ``True`` the fitting procedure
        is stopped. The monitor can be used for various things such as
        computing held-out estimates, early stopping, model introspect, and
        snapshoting.

    Returns
    -------
    self : object
        Returns self.
    """
    # if not warmstart - clear the estimator state
    if not self.warm_start:
        self._clear_state()

    # Check input
    X, y = check_X_y(X, y, dtype=DTYPE)
    n_samples, self.n_features = X.shape
    if sample_weight is None:
        sample_weight = np.ones(n_samples, dtype=np.float32)
    else:
        sample_weight = column_or_1d(sample_weight, warn=True)

    check_consistent_length(X, y, sample_weight)

    # subclasses encode/validate the target (e.g. label -> 0..K-1 mapping
    # in the classifier)
    y = self._validate_y(y)

    random_state = check_random_state(self.random_state)
    self._check_params()

    if not self._is_initialized():
        # init state
        self._init_state()

        # fit initial model - FIXME make sample_weight optional
        self.init_.fit(X, y, sample_weight)

        # init predictions
        y_pred = self.init_.predict(X)
        begin_at_stage = 0
    else:
        # add more estimators to fitted model
        # invariant: warm_start = True
        if self.n_estimators < self.estimators_.shape[0]:
            raise ValueError('n_estimators=%d must be larger or equal to '
                             'estimators_.shape[0]=%d when '
                             'warm_start==True'
                             % (self.n_estimators,
                                self.estimators_.shape[0]))
        begin_at_stage = self.estimators_.shape[0]
        y_pred = self._decision_function(X)
        self._resize_state()

    # fit the boosting stages
    n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
                                begin_at_stage, monitor)
    # change shape of arrays after fit (early-stopping or additional ests)
    if n_stages != self.estimators_.shape[0]:
        self.estimators_ = self.estimators_[:n_stages]
        self.train_score_ = self.train_score_[:n_stages]
        if hasattr(self, 'oob_improvement_'):
            self.oob_improvement_ = self.oob_improvement_[:n_stages]

    return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
                begin_at_stage=0, monitor=None):
    """Iteratively fits the stages.

    For each stage it computes the progress (OOB, train score)
    and delegates to ``_fit_stage``.
    Returns the number of stages fit; might differ from ``n_estimators``
    due to early stopping.
    """
    n_samples = X.shape[0]
    do_oob = self.subsample < 1.0
    # NOTE(review): ``np.bool`` is a deprecated alias removed in modern
    # NumPy; builtin ``bool`` is the drop-in replacement.
    sample_mask = np.ones((n_samples, ), dtype=np.bool)
    n_inbag = max(1, int(self.subsample * n_samples))
    loss_ = self.loss_

    # Set min_weight_leaf from min_weight_fraction_leaf
    if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
        min_weight_leaf = (self.min_weight_fraction_leaf *
                           np.sum(sample_weight))
    else:
        min_weight_leaf = 0.

    # init criterion and splitter
    criterion = FriedmanMSE(1)
    splitter = PresortBestSplitter(criterion,
                                   self.max_features_,
                                   self.min_samples_leaf,
                                   min_weight_leaf,
                                   random_state)

    if self.verbose:
        verbose_reporter = VerboseReporter(self.verbose)
        verbose_reporter.init(self, begin_at_stage)

    # perform boosting iterations
    # (i is pre-bound so ``i + 1`` is valid even when the loop body
    #  never runs, e.g. n_estimators == begin_at_stage)
    i = begin_at_stage
    for i in range(begin_at_stage, self.n_estimators):

        # subsampling
        if do_oob:
            sample_mask = _random_sample_mask(n_samples, n_inbag,
                                              random_state)
            # OOB score before adding this stage
            old_oob_score = loss_(y[~sample_mask],
                                  y_pred[~sample_mask],
                                  sample_weight[~sample_mask])

        # fit next stage of trees
        y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
                                 sample_mask, criterion, splitter,
                                 random_state)

        # track deviance (= loss)
        if do_oob:
            self.train_score_[i] = loss_(y[sample_mask],
                                         y_pred[sample_mask],
                                         sample_weight[sample_mask])
            self.oob_improvement_[i] = (
                old_oob_score - loss_(y[~sample_mask],
                                      y_pred[~sample_mask],
                                      sample_weight[~sample_mask]))
        else:
            # no need to fancy index w/ no subsampling
            self.train_score_[i] = loss_(y, y_pred, sample_weight)

        if self.verbose > 0:
            verbose_reporter.update(i, self)

        if monitor is not None:
            early_stopping = monitor(i, self, locals())
            if early_stopping:
                break
    return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions`.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
    # for use in inner loop, not raveling the output in single-class case,
    # not doing input validation.
    # Start from the init estimator's prediction, then let the Cython
    # helper add every stage's (learning-rate scaled) contribution in place.
    score = self._init_decision_function(X)
    predict_stages(self.estimators_, X, self.learning_rate, score)
    return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
    """Compute the decision function of ``X``.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    score : array, shape = [n_samples, n_classes] or [n_samples]
        The decision function of the input samples. The order of the
        classes corresponds to that in the attribute `classes_`.
        Regression and binary classification produce an array of shape
        [n_samples].
    """
    X = check_array(X, dtype=DTYPE, order="C")
    score = self._decision_function(X)
    # single column (regression / binary classification): return 1-d array
    if score.shape[1] == 1:
        return score.ravel()
    return score
def _staged_decision_function(self, X):
    """Compute decision function of ``X`` for each iteration.

    This method allows monitoring (i.e. determine error on testing set)
    after each stage.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    score : generator of array, shape = [n_samples, k]
        The decision function of the input samples. The order of the
        classes corresponds to that in the attribute `classes_`.
        Regression and binary classification are special cases with
        ``k == 1``, otherwise ``k==n_classes``.
    """
    X = check_array(X, dtype=DTYPE, order="C")
    score = self._init_decision_function(X)
    for i in range(self.estimators_.shape[0]):
        # accumulate stage i into ``score`` in place ...
        predict_stage(self.estimators_, i, X, self.learning_rate, score)
        # ... and yield a copy so callers can safely keep each snapshot
        yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
    """Compute decision function of ``X`` for each iteration.

    This method allows monitoring (i.e. determine error on testing set)
    after each stage.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    score : generator of array, shape = [n_samples, k]
        The decision function of the input samples. The order of the
        classes corresponds to that in the attribute `classes_`.
        Regression and binary classification are special cases with
        ``k == 1``, otherwise ``k==n_classes``.
    """
    # deprecated public wrapper: delegate to the private generator
    for dec in self._staged_decision_function(X):
        # no yield from in Python2.X
        yield dec
@property
def feature_importances_(self):
    """Return the feature importances (the higher, the more important the
       feature).

    Returns
    -------
    feature_importances_ : array, shape = [n_features]
    """
    if self.estimators_ is None or len(self.estimators_) == 0:
        raise NotFittedError("Estimator not fitted, call `fit` before"
                             " `feature_importances_`.")
    total_sum = np.zeros((self.n_features, ), dtype=np.float64)
    for stage in self.estimators_:
        # average importances over the K trees of this boosting stage
        stage_sum = sum(tree.feature_importances_
                        for tree in stage) / len(stage)
        total_sum += stage_sum
    # ... then average over all stages
    importances = total_sum / len(self.estimators_)
    return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
    """Gradient Boosting for classification.

    GB builds an additive model in a
    forward stage-wise fashion; it allows for the optimization of
    arbitrary differentiable loss functions. In each stage ``n_classes_``
    regression trees are fit on the negative gradient of the
    binomial or multinomial deviance loss function. Binary classification
    is a special case where only a single regression tree is induced.

    Read more in the :ref:`User Guide <gradient_boosting>`.

    Parameters
    ----------
    loss : {'deviance', 'exponential'}, optional (default='deviance')
        loss function to be optimized. 'deviance' refers to
        deviance (= logistic regression) for classification
        with probabilistic outputs. For loss 'exponential' gradient
        boosting recovers the AdaBoost algorithm.

    learning_rate : float, optional (default=0.1)
        learning rate shrinks the contribution of each tree by `learning_rate`.
        There is a trade-off between learning_rate and n_estimators.

    n_estimators : int (default=100)
        The number of boosting stages to perform. Gradient boosting
        is fairly robust to over-fitting so a large number usually
        results in better performance.

    max_depth : integer, optional (default=3)
        maximum depth of the individual regression estimators. The maximum
        depth limits the number of nodes in the tree. Tune this parameter
        for best performance; the best value depends on the interaction
        of the input variables.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    subsample : float, optional (default=1.0)
        The fraction of samples to be used for fitting the individual base
        learners. If smaller than 1.0 this results in Stochastic Gradient
        Boosting. `subsample` interacts with the parameter `n_estimators`.
        Choosing `subsample < 1.0` leads to a reduction of variance
        and an increase in bias.

    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Choosing `max_features < n_features` leads to a reduction of variance
        and an increase in bias.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.

    init : BaseEstimator, None, optional (default=None)
        An estimator object that is used to compute the initial
        predictions. ``init`` has to provide ``fit`` and ``predict``.
        If None it uses ``loss.init_estimator``.

    verbose : int, default: 0
        Enable verbose output. If 1 then it prints progress and performance
        once in a while (the more trees the lower the frequency). If greater
        than 1 then it prints progress and performance for every tree.

    warm_start : bool, default: False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just erase the
        previous solution.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    feature_importances_ : array, shape = [n_features]
        The feature importances (the higher, the more important the feature).

    oob_improvement_ : array, shape = [n_estimators]
        The improvement in loss (= deviance) on the out-of-bag samples
        relative to the previous iteration.
        ``oob_improvement_[0]`` is the improvement in
        loss of the first stage over the ``init`` estimator.

    train_score_ : array, shape = [n_estimators]
        The i-th score ``train_score_[i]`` is the deviance (= loss) of the
        model at iteration ``i`` on the in-bag sample.
        If ``subsample == 1`` this is the deviance on the training data.

    loss_ : LossFunction
        The concrete ``LossFunction`` object.

    init : BaseEstimator
        The estimator that provides the initial predictions.
        Set via the ``init`` argument or ``loss.init_estimator``.

    estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, loss_.K]
        The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
        classification, otherwise n_classes.

    See also
    --------
    sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
    AdaBoostClassifier

    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.

    J. Friedman, Stochastic Gradient Boosting, 1999

    T. Hastie, R. Tibshirani and J. Friedman.
    Elements of Statistical Learning Ed. 2, Springer, 2009.
    """

    _SUPPORTED_LOSS = ('deviance', 'exponential')

    def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
                 subsample=1.0, min_samples_split=2,
                 min_samples_leaf=1, min_weight_fraction_leaf=0.,
                 max_depth=3, init=None, random_state=None,
                 max_features=None, verbose=0,
                 max_leaf_nodes=None, warm_start=False):
        # all hyper-parameters are simply forwarded to the shared base class
        super(GradientBoostingClassifier, self).__init__(
            loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_depth=max_depth, init=init, subsample=subsample,
            max_features=max_features,
            random_state=random_state, verbose=verbose,
            max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)

    def _validate_y(self, y):
        # map arbitrary class labels onto contiguous integers 0..n_classes_-1
        self.classes_, y = np.unique(y, return_inverse=True)
        self.n_classes_ = len(self.classes_)
        return y

    def decision_function(self, X):
        """Compute the decision function of ``X``.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        score : array, shape = [n_samples, n_classes] or [n_samples]
            The decision function of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
            Regression and binary classification produce an array of shape
            [n_samples].
        """
        X = check_array(X, dtype=DTYPE, order="C")
        score = self._decision_function(X)
        # binary classification: a single column, returned as a 1-d array
        if score.shape[1] == 1:
            return score.ravel()
        return score

    def staged_decision_function(self, X):
        """Compute decision function of ``X`` for each iteration.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        score : generator of array, shape = [n_samples, k]
            The decision function of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
            Regression and binary classification are special cases with
            ``k == 1``, otherwise ``k==n_classes``.
        """
        for dec in self._staged_decision_function(X):
            # no yield from in Python2.X
            yield dec

    def predict(self, X):
        """Predict class for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted values.
        """
        score = self.decision_function(X)
        # translate raw scores into class indices, then back to class labels
        decisions = self.loss_._score_to_decision(score)
        return self.classes_.take(decisions, axis=0)

    def staged_predict(self, X):
        """Predict class at each stage for X.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : generator of array of shape = [n_samples]
            The predicted value of the input samples.
        """
        for score in self._staged_decision_function(X):
            decisions = self.loss_._score_to_decision(score)
            yield self.classes_.take(decisions, axis=0)

    def predict_proba(self, X):
        """Predict class probabilities for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Raises
        ------
        AttributeError
            If the ``loss`` does not support probabilities.

        Returns
        -------
        p : array of shape = [n_samples]
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        score = self.decision_function(X)
        try:
            return self.loss_._score_to_proba(score)
        except NotFittedError:
            raise
        except AttributeError:
            # not every loss implements _score_to_proba
            raise AttributeError('loss=%r does not support predict_proba' %
                                 self.loss)

    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Raises
        ------
        AttributeError
            If the ``loss`` does not support probabilities.

        Returns
        -------
        p : array of shape = [n_samples]
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)
        return np.log(proba)

    def staged_predict_proba(self, X):
        """Predict class probabilities at each stage for X.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : generator of array of shape = [n_samples]
            The predicted value of the input samples.
        """
        try:
            for score in self._staged_decision_function(X):
                yield self.loss_._score_to_proba(score)
        except NotFittedError:
            raise
        except AttributeError:
            raise AttributeError('loss=%r does not support predict_proba' %
                                 self.loss)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
    """Gradient Boosting for regression.

    GB builds an additive model in a forward stage-wise fashion;
    it allows for the optimization of arbitrary differentiable loss functions.
    In each stage a regression tree is fit on the negative gradient of the
    given loss function.

    Read more in the :ref:`User Guide <gradient_boosting>`.

    Parameters
    ----------
    loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
        loss function to be optimized. 'ls' refers to least squares
        regression. 'lad' (least absolute deviation) is a highly robust
        loss function solely based on order information of the input
        variables. 'huber' is a combination of the two. 'quantile'
        allows quantile regression (use `alpha` to specify the quantile).

    learning_rate : float, optional (default=0.1)
        learning rate shrinks the contribution of each tree by `learning_rate`.
        There is a trade-off between learning_rate and n_estimators.

    n_estimators : int (default=100)
        The number of boosting stages to perform. Gradient boosting
        is fairly robust to over-fitting so a large number usually
        results in better performance.

    max_depth : integer, optional (default=3)
        maximum depth of the individual regression estimators. The maximum
        depth limits the number of nodes in the tree. Tune this parameter
        for best performance; the best value depends on the interaction
        of the input variables.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    subsample : float, optional (default=1.0)
        The fraction of samples to be used for fitting the individual base
        learners. If smaller than 1.0 this results in Stochastic Gradient
        Boosting. `subsample` interacts with the parameter `n_estimators`.
        Choosing `subsample < 1.0` leads to a reduction of variance
        and an increase in bias.

    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Choosing `max_features < n_features` leads to a reduction of variance
        and an increase in bias.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    alpha : float (default=0.9)
        The alpha-quantile of the huber loss function and the quantile
        loss function. Only if ``loss='huber'`` or ``loss='quantile'``.

    init : BaseEstimator, None, optional (default=None)
        An estimator object that is used to compute the initial
        predictions. ``init`` has to provide ``fit`` and ``predict``.
        If None it uses ``loss.init_estimator``.

    verbose : int, default: 0
        Enable verbose output. If 1 then it prints progress and performance
        once in a while (the more trees the lower the frequency). If greater
        than 1 then it prints progress and performance for every tree.

    warm_start : bool, default: False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just erase the
        previous solution.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    feature_importances_ : array, shape = [n_features]
        The feature importances (the higher, the more important the feature).

    oob_improvement_ : array, shape = [n_estimators]
        The improvement in loss (= deviance) on the out-of-bag samples
        relative to the previous iteration.
        ``oob_improvement_[0]`` is the improvement in
        loss of the first stage over the ``init`` estimator.

    train_score_ : array, shape = [n_estimators]
        The i-th score ``train_score_[i]`` is the deviance (= loss) of the
        model at iteration ``i`` on the in-bag sample.
        If ``subsample == 1`` this is the deviance on the training data.

    loss_ : LossFunction
        The concrete ``LossFunction`` object.

    init : BaseEstimator
        The estimator that provides the initial predictions.
        Set via the ``init`` argument or ``loss.init_estimator``.

    estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
        The collection of fitted sub-estimators.

    See also
    --------
    DecisionTreeRegressor, RandomForestRegressor

    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.

    J. Friedman, Stochastic Gradient Boosting, 1999

    T. Hastie, R. Tibshirani and J. Friedman.
    Elements of Statistical Learning Ed. 2, Springer, 2009.
    """

    _SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')

    def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
                 subsample=1.0, min_samples_split=2,
                 min_samples_leaf=1, min_weight_fraction_leaf=0.,
                 max_depth=3, init=None, random_state=None,
                 max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
                 warm_start=False):
        # all hyper-parameters are simply forwarded to the shared base class
        super(GradientBoostingRegressor, self).__init__(
            loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_depth=max_depth, init=init, subsample=subsample,
            max_features=max_features,
            random_state=random_state, alpha=alpha, verbose=verbose,
            max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)

    def predict(self, X):
        """Predict regression target for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted values.
        """
        X = check_array(X, dtype=DTYPE, order="C")
        # single-output regression: flatten the [n_samples, 1] score array
        return self._decision_function(X).ravel()

    def staged_predict(self, X):
        """Predict regression target at each stage for X.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : generator of array of shape = [n_samples]
            The predicted value of the input samples.
        """
        for y in self._staged_decision_function(X):
            yield y.ravel()
| bsd-3-clause |
akhil22/deepmatching_1.2_c- | viz.py | 4 | 3145 | import sys
from PIL import Image
from numpy import *
from matplotlib.pyplot import *
def show_correspondences( img0, img1, corr ):
    """Interactively display point correspondences between two images.

    Each row of ``corr`` is (x0, y0, x1, y1, score, index); rows with a
    non-positive score are discarded.  Column 5 is overwritten with a
    colour bucket derived from the match's angle around the centroid.
    NOTE: Python 2 / pylab-style code (star imports, print statements).
    """
    assert corr.shape[-1]==6
    corr = corr[corr[:,4]>0,:]

    # make beautiful colors: bucket matches by angle around the centroid
    center = corr[:,[1,0]].mean(axis=0) # array(img0.shape[:2])/2 #
    corr[:,5] = arctan2(*(corr[:,[1,0]] - center).T)
    corr[:,5] = int32(64*corr[:,5]/pi) % 128

    set_max = set(corr[:,5])
    colors = {m:i for i,m in enumerate(set_max)}
    colors = {m:cm.hsv(i/float(len(colors))) for m,i in colors.items()}

    def motion_notify_callback(event):
        # highlight the match nearest to the mouse pointer in both images
        if event.inaxes==None: return
        numaxis = event.inaxes.numaxis
        if numaxis<0: return
        x,y = event.xdata, event.ydata
        ax1.lines = []
        ax2.lines = []
        n = sum((corr[:,2*numaxis:2*(numaxis+1)] - [x,y])**2,1).argmin() # find nearest point
        print "\rdisplaying #%d (%d,%d) --> (%d,%d), score=%g from maxima %d" % (n,
            corr[n,0],corr[n,1],corr[n,2],corr[n,3],corr[n,4],corr[n,5]),;sys.stdout.flush()
        x,y = corr[n,0:2]
        ax1.plot(x,y,'+',ms=10,mew=2,color='blue',scalex=False,scaley=False)
        x,y = corr[n,2:4]
        ax2.plot(x,y,'+',ms=10,mew=2,color='red',scalex=False,scaley=False)
        # we redraw only the concerned axes
        renderer = fig.canvas.get_renderer()
        ax1.draw(renderer)
        ax2.draw(renderer)
        fig.canvas.blit(ax1.bbox)
        fig.canvas.blit(ax2.bbox)

    def noticks():
        # hide axis tick marks on the current subplot
        xticks([])
        yticks([])

    clf()
    # top row: the two raw images (numaxis identifies them in the callback)
    ax1 = subplot(221)
    ax1.numaxis = 0
    imshow(img0,interpolation='nearest')
    noticks()
    ax2 = subplot(222)
    ax2.numaxis = 1
    imshow(img1,interpolation='nearest')
    noticks()

    # bottom row: dimmed images overlaid with all colour-coded matches
    ax = subplot(223)
    ax.numaxis = -1
    imshow(img0/2+64,interpolation='nearest')
    for m in set_max:
        plot(corr[corr[:,5]==m,0],corr[corr[:,5]==m,1],'+',ms=10,mew=2,color=colors[m],scalex=0,scaley=0)
    noticks()

    ax = subplot(224)
    ax.numaxis = -1
    imshow(img1/2+64,interpolation='nearest')
    for m in set_max:
        plot(corr[corr[:,5]==m,2],corr[corr[:,5]==m,3],'+',ms=10,mew=2,color=colors[m],scalex=0,scaley=0)
    noticks()

    subplots_adjust(left=0.01, bottom=0.01, right=0.99, top=0.99,
                    wspace=0.02, hspace=0.02)

    fig = get_current_fig_manager().canvas.figure
    cid_move = fig.canvas.mpl_connect('motion_notify_event',motion_notify_callback)
    print "Move your mouse over the top images to visualize individual matches"
    show()
    fig.canvas.mpl_disconnect(cid_move)
if __name__=='__main__':
    # usage: viz.py img0 img1 < matches.txt
    # matches are read from stdin, one per line: x0 y0 x1 y1 score index
    args = sys.argv[1:]
    img0 = array(Image.open(args[0]).convert('RGB'))
    img1 = array(Image.open(args[1]).convert('RGB'))

    retained_matches = []
    for line in sys.stdin:
        line = line.split()
        # skip blank/malformed lines (must be 6 columns starting with a digit)
        if not line or len(line)!=6 or not line[0][0].isdigit(): continue
        x0, y0, x1, y1, score, index = line
        retained_matches.append((float(x0),float(y0),float(x1),float(y1),float(score),float(index)))

    assert retained_matches, 'error: no matches piped to this program'
    show_correspondences(img0, img1, array(retained_matches))
| gpl-3.0 |
google-code-export/currentcostgui | currentcostlivedata.py | 9 | 35520 | # -*- coding: utf-8 -*-
#
# CurrentCost GUI
#
# Copyright (C) 2008 Dale Lane
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# The author of this code can be contacted at Dale.Lane@gmail.com
# Any contact about this application is warmly welcomed.
#
import wx
import csv
import datetime
import time
import pytz
from matplotlib.dates import DayLocator, HourLocator, MinuteLocator, DateFormatter, num2date
from matplotlib.ticker import FuncFormatter, ScalarFormatter
from matplotlib.widgets import SpanSelector
from threading import Thread, Lock
from currentcostcomlive import CurrentCostSerialLiveConnection
from nationalgriddata import NationalGridDataSource
from electricitygeneration import CurrentCostElectricityGeneration
from tracer import CurrentCostTracer
# this class provides logging and diagnostics
trc = CurrentCostTracer()
#ZERO = datetime.timedelta(0)
#HOUR = datetime.timedelta(hours=1)##
#class UTC(datetime.tzinfo):
# def utcoffset(self, dt):
# return ZERO
# def tzname(self, dt):
# return "UTC"
# def dst(self, dt):
# return ZERO#
#utc = UTC()
#
# Displays a graph showing live CurrentCost data.
#
# Dale Lane (http://dalelane.co.uk/blog)
#
class CurrentCostLiveData():
#
# where are we getting live data from?
CONNECTION_NONE = 0
CONNECTION_MQTT = 1
CONNECTION_SERIAL = 2
connectionType = CONNECTION_NONE
closing = False
#
# which other live feeds should be shown?
showNationalGridDemand = False
showNationalGridFrequency = False
# graphs where the live data is shown
livegraph = None
livegraphNGDemand = None
livegraphNGFrequency = None
#
# handle to the GUI where the graph is shown
guicallback = None
#
# live data store - dates and the readings
# assuming equivalent indices - e.g. the third date goes with
# the third reading
ccdates = []
ccreadings = []
ccsplitreadings = []
#
# National Grid data store - dates and the readings
# assuming equivalent indices - e.g. the third date goes with
# the third reading
ngdatadates = []
ngdemandreadings = []
ngfreqreadings = []
ngfreqzeroline = []
#
# likely limits for National Grid frequency data
#
# taken from http://www.nationalgrid.com/uk/Electricity/Data/Realtime/
#
NGFREQ_MIN = 49.8
NGFREQ_ZERO = 50.00
NGFREQ_MAX = 50.2
# background threads actually getting the live data
mqttClient = None
comClient = None
ngdClient = None
genClient = CurrentCostElectricityGeneration()
# when did we start tracking live data?
starttime = None
# how should we display labels on the axes?
stddatefmtter = DateFormatter('%H:%M.%S')
freqfmtter = None
# there can be two threads updating the graph. to avoid them both doing it
# at once, we need a thread lock
lock = Lock()
# if a modal dialog is open we should stop redrawing graphs
dlgOpen = False
# handle to db used to persist data
appDatabase = None
def ExportLiveData(self, filepath):
    """Export the collected live CurrentCost readings to a CSV file.

    filepath -- destination path; the file is overwritten if it exists.

    NOTE(review): the loop stops at ``len - 1``, so the most recent
    reading is never exported — presumably to avoid racing the reader
    thread that is still appending; confirm before changing.
    """
    # 'with' guarantees the file handle is closed even if a write fails
    # (the previous implementation leaked the handle on error)
    with open(filepath, 'wt') as f:
        fieldnames = ('Time', 'kWH')
        writer = csv.DictWriter(f, fieldnames=fieldnames, dialect='excel')
        # emit a header row mapping each field name to itself
        writer.writerow(dict((n, n) for n in fieldnames))
        for i in range(0, len(self.ccdates) - 1):
            writer.writerow({'Time': self.ccdates[i],
                             'kWH': self.ccreadings[i]})
#
# redraw all active graphs
#
    def redrawGraph(self):
        """Redraw all active live graphs (CurrentCost, NG demand, NG frequency).

        Returns True on success, False if any plotting step failed (the
        thread lock is released before every return path).  Returns None
        without plotting if a modal dialog is currently open.
        """
        global trc
        trc.FunctionEntry("currentcostlivedata :: redrawGraph")
        # redrawing while a modal dialog is open upsets matplotlib/wx
        if self.dlgOpen:
            trc.Trace("dialog is open")
            trc.FunctionExit("currentcostlivedata :: redrawGraph")
            return
        trc.Trace(str(len(self.ccdates)) + " dates and " +
                  str(len(self.ccreadings)) + " data points")
        # serialise graph updates - two background threads can call this
        trc.Trace("aquiring lock")
        self.lock.acquire()
        #
        # Step 1:
        # update the graph plots
        #
        if len(self.ccdates) > 0:
            try:
                trc.Trace("plotting live data")
                self.livegraph.plot_date(self.ccdates,
                                         self.ccreadings,
                                         'r-')
            except Exception, e:
                trc.Trace("failed to plot data on live graph")
                trc.Trace(str(e))
                trc.Trace(str(e.message))
                # only report as an error if we are not shutting down
                if self.closing == False:
                    trc.Error('Failed to plot data on livegraph')
                    trc.Error(str(e))
                    trc.Error(str(e.message))
                    trc.Error("have " + str(len(self.ccdates)) + " dates and " +
                              str(len(self.ccreadings)) + " data points")
                trc.Trace("releasing lock")
                self.lock.release()
                trc.FunctionExit("currentcostlivedata :: redrawGraph")
                return False
        else:
            trc.Trace("no dates to plot")
        if self.livegraphNGDemand != None and len(self.ngdatadates) > 0:
            try:
                # update the graph
                trc.Trace("plotting National Grid demand data")
                self.livegraphNGDemand.plot_date(self.ngdatadates,
                                                 self.ngdemandreadings,
                                                 'b-')
            except Exception, e:
                trc.Error('DEBUG: error - failed to plot demand data on national grid graph')
                trc.Error(str(e))
                trc.Error(str(e.message))
                trc.Trace("releasing lock")
                self.lock.release()
                trc.FunctionExit("currentcostlivedata :: redrawGraph")
                return False
        if self.livegraphNGFrequency != None and len(self.ngdatadates) > 0:
            try:
                # update the graph
                trc.Trace("plotting National Grid frequency data")
                self.livegraphNGFrequency.plot_date(self.ngdatadates,
                                                    self.ngfreqreadings,
                                                    'b-')
                # add a 'zero' (e.g. 50Hz) line to the graph
                # I tried to do this using axhline but it threw some weird
                # ordinal must be >= 1 errors when I tried doing any additional
                # plots. This is a fairly hacky workaround
                trc.Trace("plotting zero line")
                self.livegraphNGFrequency.plot_date(self.ngdatadates,
                                                    self.ngfreqzeroline,
                                                    'g-')
            except Exception, e:
                trc.Error('DEBUG: error - failed to plot frequency data on national grid graph')
                trc.Error(str(e))
                trc.Error(str(e.message))
                trc.Trace("releasing lock")
                self.lock.release()
                trc.FunctionExit("currentcostlivedata :: redrawGraph")
                return False
        #
        # Step 2:
        # disable auto-scaling
        # there is a bug when use twinx to plot data series for multiple y
        # axes on a single graph. the scaling sometimes gets out of sync, so
        # you get two x-axes overlaid on each other, with slightly different
        # zoom factors
        #
        # so we scale all x-axes manually
        #
        # NOTE(review): these assignments shadow the matplotlib Axes method
        # set_autoscale_on with a boolean attribute rather than calling
        # set_autoscale_on(False) - probably never actually disables
        # autoscaling; confirm against the matplotlib API
        trc.Trace("disabling auto-scaling")
        if len(self.ccdates) > 0:
            self.livegraph.set_autoscale_on = False
        if self.livegraphNGDemand != None:
            self.livegraphNGDemand.set_autoscale_on = False
        if self.livegraphNGFrequency != None:
            self.livegraphNGFrequency.set_autoscale_on = False
        #
        # Step 3:
        # rotate labels on x-axis
        # makes the timestamps fit better when rendered vertically
        #
        try:
            trc.Trace("rotating labels on x-axis")
            for label in self.livegraph.get_xticklabels():
                label.set_rotation(90)
        except Exception, e:
            trc.Error('DEBUG: error - failed to rotate axis labels on live graph')
            trc.Error(str(e))
            trc.Error(str(e.message))
            trc.Trace("releasing lock")
            self.lock.release()
            trc.FunctionExit("currentcostlivedata :: redrawGraph")
            return False
        if self.livegraphNGDemand != None:
            try:
                trc.Trace("rotating labels on x-axis for National Grid demand data")
                for label in self.livegraphNGDemand.get_xticklabels():
                    label.set_rotation(90)
            except Exception, e:
                trc.Error('DEBUG: error - failed to rotate axis labels on NG demand graph')
                trc.Error(str(e))
                trc.Error(str(e.message))
                trc.Trace("releasing lock")
                self.lock.release()
                trc.FunctionExit("currentcostlivedata :: redrawGraph")
                return False
        if self.livegraphNGFrequency != None:
            try:
                trc.Trace("rotating labels on x-axis for National Grid frequency data")
                for label in self.livegraphNGFrequency.get_xticklabels():
                    label.set_rotation(90)
            except Exception, e:
                trc.Error('DEBUG: error - failed to rotate axis labels on NG frequency graph')
                trc.Error(str(e))
                trc.Error(str(e.message))
                trc.Trace("releasing lock")
                self.lock.release()
                trc.FunctionExit("currentcostlivedata :: redrawGraph")
                return False
        #
        # Step 4:
        # manually zoom all graphs to same scale - keeping x-axes in sync
        #
        trc.Trace("setting xmin/xmax")
        endtime = datetime.datetime.now(pytz.utc)
        self.livegraph.set_xlim(xmin=self.starttime, xmax=endtime)
        if self.livegraphNGDemand != None:
            self.livegraphNGDemand.set_xlim(xmin=self.starttime, xmax=endtime)
        if self.livegraphNGFrequency != None:
            self.livegraphNGFrequency.set_xlim(xmin=self.starttime, xmax=endtime)
            # frequency y-axis is pinned to the expected National Grid range
            self.livegraphNGFrequency.set_ylim(ymin=self.NGFREQ_MIN, ymax=self.NGFREQ_MAX)
        #
        # Step 5:
        # format x-axis labels
        # don't know how to switch one of these off, so we create multiple
        # identical axes, and try to ignore the fact that you can see it's
        # slightly thicker as drawn twice in the same place!
        try:
            # format the dates on the x-axis
            if len(self.ccdates) > 0:
                trc.Trace("formatting x-axis labels")
                self.livegraph.xaxis.set_major_formatter(self.stddatefmtter)
                self.livegraph.xaxis.set_minor_formatter(self.stddatefmtter)
            if self.livegraphNGDemand != None:
                trc.Trace("formatting x-axis labels for National Grid demand")
                self.livegraphNGDemand.xaxis.set_major_formatter(self.stddatefmtter)
                self.livegraphNGDemand.xaxis.set_minor_formatter(self.stddatefmtter)
            if self.livegraphNGFrequency != None:
                trc.Trace("formatting x-axis labels for National Grid frequency")
                self.livegraphNGFrequency.xaxis.set_major_formatter(self.stddatefmtter)
                self.livegraphNGFrequency.xaxis.set_minor_formatter(self.stddatefmtter)
                # y-axis shows supply-vs-demand wording instead of raw Hz
                self.livegraphNGFrequency.yaxis.set_major_formatter(self.freqfmtter)
                # hide the tick marks themselves on the frequency y-axis
                for line in self.livegraphNGFrequency.get_yticklines():
                    line.set_markersize(0)
        except Exception, e:
            trc.Error('DEBUG: error - failed to assign xaxis formatters')
            trc.Error(str(e))
            trc.Error(str(e.message))
            trc.Trace("releasing lock")
            self.lock.release()
            trc.FunctionExit("currentcostlivedata :: redrawGraph")
            return False
        #
        # Step 6:
        # final step - redraw all active graphs
        #
        try:
            trc.Trace("redrawing canvas")
            self.livegraph.figure.canvas.draw()
        except Exception, e:
            trc.Error('DEBUG: error - failed to redraw live canvas')
            trc.Error(str(e))
            trc.Error(str(e.message))
            trc.Trace("releasing lock")
            self.lock.release()
            trc.FunctionExit("currentcostlivedata :: redrawGraph")
            return False
        if self.livegraphNGDemand != None:
            try:
                trc.Trace("redrawing National Grid demand canvas")
                self.livegraphNGDemand.figure.canvas.draw()
            except Exception, e:
                trc.Error('DEBUG: error - failed to redraw NG demand canvas')
                trc.Error(str(e))
                trc.Error(str(e.message))
                trc.Trace("releasing lock")
                self.lock.release()
                trc.FunctionExit("currentcostlivedata :: redrawGraph")
                return False
        if self.livegraphNGFrequency != None:
            try:
                trc.Trace("redrawing National Grid frequency canvas")
                self.livegraphNGFrequency.figure.canvas.draw()
            except Exception, e:
                trc.Error('DEBUG: error - failed to redraw NG frequency canvas')
                trc.Error(str(e))
                trc.Error(str(e.message))
                trc.Trace("releasing lock")
                self.lock.release()
                trc.FunctionExit("currentcostlivedata :: redrawGraph")
                return False
        #
        # graph redraw complete
        trc.Trace("releasing lock")
        self.lock.release()
        trc.FunctionExit("currentcostlivedata :: redrawGraph")
        return True
#
# called when another CurrentCost reading is available
#
# the new reading is appended to the set, and the graph is refreshed
#
    def updateGraph(self, ccreading):
        """Store a new CurrentCost reading and refresh the graph.

        ccreading -- latest meter reading; zero (or negative) readings
                     are discarded without redrawing
        """
        global trc
        trc.FunctionEntry("currentcostlivedata :: updateGraph")
        trc.Trace("new data: " + str(ccreading))
        if ccreading > 0:
            # store the new reading
            try:
                x = datetime.datetime.now(pytz.utc)
                trc.Trace("timestamp : " + repr(x))
                self.ccdates.append(x)
                self.ccreadings.append(ccreading)
                # also keep the reading split by generation source, for the
                # electricity-source graph
                self.ccsplitreadings.append(self.genClient.splitBySource(ccreading))
                trc.Trace("stored reading")
            except Exception, err:
                trc.Error("failed to store live reading")
                trc.Error(str(err))
            # redraw the graph with the new reading
            self.redrawGraph()
        else:
            trc.Trace("ignoring zero reading")
        trc.FunctionExit("currentcostlivedata :: updateGraph")
#
# prepare the graph used to display live CurrentCost data
#
def prepareCurrentcostDataGraph(self, graphaxes):
global trc
trc.FunctionEntry("currentcostlivedata :: prepareCurrentcostDataGraph")
# prepare graph for drawing
self.livegraph = graphaxes
self.livegraph.set_ylabel('kW')
self.livegraph.grid(True)
self.livegraph.set_autoscale_on = False
trc.FunctionExit("currentcostlivedata :: prepareCurrentcostDataGraph")
#
# called to create a connection to the CurrentCost meter
#
    def connect(self, guihandle, connType, ccdb, graphaxes, ipaddr, topic, com):
        """Create a connection to the CurrentCost meter and start collecting.

        guihandle -- parent GUI window (used for dialogs and callbacks)
        connType  -- CONNECTION_MQTT or CONNECTION_SERIAL
        ccdb      -- app database handle, used to look up settings later
        graphaxes -- matplotlib Axes to draw live readings on
        ipaddr, topic -- MQTT broker address and topic (MQTT connections)
        com       -- COM port descriptor (serial connections)
        """
        global trc
        trc.FunctionEntry("currentcostlivedata :: connect")
        self.appDatabase = ccdb
        # ask the user whether to also download National Grid generation data
        qDlg = wx.MessageDialog(guihandle,
                                "Would you like to download National Grid generation data? (Requires an Internet connection).\n" +
                                "  If 'Yes', this will download data about the source of National Grid electricity while Live data is collected.\n" +
                                "  Click on 'Show live data' -> 'National electricity generation' to display data collected",
                                "CurrentCost",
                                style=(wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION))
        dwldResponse = qDlg.ShowModal()
        qDlg.Destroy()
        trc.Trace("user response to National Grid question: " + repr(dwldResponse))
        if dwldResponse == wx.ID_YES:
            trc.Trace("starting background thread for National Grid data")
            self.genClient.startBackgroundThread()
        # store globals
        self.connectionType = connType
        self.livegraph = graphaxes
        self.guicallback = guihandle
        # prepare graph for drawing (clear any previous plot first)
        trc.Trace("preparing livegraph cla")
        self.livegraph.cla()
        self.prepareCurrentcostDataGraph(graphaxes)
        # remember the left edge of the x-axis for manual zooming
        if self.starttime == None:
            trc.Trace("no starttime - setting a starttime now")
            self.starttime = datetime.datetime.now(pytz.utc)
        if self.connectionType == self.CONNECTION_MQTT:
            trc.Trace("connection type: MQTT")
            self.ipaddress = ipaddr
            self.topicstring = topic
            # imported lazily so the MQTT library is only needed if used
            mqttClientModule = __import__("currentcostmqttlive")
            self.mqttClient = mqttClientModule.CurrentCostMQTTLiveConnection()
            backgroundThread = MQTTUpdateThread(self.mqttClient, ipaddr, topic, self)
            backgroundThread.start()
        elif self.connectionType == self.CONNECTION_SERIAL:
            trc.Trace("connection type: serial")
            self.comport = com
            trc.Trace("creating serial connection for live data")
            self.comClient = CurrentCostSerialLiveConnection()
            trc.Trace("creating background thread for CurrentCost data")
            backgroundThread = SerialUpdateThread(self.comClient, com, self)
            backgroundThread.start()
        else:
            trc.Error("unsupported connection type : " + str(self.connectionType))
        # span selector lets the user drag a range to get usage/cost totals
        trc.Trace("currentcostlivedata :: creating span selector")
        span = SpanSelector(self.livegraph, self.onselect, 'horizontal', useblit=True,
                            rectprops=dict(alpha=0.5, facecolor='red'))
        trc.FunctionExit("currentcostlivedata :: connect")
#
# called to disconnect from the CurrentCost meter
#
# existing graph should be left untouched
#
    def disconnect(self):
        """Disconnect from the CurrentCost meter and stop background threads.

        The existing graph is left untouched; connection state is reset so
        a new connection can be made later.
        """
        global trc
        trc.FunctionEntry("currentcostlivedata :: disconnect")
        # suppress error reporting in any in-flight redraws
        self.closing = True
        if self.connectionType == self.CONNECTION_MQTT:
            if self.mqttClient != None:
                self.mqttClient.Disconnect()
        elif self.connectionType == self.CONNECTION_SERIAL:
            if self.comClient != None:
                self.comClient.Disconnect()
        # stop National Grid polling and generation-data download, if running
        if self.ngdClient != None:
            self.ngdClient.stopUpdates()
        self.genClient.stopBackgroundThread()
        # re-initialise variables
        self.connectionType = self.CONNECTION_NONE
        self.livegraph = None
        trc.FunctionExit("currentcostlivedata :: disconnect")
#
# called to return an error to the GUI for displaying to the user
#
# we disconnect before displaying the error
#
    def exitOnError(self, errmsg):
        """Disconnect, then report a connection error back to the GUI.

        errmsg -- message to show to the user
        """
        global trc
        trc.FunctionEntry("currentcostlivedata :: exitOnError")
        # disconnect before displaying the error
        self.disconnect()
        if (self.guicallback != None):
            self.guicallback.displayLiveConnectFailure(errmsg)
        trc.Trace("error message : " + str(errmsg))
        trc.FunctionExit("currentcostlivedata :: exitOnError")
#
# called when another National Grid data is available
#
# the new reading is appended to the set, and the graph is refreshed
#
    def updateNationalGridGraph(self, ngdemand, ngfrequency):
        """Store new National Grid readings and refresh the graph if needed.

        ngdemand    -- national electricity demand reading (or None)
        ngfrequency -- mains frequency reading in Hz (or None)

        Both values must be present for the readings to be stored.
        """
        # store the new National Grid data readings
        if ngdemand != None and ngfrequency != None:
            self.ngdatadates.append(datetime.datetime.now(pytz.utc))
            self.ngdemandreadings.append(ngdemand)
            self.ngfreqreadings.append(ngfrequency)
            # extend the constant 50 Hz reference line in step with the data
            self.ngfreqzeroline.append(self.NGFREQ_ZERO)
        # if we are also plotting live CurrentCost readings, we allow the
        # CurrentCost update function to redraw the graph (otherwise,
        # having two threads redrawing the graph at the same time tends to
        # screw matplotlib up).
        # if we are only plotting National Grid data, then we need to redraw
        # the graph now
        if self.connectionType == self.CONNECTION_NONE:
            self.redrawGraph()
#
# start the download and display of national electricity demand
# data from the National Grid
#
    def startNationalGridDemandData(self, livegraphaxes):
        """Start downloading and displaying National Grid demand data.

        livegraphaxes -- parent Axes to attach the demand axis to, or None
                         if live CurrentCost data already provides one
        """
        if self.showNationalGridDemand == False:
            # we are not currently showing national demand data, but we
            # are about to start
            self.showNationalGridDemand = True
            # if this is a new graph, we need to make a note of the
            # far-left x-axis value for zooming purposes
            if self.starttime == None:
                self.starttime = datetime.datetime.now(pytz.utc)
            # store a handle to the parent graph if required (only if we
            # are viewing National Grid data without personal CurrentCost data)
            if livegraphaxes != None:
                self.livegraph = livegraphaxes
            # we (currently) cannot show both demand and frequency on the same
            # graph. so if there is an existing graph for frequency data, we
            # need to delete it now
            if self.livegraphNGFrequency != None:
                self.livegraphNGFrequency = None
            # if we are re-starting an existing graph, we don't need to create
            # the axes to draw on.
            # otherwise, we create them now (as a twinned y-axis)
            if self.livegraphNGDemand == None:
                self.livegraphNGDemand = self.livegraph.twinx()
                self.livegraphNGDemand.set_ylabel('UK electricity demand (MW)')
            # create a background thread that will poll the National Grid
            # website and return national electricity demand values
            if self.ngdClient == None:
                self.ngdClient = NationalGridUpdateThread(self)
                self.ngdClient.start()
#
# stop the download and display of national electricity demand
# data from the National Grid
#
    def stopNationalGridDemandData(self):
        """Stop both the download and display of National Grid demand data."""
        if self.showNationalGridDemand == True:
            # we are currently showing national demand data, but we are
            # about to stop
            self.showNationalGridDemand = False
            # stop the background thread
            self.ngdClient.stopUpdates()
            # delete the background thread object
            self.ngdClient = None
#
# stop the display of national electricity demand
# data from the National Grid
#
    def pauseNationalGridDemandData(self):
        """Stop displaying National Grid demand data (download continues)."""
        if self.showNationalGridDemand == True:
            # we are currently showing national demand data, but we are
            # about to stop
            self.showNationalGridDemand = False
#
# start the download and display of national electricity frequency
# data from the National Grid
#
    def startNationalGridFrequencyData(self, livegraphaxes):
        """Start downloading and displaying National Grid frequency data.

        livegraphaxes -- parent Axes to attach the frequency axis to, or
                         None if live CurrentCost data already provides one
        """
        if self.showNationalGridFrequency == False:
            # we are not currently showing national frequency data, but we
            # are about to start
            self.showNationalGridFrequency = True
            # if this is a new graph, we need to make a note of the
            # far-left x-axis value for zooming purposes
            if self.starttime == None:
                self.starttime = datetime.datetime.now(pytz.utc)
            # store a handle to the parent graph if required (only if we
            # are viewing National Grid data without personal CurrentCost data)
            if livegraphaxes != None:
                self.livegraph = livegraphaxes
            # we (currently) cannot show both demand and frequency on the same
            # graph. so if there is an existing graph for demand data, we
            # need to delete it now
            if self.livegraphNGDemand != None:
                self.livegraphNGDemand = None
            # if we are re-starting an existing graph, we don't need to create
            # the axes to draw on.
            # otherwise, we create them now (as a twinned y-axis) and install
            # the custom supply-vs-demand label formatter
            if self.livegraphNGFrequency == None:
                self.livegraphNGFrequency = self.livegraph.twinx()
                self.livegraphNGFrequency.set_ylabel('UK national electricity supply vs demand')
                self.freqfmtter = FuncFormatter(self.formatFrequencyData)
            # create a background thread that will poll the National Grid
            # website and return national electricity demand values
            if self.ngdClient == None:
                self.ngdClient = NationalGridUpdateThread(self)
                self.ngdClient.start()
#
# stop the download and display of national electricity frequency
# data from the National Grid
#
    def stopNationalGridFrequencyData(self):
        """Stop both the download and display of National Grid frequency data."""
        if self.showNationalGridFrequency == True:
            # we are currently showing national frequency data, but we are
            # about to stop
            self.showNationalGridFrequency = False
            # stop the background thread
            self.ngdClient.stopUpdates()
            # delete the background thread object
            self.ngdClient = None
#
# stop the display of national electricity frequency data from the
# National Grid
#
    def pauseNationalGridFrequencyData(self):
        """Stop displaying National Grid frequency data (download continues)."""
        if self.showNationalGridFrequency == True:
            # we are currently showing national frequency data, but we are
            # about to stop
            self.showNationalGridFrequency = False
#
# custom axis label formatter - used to transform a frequency Hz value for
# the National Grid power supply into it's meaning in terms of national
# electricity supply vs demand.
#
# meaning taken from http://dynamicdemand.co.uk/grid.htm
#
def formatFrequencyData(self, x, pos=None):
if round(x, 2) == 50.00:
return 'supply = demand'
elif round(x, 2) == 49.90:
return 'supply > demand'
elif round(x, 2) == 50.10:
return 'supply < demand'
else:
return ''
    def prepareElectricitySourceGraph(self, targetTab):
        """Initialise the electricity-source graph from the stored live data.

        targetTab -- GUI tab the generation-source graph is drawn on
        """
        global trc
        trc.FunctionEntry("prepareElectricitySourceGraph")
        # TODO - protect against empty data
        # copies of the lists are passed so the graph has a stable snapshot
        self.genClient.initialiseGraph(list(self.ccdates),
                                       list(self.ccsplitreadings),
                                       targetTab,
                                       self.stddatefmtter)
        trc.FunctionExit("prepareElectricitySourceGraph")
#
# calculate how much electricity was used between the span of the graph
# selected by the user
#
# xmin,xmax identify the x values of the range in the graph span
#
    def onselect(self, xmin, xmax):
        """Handle a span selection on the live graph: show usage and cost.

        xmin, xmax -- matplotlib date ordinals of the selected x range

        Sums kWh usage over all readings inside the selection (each reading
        is assumed to hold until the next one) and pops up a dialog with
        the total and, if a unit cost is stored in the app database, the
        approximate cost.
        """
        global trc
        trc.FunctionEntry("currentcostlivedata :: onselect")
        trc.Trace("xmin : " + repr(xmin) + ", xmax : " + repr(xmax))
        datelo = num2date(xmin)
        datehi = num2date(xmax)
        dateloReading = None
        datehiReading = None
        # conversion factor from seconds to hours
        onesecond = 1.0 / 3600.0
        totalUsage = 0.0
        # note: it's safe to assume that self.ccdates is already sorted
        for idx, nextReading in enumerate(self.ccdates):
            if nextReading < datelo:
                dateloReading = idx
                delta = self.ccdates[idx + 1] - self.ccdates[idx]
                trc.Trace("A : " + repr(self.ccreadings[idx]) + " for " + repr(delta.seconds) + " seconds")
                timeInHours = delta.seconds * onesecond
                kwhUsage = timeInHours * self.ccreadings[idx]
                trc.Trace("     " + repr(kwhUsage))
                # NOTE(review): plain assignment (not +=) so only the last
                # interval before the selection start survives - i.e. the
                # interval straddling datelo; appears deliberate, confirm
                totalUsage = kwhUsage
            elif nextReading < datehi:
                datehiReading = idx
                delta = self.ccdates[idx + 1] - self.ccdates[idx]
                trc.Trace("B : " + repr(self.ccreadings[idx]) + " for " + repr(delta.seconds) + " seconds")
                timeInHours = delta.seconds * onesecond
                kwhUsage = timeInHours * self.ccreadings[idx]
                trc.Trace("     " + repr(kwhUsage))
                totalUsage += kwhUsage
            else:
                # readings are sorted, so nothing further can be in range
                break
        trc.Trace("dateloReading : " + repr(dateloReading))
        trc.Trace("datehiReading : " + repr(datehiReading))
        if dateloReading is None:
            dateloReading = 0
        if datehiReading is None:
            # no readings fell inside the selection - report zero usage
            self.dlgOpen = True
            nDlg = wx.MessageDialog(self.guicallback,
                                    "Between " + datelo.strftime("%d/%m/%y %H:%M.%S") +
                                    " and " + datehi.strftime("%d/%m/%y %H:%M.%S") + "\n" +
                                    " you used 0 units of electricity \n" +
                                    " which cost you 0p",
                                    "CurrentCost",
                                    style=(wx.OK | wx.ICON_INFORMATION))
            nDlg.ShowModal()
            nDlg.Destroy()
            self.dlgOpen = False
        else:
            # step past the last in-range reading, clamped to the list end
            datehiReading += 1
            if datehiReading >= len(self.ccdates):
                datehiReading = len(self.ccdates) - 1
            trc.Trace("onselect : " + repr(datelo) + " -> " + repr(datehi))
            trc.Trace("closest matches : " + repr(self.ccdates[dateloReading]) + " -> " + repr(self.ccdates[datehiReading]))
            trc.Trace(repr(dateloReading) + " | " + repr(datehiReading))
            numUnits = "%.5f" % totalUsage
            #
            # cost-per-kWh setting is optional; with it we can show cost too
            costPerUnit = self.appDatabase.RetrieveSetting("kwhcost")
            if costPerUnit is None:
                self.dlgOpen = True
                nDlg = wx.MessageDialog(self.guicallback,
                                        "Between " + self.ccdates[dateloReading].strftime("%d/%m/%y %H:%M.%S") +
                                        " and " + self.ccdates[datehiReading].strftime("%d/%m/%y %H:%M.%S") + "\n" +
                                        " you used " + numUnits + " units of electricity",
                                        "CurrentCost",
                                        style=(wx.OK | wx.ICON_INFORMATION))
                nDlg.ShowModal()
                nDlg.Destroy()
                self.dlgOpen = False
            else:
                costUnits = "%.3f" % (float(costPerUnit) * totalUsage)
                trc.Trace("cost of a unit : " + repr(float(costPerUnit)))
                self.dlgOpen = True
                nDlg = wx.MessageDialog(self.guicallback,
                                        "Between " + self.ccdates[dateloReading].strftime("%d/%m/%y %H:%M.%S") +
                                        " and " + self.ccdates[datehiReading].strftime("%d/%m/%y %H:%M.%S") + "\n" +
                                        " you used " + numUnits + " units of electricity \n" +
                                        " which cost you approximately " + costUnits + "p",
                                        "CurrentCost",
                                        style=(wx.OK | wx.ICON_INFORMATION))
                nDlg.ShowModal()
                nDlg.Destroy()
                self.dlgOpen = False
        trc.FunctionExit("currentcostlivedata :: onselect")
# a background thread used to create an MQTT connection
class MQTTUpdateThread(Thread):
    """Background worker that opens an MQTT connection for live updates."""

    def __init__(self, mqttclienthandle, ipaddr, topic, liveagent):
        Thread.__init__(self)
        # connection handle plus everything needed to establish it
        self.mqttClient = mqttclienthandle
        self.ipaddress = ipaddr
        self.topicstring = topic
        self.graphhandle = liveagent

    def run(self):
        # blocks for the lifetime of the connection; readings are pushed
        # back through the live-data agent supplied to the constructor
        self.mqttClient.EstablishConnection(self.ipaddress,
                                            self.topicstring,
                                            self.graphhandle)
# a background thread used to create a serial connection
class SerialUpdateThread(Thread):
    """Background worker that opens a serial connection for live updates."""

    def __init__(self, comclienthandle, comportclass, liveagent):
        Thread.__init__(self)
        # connection handle plus the COM port it should use
        self.comClient = comclienthandle
        self.comport = comportclass
        self.graphhandle = liveagent

    def run(self):
        # blocks for the lifetime of the connection; readings are pushed
        # back through the live-data agent supplied to the constructor
        self.comClient.EstablishConnection(self.comport,
                                           self.graphhandle)
# a background thread used to download National Grid data
class NationalGridUpdateThread(Thread):
    """Background worker that polls the National Grid realtime page and
    feeds demand/frequency readings back to the live-data agent."""

    # defaults; real values are set per-instance in __init__
    disconnect = False
    ngdata = None

    def __init__(self, liveagent):
        Thread.__init__(self)
        self.graphhandle = liveagent
        self.disconnect = False
        self.ngdata = NationalGridDataSource()

    def stopUpdates(self):
        # flag is polled by run(); the loop exits after the current fetch
        self.disconnect = True

    def run(self):
        while not self.disconnect:
            nghtml = self.ngdata.DownloadRealtimeHTML()
            demand, freq = self.ngdata.ParseRealtimeHTML(nghtml)
            self.graphhandle.updateNationalGridGraph(demand, freq)
| gpl-3.0 |
ffyu/Kaggle-Taxi-Travel-Time-Prediction | Submission.py | 1 | 43470 | # Model 1 - Scalable Random Forest
import re
import numpy as np
import pandas as pd
import sqlite3
from pandas.io import sql
from datetime import datetime
# global variable to indicate the folder for all input / output files
FOLDER = './data/'
# Convert [lon,lat] string to list
def lonlat_convert(lonlat):
    """Parse a "[lon,lat]" snapshot string into a [lon, lat] float list.

    lonlat -- string such as "[-8.618643,41.141412]"

    Returns a two-element list [longitude, latitude].
    """
    # the dot is escaped: the original pattern used a bare '.', which
    # matches any character and could accept malformed numbers
    coords = re.compile(r"[-+]?\d+\.\d+").findall(lonlat)
    return [float(coords[0]), float(coords[1])]
# Get Haversine distance
def get_dist(lonlat1, lonlat2):
    """Haversine great-circle distance in km between two [lon, lat] points."""
    lon1, lat1 = lonlat1[0], lonlat1[1]
    lon2, lat2 = lonlat2[0], lonlat2[1]
    # half the angular differences, in radians (pi/360 == deg->rad of delta/2)
    half_dlon = np.abs(lon1 - lon2) * np.pi / 360.0
    half_dlat = np.abs(lat1 - lat2) * np.pi / 360.0
    # haversine formula
    a = np.sin(half_dlat) ** 2 + \
        np.cos(lat1 * np.pi / 180.0) * np.cos(lat2 * np.pi / 180.0) * np.sin(half_dlon) ** 2
    # 6371 km is the mean Earth radius
    return 2 * 6371 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
class CSVToSQL:
    """Converts the raw training CSV into a SQLite database, chunk by chunk."""

    def __init__(self, folder, file_in, file_out):
        # folder holding both the input CSV and the output SQLite file
        self.folder = folder
        self.file_in = file_in
        self.file_out = file_out
    def generate_sqlite(self):
        """Stream the training CSV into a 'train_data' SQLite table.

        The CSV is read in 5000-row chunks (header=None) so the whole file
        never has to fit in memory.  Because the header row is read as data
        in the first chunk, it is deleted from the table afterwards.
        """
        print "Converting csv file to sqlite for train set:"
        # count lines up front so we can iterate chunk offsets
        num_lines = sum(1 for line in open(self.folder+self.file_in))
        columns = ['TRIP_ID', 'CALL_TYPE', 'ORIGIN_CALL', 'ORIGIN_STAND', 'TAXI_ID',
                   'TIMESTAMP', 'DAYTYPE', 'MISSING_DATA', 'POLYLINE']
        con = sqlite3.connect(self.folder+self.file_out)
        chunk_size = 5000
        count = 1
        for i in range(0, num_lines, chunk_size):
            df = pd.read_csv(self.folder+self.file_in, header=None,
                             nrows=chunk_size, skiprows=i, low_memory=False)
            df.columns = columns
            sql.to_sql(df, name='train_data', con=con, index=False,
                       index_label='molecule_id', if_exists='append')
            print "Batch No. {} completed".format(count)
            count += 1
        con.close()
        # Delete the first row with duplicate column names (the CSV header,
        # which was loaded as an ordinary data row above)
        con = sqlite3.connect(self.folder+self.file_out)
        c = con.cursor()
        c.execute("DELETE FROM train_data WHERE TRIP_ID='TRIP_ID'")
        con.commit()
        con.close()
        print "All completed!\n"
class TrainDescriptive:
    """Derives per-trip features and descriptive-statistics CSVs from the
    SQLite training data."""

    def __init__(self, folder, file_in, file_out):
        # folder holding the input SQLite db, output db, and CSV outputs
        self.folder = folder
        self.file_in = file_in
        self.file_out = file_out
    def transform(self):
        # this function does the following
        # 1) convert timestamp to time of the day, day of week, and month of year
        # 2) convert polyline to snapshots
        # 3) calculate the trip length
        # 4) calculate the average speed
        """Build the feature-augmented training table, 5000 rows at a time.

        Reads non-missing rows from the input db, derives time-of-day,
        calendar, snapshot-count, trip-length (km) and average-speed (m/s)
        features, drops trips with 10 or fewer snapshots, and appends the
        result to 'train_data' in the output db.
        """
        print "Generating training file with descriptive stats:"
        # initialize the connection with the input and output sqlite file
        con_in = sqlite3.connect(self.folder+self.file_in)
        con_out = sqlite3.connect(self.folder+self.file_out)
        con_in.text_factory = str
        chunk_reader = pd.read_sql("SELECT * FROM train_data WHERE MISSING_DATA!='1'",
                                   con_in, chunksize=5000)
        count = 1
        for chunk in chunk_reader:
            print 'Chunk {} started:'.format(count)
            # fractional hour of the day (e.g. 13.5 == 13:30)
            chunk['Time_of_Day'] = chunk.TIMESTAMP.map(lambda x: datetime.utcfromtimestamp(float(x)).hour +
                                                       datetime.utcfromtimestamp(float(x)).minute/60.0 +
                                                       datetime.utcfromtimestamp(float(x)).second/3600.0)
            chunk['Hour_of_Day'] = chunk.Time_of_Day.map(lambda x: np.round(x))
            chunk['Day_of_Week'] = chunk.TIMESTAMP.map(lambda x: datetime.utcfromtimestamp(float(x)).weekday())
            chunk['Month_of_Year'] = chunk.TIMESTAMP.map(lambda x: datetime.utcfromtimestamp(float(x)).month)
            # split the polyline string into its "[lon,lat]" snapshot tokens
            chunk['POLYLINE_Split'] = chunk.POLYLINE.map(lambda x:
                                                         re.compile("\[[-+]?\d+.\d+,[-+]?\d+.\d+\]").findall(x))
            chunk['Snapshots'] = chunk.POLYLINE_Split.map(lambda x: len(x))
            # drop very short trips (10 snapshots == 150 seconds or less)
            chunk = chunk[chunk.Snapshots > 10]
            chunk['Start_Point'] = chunk.POLYLINE_Split.map(lambda x: lonlat_convert(x[0]))
            chunk['End_Point'] = chunk.POLYLINE_Split.map(lambda x: lonlat_convert(x[-1]))
            # straight-line (haversine) distance between first and last snapshot, km
            chunk['Trip_Length'] = pd.DataFrame([get_dist(chunk.iloc[i].Start_Point, chunk.iloc[i].End_Point)
                                                 for i in range(len(chunk))])
            # snapshots are 15 seconds apart, so (n-1)*15 is the trip time;
            # speed is therefore in metres per second
            chunk['Avg_Speed'] = chunk['Trip_Length']*1000.0 / ((chunk['Snapshots']-1)*15)
            chunk.drop(['POLYLINE', 'POLYLINE_Split', 'Start_Point', 'End_Point'], axis=1, inplace=True)
            sql.to_sql(chunk,
                       name='train_data',
                       con=con_out,
                       index=False,
                       index_label='molecule_id',
                       if_exists='append')
            print 'Chunk {} completed!'.format(count)
            count += 1
        con_in.close()
        con_out.close()
        print "All completed!\n"
    def descriptive_hour(self):
        # generate descriptive file for each hour of the day
        """Write per-hour aggregate stats to Descriptive_Hour.csv."""
        print "Generating descriptive stats for each hour:"
        con = sqlite3.connect(self.folder+self.file_out)
        # (AVG(Snapshots)-1)*15 converts snapshot counts to seconds
        df_hourly = pd.read_sql("""
                                SELECT CAST(Hour_of_Day AS INTEGER) AS Hour_of_Day,
                                       (AVG(Snapshots)-1)*15 AS avg_trip_time,
                                       AVG(Trip_Length) AS avg_trip_length,
                                       COUNT(*) AS trip_count,
                                       AVG(Avg_Speed) AS avg_speed_per_trip
                                FROM train_data
                                GROUP BY Hour_of_Day
                                """, con)
        df_hourly.to_csv(self.folder+'Descriptive_Hour.csv', index=False)
        con.close()
        print "Completed!\n"
    def descriptive_weekday(self):
        # generate descriptive file for each weekday
        """Write per-weekday aggregate stats to Descriptive_Weekday.csv."""
        print "Generating descriptive stats for each weekday:"
        con = sqlite3.connect(self.folder+self.file_out)
        df_weekday = pd.read_sql("""
                                 SELECT Day_of_Week,
                                        (AVG(Snapshots)-1)*15 AS avg_trip_time,
                                        AVG(Trip_Length) AS avg_trip_length,
                                        COUNT(*) AS trip_count,
                                        AVG(Avg_Speed) AS avg_speed_per_trip
                                 FROM train_data
                                 GROUP BY Day_of_Week
                                 """, con)
        df_weekday.to_csv(self.folder+'Descriptive_Weekday.csv', index=False)
        con.close()
        print "Completed!\n"
    def descriptive_month(self):
        # generate descriptive file for each month
        """Write per-month aggregate stats to Descriptive_Month.csv."""
        print "Generating descriptive stats for each month:"
        con = sqlite3.connect(self.folder+self.file_out)
        df_month = pd.read_sql("""
                               SELECT Month_of_Year,
                                      (AVG(Snapshots)-1)*15 AS avg_trip_time,
                                      AVG(Trip_Length) AS avg_trip_length,
                                      COUNT(*) AS trip_count,
                                      AVG(Avg_Speed) AS avg_speed_per_trip
                               FROM train_data
                               GROUP BY Month_of_Year
                               """, con)
        df_month.to_csv(self.folder+'Descriptive_Month.csv', index=False)
        con.close()
        print "Completed!\n"
    def descriptive_driver(self):
        # generate descriptive file for each driver id
        """Write per-driver aggregate stats to Descriptive_Driver.csv."""
        print "Generating descriptive stats for each driver:"
        con = sqlite3.connect(self.folder+self.file_out)
        df_driver = pd.read_sql("""
                                SELECT TAXI_ID, (avg(Snapshots)-1)*15 AS avg_trip_time,
                                       AVG(Trip_Length) AS avg_trip_length,
                                       COUNT(*) AS trip_count,
                                       AVG(Avg_Speed) AS avg_speed_per_trip
                                FROM train_data
                                GROUP BY TAXI_ID
                                """, con)
        df_driver.to_csv(self.folder+'Descriptive_Driver.csv', index=False)
        con.close()
        print "Completed!\n"
    def descriptive_stand(self):
        # generate descriptive file for each taxi stand
        """Write per-taxi-stand aggregate stats to Descriptive_Stand.csv.

        Only CALL_TYPE 'B' trips (dispatched from a stand) are included.
        """
        print "Generating descriptive stats for each taxi stand:"
        con = sqlite3.connect(self.folder+self.file_out)
        df_stand = pd.read_sql("""
                               SELECT CAST(ORIGIN_STAND AS INTEGER) AS ORIGIN_STAND,
                                      (AVG(Snapshots)-1)*15 AS avg_trip_time,
                                      AVG(Trip_Length) AS avg_trip_length,
                                      COUNT(*) AS trip_count,
                                      AVG(Avg_Speed) AS avg_speed_per_trip
                               FROM train_data
                               WHERE CALL_TYPE='B'
                               GROUP BY CAST(ORIGIN_STAND AS INTEGER)
                               """, con)
        df_stand.to_csv(self.folder+'Descriptive_Stand.csv', index=False)
        con.close()
        print "Completed!\n"
    def descriptive_caller(self):
        # generate descriptive file for each caller id
        """Write per-caller aggregate stats to Descriptive_Caller.csv.

        Only CALL_TYPE 'A' trips (dispatched from the central) are included.
        """
        print "Generating descriptive stats for each caller:"
        con = sqlite3.connect(self.folder+self.file_out)
        df_caller = pd.read_sql("""
                                SELECT CAST(ORIGIN_CALL AS INTEGER) AS ORIGIN_CALL,
                                       (AVG(Snapshots)-1)*15 AS avg_trip_time,
                                       AVG(Trip_Length) AS avg_trip_length,
                                       COUNT(*) AS trip_count,
                                       AVG(Avg_Speed) AS avg_speed_per_trip
                                FROM train_data
                                WHERE CALL_TYPE='A'
                                GROUP BY CAST(ORIGIN_CALL AS INTEGER)
                                """, con)
        df_caller.to_csv(self.folder+'Descriptive_Caller.csv', index=False)
        con.close()
        print "Completed!\n"
class TrainPreProcessing:
    """Pre-processes the training set into model-ready features, joining in
    the descriptive-statistics CSVs produced by TrainDescriptive."""

    def __init__(self, folder, file_in, file_out):
        # initialize folder and input / output file names
        self.folder = folder
        self.file_in = file_in
        self.file_out = file_out
def train_pre_process(self):
# read in the training set by chunks, and add engineered features
print "Pre-processing the training set:"
chunk_reader = pd.read_csv(self.folder+self.file_in, chunksize=5000)
count = 1
for chunk in chunk_reader:
print "Chunk No.{} started:".format(count)
# reset index
chunk = chunk[chunk.MISSING_DATA == False]
chunk.reset_index(inplace=True)
# split the polyline and calculate actual snapshots and travel time
chunk['POLYLINE_Split'] = chunk.POLYLINE.map(lambda x:
re.compile("\[[-+]?\d+.\d+,[-+]?\d+.\d+\]").findall(x))
chunk['Snapshots'] = chunk.POLYLINE_Split.map(lambda x: len(x))
chunk = pd.DataFrame(chunk[chunk.Snapshots > 10])
chunk['Travel_Time'] = chunk['Snapshots'].map(lambda x: (x-1)*15)
# Randomly truncate to match the format of the test data
def truncate_func(row):
path_len = np.random.randint(1, row['Snapshots'])
return tuple(row['POLYLINE_Split'][:path_len])
chunk['POLYLINE_Split_Truncated'] = chunk.apply(truncate_func, axis=1)
# Delete/rename columns
chunk.drop(['POLYLINE', 'POLYLINE_Split'], axis=1, inplace=True)
chunk.rename(columns={'POLYLINE_Split_Truncated': 'POLYLINE_Split'}, inplace=True)
# Add dummies for CALL_TYPE
chunk = pd.concat([chunk, pd.get_dummies(chunk.CALL_TYPE, prefix='Call_Type_')], axis=1)
# Deal with time stamp
chunk['Time_of_Day'] = chunk.TIMESTAMP.map(lambda x:
datetime.utcfromtimestamp(float(x)).hour +
datetime.utcfromtimestamp(float(x)).minute/60.0 +
datetime.utcfromtimestamp(float(x)).second/3600.0)
chunk['Hour_of_Day'] = chunk.Time_of_Day.map(lambda x: np.round(x)).astype(int)
chunk['Day_of_Week'] = chunk.TIMESTAMP.map(lambda x: datetime.utcfromtimestamp(float(x)).weekday())
chunk['Month_of_Year'] = chunk.TIMESTAMP.map(lambda x: datetime.utcfromtimestamp(float(x)).month)
# Read in description for hour of the day
file1 = 'Descriptive_Hour.csv'
df_hour = pd.read_csv(self.folder+file1, header=False)
# Join by hour of the day
chunk = pd.merge(chunk, df_hour, on='Hour_of_Day')
chunk['Hour_TT'] = chunk.avg_trip_time
chunk['Hour_TL'] = chunk.avg_trip_length
chunk['Hour_TC'] = chunk.trip_count
chunk['Hour_TS'] = chunk.avg_speed_per_trip
chunk.drop(['avg_trip_time', 'avg_trip_length', 'trip_count', 'avg_speed_per_trip'], axis=1, inplace=True)
# Read in description for day of the week
file2 = 'Descriptive_Weekday.csv'
df_weekday = pd.read_csv(self.folder+file2, header=False)
# Join by day of the week
chunk = pd.merge(chunk, df_weekday, on='Day_of_Week')
chunk['Weekday_TT'] = chunk.avg_trip_time
chunk['Weekday_TL'] = chunk.avg_trip_length
chunk['Weekday_TC'] = chunk.trip_count
chunk['Weekday_TS'] = chunk.avg_speed_per_trip
chunk.drop(['avg_trip_time', 'avg_trip_length', 'trip_count', 'avg_speed_per_trip'], axis=1, inplace=True)
# Read in description for month of the year
file3 = 'Descriptive_Month.csv'
df_month = pd.read_csv(self.folder+file3, header=False)
# Join by month of the year
chunk = pd.merge(chunk, df_month, on='Month_of_Year')
chunk['Month_TT'] = chunk.avg_trip_time
chunk['Month_TL'] = chunk.avg_trip_length
chunk['Month_TC'] = chunk.trip_count
chunk['Month_TS'] = chunk.avg_speed_per_trip
chunk.drop(['avg_trip_time', 'avg_trip_length', 'trip_count', 'avg_speed_per_trip'], axis=1, inplace=True)
# Read in description for driver id
file4 = 'Descriptive_Driver.csv'
df_driver = pd.read_csv(self.folder+file4, header=False)
# Join by driver id
chunk = pd.merge(chunk, df_driver, on='TAXI_ID')
chunk['Driver_TT'] = chunk.avg_trip_time
chunk['Driver_TL'] = chunk.avg_trip_length
chunk['Driver_TC'] = chunk.trip_count
chunk['Driver_TS'] = chunk.avg_speed_per_trip
chunk.drop(['avg_trip_time', 'avg_trip_length', 'trip_count', 'avg_speed_per_trip'], axis=1, inplace=True)
# Read in description for stand id
file5 = 'Descriptive_Stand.csv'
df_stand = pd.read_csv(self.folder+file5, header=False)
# Left Join by stand id
chunk = pd.merge(chunk, df_stand, how='left', on=['ORIGIN_STAND', 'ORIGIN_STAND'])
chunk['Stand_TT'] = chunk.avg_trip_time
chunk['Stand_TL'] = chunk.avg_trip_length
chunk['Stand_TC'] = chunk.trip_count
chunk['Stand_TS'] = chunk.avg_speed_per_trip
chunk.drop(['avg_trip_time', 'avg_trip_length', 'trip_count', 'avg_speed_per_trip'], axis=1, inplace=True)
# Read in description for caller id
file6 = 'Descriptive_Caller.csv'
df_caller = pd.read_csv(self.folder+file6, header=False)
# Left Join by caller id
chunk = pd.merge(chunk, df_caller, how='left', on=['ORIGIN_CALL', 'ORIGIN_CALL'])
chunk['Caller_TT'] = chunk.avg_trip_time
chunk['Caller_TL'] = chunk.avg_trip_length
chunk['Caller_TC'] = chunk.trip_count
chunk['Caller_TS'] = chunk.avg_speed_per_trip
chunk.drop(['avg_trip_time', 'avg_trip_length', 'trip_count', 'avg_speed_per_trip'], axis=1, inplace=True)
# If stand id is null, we assign grand average to the stand description
chunk.loc[chunk.ORIGIN_STAND.isnull(), 'Stand_TT'] = 671.847205828125
chunk.loc[chunk.ORIGIN_STAND.isnull(), 'Stand_TL'] = 3.41625640673437
chunk.loc[chunk.ORIGIN_STAND.isnull(), 'Stand_TC'] = 12459.53125
chunk.loc[chunk.ORIGIN_STAND.isnull(), 'Stand_TS'] = 6.77996522545313
# If caller id is null, we assign average numbers to the caller description
chunk.loc[chunk.ORIGIN_CALL.isnull(), 'Caller_TT'] = 769.644426032955
chunk.loc[chunk.ORIGIN_CALL.isnull(), 'Caller_TL'] = 3.45908442749228
chunk.loc[chunk.ORIGIN_CALL.isnull(), 'Caller_TC'] = 6.33404623868778
chunk.loc[chunk.ORIGIN_CALL.isnull(), 'Caller_TS'] = 5.92595987288811
# If there are still null values for stand descriptions
chunk.loc[chunk.Stand_TT.isnull(), 'Stand_TT'] = 671.847205828125
chunk.loc[chunk.Stand_TL.isnull(), 'Stand_TL'] = 3.41625640673437
chunk.loc[chunk.Stand_TC.isnull(), 'Stand_TC'] = 12459.53125
chunk.loc[chunk.Stand_TS.isnull(), 'Stand_TS'] = 6.77996522545313
# If there are still null values for caller descriptions
chunk.loc[chunk.Caller_TT.isnull(), 'Caller_TT'] = 769.644426032955
chunk.loc[chunk.Caller_TL.isnull(), 'Caller_TL'] = 3.45908442749228
chunk.loc[chunk.Caller_TC.isnull(), 'Caller_TC'] = 6.33404623868778
chunk.loc[chunk.Caller_TS.isnull(), 'Caller_TS'] = 5.92595987288811
# Add start speed (if less than 2 snapshots, use average start speed)
def get_start_speed(POLYLINE_Split):
num = len(POLYLINE_Split)
if num < 2:
return None
else:
Lonlat_first = lonlat_convert(POLYLINE_Split[0])
Lonlat_second = lonlat_convert(POLYLINE_Split[1])
start_speed = get_dist(Lonlat_first, Lonlat_second) * 1000.0 / 15.0
return start_speed
chunk['Start_Speed'] = chunk.POLYLINE_Split.map(lambda x: get_start_speed(x))
# Add end speed (if less than 2 snapshots, use average end speed)
def get_end_speed(POLYLINE_Split):
num = len(POLYLINE_Split)
if num < 2:
return None
else:
Lonlat_last_but_one = lonlat_convert(POLYLINE_Split[num-2])
Lonlat_last = lonlat_convert(POLYLINE_Split[num-1])
end_speed = get_dist(Lonlat_last_but_one, Lonlat_last) * 1000.0 / 15.0
return end_speed
chunk['End_Speed'] = chunk.POLYLINE_Split.map(lambda x: get_end_speed(x))
# Add average speed (if less than 2 snapshots, use average average speed
def get_avg_speed(POLYLINE_Split):
num = len(POLYLINE_Split)
if num < 2:
return None
else:
speeds = []
for i in range(num-1):
Lonlat_one = lonlat_convert(POLYLINE_Split[i])
Lonlat_two = lonlat_convert(POLYLINE_Split[i+1])
speed = get_dist(Lonlat_one, Lonlat_two) * 1000.0 / 15.0
speeds.append(speed)
return np.mean(speeds)
chunk['Avg_Speed'] = chunk.POLYLINE_Split.map(lambda x: get_avg_speed(x))
# Add start speed two
def get_start_speed_two(POLYLINE_Split):
num = len(POLYLINE_Split)
if num < 3:
return None
else:
Lonlat_second = lonlat_convert(POLYLINE_Split[1])
Lonlat_third = lonlat_convert(POLYLINE_Split[2])
start_speed_two = get_dist(Lonlat_second, Lonlat_third) * 1000.0 / 15.0
return start_speed_two
chunk['Start_Speed_two'] = chunk.POLYLINE_Split.map(lambda x: get_start_speed_two(x))
# Add end speed two
def get_end_speed_two(POLYLINE_Split):
num = len(POLYLINE_Split)
if num < 3:
return None
else:
Lonlat_last_but_two = lonlat_convert(POLYLINE_Split[num-3])
Lonlat_last_but_one = lonlat_convert(POLYLINE_Split[num-2])
end_speed_two = get_dist(Lonlat_last_but_two, Lonlat_last_but_one) * 1000.0 / 15.0
return end_speed_two
chunk['End_Speed_two'] = chunk.POLYLINE_Split.map(lambda x: get_end_speed_two(x))
# Add current snapshots
chunk['Current_Snapshots'] = chunk.POLYLINE_Split.map(lambda x: len(x))
chunk['Current_Snapshots_log'] = chunk.POLYLINE_Split.map(lambda x: np.log(len(x)+1))
# This is for generating the cleaned training set
chunk_out = chunk[['TRIP_ID', 'CALL_TYPE', 'ORIGIN_CALL', 'ORIGIN_STAND', 'TAXI_ID', 'TIMESTAMP',
'Call_Type__A', 'Call_Type__B', 'Call_Type__C',
'Time_of_Day', 'Hour_of_Day', 'Day_of_Week', 'Month_of_Year',
'Hour_TT', 'Hour_TL', 'Hour_TC', 'Hour_TS',
'Weekday_TT', 'Weekday_TL', 'Weekday_TC', 'Weekday_TS',
'Month_TT', 'Month_TL', 'Month_TC', 'Month_TS',
'Driver_TT', 'Driver_TL', 'Driver_TC', 'Driver_TS',
'Stand_TT', 'Stand_TL', 'Stand_TC', 'Stand_TS',
'Caller_TT', 'Caller_TL', 'Caller_TC', 'Caller_TS',
'Start_Speed', 'End_Speed', 'Avg_Speed', 'Start_Speed_two', 'End_Speed_two',
'Current_Snapshots', 'Current_Snapshots_log',
'Snapshots', 'Travel_Time', 'POLYLINE_Split']]
chunk = []
if count == 1:
chunk_out.to_csv(self.folder+'train_cleaned_temp.csv', mode='a', index=False)
else:
chunk_out.to_csv(self.folder+'train_cleaned_temp.csv', mode='a', header=False, index=False)
print 'Chunk No.{} completed!'.format(count)
count += 1
print "All completed!\n"
def fix_null(self):
# fix the null values of speed variables using grand average
print "Fixing null values in speed variables:"
speed_dict = {'Start_Speed': 2.255119,
'End_Speed': 7.652231,
'Avg_Speed': 6.905948,
'Start_Speed_Two': 4.302278,
'End_Speed_Two': 7.619596}
chunk_reader = pd.read_csv(self.folder+'train_cleaned_temp.csv', chunksize=10000)
count = 1
for chunk in chunk_reader:
print 'Chunk No.{} started:'.format(count)
chunk = chunk[(chunk.Start_Speed <= 40) & (chunk.End_Speed <= 40) & (chunk.Avg_Speed <= 40) &
(chunk.Start_Speed_two <= 40) & (chunk.End_Speed_two <= 40) & (chunk.Current_Snapshots < 1000)]
chunk.reset_index(inplace=True)
chunk.drop(['index'], axis=1, inplace=True)
chunk.loc[chunk.Start_Speed.isnull(), 'Start_Speed'] = speed_dict['Start_Speed']
chunk.loc[chunk.End_Speed.isnull(), 'End_Speed'] = speed_dict['End_Speed']
chunk.loc[chunk.Avg_Speed.isnull(), 'Avg_Speed'] = speed_dict['Avg_Speed']
chunk.loc[chunk.Start_Speed_two.isnull(), 'Start_Speed_two'] = speed_dict['Start_Speed_Two']
chunk.loc[chunk.End_Speed_two.isnull(), 'End_Speed_two'] = speed_dict['End_Speed_Two']
# Save the changes to a new training file
if count == 1:
chunk.to_csv(self.folder+self.file_out, mode='a', index=False)
else:
chunk.to_csv(self.folder+self.file_out, mode='a', header=False, index=False)
print 'Chunk No.{} completed!'.format(count)
count += 1
print "All completed!\n"
class TestPreProcessing:
"""Apply the same feature engineering as TrainPreProcessing to the test
set (loaded in one pass, not chunked) and write the model-ready feature
matrix with trip ids to self.file_out."""
def __init__(self, folder, file_in, file_out):
"""Store the working folder and input/output file names."""
self.folder = folder
self.file_in = file_in
self.file_out = file_out
def test_pre_process(self):
"""Engineer the test-set features; mirrors train_pre_process but
without truncation (test trips are already partial)."""
print "Pre-processing the training set:"
# perform the same feature engineering as the training set
df_Test = pd.read_csv(self.folder+self.file_in)
# Need to keep track the the trip_id
# reset_index materialises the original row order in an 'index'
# column so it can be restored after the merges reshuffle rows.
df_Test.reset_index(inplace=True)
# Add dummies for CALL_TYPE
df_Test = pd.concat([df_Test, pd.get_dummies(df_Test.CALL_TYPE, prefix='Call_Type_')], axis=1)
# Deal with time stamp
df_Test['Time_of_Day'] = df_Test.TIMESTAMP.map(lambda x: datetime.utcfromtimestamp(x).hour +
datetime.utcfromtimestamp(float(x)).minute/60.0 +
datetime.utcfromtimestamp(float(x)).second/3600.0)
df_Test['Hour_of_Day'] = df_Test.Time_of_Day.map(lambda x: np.round(x)).astype(int)
df_Test['Day_of_Week'] = df_Test.TIMESTAMP.map(lambda x: datetime.utcfromtimestamp(float(x)).weekday())
df_Test['Month_of_Year'] = df_Test.TIMESTAMP.map(lambda x: datetime.utcfromtimestamp(float(x)).month)
# Read in description for hour of the day
file1 = 'Descriptive_Hour.csv'
# NOTE(review): header=False is pandas-version-dependent behaviour;
# confirm against the pandas version this was written for.
df_hour = pd.read_csv(self.folder+file1, header=False)
# Join by hour of the day
df_Test = pd.merge(df_Test, df_hour, on='Hour_of_Day')
df_Test['Hour_TT'] = df_Test.avg_trip_time
df_Test['Hour_TL'] = df_Test.avg_trip_length
df_Test['Hour_TC'] = df_Test.trip_count
df_Test['Hour_TS'] = df_Test.avg_speed_per_trip
df_Test.drop(['avg_trip_time', 'avg_trip_length', 'trip_count', 'avg_speed_per_trip'], axis=1, inplace=True)
# Read in description for day of the week
file2 = 'Descriptive_Weekday.csv'
df_weekday = pd.read_csv(self.folder+file2, header=False)
# Join by day of week
df_Test = pd.merge(df_Test, df_weekday, on='Day_of_Week')
df_Test['Weekday_TT'] = df_Test.avg_trip_time
df_Test['Weekday_TL'] = df_Test.avg_trip_length
df_Test['Weekday_TC'] = df_Test.trip_count
df_Test['Weekday_TS'] = df_Test.avg_speed_per_trip
df_Test.drop(['avg_trip_time', 'avg_trip_length', 'trip_count', 'avg_speed_per_trip'], axis=1, inplace=True)
# Read in description for month of the year
file3 = 'Descriptive_Month.csv'
df_month = pd.read_csv(self.folder+file3, header=False)
# Join by month of year
df_Test = pd.merge(df_Test, df_month, on='Month_of_Year')
df_Test['Month_TT'] = df_Test.avg_trip_time
df_Test['Month_TL'] = df_Test.avg_trip_length
df_Test['Month_TC'] = df_Test.trip_count
df_Test['Month_TS'] = df_Test.avg_speed_per_trip
df_Test.drop(['avg_trip_time', 'avg_trip_length', 'trip_count', 'avg_speed_per_trip'], axis=1, inplace=True)
# Read in description for driver id
file4 = 'Descriptive_Driver.csv'
df_driver = pd.read_csv(self.folder+file4, header=False)
# Join by driver id
df_Test = pd.merge(df_Test, df_driver, on='TAXI_ID')
df_Test['Driver_TT'] = df_Test.avg_trip_time
df_Test['Driver_TL'] = df_Test.avg_trip_length
df_Test['Driver_TC'] = df_Test.trip_count
df_Test['Driver_TS'] = df_Test.avg_speed_per_trip
df_Test.drop(['avg_trip_time', 'avg_trip_length', 'trip_count', 'avg_speed_per_trip'], axis=1, inplace=True)
# Read in description for stand id
file5 = 'Descriptive_Stand.csv'
df_stand = pd.read_csv(self.folder+file5, header=False)
# Left Join by stand id
# NOTE(review): the duplicated key in `on` is equivalent to a single key.
df_Test = pd.merge(df_Test, df_stand, how='left', on=['ORIGIN_STAND', 'ORIGIN_STAND'])
df_Test['Stand_TT'] = df_Test.avg_trip_time
df_Test['Stand_TL'] = df_Test.avg_trip_length
df_Test['Stand_TC'] = df_Test.trip_count
df_Test['Stand_TS'] = df_Test.avg_speed_per_trip
df_Test.drop(['avg_trip_time', 'avg_trip_length', 'trip_count', 'avg_speed_per_trip'], axis=1, inplace=True)
# Read in description for caller id
file6 = 'Descriptive_Caller.csv'
df_caller = pd.read_csv(self.folder+file6, header=False)
# Left Join by caller id
df_Test = pd.merge(df_Test, df_caller, how='left', on=['ORIGIN_CALL', 'ORIGIN_CALL'])
df_Test['Caller_TT'] = df_Test.avg_trip_time
df_Test['Caller_TL'] = df_Test.avg_trip_length
df_Test['Caller_TC'] = df_Test.trip_count
df_Test['Caller_TS'] = df_Test.avg_speed_per_trip
df_Test.drop(['avg_trip_time', 'avg_trip_length', 'trip_count', 'avg_speed_per_trip'], axis=1, inplace=True)
# If stand id is null, we assign average numbers to the stand description
# Magic numbers are precomputed grand averages (same as the train pass).
df_Test.loc[df_Test.ORIGIN_STAND.isnull(), 'Stand_TT'] = 671.847205828125
df_Test.loc[df_Test.ORIGIN_STAND.isnull(), 'Stand_TL'] = 3.41625640673437
df_Test.loc[df_Test.ORIGIN_STAND.isnull(), 'Stand_TC'] = 12459.53125
df_Test.loc[df_Test.ORIGIN_STAND.isnull(), 'Stand_TS'] = 6.77996522545313
# If caller id is null, we assign average numbers to the caller description
df_Test.loc[df_Test.ORIGIN_CALL.isnull(), 'Caller_TT'] = 769.644426032955
df_Test.loc[df_Test.ORIGIN_CALL.isnull(), 'Caller_TL'] = 3.45908442749228
df_Test.loc[df_Test.ORIGIN_CALL.isnull(), 'Caller_TC'] = 6.33404623868778
df_Test.loc[df_Test.ORIGIN_CALL.isnull(), 'Caller_TS'] = 5.92595987288811
# four special cases here
# (caller id present but absent from the descriptive file)
df_Test.loc[df_Test.Caller_TL.isnull(), 'Caller_TL'] = 3.45908442749228
df_Test.loc[df_Test.Caller_TS.isnull(), 'Caller_TS'] = 5.92595987288811
df_Test.loc[df_Test.Caller_TT.isnull(), 'Caller_TT'] = 769.644426032955
df_Test.loc[df_Test.Caller_TC.isnull(), 'Caller_TC'] = 6.33404623868778
# Don't forget this step!
# NOTE(review): unescaped '.' in the regex matches any character.
df_Test['POLYLINE_Split'] = df_Test.POLYLINE.map(lambda x:
re.compile("\[[-+]?\d+.\d+,[-+]?\d+.\d+\]").findall(x))
# Grand-average fallbacks for trips too short to compute a speed.
speed_dict = {'Start_Speed': 2.255119,
'End_Speed': 7.652231,
'Avg_Speed': 6.905948,
'Start_Speed_Two': 4.302278,
'End_Speed_Two': 7.619596}
# Add start speed (if less than 2 snapshots, use average start speed)
def get_start_speed(POLYLINE_Split):
num = len(POLYLINE_Split)
if num < 2:
return None
else:
Lonlat_first = lonlat_convert(POLYLINE_Split[0])
Lonlat_second = lonlat_convert(POLYLINE_Split[1])
start_speed = get_dist(Lonlat_first, Lonlat_second) * 1000.0 / 15.0
return start_speed
df_Test['Start_Speed'] = df_Test.POLYLINE_Split.map(lambda x: get_start_speed(x))
df_Test.loc[df_Test.Start_Speed.isnull(), 'Start_Speed'] = speed_dict['Start_Speed']
# Add end speed (if less than 2 snapshots, use average end speed)
def get_end_speed(POLYLINE_Split):
num = len(POLYLINE_Split)
if num < 2:
return None
else:
Lonlat_last_but_one = lonlat_convert(POLYLINE_Split[num-2])
Lonlat_last = lonlat_convert(POLYLINE_Split[num-1])
end_speed = get_dist(Lonlat_last_but_one, Lonlat_last) * 1000.0 / 15.0
return end_speed
df_Test['End_Speed'] = df_Test.POLYLINE_Split.map(lambda x: get_end_speed(x))
df_Test.loc[df_Test.End_Speed.isnull(), 'End_Speed'] = speed_dict['End_Speed']
# Add average speed (if less than 2 snapshots, use average average speed
def get_avg_speed(POLYLINE_Split):
num = len(POLYLINE_Split)
if num < 2:
return None
else:
speeds = []
for i in range(num-1):
Lonlat_one = lonlat_convert(POLYLINE_Split[i])
Lonlat_two = lonlat_convert(POLYLINE_Split[i+1])
speed = get_dist(Lonlat_one, Lonlat_two)*1000.0/15.0
speeds.append(speed)
return np.mean(speeds)
df_Test['Avg_Speed'] = df_Test.POLYLINE_Split.map(lambda x: get_avg_speed(x))
df_Test.loc[df_Test.Avg_Speed.isnull(), 'Avg_Speed'] = speed_dict['Avg_Speed']
# Add Start_Speed_two
def get_start_speed_two(POLYLINE_Split):
num = len(POLYLINE_Split)
if num < 3:
return None
else:
Lonlat_second = lonlat_convert(POLYLINE_Split[1])
Lonlat_third = lonlat_convert(POLYLINE_Split[2])
start_speed_two = get_dist(Lonlat_second, Lonlat_third) * 1000.0 / 15.0
return start_speed_two
df_Test['Start_Speed_two'] = df_Test.POLYLINE_Split.map(lambda x: get_start_speed_two(x))
df_Test.loc[df_Test.Start_Speed_two.isnull(), 'Start_Speed_two'] = speed_dict['Start_Speed_Two']
# Add End_Speed_two
def get_end_speed_two(POLYLINE_Split):
num = len(POLYLINE_Split)
if num < 3:
return None
else:
Lonlat_last_but_two = lonlat_convert(POLYLINE_Split[num-3])
Lonlat_last_but_one = lonlat_convert(POLYLINE_Split[num-2])
end_speed_two = get_dist(Lonlat_last_but_two, Lonlat_last_but_one)*1000.0/15.0
return end_speed_two
df_Test['End_Speed_two'] = df_Test.POLYLINE_Split.map(lambda x: get_end_speed_two(x))
df_Test.loc[df_Test.End_Speed_two.isnull(), 'End_Speed_two'] = speed_dict['End_Speed_Two']
# Add current snapshots
df_Test['Current_Snapshots'] = df_Test.POLYLINE_Split.map(lambda x: len(x))
df_Test['Current_Snapshots_log'] = df_Test.POLYLINE_Split.map(lambda x: np.log(len(x)+1))
# Restore the original row order saved in 'index' above.
# NOTE(review): DataFrame.sort was removed in pandas 0.20
# (use sort_values) — this pins an old pandas version.
df_Test.sort(['index'], ascending=1, inplace=True)
df_Test.reset_index(inplace=True)
df_Test.drop(['level_0', 'index'], axis=1, inplace=True)
# For test examples, prepare the X and y
X_test_with_id = df_Test[['TRIP_ID', 'Call_Type__A', 'Call_Type__B', 'Call_Type__C',
'Time_of_Day', 'Hour_of_Day', 'Day_of_Week', 'Month_of_Year',
'Hour_TT', 'Hour_TL', 'Hour_TC', 'Hour_TS',
'Weekday_TT', 'Weekday_TL', 'Weekday_TC', 'Weekday_TS',
'Month_TT', 'Month_TL', 'Month_TC', 'Month_TS',
'Driver_TT', 'Driver_TL', 'Driver_TC', 'Driver_TS',
'Stand_TT', 'Stand_TL', 'Stand_TC', 'Stand_TS',
'Caller_TT', 'Caller_TL', 'Caller_TC', 'Caller_TS',
'Start_Speed', 'End_Speed', 'Avg_Speed', 'Start_Speed_two', 'End_Speed_two',
'Current_Snapshots', 'Current_Snapshots_log']]
X_test_with_id.to_csv(self.folder+self.file_out, index=False)
print "Completed!\n"
class ScalableRandomForest:
"""Memory-bounded random-forest-style ensemble: the training set is
partitioned into num_trees disjoint slices, one regressor is trained per
slice on a random feature subset, and the per-slice predictions are
averaged at the end."""
def __init__(self, num_trees, q, folder, train_file, test_file):
"""q is the fraction of features randomly kept for each partition."""
# num_trees is used to determine number of trees in random forest
# it is also the same value used to decide number of partitions for total data set
self.num_trees = num_trees
self.q = q
self.folder = folder
self.train_file = train_file
self.test_file = test_file
# Filled by create_file_names(); list of per-partition CSV names.
self.split_files = None
def create_file_names(self):
"""Build the per-partition CSV file names (Train_Part0.csv, ...)."""
# Create file name for each split database
self.split_files = []
for i in range(self.num_trees):
self.split_files.append('Train_Part{}.csv'.format(i))
def split_train(self):
"""Round-robin the (outlier-filtered) training rows into num_trees
partition files by row index modulo num_trees.

NOTE(review): files are opened with mode='a'; re-running without
deleting Train_Part*.csv first will duplicate rows — confirm intended.
"""
# split the training set into same number of partitions as the number of trees
print "Splitting the training set:"
chunk_reader = pd.read_csv(self.folder+self.train_file, chunksize=50000)
count = 1
for chunk in chunk_reader:
print 'Chunk No.{} started:'.format(count)
# Same outlier filter as TrainPreProcessing.fix_null.
chunk = chunk[(chunk.Start_Speed <= 40) & (chunk.End_Speed <= 40) & (chunk.Avg_Speed <= 40) &
(chunk.Start_Speed_two <= 40) & (chunk.End_Speed_two <= 40) & (chunk.Current_Snapshots < 1000)]
# Add row number
chunk.reset_index(inplace=True)
chunk['ind'] = chunk.index.astype(int)
chunk.drop(['index'], axis=1, inplace=True)
for i in range(self.num_trees):
sample = pd.DataFrame(chunk[chunk.ind % self.num_trees == i])
sample.drop(['ind'], axis=1, inplace=True)
# First chunk, keep the header
if count == 1:
sample.to_csv(self.folder+self.split_files[i], index=False, mode='a')
# Second and later chunk, don't keep the header
else:
sample.to_csv(self.folder+self.split_files[i], index=False, mode='a', header=False)
print 'Chunk No.{} completed!'.format(count)
count += 1
print "All completed!\n"
def train_predict_by_partition(self):
"""Fit one regressor per partition (on a random q-fraction of the
features), predict the test set, and save each partition's
predictions to submission_train{i}.csv.

The target is log(Travel_Time); predictions are exponentiated back.
"""
# for each split data set, train with a base tree
from sklearn.ensemble import RandomForestRegressor
print "Training Scalable Random Forest:"
for i in range(self.num_trees):
print "Partition {} started:".format(i)
df = pd.read_csv(self.folder+self.split_files[i])
df_X = df[['Call_Type__A', 'Call_Type__B', 'Call_Type__C',
'Time_of_Day', 'Hour_of_Day', 'Day_of_Week', 'Month_of_Year',
'Hour_TT', 'Hour_TL', 'Hour_TC', 'Hour_TS',
'Weekday_TT', 'Weekday_TL', 'Weekday_TC', 'Weekday_TS',
'Month_TT', 'Month_TL', 'Month_TC', 'Month_TS',
'Driver_TT', 'Driver_TL', 'Driver_TC', 'Driver_TS',
'Stand_TT', 'Stand_TL', 'Stand_TC', 'Stand_TS',
'Caller_TT', 'Caller_TL', 'Caller_TC', 'Caller_TS',
'Start_Speed', 'End_Speed', 'Avg_Speed', 'Start_Speed_two', 'End_Speed_two',
'Current_Snapshots', 'Current_Snapshots_log']]
y = np.log(df['Travel_Time']).values
# Randomly select subset of q*num_features attributes
column_list = df_X.columns.tolist()
num_features = len(column_list)
ind_selected = np.random.permutation(num_features)[:int(num_features * self.q)]
feature_selected = [column_list[k] for k in ind_selected]
# Prepare X
X = df_X[feature_selected].values
# Prepare X_test
df_test = pd.read_csv(self.folder+self.test_file)
X_test = df_test[feature_selected].values
# Prepare test id
IDs = df_test['TRIP_ID'].values
# Train the single tree
# NOTE(review): despite the comment, each partition trains a
# 100-tree forest (n_estimators=100). Also max_features is derived
# from the FULL feature count, not the selected subset — confirm.
maxFeatures = int(np.sqrt(num_features))
clf = RandomForestRegressor(n_estimators=100, bootstrap=False, max_features=maxFeatures)
clf = clf.fit(X, y)
# Predict the test
y_test = np.exp(clf.predict(X_test))
# Save the predictions
result = np.c_[IDs, y_test]
df_result = pd.DataFrame(result, columns=['TRIP_ID', 'TRAVEL_TIME'])
df_result.to_csv(self.folder+'submission_train{}.csv'.format(i), index=False)
print "Partition {} training & prediction completed!".format(i)
print "All completed!"
def ensemble_all_partitions(self):
"""Average the per-partition predictions, floor each prediction at
the elapsed trip time so far ((Current_Snapshots-1)*15 seconds), and
write submission_final.csv."""
# ensemble all the prediction results using each partition
# get prediction file names list
print "Ensemble all predictions:"
prediction_files = []
num_files = self.num_trees
for i in range(num_files):
prediction_files.append("submission_train{}.csv".format(i))
# Read each files
dfs = []
for i in range(num_files):
dfs.append(pd.read_csv(self.folder+prediction_files[i]))
IDs = dfs[0]['TRIP_ID'].values
Travel_Time = pd.DataFrame(columns=['TRAVEL_TIME'])
# Accumulate one 'TRAVEL_TIME' column per partition; selecting
# Travel_Time['TRAVEL_TIME'] relies on pandas returning ALL columns
# with that duplicated label, so the frame keeps growing.
for i in range(num_files):
Travel_Time = pd.concat([Travel_Time['TRAVEL_TIME'], dfs[i]['TRAVEL_TIME']], axis=1)
# Read the original test file
df_test = pd.read_csv(self.folder+self.test_file)
# Ensemble all results
y_test = np.maximum(np.mean(Travel_Time, axis=1), (df_test['Current_Snapshots']-1)*15)
result = np.c_[IDs, np.around(y_test).astype(int)]
df_result = pd.DataFrame(result, columns=['TRIP_ID', 'TRAVEL_TIME'])
df_result.to_csv(self.folder+'submission_final.csv', index=False)
print "Completed!"
def main():
    """Run the end-to-end pipeline: raw CSV -> sqlite -> descriptive stats
    -> engineered train/test features -> partitioned forest -> ensemble."""
    # step 1 - convert raw training data to sqlite database
    converter = CSVToSQL(FOLDER, 'train.csv', 'train.sqlite')
    converter.generate_sqlite()
    # step 2 - generate descriptive stats for train file
    descriptive = TrainDescriptive(FOLDER, 'train.sqlite', 'train_descriptive.sqlite')
    descriptive.transform()
    descriptive.descriptive_hour()
    descriptive.descriptive_weekday()
    descriptive.descriptive_month()
    descriptive.descriptive_driver()
    descriptive.descriptive_stand()
    descriptive.descriptive_caller()
    # step 3 - pre-process the training data
    train_prep = TrainPreProcessing(folder=FOLDER, file_in='train.csv',
                                    file_out='train_final.csv')
    train_prep.train_pre_process()
    train_prep.fix_null()
    # step 4 - pre-process the test data
    test_prep = TestPreProcessing(folder=FOLDER, file_in='test.csv',
                                  file_out='test_final.csv')
    test_prep.test_pre_process()
    # step 5 - train the model with scalable random forest and predict for test set
    forest = ScalableRandomForest(num_trees=1000, q=0.75, folder=FOLDER,
                                  train_file='train_final.csv',
                                  test_file='test_final.csv')
    forest.create_file_names()
    forest.split_train()
    forest.train_predict_by_partition()
    forest.ensemble_all_partitions()
if __name__ == '__main__':
    # Script entry point.
    main()
eldar/pose-tensorflow | test_multiperson.py | 1 | 4667 | import argparse
import logging
import os
import numpy as np
import scipy.io
import scipy.ndimage
import json
from json import encoder
encoder.FLOAT_REPR = lambda o: format(o, '.2f')
from util.config import load_config
from dataset.factory import create as create_dataset
from dataset.pose_dataset import Batch
from util.mscoco_util import pose_predict_with_gt_segm
from nnet.predict import *
from util import visualize
from multiperson.detections import extract_detections
from multiperson.predict import SpatialModel, eval_graph, get_person_conf_multicut
from multiperson.visualize import PersonDraw, visualize_detections
import matplotlib.pyplot as plt
def test_net(visualise, cache_scoremaps, development):
"""Run multi-person pose estimation over the configured dataset.

Three modes, driven by the config and the flags:
- fresh inference (no 'cached_scoremaps' in cfg): run the CNN per image;
- cache_scoremaps: dump scoremap/locref/pairwise tensors to .mat files
  and continue (or just visualise part detections) without decoding;
- cached: load the tensors from .mat files instead of running the CNN.
Detections are then assembled into per-person poses via the spatial
model + multicut, optionally visualised, and (if cfg.use_gt_segm)
accumulated into COCO-format predictions written to
'predictions_with_segm.json'.

NOTE(review): sess.close() at the end is reached even when from_cache
is true, in which case `sess` was never bound (NameError) — confirm.
"""
logging.basicConfig(level=logging.INFO)
cfg = load_config()
dataset = create_dataset(cfg)
# Deterministic image order so cache file names line up across runs.
dataset.set_shuffle(False)
sm = SpatialModel(cfg)
sm.load()
draw_multi = PersonDraw()
from_cache = "cached_scoremaps" in cfg
if not from_cache:
sess, inputs, outputs = setup_pose_prediction(cfg)
if cache_scoremaps:
out_dir = cfg.scoremap_dir
if not os.path.exists(out_dir):
os.makedirs(out_dir)
pairwise_stats = dataset.pairwise_stats
# Development mode only processes a handful of images.
num_images = dataset.num_images if not development else min(10, dataset.num_images)
coco_results = []
for k in range(num_images):
print('processing image {}/{}'.format(k, num_images-1))
batch = dataset.next_batch()
cache_name = "{}.mat".format(batch[Batch.data_item].coco_id)
if not from_cache:
outputs_np = sess.run(outputs, feed_dict={inputs: batch[Batch.inputs]})
scmap, locref, pairwise_diff = extract_cnn_output(outputs_np, cfg, pairwise_stats)
if cache_scoremaps:
# In caching mode: either visualise argmax poses or dump the raw
# tensors; either way skip the multicut decoding below.
if visualise:
img = np.squeeze(batch[Batch.inputs]).astype('uint8')
pose = argmax_pose_predict(scmap, locref, cfg.stride)
arrows = argmax_arrows_predict(scmap, locref, pairwise_diff, cfg.stride)
visualize.show_arrows(cfg, img, pose, arrows)
visualize.waitforbuttonpress()
continue
out_fn = os.path.join(out_dir, cache_name)
# NOTE(review): `dict` shadows the builtin for the rest of this scope.
dict = {'scoremaps': scmap.astype('float32'),
'locreg_pred': locref.astype('float32'),
'pairwise_diff': pairwise_diff.astype('float32')}
scipy.io.savemat(out_fn, mdict=dict)
continue
else:
#cache_name = '1.mat'
full_fn = os.path.join(cfg.cached_scoremaps, cache_name)
mlab = scipy.io.loadmat(full_fn)
scmap = mlab["scoremaps"]
locref = mlab["locreg_pred"]
pairwise_diff = mlab["pairwise_diff"]
# Decode part detections and group them into people (multicut).
detections = extract_detections(cfg, scmap, locref, pairwise_diff)
unLab, pos_array, unary_array, pwidx_array, pw_array = eval_graph(sm, detections)
person_conf_multi = get_person_conf_multicut(sm, unLab, unary_array, pos_array)
if visualise:
img = np.squeeze(batch[Batch.inputs]).astype('uint8')
#visualize.show_heatmaps(cfg, img, scmap, pose)
"""
# visualize part detections after NMS
visim_dets = visualize_detections(cfg, img, detections)
plt.imshow(visim_dets)
plt.show()
visualize.waitforbuttonpress()
"""
# """
visim_multi = img.copy()
draw_multi.draw(visim_multi, dataset, person_conf_multi)
plt.imshow(visim_multi)
plt.show()
visualize.waitforbuttonpress()
# """
if cfg.use_gt_segm:
# Score predicted poses against ground-truth segmentation masks and
# accumulate COCO-format results.
coco_img_results = pose_predict_with_gt_segm(scmap, locref, cfg.stride, batch[Batch.data_item].gt_segm,
batch[Batch.data_item].coco_id)
coco_results += coco_img_results
if len(coco_img_results):
dataset.visualize_coco(coco_img_results, batch[Batch.data_item].visibilities)
if cfg.use_gt_segm:
with open('predictions_with_segm.json', 'w') as outfile:
json.dump(coco_results, outfile)
sess.close()
if __name__ == '__main__':
    # CLI flags: --novis disables visualisation, --cache dumps scoremaps,
    # --dev limits the run to a few images.
    cli = argparse.ArgumentParser()
    for flag in ('--novis', '--cache', '--dev'):
        cli.add_argument(flag, default=False, action='store_true')
    opts, _unparsed = cli.parse_known_args()
    test_net(not opts.novis, opts.cache, opts.dev)
| lgpl-3.0 |
jakobworldpeace/scikit-learn | benchmarks/bench_saga.py | 45 | 8474 | """Author: Arthur Mensch
Benchmarks of sklearn SAGA vs lightning SAGA vs Liblinear. Shows the gain
in using multinomial logistic regression in term of learning time.
"""
import json
import time
from os.path import expanduser
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_rcv1, load_iris, load_digits, \
fetch_20newsgroups_vectorized
from sklearn.externals.joblib import delayed, Parallel, Memory
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.utils.extmath import safe_sparse_dot, softmax
def fit_single(solver, X, y, penalty='l2', single_target=True, C=1,
               max_iter=10, skip_slow=False):
    """Fit one logistic-regression configuration with a growing iteration
    budget and record its convergence curve.

    Parameters
    ----------
    solver : str
        An sklearn solver name, or 'lightning' for lightning's SAGAClassifier.
    X, y : array-like
        Full dataset; split 75/25 (stratified) internally.
    penalty : {'l2', 'l1'}
        Regularization type; mapped to (alpha, beta) for lightning.
    single_target : bool
        True for a binary problem (ovr), False for multinomial.
    C : float
        Inverse regularization strength.
    max_iter : int
        Models are refit from scratch at budgets 1, 3, 5, ..., max_iter.
    skip_slow : bool
        Skip the pathologically slow lightning + l1 combination.

    Returns
    -------
    (model, times, train_scores, test_scores, accuracies), or None when the
    combination is skipped.
    """
    if skip_slow and solver == 'lightning' and penalty == 'l1':
        # Bug fix: message previously read 'skip_slowping' (botched rename).
        print('skipping l1 logistic regression with solver lightning.')
        return

    print('Solving %s logistic regression with penalty %s, solver %s.'
          % ('binary' if single_target else 'multinomial',
             penalty, solver))

    if solver == 'lightning':
        from lightning.classification import SAGAClassifier

    if single_target or solver not in ['sag', 'saga']:
        multi_class = 'ovr'
    else:
        multi_class = 'multinomial'

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42,
                                                        stratify=y)
    n_samples = X_train.shape[0]
    n_classes = np.unique(y_train).shape[0]
    # Pre-training baselines: loss 1 and chance-level accuracy at time 0.
    test_scores = [1]
    train_scores = [1]
    accuracies = [1 / n_classes]
    times = [0]

    if penalty == 'l2':
        alpha = 1. / (C * n_samples)
        beta = 0
        lightning_penalty = None
    else:
        alpha = 0.
        beta = 1. / (C * n_samples)
        lightning_penalty = 'l1'

    for this_max_iter in range(1, max_iter + 1, 2):
        print('[%s, %s, %s] Max iter: %s' %
              ('binary' if single_target else 'multinomial',
               penalty, solver, this_max_iter))
        if solver == 'lightning':
            lr = SAGAClassifier(loss='log', alpha=alpha, beta=beta,
                                penalty=lightning_penalty,
                                tol=-1, max_iter=this_max_iter)
        else:
            lr = LogisticRegression(solver=solver,
                                    multi_class=multi_class,
                                    C=C,
                                    penalty=penalty,
                                    fit_intercept=False, tol=1e-24,
                                    max_iter=this_max_iter,
                                    random_state=42,
                                    )

        # Bug fix: time.clock() was removed in Python 3.8;
        # time.perf_counter() is the documented replacement.
        t0 = time.perf_counter()
        lr.fit(X_train, y_train)
        train_time = time.perf_counter() - t0

        scores = []
        for (X, y) in [(X_train, y_train), (X_test, y_test)]:
            try:
                y_pred = lr.predict_proba(X)
            except NotImplementedError:
                # Lightning predict_proba is not implemented for n_classes > 2
                y_pred = _predict_proba(lr, X)
            # Regularized mean log-loss, matching the optimized objective.
            score = log_loss(y, y_pred, normalize=False) / n_samples
            score += (0.5 * alpha * np.sum(lr.coef_ ** 2) +
                      beta * np.sum(np.abs(lr.coef_)))
            scores.append(score)
        train_score, test_score = tuple(scores)

        y_pred = lr.predict(X_test)
        accuracy = np.sum(y_pred == y_test) / y_test.shape[0]
        test_scores.append(test_score)
        train_scores.append(train_score)
        accuracies.append(accuracy)
        times.append(train_time)
    return lr, times, train_scores, test_scores, accuracies
def _predict_proba(lr, X):
    """Softmax probability estimates from a fitted linear model's decision
    scores; fallback for estimators without predict_proba for >2 classes."""
    scores = safe_sparse_dot(X, lr.coef_.T)
    if hasattr(lr, "intercept_"):
        scores = scores + lr.intercept_
    return softmax(scores)
def exp(solvers, penalties, single_target, n_samples=30000, max_iter=20,
dataset='rcv1', n_jobs=1, skip_slow=False):
mem = Memory(cachedir=expanduser('~/cache'), verbose=0)
if dataset == 'rcv1':
rcv1 = fetch_rcv1()
lbin = LabelBinarizer()
lbin.fit(rcv1.target_names)
X = rcv1.data
y = rcv1.target
y = lbin.inverse_transform(y)
le = LabelEncoder()
y = le.fit_transform(y)
if single_target:
y_n = y.copy()
y_n[y > 16] = 1
y_n[y <= 16] = 0
y = y_n
elif dataset == 'digits':
digits = load_digits()
X, y = digits.data, digits.target
if single_target:
y_n = y.copy()
y_n[y < 5] = 1
y_n[y >= 5] = 0
y = y_n
elif dataset == 'iris':
iris = load_iris()
X, y = iris.data, iris.target
elif dataset == '20newspaper':
ng = fetch_20newsgroups_vectorized()
X = ng.data
y = ng.target
if single_target:
y_n = y.copy()
y_n[y > 4] = 1
y_n[y <= 16] = 0
y = y_n
X = X[:n_samples]
y = y[:n_samples]
cached_fit = mem.cache(fit_single)
out = Parallel(n_jobs=n_jobs, mmap_mode=None)(
delayed(cached_fit)(solver, X, y,
penalty=penalty, single_target=single_target,
C=1, max_iter=max_iter, skip_slow=skip_slow)
for solver in solvers
for penalty in penalties)
res = []
idx = 0
for solver in solvers:
for penalty in penalties:
if not (skip_slow and solver == 'lightning' and penalty == 'l1'):
lr, times, train_scores, test_scores, accuracies = out[idx]
this_res = dict(solver=solver, penalty=penalty,
single_target=single_target,
times=times, train_scores=train_scores,
test_scores=test_scores,
accuracies=accuracies)
res.append(this_res)
idx += 1
with open('bench_saga.json', 'w+') as f:
json.dump(res, f)
def plot():
import pandas as pd
with open('bench_saga.json', 'r') as f:
f = json.load(f)
res = pd.DataFrame(f)
res.set_index(['single_target', 'penalty'], inplace=True)
grouped = res.groupby(level=['single_target', 'penalty'])
colors = {'saga': 'blue', 'liblinear': 'orange', 'lightning': 'green'}
for idx, group in grouped:
single_target, penalty = idx
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(131)
train_scores = group['train_scores'].values
ref = np.min(np.concatenate(train_scores)) * 0.999
for scores, times, solver in zip(group['train_scores'], group['times'],
group['solver']):
scores = scores / ref - 1
ax.plot(times, scores, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Training objective (relative to min)')
ax.set_yscale('log')
ax = fig.add_subplot(132)
test_scores = group['test_scores'].values
ref = np.min(np.concatenate(test_scores)) * 0.999
for scores, times, solver in zip(group['test_scores'], group['times'],
group['solver']):
scores = scores / ref - 1
ax.plot(times, scores, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Test objective (relative to min)')
ax.set_yscale('log')
ax = fig.add_subplot(133)
for accuracy, times, solver in zip(group['accuracies'], group['times'],
group['solver']):
ax.plot(times, accuracy, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Test accuracy')
ax.legend()
name = 'single_target' if single_target else 'multi_target'
name += '_%s' % penalty
plt.suptitle(name)
name += '.png'
fig.tight_layout()
fig.subplots_adjust(top=0.9)
plt.savefig(name)
plt.close(fig)
if __name__ == '__main__':
solvers = ['saga', 'liblinear', 'lightning']
penalties = ['l1', 'l2']
single_target = True
exp(solvers, penalties, single_target, n_samples=None, n_jobs=1,
dataset='20newspaper', max_iter=20)
plot()
| bsd-3-clause |
project-rig/network_tester | examples/bursting.py | 1 | 4718 | """In this example we attempt to discover the behaviour of the network when the
burstiness of the traffic is varied."""
import sys
import random
from network_tester import Experiment
e = Experiment(sys.argv[1])
###############################################################################
# Network description
###############################################################################
# We'll create a random network of a certain number of nodes
num_cores = 64
fan_out = 8
cores = [e.new_core() for _ in range(num_cores)]
flows = [e.new_flow(c, random.sample(cores, fan_out))
for c in cores]
###############################################################################
# Traffic description
###############################################################################
# We'll generate bursts of traffic every millisecond
e.burst_period = 1e-3
# We'll choose a particular number (and type) of packet to be sent each period
packets_per_period = 32
e.use_payload = True
# We'll run the experiment for a reasonable number of periods, allowing some
# warmup time for the network behaviour to stabilise and also adding some
# cooldown time to ensure all cores have finished recording before stopping
# traffic generation.
e.duration = e.burst_period * 100
e.warmup = e.burst_period * 10
e.cooldown = e.burst_period * 10
# In the experiment we'll generate bursts of traffic with the packets being
# sent in different sized bursts. We'll also repeat experiment with and without
# packet reinjection.
num_steps = 30
for reinject_packets in [False, True]:
for step in range(num_steps):
# Work out the proportion of the burst period over which we'll send the
# packets.
burst_duty = step / float(num_steps - 1)
# Work out the time between each packet being sent, we'll use this as
# the timestep for the traffic generator (which will generate one
# packet per timestep during the burst).
timestep = (e.burst_period * burst_duty) / packets_per_period
# Don't bother trying things with too-tight a timestep since the
# traffic generator cannot generate packets that fast.
if timestep < 2e-6:
continue
with e.new_group() as g:
e.reinject_packets = reinject_packets
e.burst_duty = burst_duty
e.timestep = timestep
# We'll add the duty and reinjection option to the results tables
g.add_label("duty", e.burst_duty)
g.add_label("reinject_packets", e.reinject_packets)
###############################################################################
# Running the experiment
###############################################################################
# Record various counter values
e.record_sent = True
e.record_blocked = True
e.record_received = True
e.record_local_multicast = True
e.record_external_multicast = True
e.record_dropped_multicast = True
# Run the experiment
results = e.run(ignore_deadline_errors=True)
# Alternatively, comment the above and uncoment the below to place the network
# using the (dumb) Hilbert placer.
# from rig.place_and_route.place.hilbert import place as hilbert_place
# results = e.run(ignore_deadline_errors=True, place=hilbert_place)
###############################################################################
# Result plotting
###############################################################################
totals = results.totals()
# Scale from 0.0 (nothing received) to 1.0 (every packet which was actually
# sent was received).
totals["received"] /= totals["sent"] * fan_out
# Scale from 0.0 (no packets were sent) to 1.0 (every packet we tried to send
# was sent without being blocked by backpressure).
totals["sent"] /= totals["sent"] + totals["blocked"]
# Scale from 0.0 (no packets dropped) to 1.0 (every MC packet routed was
# dropped).
totals["dropped_multicast"] /= (totals["local_multicast"] + totals["external_multicast"])
# Plot with matplotlib
import matplotlib.pyplot as plt
tr = totals[totals["reinject_packets"] == True]
tn = totals[totals["reinject_packets"] == False]
# Plot results with reinjection enabled with solid lines
plt.plot(tr["duty"], tr["sent"], label="sent", color="b")
plt.plot(tr["duty"], tr["received"], label="received", color="g")
plt.plot(tr["duty"], tr["dropped_multicast"], label="dropped", color="r")
# Plot results with reinjection disabled with dashed lines
plt.plot(tn["duty"], tn["sent"], linestyle="dashed", color="b")
plt.plot(tn["duty"], tn["received"], linestyle="dashed", color="g")
plt.plot(tn["duty"], tn["dropped_multicast"], linestyle="dashed", color="r")
plt.legend()
plt.xlabel("Network duty")
plt.show()
| gpl-2.0 |
sgould/fun_and_games | gs_fetch.py | 1 | 3983 | #!/usr/bin/env python
#
# Script to fetch Google Scholar pages from a CSV list of Google Scholar IDs and plot citations by grouping. Expects
# CSV data in the form of:
# <name>, <grouping>, <scholarID>
#
# For example:
# Stephen Gould, D, YvdzeM8AAAAJ
#
import os
import csv
import re
import numpy as np
import urllib.request
from plot_citations import GoogleScholarHTMLParser
import tkinter as tk
from tkinter import messagebox, filedialog
HTML_DIR = "gs_cache"
URL_TEMPLATE = r"https://scholar.google.com.au/citations?user={}&hl=en"
app_wnd = tk.Tk()
app_wnd.withdraw() # hide application window
CSV_FILE = tk.filedialog.askopenfilename(title="Scholar IDs", filetypes=(("CVS Files", "*.csv"), ("All Files", "*.*")))
if not CSV_FILE: exit(0)
OVERWRITE = tk.messagebox.askyesno("Overwrite", "Do you wish to overwrite existing data?")
if not os.path.isdir(HTML_DIR):
os.makedirs(HTML_DIR)
# fetch Google Scholar pages
with open(CSV_FILE, 'rt', encoding='utf8') as file:
reader = csv.reader(file, skipinitialspace=True)
next(reader, None) # skip header
for name, grouping, gs_id in reader:
if len(gs_id) == 0:
continue
cache_filename = os.path.join(HTML_DIR, "{}.html".format(gs_id))
if OVERWRITE or not os.path.exists(cache_filename):
url = URL_TEMPLATE.format(gs_id)
print("Fetching URL {}...".format(url))
try:
response = urllib.request.urlopen(url)
html = str(response.read())
response.close()
html = re.sub("\\\\t|\\\\r\\\\n", "", html)
with open(cache_filename, 'wt') as outfile:
outfile.write(html)
except:
print("ERROR: could not retrieve or update data for {}".format(name))
# process Google Scholar pages
data = {}
with open(CSV_FILE, 'rt', encoding='utf8') as file:
reader = csv.reader(file, skipinitialspace=True)
next(reader, None) # skip header
for name, grouping, gs_id in reader:
cache_filename = os.path.join(HTML_DIR, "{}.html".format(gs_id))
if not os.path.exists(cache_filename):
print("WARNING: missing data for {}".format(name))
continue
with open(cache_filename, 'rt') as infile:
html = infile.read()
parser = GoogleScholarHTMLParser()
parser.feed(html)
#print("{:5d} {:5d} {}".format(parser.citeSummaryData[0], parser.citeSummaryData[1], parser.citeCounts[-1], name))
grouping = grouping[0]
if grouping not in data:
data[grouping] = []
data[grouping].append((parser.citeSummaryData[0], parser.citeSummaryData[1], parser.citeCounts[-1], name))
for key in sorted(data.keys()):
print("Career median for grouping {} is {}".format(key, np.median([v[0] for v in data[key]])))
for key in sorted(data.keys()):
print("Last 5-years median for grouping {} is {}".format(key, np.median([v[1] for v in data[key]])))
x, y1, y2, y3, n = [], [], [], [], []
for key in sorted(data.keys()):
data[key] = sorted(data[key])
x += [key] * len(data[key])
y1 += [v[0] for v in data[key]]
y2 += [v[1] for v in data[key]]
y3 += [v[2] for v in data[key]]
n += [v[3] for v in data[key]]
import seaborn as sea
import matplotlib.pyplot as plt
sea.set(font_scale=1.2)
plt.figure()
width = 0.8
plt.bar(np.linspace(0, len(y1), len(y1)), y1, width, align='center')
plt.bar(np.linspace(0, len(y2), len(y2)), y2, width, color=[0.8, 0.0, 0.0], align='center')
plt.bar(np.linspace(0, len(y3), len(y3)), y3, width, color=[0.0, 0.8, 0.0], align='center')
if 0:
plt.xticks(np.linspace(0, len(x), len(x)), x)
plt.xlabel('Researcher (Group)'); plt.ylabel('Citations')
else:
plt.xticks(np.linspace(0, len(x), len(x)), n)
plt.xlabel('Researcher (Name)'); plt.ylabel('Citations')
plt.xticks(rotation=90)
plt.legend(['Total', 'Last 5 Years', 'Current Year'], loc='upper left')
plt.show()
| mit |
jwlawson/tensorflow | tensorflow/contrib/losses/python/metric_learning/metric_loss_ops_test.py | 41 | 20535 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for triplet_semihard_loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.losses.python import metric_learning as metric_loss_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
try:
# pylint: disable=g-import-not-at-top
from sklearn import datasets
from sklearn import metrics
HAS_SKLEARN = True
except ImportError:
HAS_SKLEARN = False
def pairwise_distance_np(feature, squared=False):
"""Computes the pairwise distance matrix in numpy.
Args:
feature: 2-D numpy array of size [number of data, feature dimension]
squared: Boolean. If true, output is the pairwise squared euclidean
distance matrix; else, output is the pairwise euclidean distance matrix.
Returns:
pairwise_distances: 2-D numpy array of size
[number of data, number of data].
"""
triu = np.triu_indices(feature.shape[0], 1)
upper_tri_pdists = np.linalg.norm(feature[triu[1]] - feature[triu[0]], axis=1)
if squared:
upper_tri_pdists **= 2.
num_data = feature.shape[0]
pairwise_distances = np.zeros((num_data, num_data))
pairwise_distances[np.triu_indices(num_data, 1)] = upper_tri_pdists
# Make symmetrical.
pairwise_distances = pairwise_distances + pairwise_distances.T - np.diag(
pairwise_distances.diagonal())
return pairwise_distances
class ContrastiveLossTest(test.TestCase):
def testContrastive(self):
with self.test_session():
num_data = 10
feat_dim = 6
margin = 1.0
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(0, 2, size=(num_data,)).astype(np.float32)
# Compute the loss in NP
dist = np.sqrt(
np.sum(np.square(embeddings_anchor - embeddings_positive), axis=1))
loss_np = np.mean(
labels * np.square(dist) +
(1.0 - labels) * np.square(np.maximum(margin - dist, 0.0)))
# Compute the loss with TF
loss_tf = metric_loss_ops.contrastive_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class TripletSemiHardLossTest(test.TestCase):
def testTripletSemiHard(self):
with self.test_session():
num_data = 10
feat_dim = 6
margin = 1.0
num_classes = 4
embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP.
adjacency = np.equal(labels_reshaped, labels_reshaped.T)
pdist_matrix = pairwise_distance_np(embedding, squared=True)
loss_np = 0.0
num_positives = 0.0
for i in range(num_data):
for j in range(num_data):
if adjacency[i][j] > 0.0 and i != j:
num_positives += 1.0
pos_distance = pdist_matrix[i][j]
neg_distances = []
for k in range(num_data):
if adjacency[i][k] == 0:
neg_distances.append(pdist_matrix[i][k])
# Sort by distance.
neg_distances.sort()
chosen_neg_distance = neg_distances[0]
for l in range(len(neg_distances)):
chosen_neg_distance = neg_distances[l]
if chosen_neg_distance > pos_distance:
break
loss_np += np.maximum(
0.0, margin - chosen_neg_distance + pos_distance)
loss_np /= num_positives
# Compute the loss in TF.
loss_tf = metric_loss_ops.triplet_semihard_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embedding),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class LiftedStructLossTest(test.TestCase):
def testLiftedStruct(self):
with self.test_session():
num_data = 10
feat_dim = 6
margin = 1.0
num_classes = 4
embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP
adjacency = np.equal(labels_reshaped, labels_reshaped.T)
pdist_matrix = pairwise_distance_np(embedding)
loss_np = 0.0
num_constraints = 0.0
for i in range(num_data):
for j in range(num_data):
if adjacency[i][j] > 0.0 and i != j:
d_pos = pdist_matrix[i][j]
negs = []
for k in range(num_data):
if not adjacency[i][k]:
negs.append(margin - pdist_matrix[i][k])
for l in range(num_data):
if not adjacency[j][l]:
negs.append(margin - pdist_matrix[j][l])
negs = np.array(negs)
max_elem = np.max(negs)
negs -= max_elem
negs = np.exp(negs)
soft_maximum = np.log(np.sum(negs)) + max_elem
num_constraints += 1.0
this_loss = max(soft_maximum + d_pos, 0)
loss_np += this_loss * this_loss
loss_np = loss_np / num_constraints / 2.0
# Compute the loss in TF
loss_tf = metric_loss_ops.lifted_struct_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embedding),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
def convert_to_list_of_sparse_tensor(np_matrix):
list_of_sparse_tensors = []
nrows, ncols = np_matrix.shape
for i in range(nrows):
sp_indices = []
for j in range(ncols):
if np_matrix[i][j] == 1:
sp_indices.append([j])
num_non_zeros = len(sp_indices)
list_of_sparse_tensors.append(sparse_tensor.SparseTensor(
indices=np.array(sp_indices),
values=np.ones((num_non_zeros,)),
dense_shape=np.array([ncols,])))
return list_of_sparse_tensors
class NpairsLossTest(test.TestCase):
def testNpairs(self):
with self.test_session():
num_data = 15
feat_dim = 6
num_classes = 5
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP
reg_term = np.mean(np.sum(np.square(embeddings_anchor), 1))
reg_term += np.mean(np.sum(np.square(embeddings_positive), 1))
reg_term *= 0.25 * reg_lambda
similarity_matrix = np.matmul(embeddings_anchor, embeddings_positive.T)
labels_remapped = np.equal(
labels_reshaped, labels_reshaped.T).astype(np.float32)
labels_remapped /= np.sum(labels_remapped, axis=1, keepdims=True)
xent_loss = math_ops.reduce_mean(nn.softmax_cross_entropy_with_logits(
logits=ops.convert_to_tensor(similarity_matrix),
labels=ops.convert_to_tensor(labels_remapped))).eval()
loss_np = xent_loss + reg_term
# Compute the loss in TF
loss_tf = metric_loss_ops.npairs_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class NpairsLossMultiLabelTest(test.TestCase):
def testNpairsMultiLabelLossWithSingleLabelEqualsNpairsLoss(self):
with self.test_session():
num_data = 15
feat_dim = 6
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.arange(num_data)
labels = np.reshape(labels, -1)
# Compute vanila npairs loss.
loss_npairs = metric_loss_ops.npairs_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda).eval()
# Compute npairs multilabel loss.
labels_one_hot = np.identity(num_data)
loss_npairs_multilabel = metric_loss_ops.npairs_loss_multilabel(
sparse_labels=convert_to_list_of_sparse_tensor(labels_one_hot),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda).eval()
self.assertAllClose(loss_npairs, loss_npairs_multilabel)
def testNpairsMultiLabel(self):
with self.test_session():
num_data = 15
feat_dim = 6
num_classes = 10
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(0, 2, (num_data, num_classes))
# set entire column to one so that each row has at least one bit set.
labels[:, -1] = 1
# Compute the loss in NP
reg_term = np.mean(np.sum(np.square(embeddings_anchor), 1))
reg_term += np.mean(np.sum(np.square(embeddings_positive), 1))
reg_term *= 0.25 * reg_lambda
similarity_matrix = np.matmul(embeddings_anchor, embeddings_positive.T)
labels_remapped = np.dot(labels, labels.T).astype(np.float)
labels_remapped /= np.sum(labels_remapped, 1, keepdims=True)
xent_loss = math_ops.reduce_mean(nn.softmax_cross_entropy_with_logits(
logits=ops.convert_to_tensor(similarity_matrix),
labels=ops.convert_to_tensor(labels_remapped))).eval()
loss_np = xent_loss + reg_term
# Compute the loss in TF
loss_tf = metric_loss_ops.npairs_loss_multilabel(
sparse_labels=convert_to_list_of_sparse_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
def compute_ground_truth_cluster_score(feat, y):
y_unique = np.unique(y)
score_gt_np = 0.0
for c in y_unique:
feat_subset = feat[y == c, :]
pdist_subset = pairwise_distance_np(feat_subset)
score_gt_np += -1.0 * np.min(np.sum(pdist_subset, axis=0))
score_gt_np = score_gt_np.astype(np.float32)
return score_gt_np
def compute_cluster_loss_numpy(feat,
y,
margin_multiplier=1.0,
enable_pam_finetuning=True):
if enable_pam_finetuning:
facility = ForwardGreedyFacility(
n_clusters=np.unique(y).size).pam_augmented_fit(feat, y,
margin_multiplier)
else:
facility = ForwardGreedyFacility(
n_clusters=np.unique(y).size).loss_augmented_fit(feat, y,
margin_multiplier)
score_augmented = facility.score_aug_
score_gt = compute_ground_truth_cluster_score(feat, y)
return np.maximum(np.float32(0.0), score_augmented - score_gt)
class ForwardGreedyFacility(object):
def __init__(self, n_clusters=8):
self.n_clusters = n_clusters
self.center_ics_ = None
def _check_init_args(self):
# Check n_clusters.
if (self.n_clusters is None or self.n_clusters <= 0 or
not isinstance(self.n_clusters, int)):
raise ValueError('n_clusters has to be nonnegative integer.')
def loss_augmented_fit(self, feat, y, loss_mult):
"""Fit K-Medoids to the provided data."""
self._check_init_args()
# Check that the array is good and attempt to convert it to
# Numpy array if possible.
feat = self._check_array(feat)
# Apply distance metric to get the distance matrix.
pdists = pairwise_distance_np(feat)
num_data = feat.shape[0]
candidate_ids = list(range(num_data))
candidate_scores = np.zeros(num_data,)
subset = []
k = 0
while k < self.n_clusters:
candidate_scores = []
for i in candidate_ids:
# push i to subset.
subset.append(i)
marginal_cost = -1.0 * np.sum(np.min(pdists[:, subset], axis=1))
loss = 1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset))
candidate_scores.append(marginal_cost + loss_mult * loss)
# remove i from subset.
subset.pop()
# push i_star to subset.
i_star = candidate_ids[np.argmax(candidate_scores)]
subset.append(i_star)
# remove i_star from candidate indices.
candidate_ids.remove(i_star)
k += 1
# Expose labels_ which are the assignments of
# the training data to clusters.
self.labels_ = self._get_cluster_ics(pdists, subset)
# Expose cluster centers, i.e. medoids.
self.cluster_centers_ = feat.take(subset, axis=0)
# Expose indices of chosen cluster centers.
self.center_ics_ = subset
# Expose the score = -\sum_{i \in V} min_{j \in S} || x_i - x_j ||
self.score_ = np.float32(-1.0) * self._get_facility_distance(pdists, subset)
self.score_aug_ = self.score_ + loss_mult * (
1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset)))
self.score_aug_ = self.score_aug_.astype(np.float32)
# Expose the chosen cluster indices.
self.subset_ = subset
return self
def _augmented_update_medoid_ics_in_place(self, pdists, y_gt, cluster_ics,
medoid_ics, loss_mult):
for cluster_idx in range(self.n_clusters):
# y_pred = self._get_cluster_ics(D, medoid_ics)
# Don't prematurely do the assignment step.
# Do this after we've updated all cluster medoids.
y_pred = cluster_ics
if sum(y_pred == cluster_idx) == 0:
# Cluster is empty.
continue
curr_score = (
-1.0 * np.sum(
pdists[medoid_ics[cluster_idx], y_pred == cluster_idx]) +
loss_mult * (1.0 - metrics.normalized_mutual_info_score(
y_gt, y_pred)))
pdist_in = pdists[y_pred == cluster_idx, :]
pdist_in = pdist_in[:, y_pred == cluster_idx]
all_scores_fac = np.sum(-1.0 * pdist_in, axis=1)
all_scores_loss = []
for i in range(y_pred.size):
if y_pred[i] != cluster_idx:
continue
# remove this cluster's current centroid
medoid_ics_i = medoid_ics[:cluster_idx] + medoid_ics[cluster_idx + 1:]
# add this new candidate to the centroid list
medoid_ics_i += [i]
y_pred_i = self._get_cluster_ics(pdists, medoid_ics_i)
all_scores_loss.append(loss_mult * (
1.0 - metrics.normalized_mutual_info_score(y_gt, y_pred_i)))
all_scores = all_scores_fac + all_scores_loss
max_score_idx = np.argmax(all_scores)
max_score = all_scores[max_score_idx]
if max_score > curr_score:
medoid_ics[cluster_idx] = np.where(
y_pred == cluster_idx)[0][max_score_idx]
def pam_augmented_fit(self, feat, y, loss_mult):
pam_max_iter = 5
self._check_init_args()
feat = self._check_array(feat)
pdists = pairwise_distance_np(feat)
self.loss_augmented_fit(feat, y, loss_mult)
print('PAM -1 (before PAM): score: %f, score_aug: %f' % (
self.score_, self.score_aug_))
# Initialize from loss augmented facility location
subset = self.center_ics_
for iter_ in range(pam_max_iter):
# update the cluster assignment
cluster_ics = self._get_cluster_ics(pdists, subset)
# update the medoid for each clusters
self._augmented_update_medoid_ics_in_place(pdists, y, cluster_ics, subset,
loss_mult)
self.score_ = np.float32(-1.0) * self._get_facility_distance(
pdists, subset)
self.score_aug_ = self.score_ + loss_mult * (
1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset)))
self.score_aug_ = self.score_aug_.astype(np.float32)
print('PAM iter: %d, score: %f, score_aug: %f' % (iter_, self.score_,
self.score_aug_))
self.center_ics_ = subset
self.labels_ = cluster_ics
return self
def _check_array(self, feat):
# Check that the number of clusters is less than or equal to
# the number of samples
if self.n_clusters > feat.shape[0]:
raise ValueError('The number of medoids ' + '({}) '.format(
self.n_clusters) + 'must be larger than the number ' +
'of samples ({})'.format(feat.shape[0]))
return feat
def _get_cluster_ics(self, pdists, subset):
"""Returns cluster indices for pdist and current medoid indices."""
# Assign data points to clusters based on
# which cluster assignment yields
# the smallest distance`
cluster_ics = np.argmin(pdists[subset, :], axis=0)
return cluster_ics
def _get_facility_distance(self, pdists, subset):
return np.sum(np.min(pdists[subset, :], axis=0))
class ClusterLossTest(test.TestCase):
def _genClusters(self, n_samples, n_clusters):
blobs = datasets.make_blobs(
n_samples=n_samples, centers=n_clusters)
embedding, labels = blobs
embedding = (embedding - embedding.mean(axis=0)) / embedding.std(axis=0)
embedding = embedding.astype(np.float32)
return embedding, labels
def testClusteringLossPAMOff(self):
if not HAS_SKLEARN:
return
with self.test_session():
margin_multiplier = 10.0
embeddings, labels = self._genClusters(n_samples=128, n_clusters=64)
loss_np = compute_cluster_loss_numpy(
embeddings, labels, margin_multiplier, enable_pam_finetuning=False)
loss_tf = metric_loss_ops.cluster_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embeddings),
margin_multiplier=margin_multiplier,
enable_pam_finetuning=False)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
def testClusteringLossPAMOn(self):
if not HAS_SKLEARN:
return
with self.test_session():
margin_multiplier = 10.0
embeddings, labels = self._genClusters(n_samples=128, n_clusters=64)
loss_np = compute_cluster_loss_numpy(
embeddings, labels, margin_multiplier, enable_pam_finetuning=True)
loss_tf = metric_loss_ops.cluster_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embeddings),
margin_multiplier=margin_multiplier,
enable_pam_finetuning=True)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
if __name__ == '__main__':
test.main()
| apache-2.0 |
cavestruz/L500analysis | plotting/profiles/T_Vcirc_evolution/Ttot_Vcirc_evolution/plot_Ttot_Vcirc_r200m.py | 1 | 2845 | from L500analysis.data_io.get_cluster_data import GetClusterData
from L500analysis.utils.utils import aexp2redshift
from L500analysis.plotting.tools.figure_formatting import *
from L500analysis.plotting.profiles.tools.profiles_percentile \
import *
from L500analysis.utils.constants import rbins
from derived_field_functions import *
color = matplotlib.cm.afmhot_r
matplotlib.rcParams['legend.handlelength'] = 0
matplotlib.rcParams['legend.numpoints'] = 1
matplotlib.rcParams['legend.fontsize'] = 12
aexps = [1.0,0.9,0.8,0.7,0.6,0.5,0.45,0.4,0.35]
db_name = 'L500_NR_0'
db_dir = '/home/babyostrich/Documents/Repos/L500analysis/'
profiles_list = ['T_mw', 'r_mid',
'vel_gas_rad_std', 'vel_gas_tan_std',
'vel_gas_rad_avg', 'vel_gas_tan_avg',
'M_dark', 'M_star', 'M_gas',
'Ttot_Vcirc2_ratio_200m',
'R/R200m']
halo_properties_list=['r200m','M_total_200m','nu_200m']
Ttot_Vcirc2_ratio=r"$\Xi=T_{tot}/V^2_{circ}$"
fXz1=r"$\Xi/\Xi(z=1)$"
pa = PlotAxes(figname='Ttot_Vcirc2_ratio_200m',
axes=[[0.15,0.4,0.80,0.55],[0.15,0.15,0.80,0.24]],
axes_labels=[Ttot_Vcirc2_ratio,fXz1],
xlabel=r"$R/R_{200m}$",
xlim=(0.2,2),
ylims=[(0.2,0.8),(0.6,1.4)])
TratioV2={}
plots=[TratioV2]
clkeys=['Ttot_Vcirc2_ratio_200m']
for aexp in aexps :
cldata = GetClusterData(aexp=aexp,db_name=db_name,
db_dir=db_dir,
profiles_list=profiles_list,
halo_properties_list=halo_properties_list)
for p, key in zip(plots,clkeys) :
p[aexp] = calculate_profiles_mean_variance(cldata[key])
pa.axes[Ttot_Vcirc2_ratio].plot( rbins, TratioV2[aexp]['mean'],
color=color(aexp),ls='-',
label="$z=%3.1f$" % aexp2redshift(aexp))
pa.axes[Ttot_Vcirc2_ratio].fill_between(rbins, TratioV2[0.5]['down'],
TratioV2[0.5]['up'],
color=color(0.5), zorder=0)
for aexp in aexps :
fractional_evolution = get_profiles_division_mean_variance(
mean_profile1=p[aexp]['mean'],
var_profile1=p[aexp]['var'],
mean_profile2=p[0.5]['mean'],
var_profile2=p[0.5]['var'],
)
pa.axes[fXz1].plot( rbins, fractional_evolution['mean'],
color=color(aexp),ls='-')
pa.axes[Ttot_Vcirc2_ratio].tick_params(labelsize=12)
pa.axes[Ttot_Vcirc2_ratio].tick_params(labelsize=12)
pa.axes[fXz1].set_yticks(arange(0.6,1.4,0.2))
pa.set_legend(axes_label=Ttot_Vcirc2_ratio,ncol=3,loc='best', frameon=False)
pa.color_legend_texts(axes_label=Ttot_Vcirc2_ratio)
pa.savefig()
| mit |
ThomasMiconi/htmresearch | htmresearch/frameworks/layers/continuous_location_object_machine.py | 10 | 12521 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import math
import random
import numpy as np
import matplotlib.pyplot as plt
from nupic.encoders.coordinate import CoordinateEncoder
from htmresearch.frameworks.layers.object_machine_base import ObjectMachineBase
class ContinuousLocationObjectMachine(ObjectMachineBase):
  """
  This implementation of the object machine uses continuous locations instead
  of discrete random ones. They are created using a CoordinateEncoder.

  The "objects" should be PhysicalObjects as defined in physical_object_base
  and physical_objects. Subclass the base implementation for specific needs.
  """

  def __init__(self,
               numInputBits=41,
               sensorInputSize=2048,
               externalInputSize=2048,
               numCorticalColumns=1,
               numFeatures=400,
               dimension=3,
               seed=42):
    """
    At creation, the machine creates a pool of feature SDR's and sets up the
    coordinate encoder used to generate location SDR's on demand.

    Parameters:
    ----------------------------
    @param   numInputBits (int)
             Number of ON bits in the input. Note: it should be uneven as the
             encoder only accepts uneven number of bits.

    @param   sensorInputSize (int)
             Total number of bits in the sensory input

    @param   externalInputSize (int)
             Total number of bits the external (location) input

    @param   numCorticalColumns (int)
             Number of cortical columns used in the experiment

    @param   dimension (int)
             Dimension of the locations. Will typically be 3.

    @param   numFeatures (int)
             Number of feature SDRs to generate per cortical column. There is
             typically no need to not use the default value, unless the user
             knows he will use more than 400 patterns.

    @param   seed (int)
             Seed to be used in the machine
    """
    super(ContinuousLocationObjectMachine, self).__init__(numInputBits,
                                                          sensorInputSize,
                                                          externalInputSize,
                                                          numCorticalColumns,
                                                          seed)

    # location and features pool
    self.numFeatures = numFeatures
    self._generateFeatures()

    self.dimension = dimension
    # Locations are encoded lazily (per sensation) rather than pre-generated.
    self.locationEncoder = CoordinateEncoder(
      w=numInputBits,
      n=externalInputSize,
      name="locationEncoder"
    )

  def provideObjectsToLearn(self, learningConfig, plot=False):
    """
    Returns the objects in a canonical format to be sent to an experiment.

    The input, learningConfig, should have the following format. It is a
    mapping from object to a list of features to sample locations from, and
    the number of points to sample from each feature. Note that these objects
    should be first added with .addObjects().

    These features can be either hard-coded with their key or accessed
    with .getFeatures.

    An other possibility is to directly specify locations. The machine will
    use the object to find the corresponding feature (an empty feature will
    be sent if the location is not on the object's surface).

    learningConfig = {
      # hard-coded keys and number of points
      "cube": [("face", 5), ("edge", 5), ("vertex", 3)],

      # programmatically-accessed keys and number of points
      "cylinder": [(feature, 5) for feature in cylinder.getFeatures()],

      # specific locations
      "sphere": [(10, 5, 3), (12, 45, 32), (12, 5, 46)],
    }

    The returned format is a dictionary where the keys are object names, and
    values are lists of sensations, each sensation being a mapping from
    cortical column index to a pair of SDR's (one location and one feature).

    Parameters:
    ----------------------------
    @param   learningConfig (dict)
             Configuration for learning, as described above.
    """
    objects = {}

    for objectName, locationList in learningConfig.iteritems():
      sensationList = []
      physicalObject = self.objects[objectName]
      if plot:
        fig, ax = physicalObject.plot()

      for element in locationList:
        # location name and number of points
        if len(element) == 2:
          featureName, numLocations = element
          for _ in xrange(numLocations):
            location = physicalObject.sampleLocationFromFeature(featureName)
            # All columns receive the same (location, feature) pair here.
            sensationList.append(
              self._getSDRPairs(
                [(location,
                  physicalObject.getFeatureID(location))] * self.numColumns
              )
            )
            if plot:
              x, y, z = tuple(location)
              ax.scatter(x, y, z, marker="v", s=100, c="r")

        # explicit location
        elif len(element) == 3:
          location = list(element)
          sensationList.append(
            self._getSDRPairs(
              [(location,
                physicalObject.getFeatureID(location))] * self.numColumns
            )
          )
          if plot:
            x, y, z = tuple(location)
            ax.scatter(x, y, z, marker="v", s=100, c="r")

        else:
          raise ValueError("Unsupported type for location spec")

      objects[objectName] = sensationList
      if plot:
        plt.title("Learning points for object {}".format(objectName))
        plt.savefig("learn_{}.png".format(objectName))
        plt.close()

    self._checkObjectsToLearn(objects)
    return objects

  def provideObjectToInfer(self, inferenceConfig, plot=False):
    """
    Returns the sensations in a canonical format to be sent to an experiment.

    The input inferenceConfig should be a dict with the following form. The
    "pairs" field provide a mapping from cortical column to a list of
    sensations, each sensations being either:
      - a feature key to sample a location from
      - an explicit location

    {
      "numSteps": 2      # number of sensations
      "noiseLevel": 0.05 # noise to add to sensations (optional)
      "objectName": 0    # optional
      "pairs": {
        0: ["random", "face"]            # locations for cortical column 0
        1: [(12, 32, 34), (23, 23, 32)]  # locations for cortical column 1
      }
    }

    The returned format is a lists of sensations, each sensation being a
    mapping from cortical column index to a pair of SDR's (one location and
    one feature).

    Parameters:
    ----------------------------
    @param   inferenceConfig (dict)
             Inference spec for experiment (cf above for format)
    """
    if "numSteps" in inferenceConfig:
      numSteps = inferenceConfig["numSteps"]
    else:
      # Fall back to the length of column 0's sensation list.
      numSteps = len(inferenceConfig["pairs"][0])

    if "noiseLevel" in inferenceConfig:
      noise = inferenceConfig["noiseLevel"]
    else:
      noise = None

    # some checks
    if numSteps == 0:
      raise ValueError("No inference steps were provided")
    for col in xrange(self.numColumns):
      if len(inferenceConfig["pairs"][col]) != numSteps:
        raise ValueError("Incompatible numSteps and actual inference steps")

    if "objectName" in inferenceConfig:
      physicalObject = self.objects[inferenceConfig["objectName"]]
    else:
      # NOTE(review): with no objectName, string specs and plot=True below
      # would dereference None — callers appear expected to pass explicit
      # locations in that case; confirm against experiment code.
      physicalObject = None

    if plot:
      # don't use if object is not known
      fig, ax = physicalObject.plot()
      colors = plt.cm.rainbow(np.linspace(0, 1, numSteps))

    sensationSteps = []
    for step in xrange(numSteps):
      pairs = [
        inferenceConfig["pairs"][col][step] for col in xrange(self.numColumns)
      ]
      for i in xrange(len(pairs)):
        if isinstance(pairs[i], str):
          # feature key: sample a location on that feature
          location = physicalObject.sampleLocationFromFeature(pairs[i])
          pairs[i] = (
            location,
            physicalObject.getFeatureID(location)
          )
        else:
          # explicit location: look up which feature (if any) it lies on
          location = pairs[i]
          pairs[i] = (
            location,
            physicalObject.getFeatureID(location)
          )
        if plot:
          x, y, z = tuple(location)
          ax.scatter(x, y, z, marker="v", s=100, c=colors[step])
      sensationSteps.append(self._getSDRPairs(pairs, noise=noise))

    if plot:
      plt.title("Inference points for object {}".format(
        inferenceConfig["objectName"])
      )
      plt.savefig("infer_{}.png".format( inferenceConfig["objectName"]))
      plt.close()

    self._checkObjectToInfer(sensationSteps)
    return sensationSteps

  def addObject(self, object, name=None):
    """
    Adds an object to the Machine.

    Objects should be PhysicalObjects.
    """
    # NOTE(review): the parameter name shadows the builtin `object`; renaming
    # would break keyword callers, so it is left as-is.
    if name is None:
      name = len(self.objects)
    self.objects[name] = object

  def _getSDRPairs(self, pairs, noise=None):
    """
    This method takes a list of (location, feature) pairs (one pair per
    cortical column), and returns a sensation dict in the correct format,
    adding noise if necessary.

    In each pair, the location is an actual integer location to be encoded,
    and the feature is just an index.
    """
    sensations = {}

    for col in xrange(self.numColumns):
      location, featureID = pairs[col]
      # CoordinateEncoder requires integer coordinates.
      location = [int(coord) for coord in location]
      location = self.locationEncoder.encode(
        (np.array(location, dtype="int32"), self._getRadius(location))
      )
      # keep only the indices of the active bits
      location = set(location.nonzero()[0])

      # generate empty feature if requested
      if featureID == -1:
        feature = set()
      # generate union of features if requested
      elif isinstance(featureID, tuple):
        feature = set()
        for idx in list(featureID):
          feature = feature | self.features[col][idx]
      else:
        feature = self.features[col][featureID]

      if noise is not None:
        location = self._addNoise(location, noise)
        feature = self._addNoise(feature, noise)

      sensations[col] = (location, feature)

    return sensations

  def _getRadius(self, location):
    """
    Returns the radius associated with the given location.

    This is a bit of an awkward argument to the CoordinateEncoder, which
    specifies the resolution (in was used to encode differently depending on
    speed in the GPS encoder). Since the coordinates are object-centric,
    for now we use the "point radius" as an heuristic, but this should be
    experimented and improved.
    """
    # TODO: find better heuristic
    return int(math.sqrt(sum([coord ** 2 for coord in location])))

  def _addNoise(self, pattern, noiseLevel):
    """
    Adds noise the given list of patterns and returns a list of noisy copies.
    """
    if pattern is None:
      return None

    newBits = []
    for bit in pattern:
      if random.random() < noiseLevel:
        # NOTE(review): the replacement bit may collide with an existing one,
        # in which case the returned set is slightly smaller than the input.
        newBits.append(random.randint(0, max(pattern)))
      else:
        newBits.append(bit)

    return set(newBits)

  def _generatePattern(self, numBits, totalSize):
    """
    Generates a random SDR with specified number of bits and total size.
    """
    cellsIndices = range(totalSize)
    random.shuffle(cellsIndices)
    return set(cellsIndices[:numBits])

  def _generateFeatures(self):
    """
    Generates a pool of features to be used for the experiments.

    For each index, numColumns SDR's are created, as locations for the same
    feature should be different for each column.
    """
    size = self.sensorInputSize
    bits = self.numInputBits

    self.features = []
    for _ in xrange(self.numColumns):
      self.features.append(
        [self._generatePattern(bits, size) for _ in xrange(self.numFeatures)]
      )
| agpl-3.0 |
ppries/tensorflow | tensorflow/examples/learn/text_classification.py | 1 | 4925 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for DNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
def bag_of_words_model(features, target):
  """A bag-of-words model. Note it disregards the word order in the text.

  Returns (predictions dict, loss, train_op) as expected by tf.contrib.learn
  Estimator model functions. `n_words` is a module-level global set in main().
  """
  # 15 is the number of DBpedia classes.
  target = tf.one_hot(target, 15, 1, 0)
  # Averages word embeddings over the document, ignoring order.
  features = tf.contrib.layers.bow_encoder(
      features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
  logits = tf.contrib.layers.fully_connected(features, 15, activation_fn=None)
  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
  train_op = tf.contrib.layers.optimize_loss(
      loss, tf.contrib.framework.get_global_step(),
      optimizer='Adam', learning_rate=0.01)
  return (
      {'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
      loss, train_op)
def rnn_model(features, target):
  """RNN model to predict from sequence of words to a class.

  Returns (predictions dict, loss, train_op) in the tf.contrib.learn
  Estimator model-function format, like bag_of_words_model above.
  """
  # Convert indexes of words into embeddings.
  # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
  # maps word indexes of the sequence into [batch_size, sequence_length,
  # EMBEDDING_SIZE].
  word_vectors = tf.contrib.layers.embed_sequence(
      features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
  # Split into list of embedding per word, while removing doc length dim.
  # word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
  word_list = tf.unstack(word_vectors, axis=1)
  # Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
  cell = tf.contrib.rnn.GRUCell(EMBEDDING_SIZE)
  # Create an unrolled Recurrent Neural Networks to length of
  # MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
  _, encoding = tf.nn.rnn(cell, word_list, dtype=tf.float32)
  # Given encoding of RNN, take encoding of last step (e.g hidden size of the
  # neural network of last step) and pass it as features for logistic
  # regression over output classes.
  target = tf.one_hot(target, 15, 1, 0)
  logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
  # Create a training op.
  train_op = tf.contrib.layers.optimize_loss(
      loss, tf.contrib.framework.get_global_step(),
      optimizer='Adam', learning_rate=0.01)
  return (
      {'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
      loss, train_op)
def main(unused_argv):
  """Load DBpedia, fit the selected model, and report test accuracy."""
  global n_words
  # Prepare training and testing data
  dbpedia = learn.datasets.load_dataset(
      'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
  # Column 1 holds the document text; column 0 is the title.
  x_train = pandas.DataFrame(dbpedia.train.data)[1]
  y_train = pandas.Series(dbpedia.train.target)
  x_test = pandas.DataFrame(dbpedia.test.data)[1]
  y_test = pandas.Series(dbpedia.test.target)
  # Process vocabulary: map words to integer ids, truncating/padding each
  # document to MAX_DOCUMENT_LENGTH tokens.
  vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
  x_train = np.array(list(vocab_processor.fit_transform(x_train)))
  x_test = np.array(list(vocab_processor.transform(x_test)))
  n_words = len(vocab_processor.vocabulary_)
  print('Total words: %d' % n_words)
  # Build model
  # Switch between rnn_model and bag_of_words_model to test different models.
  model_fn = rnn_model
  if FLAGS.bow_model:
    model_fn = bag_of_words_model
  classifier = learn.Estimator(model_fn=model_fn)
  # Train and predict
  classifier.fit(x_train, y_train, steps=100)
  y_predicted = [
      p['class'] for p in classifier.predict(x_test, as_iterable=True)]
  score = metrics.accuracy_score(y_test, y_predicted)
  print('Accuracy: {0:f}'.format(score))


if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--test_with_fake_data',
      default=False,
      help='Test the example code with fake data.',
      action='store_true'
  )
  parser.add_argument(
      '--bow_model',
      default=False,
      help='Run with BOW model instead of RNN.',
      action='store_true'
  )
  # Unparsed flags are forwarded to tf.app.run so TF can consume its own.
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
wxxth/MDPLA | exact_mdp/route_planning.py | 2 | 3138 | import matplotlib.pyplot as plt
from exact_mdp.mdp import *
import time
def init_mdp():
    """Build the example route-planning MDP (Home -> x2 -> Work).

    States, transition duration distributions (`miu`), action likelihoods
    and rewards are hard-coded; times are in hours over the horizon [7, 14].
    Returns a configured exact_mdp.mdp.MDP instance.
    """
    state = [State('Home'),
             State('x2'),
             State('Work')]
    # Each miu maps to (destination state, REL/ABS timing, duration pdf).
    miu = {'miu1': (state[0], REL, PiecewisePolynomial([P([1])], [1 / 6, 1 / 6])),  # miu1
           'miu2': (state[2], ABS, PiecewisePolynomial([P([-16, 16/9]), P([56/3, -16/9])], [9, 9.75, 10.5])),  # miu2
           'miu3': (state[1], REL, PiecewisePolynomial([P([-1, 1]), P([3, -1])], [1, 2, 3])),  # miu3
           'miu4': (state[1], REL, PiecewisePolynomial([P([-1/2, 1]), P([5/2, -1])], [0.5, 1.5, 2.5])),  # miu4
           'miu5': (state[2], REL, PiecewisePolynomial([P([1])], [1, 1]))}  # miu5
    # Time-dependent probability of each outcome given its action.
    likelihood = [PiecewisePolynomial([P([1])], [7 + 50 / 60, 14]),  # L1
                  PiecewisePolynomial([P([1])], [7, 7 + 50 / 60]),  # L2
                  PiecewisePolynomial([P([-11, 3/2]), P([1]), P([31/2, -3/2])],
                                      [7 + 20 / 60, 8, 9 + 40 / 60, 10 + 20 / 60]),  # L3
                  PiecewisePolynomial([P([1]), P([12, -3/2]), P([0]), P([-14.5, 3/2]), P([1])],
                                      [7, 7 + 20 / 60, 8, 9 + 40 / 60, 10 + 20 / 60, 14]),  # L4
                  PiecewisePolynomial([P([1])], [7, 14])]  # L5
    reward = {'miu1': PiecewisePolynomial([P([0])], [7, 14]),
              'miu2': PiecewisePolynomial([P([0])], [7, 14]),
              'miu3': PiecewisePolynomial([P([0])], [7, 14]),
              'miu4': PiecewisePolynomial([P([0])], [7, 14]),
              'miu5': PiecewisePolynomial([P([0])], [7, 14])}
    # add actions to states
    state[0].add_action('taking train', 'miu1', likelihood[0])  # Miss the 8am train
    state[0].add_action('taking train', 'miu2', likelihood[1])  # Caught the 8am train
    state[0].add_action('driving', 'miu3', likelihood[2])  # Highway - rush hour
    state[0].add_action('driving', 'miu4', likelihood[3])  # Highway - off peak
    state[1].add_action('driving', 'miu5', likelihood[4])  # Drive on backroad
    # assign value functions
    # state[0].value_function = PiecewisePolynomial([Poly('0', x)], [7, 14])
    # state[1].value_function = PiecewisePolynomial([Poly('0', x)], [7, 14])
    # state[2].value_function = PiecewisePolynomial([Poly('0', x)], [7, 14])
    # Terminal value at Work decays linearly after 11:00 and is 0 past 12:00.
    mdp = MDP(state, miu, reward, state[0],
              {state[2]: PiecewisePolynomial([P([1]), P([12, -1]), P([0])], [7, 11, 12, 14])},
              [7, 14], lazy=0, pwc=0, lazy_error_tolerance=0.03)
    return mdp
def main():
    """Benchmark: build the example MDP and solve it 100 times, timing it."""
    started = time.time()
    for _ in range(100):
        mdp_instance = init_mdp()
        # Solve the MDP; the value function itself is not inspected here.
        mdp_instance.value_iteration()
    print("--- %s seconds ---" % (time.time() - started))


if __name__ == "__main__":
    main()
ctensmeyer/pagenet | train.py | 1 | 8707 | #!/usr/bin/python
import os
import sys
import collections
import argparse
import numpy as np
import matplotlib
matplotlib.use("AGG")
import matplotlib.pyplot as plt
import caffe
import cv2
import random
def safe_mkdir(_dir):
	"""Create _dir (including parents) if it does not already exist.

	The original swallowed *every* exception, which hid real failures such
	as permission errors. Only the already-exists case is ignored now.
	"""
	import errno
	try:
		os.makedirs(_dir)
	except OSError as exc:
		# Directory already present is fine; anything else is a real error.
		if exc.errno != errno.EEXIST:
			raise
def dump_debug(out_dir, data, dump_images=False):
	"""Write every prediction mask to out_dir/pred_images as a PNG.

	File names are the source paths with '/' flattened to '_' and the
	extension dropped; mask values are scaled from {0,1} to {0,255}.
	"""
	pred_image_dir = os.path.join(out_dir, 'pred_images')
	safe_mkdir(pred_image_dir)
	for index in range(len(data['images'])):
		name = data['filenames'][index]
		prediction = data['predictions'][index]
		base = name.replace('/', '_')[:-4]
		target = os.path.join(pred_image_dir, base + ".png")
		cv2.imwrite(target, 255 * prediction)
def predict(network, im, output_blob, args):
	"""Forward one image through the net; return the per-pixel argmax map.

	Color images are rearranged from HxWxC to CxHxW before being copied
	into the net's data blob. `args` is accepted for interface parity but
	unused here.
	"""
	blob = im if im.ndim <= 2 else np.transpose(im, axes=(2, 0, 1))
	network.blobs["data"].data[0, :, :, :] = blob
	network.forward()
	scores = network.blobs[output_blob].data[0, :].copy()
	return np.argmax(scores, axis=0)
def iou(im1, im2):
	"""Intersection-over-union of two binary masks.

	Returns 1.0 when both masks are entirely empty (they agree perfectly);
	the original raised ZeroDivisionError in that case.
	"""
	num_intersect = np.sum(np.logical_and(im1, im2))
	num_union = num_intersect + np.sum(np.logical_xor(im1, im2))
	if num_union == 0:
		return 1.0
	return float(num_intersect) / num_union
def prf(im1, im2):
	"""Precision, recall and f-measure of mask im1 against reference im2.

	Empty masks no longer crash: a zero denominator yields 0.0 for that
	term (the f-measure guard already existed in the original).
	"""
	num_intersect = np.sum(np.logical_and(im1, im2))
	num_1 = np.sum(im1)
	num_2 = np.sum(im2)
	p = num_intersect / float(num_1) if num_1 else 0.0
	r = num_intersect / float(num_2) if num_2 else 0.0
	f = (2 * p * r) / (p + r) if (p + r) else 0
	return p, r, f
def update_predictions(net, data, args):
	"""Predict every image, cache the maps, and return average IOU/P/R/F.

	Predictions are stored (at network resolution) into data['predictions'];
	metrics are computed after resizing each map back to the image's
	original size and comparing against data['original_gt'].
	"""
	print "Starting Predictions"
	total_iou = 0
	total_p = 0
	total_r = 0
	total_f = 0
	for idx in xrange(len(data['images'])):
		im = cv2.resize(data['images'][idx], (args.image_size, args.image_size))
		outputs = predict(net, im, 'out', args)
		data['predictions'][idx] = outputs.copy()
		width, height = data['original_size'][idx]
		# Nearest-neighbor resize keeps the label map discrete.
		outputs = cv2.resize(outputs, (width, height), interpolation=cv2.INTER_NEAREST)
		total_iou += iou(outputs, data['original_gt'][idx])
		p, r, f = prf(outputs, data['original_gt'][idx])
		total_p += p
		total_r += r
		total_f += f
		if idx and idx % args.print_count == 0:
			print "\tPredicted %d/%d" % (idx, len(data['images']))
	avg_iou = total_iou / len(data['images'])
	avg_p = total_p / len(data['images'])
	avg_r = total_r / len(data['images'])
	avg_f = total_f / len(data['images'])
	return avg_iou, avg_p, avg_r, avg_f
def load_data(manifest, _dir, size, color=False):
	"""Load images and quadrilateral ground truth from a manifest CSV.

	Each manifest line is "filename,x1,y1,x2,y2,x3,y3,x4,y4[,...]".
	Returns a dict of parallel lists: 'filenames', 'images' (resized to
	size x size), 'gt' (resized masks), 'original_gt', and
	'original_size' as (width, height) pairs.
	"""
	dataset = collections.defaultdict(list)
	file_list = map(lambda s: s.strip(), open(manifest, 'r').readlines())
	for line in file_list:
		tokens = line.split(',')
		f = tokens[0]
		coords = list(map(float, tokens[1:9]))
		dataset['filenames'].append(f)
		resolved = os.path.join(_dir, f)
		im = cv2.imread(resolved, 1 if color else 0)
		# Bug fix: check for a failed read *before* touching im.shape. The
		# original dereferenced None first and raised AttributeError
		# instead of the intended error message.
		if im is None:
			raise Exception("Error loading %s" % resolved)
		gt = np.zeros(im.shape[:2], dtype=np.uint8)
		cv2.fillPoly(gt, np.array(coords).reshape((4, 2)).astype(np.int32)[np.newaxis, :, :], 1)
		height, width = im.shape[:2]
		im = cv2.resize(im, (size, size))
		dataset['original_gt'].append(gt)
		# INTER_NEAREST keeps the mask strictly binary after resizing.
		gt = cv2.resize(gt, (size, size), interpolation=cv2.INTER_NEAREST)
		dataset['images'].append(im)
		dataset['original_size'].append( (width, height) )  # opencv does (w,h)
		dataset['gt'].append(gt)
	return dataset
def preprocess_data(data, args):
	"""Center and scale every image in place; seed predictions with GT.

	Images become args.scale * (im - args.mean). Each ground-truth mask
	is copied into data['predictions'] so prediction buffers start out
	aligned with the labels. Uses enumerate instead of the original
	Python-2-only xrange index loop, and hoists the invariant scale/mean
	lookups out of the loop.
	"""
	scale = args.scale
	mean = args.mean
	for idx, im in enumerate(data['images']):
		data['images'][idx] = scale * (im - mean)
		data['predictions'].append(data['gt'][idx].copy())
def get_solver_params(f):
	"""Parse max_iter and snapshot from a Caffe solver prototxt.

	Returns (max_iters, snapshot), each 0 if absent; the last occurrence
	of a key wins. Fixes two defects in the original: the file handle was
	never closed, and blank lines (common in prototxt files) raised
	IndexError on tokens[0].
	"""
	max_iters = 0
	snapshot = 0
	with open(f) as fd:
		for line in fd:
			tokens = line.split()
			if not tokens:
				continue  # blank line
			if tokens[0] == 'max_iter:':
				max_iters = int(tokens[1])
			elif tokens[0] == 'snapshot:':
				snapshot = int(tokens[1])
	return max_iters, snapshot
def presolve(net, args):
	"""Reshape the net's input blobs to match the configured batch geometry."""
	channels = 3 if args.color else 1
	side = args.image_size
	net.blobs["data"].reshape(args.batch_size, channels, side, side)
	net.blobs["gt"].reshape(args.batch_size, 1, side, side)
def set_input_data(net, data, args):
	"""Fill the net's input blobs with a randomly sampled mini-batch."""
	last = len(data['images']) - 1
	for slot in range(args.batch_size):
		pick = random.randint(0, last)
		image = data['images'][pick]
		truth = data['gt'][pick]
		if image.ndim > 2:
			# HxWxC -> CxHxW for the caffe data blob.
			image = np.transpose(image, (2, 0, 1))
		net.blobs["data"].data[slot, :, :, :] = image
		net.blobs["gt"].data[slot, 0, :, :] = truth
def main(args):
	"""Train the solver, periodically evaluating IOU/P/R/F on train and val.

	Validation runs every solver snapshot interval; training-set evaluation
	runs every args.gt_interval iterations once args.min_interval is
	reached. If args.debug_dir is set, curves, prediction images and raw
	metric lists are written there at the end.
	"""
	train_data = load_data(args.train_manifest, args.dataset_dir, args.image_size, args.color)
	val_data = load_data(args.val_manifest, args.dataset_dir, args.image_size, args.color)
	preprocess_data(train_data, args)
	preprocess_data(val_data, args)
	print "Done loading data"
	solver = caffe.SGDSolver(args.solver_file)
	max_iters, snapshot_interval = get_solver_params(args.solver_file)
	presolve(solver.net, args)
	# (iteration, value) pairs for each metric/split.
	train_iou, val_iou = [], []
	train_p, val_p = [], []
	train_r, val_r = [], []
	train_f, val_f = [], []
	for iter_num in xrange(max_iters + 1):
		set_input_data(solver.net, train_data, args)
		solver.step(1)
		if iter_num and iter_num % snapshot_interval == 0:
			print "Validation Prediction: %d" % iter_num
			avg_iou, avg_p, avg_r, avg_f = update_predictions(solver.net, val_data, args)
			val_iou.append((iter_num, avg_iou))
			val_p.append((iter_num, avg_p))
			val_r.append((iter_num, avg_r))
			val_f.append((iter_num, avg_f))
			if args.debug_dir:
				print "Dumping images"
				out_dir = os.path.join(args.debug_dir, 'val_%d' % iter_num)
				dump_debug(out_dir, val_data)
		if iter_num >= args.min_interval and iter_num % args.gt_interval == 0:
			print "Train Prediction: %d" % iter_num
			avg_iou, avg_p, avg_r, avg_f = update_predictions(solver.net, train_data, args)
			train_iou.append((iter_num, avg_iou))
			train_p.append((iter_num, avg_p))
			train_r.append((iter_num, avg_r))
			train_f.append((iter_num, avg_f))
	print "Train IOU: ", train_iou
	print
	print "Val IOU: ", val_iou
	if args.debug_dir:
		# One curve image per metric, train vs val.
		plt.plot(*zip(*train_iou), label='train')
		plt.plot(*zip(*val_iou), label='val')
		plt.legend()
		plt.savefig(os.path.join(args.debug_dir, 'iou.png'))
		plt.clf()
		plt.plot(*zip(*train_p), label='train')
		plt.plot(*zip(*val_p), label='val')
		plt.legend()
		plt.savefig(os.path.join(args.debug_dir, 'precision.png'))
		plt.clf()
		plt.plot(*zip(*train_r), label='train')
		plt.plot(*zip(*val_r), label='val')
		plt.legend()
		plt.savefig(os.path.join(args.debug_dir, 'recall.png'))
		plt.clf()
		plt.plot(*zip(*train_f), label='train')
		plt.plot(*zip(*val_f), label='val')
		plt.legend()
		plt.savefig(os.path.join(args.debug_dir, 'fmeasure.png'))
		# Final full-dataset predictions and image dumps.
		_ = update_predictions(solver.net, train_data, args)
		out_dir = os.path.join(args.debug_dir, 'train_final')
		dump_debug(out_dir, train_data, True)
		_ = update_predictions(solver.net, val_data, args)
		out_dir = os.path.join(args.debug_dir, 'val_final')
		dump_debug(out_dir, val_data, True)
		# Persist raw metric series for later analysis.
		for name, vals in zip(['train_iou', 'val_iou', 'train_p', 'val_p',
				'train_r', 'val_r', 'train_f', 'val_f'],
				[train_iou, val_iou, train_p, val_p,
				 train_r, val_r, train_f, val_f]):
			fd = open(os.path.join(args.debug_dir, "%s.txt" % name), 'w')
			fd.write('%r\n' % vals)
			fd.close()
def get_args():
parser = argparse.ArgumentParser(description="Outputs binary predictions")
parser.add_argument("solver_file",
help="The solver.prototxt")
parser.add_argument("dataset_dir",
help="The dataset to be evaluated")
parser.add_argument("train_manifest",
help="txt file listing images to train on")
parser.add_argument("val_manifest",
help="txt file listing images for validation")
parser.add_argument("--gpu", type=int, default=0,
help="GPU to use for running the network")
parser.add_argument("-m", "--mean", type=float, default=127.,
help="Mean value for data preprocessing")
parser.add_argument("-s", "--scale", type=float, default=1.,
help="Optional pixel scale factor")
parser.add_argument("-b", "--batch-size", default=2, type=int,
help="Training batch size")
parser.add_argument("-c", "--color", default=False, action='store_true',
help="Training batch size")
parser.add_argument("--image-size", default=256, type=int,
help="Size of images for input to training/prediction")
parser.add_argument("--gt-interval", default=5000, type=int,
help="Interval for Debug")
parser.add_argument("--min-interval", default=5000, type=int,
help="Miniumum iteration for Debug")
parser.add_argument("--debug-dir", default='debug', type=str,
help="Dump images for debugging")
parser.add_argument("--print-count", default=10, type=int,
help="How often to print progress")
args = parser.parse_args()
print args
return args
if __name__ == "__main__":
	args = get_args()
	# Select the compute device before the solver/network is created.
	if args.gpu >= 0:
		caffe.set_device(args.gpu)
		caffe.set_mode_gpu()
	else:
		caffe.set_mode_cpu()
	main(args)
| bsd-3-clause |
koobonil/Boss2D | Boss2D/addon/webrtc-jumpingyang001_for_boss/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_optimize.py | 3 | 6534 | #!/usr/bin/env python
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Finds the APM configuration that maximizes a provided metric by
parsing the output generated apm_quality_assessment.py.
"""
from __future__ import division
import collections
import logging
import os
import quality_assessment.data_access as data_access
import quality_assessment.collect_data as collect_data
def _InstanceArgumentsParser():
  """Arguments parser factory. Extends the arguments from 'collect_data'
  with a few extra for selecting what parameters to optimize for.
  """
  parser = collect_data.InstanceArgumentsParser()
  # Bug fix: the implicit string concatenations below were missing spaces,
  # producing "parametercombinations." and "files inconfig_dir" in --help.
  parser.description = (
      'Rudimentary optimization of a function over different parameter '
      'combinations.')

  parser.add_argument('-n', '--config_dir', required=False,
                      help=('path to the folder with the configuration files'),
                      default='apm_configs')

  parser.add_argument('-p', '--params', required=True, nargs='+',
                      help=('parameters to parse from the config files in '
                            'config_dir'))

  parser.add_argument('-z', '--params_not_to_optimize', required=False,
                      nargs='+', default=[],
                      help=('parameters from `params` not to be optimized for'))

  return parser
def _ConfigurationAndScores(data_frame, params,
                            params_not_to_optimize, config_dir):
  """Returns a list of all configurations and scores.

  Args:
    data_frame: A pandas data frame with the scores and config name
                returned by _FindScores.
    params: The parameter names to parse from configs the config
            directory

    params_not_to_optimize: The parameter names which shouldn't affect
                            the optimal parameter
                            selection. E.g., fixed settings and not
                            tunable parameters.

    config_dir: Path to folder with config files.

  Returns:
    Dictionary of the form
    {param_combination: [{params: {param1: value1, ...},
                          scores: {score1: value1, ...}}]}.

    The key `param_combination` runs over all parameter combinations
    of the parameters in `params` and not in
    `params_not_to_optimize`. A corresponding value is a list of all
    param combinations for params in `params_not_to_optimize` and
    their scores.
  """
  results = collections.defaultdict(list)
  config_names = data_frame['apm_config'].drop_duplicates().values.tolist()
  score_names = data_frame['eval_score_name'].drop_duplicates().values.tolist()

  # Normalize the scores (per score name, by the maximum observed value).
  normalization_constants = {}
  for score_name in score_names:
    scores = data_frame[data_frame.eval_score_name == score_name].score
    normalization_constants[score_name] = max(scores)

  params_to_optimize = [p for p in params if p not in params_not_to_optimize]
  # Named tuple keys make the param combination hashable and readable.
  param_combination = collections.namedtuple("ParamCombination",
                                             params_to_optimize)

  for config_name in config_names:
    config_json = data_access.AudioProcConfigFile.Load(
        os.path.join(config_dir, config_name + ".json"))
    scores = {}
    data_cell = data_frame[data_frame.apm_config == config_name]
    for score_name in score_names:
      # Average the (normalized) score over all rows for this config.
      data_cell_scores = data_cell[data_cell.eval_score_name ==
                                   score_name].score
      scores[score_name] = sum(data_cell_scores) / len(data_cell_scores)
      scores[score_name] /= normalization_constants[score_name]

    result = {'scores': scores, 'params': {}}
    config_optimize_params = {}
    for param in params:
      if param in params_to_optimize:
        # Config JSON keys are the CLI flags, hence the '-' prefix.
        config_optimize_params[param] = config_json['-' + param]
      else:
        result['params'][param] = config_json['-' + param]

    current_param_combination = param_combination(
        **config_optimize_params)
    results[current_param_combination].append(result)
  return results
def _FindOptimalParameter(configs_and_scores, score_weighting):
"""Finds the config producing the maximal score.
Args:
configs_and_scores: structure of the form returned by
_ConfigurationAndScores
score_weighting: a function to weight together all score values of
the form [{params: {param1: value1, ...}, scores:
{score1: value1, ...}}] into a numeric
value
Returns:
the config that has the largest values of |score_weighting| applied
to its scores.
"""
min_score = float('+inf')
best_params = None
for config in configs_and_scores:
scores_and_params = configs_and_scores[config]
current_score = score_weighting(scores_and_params)
if current_score < min_score:
min_score = current_score
best_params = config
logging.debug("Score: %f", current_score)
logging.debug("Config: %s", str(config))
return best_params
def _ExampleWeighting(scores_and_configs):
"""Example argument to `_FindOptimalParameter`
Args:
scores_and_configs: a list of configs and scores, in the form
described in _FindOptimalParameter
Returns:
numeric value, the sum of all scores
"""
res = 0
for score_config in scores_and_configs:
res += sum(score_config['scores'].values())
return res
def main():
  """Collect scores, group them by parameter combination, report the optimum."""
  # Init.
  # TODO(alessiob): INFO once debugged.
  logging.basicConfig(level=logging.DEBUG)
  parser = _InstanceArgumentsParser()
  args = parser.parse_args()

  # Get the scores.
  src_path = collect_data.ConstructSrcPath(args)
  logging.debug('Src path <%s>', src_path)
  scores_data_frame = collect_data.FindScores(src_path, args)
  all_scores = _ConfigurationAndScores(scores_data_frame,
                                       args.params,
                                       args.params_not_to_optimize,
                                       args.config_dir)

  opt_param = _FindOptimalParameter(all_scores, _ExampleWeighting)

  logging.info('Optimal parameter combination: <%s>', opt_param)
  logging.info('It\'s score values: <%s>', all_scores[opt_param])


if __name__ == "__main__":
  main()
| mit |
hitszxp/scikit-learn | sklearn/utils/__init__.py | 7 | 13252 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite, warn_if_not_float,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable)
from .class_weight import compute_class_weight
from ..externals.joblib import cpu_count
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"warn_if_not_float",
"check_random_state",
"compute_class_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable']
class deprecated(object):
    """Decorator to mark a function or class as deprecated.

    Issue a warning when the function is called/the class is instantiated and
    adds a warning to the docstring.

    The optional extra argument will be appended to the deprecation message
    and the docstring. Note: to use this with the default value for extra, put
    in an empty of parentheses:

    >>> from sklearn.utils import deprecated
    >>> deprecated() # doctest: +ELLIPSIS
    <sklearn.utils.deprecated object at ...>

    >>> @deprecated()
    ... def some_function(): pass
    """

    # Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
    # but with many changes.

    def __init__(self, extra=''):
        """
        Parameters
        ----------
        extra: string
          to be added to the deprecation messages
        """
        self.extra = extra

    def __call__(self, obj):
        # Dispatch: decorating a class wraps its __init__; decorating a
        # plain function wraps the function itself.
        if isinstance(obj, type):
            return self._decorate_class(obj)
        else:
            return self._decorate_fun(obj)

    def _decorate_class(self, cls):
        # Wrap cls.__init__ so instantiation emits a DeprecationWarning.
        msg = "Class %s is deprecated" % cls.__name__
        if self.extra:
            msg += "; %s" % self.extra

        # FIXME: we should probably reset __new__ for full generality
        init = cls.__init__

        def wrapped(*args, **kwargs):
            warnings.warn(msg, category=DeprecationWarning)
            return init(*args, **kwargs)
        cls.__init__ = wrapped

        wrapped.__name__ = '__init__'
        wrapped.__doc__ = self._update_doc(init.__doc__)
        # Keep a handle to the original __init__ for introspection.
        wrapped.deprecated_original = init

        return cls

    def _decorate_fun(self, fun):
        """Decorate function fun"""
        msg = "Function %s is deprecated" % fun.__name__
        if self.extra:
            msg += "; %s" % self.extra

        def wrapped(*args, **kwargs):
            warnings.warn(msg, category=DeprecationWarning)
            return fun(*args, **kwargs)

        # Preserve the wrapped function's metadata (manual functools.wraps).
        wrapped.__name__ = fun.__name__
        wrapped.__dict__ = fun.__dict__
        wrapped.__doc__ = self._update_doc(fun.__doc__)

        return wrapped

    def _update_doc(self, olddoc):
        # Prepend a DEPRECATED banner (plus any extra text) to the docstring.
        newdoc = "DEPRECATED"
        if self.extra:
            newdoc = "%s: %s" % (newdoc, self.extra)
        if olddoc:
            newdoc = "%s\n\n%s" % (newdoc, olddoc)
        return newdoc
def safe_mask(X, mask):
    """Return a mask which is safe to use on X.

    Parameters
    ----------
    X : {array-like, sparse matrix}
        Data on which to apply mask.

    mask : array
        Mask to be used on X (boolean mask or integer indices).

    Returns
    -------
    mask : ndarray
        The input mask when it already holds integer indices; for sparse
        ``X`` a boolean mask is converted to integer row indices.
    """
    mask = np.asarray(mask)
    # Integer masks index rows directly and are safe for dense and sparse X.
    # NOTE: this used ``np.int`` (a deprecated alias of Python's ``int``,
    # removed in NumPy 1.24); ``np.integer`` covers the same dtypes.
    if np.issubdtype(mask.dtype, np.integer):
        return mask
    if hasattr(X, "toarray"):
        # Sparse matrices may not support boolean-mask row indexing, so
        # convert the boolean mask to explicit integer indices.
        ind = np.arange(mask.shape[0])
        mask = ind[mask]
    return mask
def safe_indexing(X, indices):
    """Return items or rows from X using indices.

    Allows simple indexing of lists or arrays.

    Parameters
    ----------
    X : array-like, sparse-matrix, list.
        Data from which to sample rows or items.
    indices : array-like, list
        Indices according to which X will be subsampled.
    """
    # pandas DataFrames / Series expose positional indexing through iloc.
    if hasattr(X, "iloc"):
        return X.iloc[indices]
    if not hasattr(X, "shape"):
        # Plain Python sequences: pick elements one by one.
        return [X[idx] for idx in indices]
    has_int_dtype = hasattr(indices, 'dtype') and indices.dtype.kind == 'i'
    if hasattr(X, 'take') and has_int_dtype:
        # ``take`` is often substantially faster than fancy indexing.
        return X.take(indices, axis=0)
    return X[indices]
def resample(*arrays, **options):
    """Resample arrays or sparse matrices in a consistent way

    The default strategy implements one step of the bootstrapping
    procedure.

    Parameters
    ----------
    `*arrays` : sequence of arrays or scipy.sparse matrices with same shape[0]

    replace : boolean, True by default
        Implements resampling with replacement. If False, this will implement
        (sliced) random permutations.

    n_samples : int, None by default
        Number of samples to generate. If left to None this is
        automatically set to the first dimension of the arrays.

    random_state : int or RandomState instance
        Control the shuffling for reproducible behavior.

    Returns
    -------
    Sequence of resampled views of the collections. The original arrays are
    not impacted.

    Examples
    --------
    It is possible to mix sparse and dense arrays in the same run::

      >>> X = [[1., 0.], [2., 1.], [0., 0.]]
      >>> y = np.array([0, 1, 2])

      >>> from scipy.sparse import coo_matrix
      >>> X_sparse = coo_matrix(X)

      >>> from sklearn.utils import resample
      >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
      >>> X
      array([[ 1.,  0.],
             [ 2.,  1.],
             [ 1.,  0.]])

      >>> X_sparse  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
      <3x2 sparse matrix of type '<... 'numpy.float64'>'
          with 4 stored elements in Compressed Sparse Row format>

      >>> X_sparse.toarray()
      array([[ 1.,  0.],
             [ 2.,  1.],
             [ 1.,  0.]])

      >>> y
      array([0, 1, 0])

      >>> resample(y, n_samples=2, random_state=0)
      array([0, 1])

    See also
    --------
    :class:`sklearn.cross_validation.Bootstrap`
    :func:`sklearn.utils.shuffle`
    """
    random_state = check_random_state(options.pop('random_state', None))
    replace = options.pop('replace', True)
    max_n_samples = options.pop('n_samples', None)
    # Any remaining keyword argument is unknown -> fail loudly.
    if options:
        raise ValueError("Unexpected kw arguments: %r" % options.keys())

    if len(arrays) == 0:
        return None

    # The number of available samples is read from the first collection;
    # check_consistent_length below guarantees all others match it.
    first = arrays[0]
    n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)

    if max_n_samples is None:
        max_n_samples = n_samples

    if max_n_samples > n_samples:
        raise ValueError("Cannot sample %d out of arrays with dim %d" % (
            max_n_samples, n_samples))

    check_consistent_length(*arrays)

    arrays = [check_array(x, accept_sparse='csr', ensure_2d=False,
                          allow_nd=True) for x in arrays]

    if replace:
        # Bootstrap: draw indices with replacement.
        indices = random_state.randint(0, n_samples, size=(max_n_samples,))
    else:
        # Permutation: shuffle all indices, keep the first max_n_samples.
        indices = np.arange(n_samples)
        random_state.shuffle(indices)
        indices = indices[:max_n_samples]

    # Apply the same index vector to every collection for consistency.
    resampled_arrays = []
    for array in arrays:
        array = array[indices]
        resampled_arrays.append(array)

    if len(resampled_arrays) == 1:
        # syntactic sugar for the unit argument case
        return resampled_arrays[0]
    else:
        return resampled_arrays
def shuffle(*arrays, **options):
    """Shuffle arrays or sparse matrices in a consistent way

    This is a convenience alias to ``resample(*arrays, replace=False)`` to do
    random permutations of the collections.

    Parameters
    ----------
    `*arrays` : sequence of arrays or scipy.sparse matrices with same shape[0]

    random_state : int or RandomState instance
        Control the shuffling for reproducible behavior.

    n_samples : int, None by default
        Number of samples to generate. If left to None this is
        automatically set to the first dimension of the arrays.

    Returns
    -------
    Sequence of shuffled views of the collections. The original arrays are
    not impacted.

    Examples
    --------
    It is possible to mix sparse and dense arrays in the same run::

      >>> X = [[1., 0.], [2., 1.], [0., 0.]]
      >>> y = np.array([0, 1, 2])

      >>> from scipy.sparse import coo_matrix
      >>> X_sparse = coo_matrix(X)

      >>> from sklearn.utils import shuffle
      >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
      >>> X
      array([[ 0.,  0.],
             [ 2.,  1.],
             [ 1.,  0.]])

      >>> X_sparse  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
      <3x2 sparse matrix of type '<... 'numpy.float64'>'
          with 3 stored elements in Compressed Sparse Row format>

      >>> X_sparse.toarray()
      array([[ 0.,  0.],
             [ 2.,  1.],
             [ 1.,  0.]])

      >>> y
      array([2, 1, 0])

      >>> shuffle(y, n_samples=2, random_state=0)
      array([0, 1])

    See also
    --------
    :func:`sklearn.utils.resample`
    """
    # Force sampling without replacement; a user-supplied ``replace`` value
    # is deliberately overridden rather than rejected.
    options['replace'] = False
    return resample(*arrays, **options)
def safe_sqr(X, copy=True):
    """Element wise squaring of array-likes and sparse matrices.

    Parameters
    ----------
    X : array like, matrix, sparse matrix

    copy : bool, True by default
        Square a copy of the validated array (True) or in place (False).

    Returns
    -------
    X ** 2 : element wise square
    """
    X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
    if issparse(X):
        if copy:
            X = X.copy()
        # Only the stored (non-zero) entries need squaring.
        X.data **= 2
        return X
    if copy:
        return X ** 2
    X **= 2
    return X
def gen_batches(n, batch_size):
    """Generator to create slices containing batch_size elements, from 0 to n.

    The last slice may contain less than batch_size elements, when batch_size
    does not divide n.

    Parameters
    ----------
    n : int
        Total number of elements to cover.
    batch_size : int
        Number of elements per batch; must be strictly positive.

    Yields
    ------
    slice
        Consecutive slices covering ``range(n)``.

    Raises
    ------
    ValueError
        If ``batch_size`` is not strictly positive (previously a zero
        batch size crashed with ZeroDivisionError and a negative one
        yielded nonsensical slices).

    Examples
    --------
    >>> from sklearn.utils import gen_batches
    >>> list(gen_batches(7, 3))
    [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
    >>> list(gen_batches(6, 3))
    [slice(0, 3, None), slice(3, 6, None)]
    >>> list(gen_batches(2, 3))
    [slice(0, 2, None)]
    """
    if batch_size <= 0:
        raise ValueError("gen_batches got batch_size=%s, must be positive"
                         % batch_size)
    start = 0
    for _ in range(int(n // batch_size)):
        end = start + batch_size
        yield slice(start, end)
        start = end
    # Trailing partial batch, when batch_size does not divide n.
    if start < n:
        yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
    """Generator to create n_packs slices going up to n.

    Pass n_samples when the slices are to be used for sparse matrix indexing;
    slicing off-the-end raises an exception, while it works for NumPy arrays.

    Examples
    --------
    >>> from sklearn.utils import gen_even_slices
    >>> list(gen_even_slices(10, 1))
    [slice(0, 10, None)]
    >>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
    [slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
    >>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
    [slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
    >>> list(gen_even_slices(10, 3))
    [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
    """
    start = 0
    for pack_num in range(n_packs):
        # The first ``n % n_packs`` packs receive one extra element each.
        length = n // n_packs + (1 if pack_num < n % n_packs else 0)
        if length > 0:
            stop = start + length
            if n_samples is not None:
                # Clamp so sparse-matrix slicing never runs off the end.
                stop = min(n_samples, stop)
            yield slice(start, stop, None)
            start = stop
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
    """Cast iterable x to a Sequence, avoiding a copy if possible."""
    # ndarray input: asarray is a no-op (returns the same object, no copy).
    if isinstance(x, np.ndarray):
        return np.asarray(x)
    # Lists, tuples, strings, ... already are sequences: return unchanged.
    if isinstance(x, Sequence):
        return x
    # Generic iterables (generators, sets, ...) must be materialized.
    return list(x)
# NOTE(review): emitted by iterative estimators elsewhere in the package —
# confirm usage sites before changing its base class.
class ConvergenceWarning(UserWarning):
    """Custom warning to capture convergence problems"""
# Derives from UserWarning so it is shown by default (not filtered like
# DeprecationWarning).
class DataDimensionalityWarning(UserWarning):
    """Custom warning to notify potential issues with data dimensionality"""
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/utils/tests/test_class_weight.py | 26 | 2001 | import numpy as np
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
def test_compute_class_weight():
    """Test (and demo) compute_class_weight."""
    labels = np.asarray([2, 2, 2, 3, 3, 4])
    present_classes = np.unique(labels)
    weights = compute_class_weight("auto", present_classes, labels)
    # Weights sum to the number of classes; rarer classes weigh more.
    assert_almost_equal(weights.sum(), present_classes.shape)
    assert_true(weights[0] < weights[1] < weights[2])
def test_compute_class_weight_not_present():
    """Raise error when y does not contain all class labels"""
    all_classes = np.arange(4)
    observed = np.asarray([0, 0, 0, 1, 1, 2])
    # Class 3 never occurs in ``observed`` -> must raise.
    assert_raises(ValueError, compute_class_weight, "auto",
                  all_classes, observed)
def test_compute_class_weight_auto_negative():
    """Test compute_class_weight when labels are negative"""
    classes = np.array([-2, -1, 0])

    # Balanced class labels -> uniform weights.
    balanced_y = np.asarray([-1, -1, 0, 0, -2, -2])
    weights = compute_class_weight("auto", classes, balanced_y)
    assert_almost_equal(weights.sum(), classes.shape)
    assert_equal(len(weights), len(classes))
    assert_array_almost_equal(weights, np.array([1., 1., 1.]))

    # Unbalanced class labels -> rarer classes get larger weights.
    unbalanced_y = np.asarray([-1, 0, 0, -2, -2, -2])
    weights = compute_class_weight("auto", classes, unbalanced_y)
    assert_almost_equal(weights.sum(), classes.shape)
    assert_equal(len(weights), len(classes))
    assert_array_almost_equal(weights, np.array([0.545, 1.636, 0.818]),
                              decimal=3)
def test_compute_class_weight_auto_unordered():
    """Test compute_class_weight when classes are unordered"""
    classes = np.array([1, 0, 3])
    y = np.asarray([1, 0, 0, 3, 3, 3])
    weights = compute_class_weight("auto", classes, y)
    # Weights follow the order of ``classes``, not sorted label order.
    assert_almost_equal(weights.sum(), classes.shape)
    assert_equal(len(weights), len(classes))
    assert_array_almost_equal(weights, np.array([1.636, 0.818, 0.545]),
                              decimal=3)
| bsd-3-clause |
pnedunuri/scikit-learn | examples/feature_stacker.py | 246 | 1906 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()

X, y = iris.data, iris.target

# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)

# Maybe some original features were good, too?
selection = SelectKBest(k=1)

# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])

# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)

svm = SVC(kernel="linear")

# Do grid search over k, n_components and C; the double-underscore keys
# address nested pipeline / feature-union parameters:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])

param_grid = dict(features__pca__n_components=[1, 2, 3],
                  features__univ_select__k=[1, 2],
                  svm__C=[0.1, 1, 10])

grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
| bsd-3-clause |
dgwakeman/mne-python | mne/report.py | 1 | 60245 | """Generate html report from MNE database
"""
# Authors: Alex Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Mainak Jas <mainak@neuro.hut.fi>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
import os
import os.path as op
import fnmatch
import re
import codecs
import time
from glob import glob
import warnings
import base64
from datetime import datetime as dt
import numpy as np
from . import read_evokeds, read_events, pick_types, read_cov
from .io import Raw, read_info
from .utils import _TempDir, logger, verbose, get_subjects_dir
from .viz import plot_events, plot_trans, plot_cov
from .viz._3d import _plot_mri_contours
from .forward import read_forward_solution
from .epochs import read_epochs
from .minimum_norm import read_inverse_operator
from .parallel import parallel_func, check_n_jobs
from .externals.tempita import HTMLTemplate, Template
from .externals.six import BytesIO
from .externals.six import moves
VALID_EXTENSIONS = ['raw.fif', 'raw.fif.gz', 'sss.fif', 'sss.fif.gz',
'-eve.fif', '-eve.fif.gz', '-cov.fif', '-cov.fif.gz',
'-trans.fif', '-trans.fif.gz', '-fwd.fif', '-fwd.fif.gz',
'-epo.fif', '-epo.fif.gz', '-inv.fif', '-inv.fif.gz',
'-ave.fif', '-ave.fif.gz', 'T1.mgz']
SECTION_ORDER = ['raw', 'events', 'epochs', 'evoked', 'covariance', 'trans',
'mri', 'forward', 'inverse']
###############################################################################
# PLOTTING FUNCTIONS
def _fig_to_img(function=None, fig=None, image_format='png',
                scale=None, **kwargs):
    """Wrapper function to plot figure and create a binary image

    Either renders the figure produced by ``function(**kwargs)`` or uses
    the given ``fig``; returns raw SVG text or a base64-encoded PNG.
    """
    import matplotlib.pyplot as plt
    if function is not None:
        # Close stale figures so the new plot is the only one rendered.
        plt.close('all')
        fig = function(**kwargs)
    output = BytesIO()
    if scale is not None:
        _scale_mpl_figure(fig, scale)
    fig.savefig(output, format=image_format, bbox_inches='tight',
                dpi=fig.get_dpi())
    plt.close(fig)
    output = output.getvalue()
    # SVG is already text and embeds directly; PNG must be base64-encoded.
    return (output if image_format == 'svg' else
            base64.b64encode(output).decode('ascii'))
def _scale_mpl_figure(fig, scale):
    """Magic scaling helper

    Keeps font-size and artist sizes constant
    0.5 : current font - 4pt
    2.0 : current font + 4pt

    XXX it's unclear why this works, but good to go for most cases
    """
    fig.set_size_inches(fig.get_size_inches() * scale)
    fig.set_dpi(fig.get_dpi() * scale)
    import matplotlib as mpl

    # Signed correction factor: grow fonts when up-scaling, shrink when
    # down-scaling (symmetric via the reciprocal).
    if scale >= 1:
        sfactor = scale ** 2
    elif scale < 1:
        sfactor = -((1. / scale) ** 2)
    # Adjust every text artist so font sizes stay visually constant after
    # the dpi change above.
    for text in fig.findobj(mpl.text.Text):
        fs = text.get_fontsize()
        new_size = fs + sfactor
        if new_size <= 0:
            raise ValueError('could not rescale matplotlib fonts, consider '
                             'increasing "scale"')
        text.set_fontsize(new_size)

    fig.canvas.draw()
def _figs_to_mrislices(sl, n_jobs, **kwargs):
    # Render MRI contour slices in parallel and merge the per-job outputs
    # into a single flat list.
    import matplotlib.pyplot as plt
    plt.close('all')
    # Never spawn more jobs than there are slices (but at least one).
    use_jobs = min(n_jobs, max(1, len(sl)))
    parallel, p_fun, _ = parallel_func(_plot_mri_contours, use_jobs)
    outs = parallel(p_fun(slices=s, **kwargs)
                    for s in np.array_split(sl, use_jobs))
    # Each job returns a list; concatenating preserves slice order.
    for o in outs[1:]:
        outs[0] += o
    return outs[0]
def _iterate_trans_views(function, **kwargs):
    """Auxiliary function to iterate over views in trans fig.

    Renders the mayavi scene produced by ``function`` from three camera
    angles and assembles the screenshots into one matplotlib figure,
    returned as an encoded image.
    """
    from scipy.misc import imread
    import matplotlib.pyplot as plt
    import mayavi
    fig = function(**kwargs)

    assert isinstance(fig, mayavi.core.scene.Scene)

    # Three (azimuth, elevation) camera settings.
    views = [(90, 90), (0, 90), (0, -90)]
    fig2, axes = plt.subplots(1, len(views))
    for view, ax in zip(views, axes):
        mayavi.mlab.view(view[0], view[1])
        # XXX: save_bmp / save_png / ...
        tempdir = _TempDir()
        temp_fname = op.join(tempdir, 'test.png')
        if fig.scene is not None:
            fig.scene.save_png(temp_fname)
            im = imread(temp_fname)
        else:  # Testing mode
            im = np.zeros((2, 2, 3))
        ax.imshow(im)
        ax.axis('off')

    mayavi.mlab.close(fig)
    img = _fig_to_img(fig=fig2)
    return img
###############################################################################
# TOC FUNCTIONS
def _is_bad_fname(fname):
    """Auxiliary function for identifying bad file naming patterns
    and highlighting them in red in the TOC.
    """
    # Whitened evokeds carry a " (whitened)" suffix; strip it before
    # checking the real extension.
    if fname.endswith('(whitened)'):
        fname = fname[:-11]

    recognized = tuple(VALID_EXTENSIONS + ['bem', 'custom'])
    return '' if fname.endswith(recognized) else 'red'
def _get_toc_property(fname):
"""Auxiliary function to assign class names to TOC
list elements to allow toggling with buttons.
"""
if fname.endswith(('-eve.fif', '-eve.fif.gz')):
div_klass = 'events'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-ave.fif', '-ave.fif.gz')):
div_klass = 'evoked'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-cov.fif', '-cov.fif.gz')):
div_klass = 'covariance'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('raw.fif', 'raw.fif.gz',
'sss.fif', 'sss.fif.gz')):
div_klass = 'raw'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-trans.fif', '-trans.fif.gz')):
div_klass = 'trans'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-fwd.fif', '-fwd.fif.gz')):
div_klass = 'forward'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-inv.fif', '-inv.fif.gz')):
div_klass = 'inverse'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-epo.fif', '-epo.fif.gz')):
div_klass = 'epochs'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('.nii', '.nii.gz', '.mgh', '.mgz')):
div_klass = 'mri'
tooltip = 'MRI'
text = 'MRI'
elif fname.endswith(('bem')):
div_klass = 'mri'
tooltip = 'MRI'
text = 'MRI'
elif fname.endswith('(whitened)'):
div_klass = 'evoked'
tooltip = fname
text = op.basename(fname[:-11]) + '(whitened)'
else:
div_klass = fname.split('-#-')[1]
tooltip = fname.split('-#-')[0]
text = fname.split('-#-')[0]
return div_klass, tooltip, text
def _iterate_files(report, fnames, info, cov, baseline, sfreq, on_error):
    """Auxiliary function to parallel process in batch mode.

    Renders each file with the matching ``report._render_*`` method and
    returns the collected htmls, file names and section labels (entries
    are ``None`` for unrecognized or failed files).
    """
    htmls, report_fnames, report_sectionlabels = [], [], []

    def _update_html(html, report_fname, report_sectionlabel):
        """Update the lists above."""
        htmls.append(html)
        report_fnames.append(report_fname)
        report_sectionlabels.append(report_sectionlabel)

    for fname in fnames:
        logger.info("Rendering : %s"
                    % op.join('...' + report.data_path[-20:],
                              fname))
        try:
            # Dispatch on the file-name suffix to the matching renderer.
            if fname.endswith(('raw.fif', 'raw.fif.gz',
                               'sss.fif', 'sss.fif.gz')):
                html = report._render_raw(fname)
                report_fname = fname
                report_sectionlabel = 'raw'
            elif fname.endswith(('-fwd.fif', '-fwd.fif.gz')):
                html = report._render_forward(fname)
                report_fname = fname
                report_sectionlabel = 'forward'
            elif fname.endswith(('-inv.fif', '-inv.fif.gz')):
                html = report._render_inverse(fname)
                report_fname = fname
                report_sectionlabel = 'inverse'
            elif fname.endswith(('-ave.fif', '-ave.fif.gz')):
                if cov is not None:
                    # When a noise covariance is available, add an extra
                    # whitened-evoked entry before the plain one.
                    html = report._render_whitened_evoked(fname, cov, baseline)
                    report_fname = fname + ' (whitened)'
                    report_sectionlabel = 'evoked'
                    _update_html(html, report_fname, report_sectionlabel)
                html = report._render_evoked(fname, baseline)
                report_fname = fname
                report_sectionlabel = 'evoked'
            elif fname.endswith(('-eve.fif', '-eve.fif.gz')):
                html = report._render_eve(fname, sfreq)
                report_fname = fname
                report_sectionlabel = 'events'
            elif fname.endswith(('-epo.fif', '-epo.fif.gz')):
                html = report._render_epochs(fname)
                report_fname = fname
                report_sectionlabel = 'epochs'
            elif (fname.endswith(('-cov.fif', '-cov.fif.gz')) and
                    report.info_fname is not None):
                html = report._render_cov(fname, info)
                report_fname = fname
                report_sectionlabel = 'covariance'
            elif (fname.endswith(('-trans.fif', '-trans.fif.gz')) and
                    report.info_fname is not None and report.subjects_dir
                    is not None and report.subject is not None):
                html = report._render_trans(fname, report.data_path, info,
                                            report.subject,
                                            report.subjects_dir)
                report_fname = fname
                report_sectionlabel = 'trans'
            else:
                html = None
                report_fname = None
                report_sectionlabel = None
        except Exception as e:
            # A failing renderer either logs a warning or aborts the whole
            # batch, depending on ``on_error``.
            if on_error == 'warn':
                logger.warning('Failed to process file %s:\n"%s"' % (fname, e))
            elif on_error == 'raise':
                raise
            html = None
            report_fname = None
            report_sectionlabel = None
        _update_html(html, report_fname, report_sectionlabel)

    return htmls, report_fnames, report_sectionlabels
###############################################################################
# IMAGE FUNCTIONS
def _build_image(data, cmap='gray'):
    """Build an image encoded in base64.
    """
    import matplotlib.pyplot as plt
    from matplotlib.figure import Figure
    from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
    # dpi=1 with figsize equal to the (reversed) data shape yields one
    # figure pixel per data sample.
    figsize = data.shape[::-1]
    if figsize[0] == 1:
        # Drop a trailing singleton dimension so figimage gets a 2-D array.
        figsize = tuple(figsize[1:])
        data = data[:, :, 0]
    fig = Figure(figsize=figsize, dpi=1.0, frameon=False)
    FigureCanvas(fig)
    # Fall back to gray when the requested colormap name does not exist.
    cmap = getattr(plt.cm, cmap, plt.cm.gray)
    fig.figimage(data, cmap=cmap)
    output = BytesIO()
    fig.savefig(output, dpi=1.0, format='png')
    return base64.b64encode(output.getvalue()).decode('ascii')
def _iterate_sagittal_slices(array, limits=None):
"""Iterate sagittal slice.
"""
shape = array.shape[0]
for ind in range(shape):
if limits and ind not in limits:
continue
yield ind, array[ind, :, :]
def _iterate_axial_slices(array, limits=None):
"""Iterate axial slice.
"""
shape = array.shape[1]
for ind in range(shape):
if limits and ind not in limits:
continue
yield ind, array[:, ind, :]
def _iterate_coronal_slices(array, limits=None):
"""Iterate coronal slice.
"""
shape = array.shape[2]
for ind in range(shape):
if limits and ind not in limits:
continue
yield ind, np.flipud(np.rot90(array[:, :, ind]))
def _iterate_mri_slices(name, ind, global_id, slides_klass, data, cmap,
                        image_format='png'):
    """Auxiliary function for parallel processing of mri slices.

    Encodes one slice as an image and wraps it in a html <li> element;
    returns ``(ind, html)``.
    """
    slice_id = '%s-%s-%s' % (name, global_id, ind)
    img_klass = 'slideimg-%s' % name
    div_klass = 'span12 %s' % slides_klass
    caption = u'Slice %s %s' % (name, ind)
    encoded = _build_image(data, cmap=cmap)
    # Only the very first slice is initially visible in the slider.
    html = _build_html_image(encoded, slice_id, div_klass, img_klass,
                             caption, ind == 0)
    return ind, html
###############################################################################
# HTML functions
def _build_html_image(img, id, div_klass, img_klass, caption=None, show=True):
"""Build a html image from a slice array.
"""
html = []
add_style = u'' if show else u'style="display: none"'
html.append(u'<li class="%s" id="%s" %s>' % (div_klass, id, add_style))
html.append(u'<div class="thumbnail">')
html.append(u'<img class="%s" alt="" style="width:90%%;" '
'src="data:image/png;base64,%s">'
% (img_klass, img))
html.append(u'</div>')
if caption:
html.append(u'<h4>%s</h4>' % caption)
html.append(u'</li>')
return u'\n'.join(html)
slider_template = HTMLTemplate(u"""
<script>$("#{{slider_id}}").slider({
range: "min",
/*orientation: "vertical",*/
min: {{minvalue}},
max: {{maxvalue}},
step: {{step}},
value: {{startvalue}},
create: function(event, ui) {
$(".{{klass}}").hide();
$("#{{klass}}-{{startvalue}}").show();},
stop: function(event, ui) {
var list_value = $("#{{slider_id}}").slider("value");
$(".{{klass}}").hide();
$("#{{klass}}-"+list_value).show();}
})</script>
""")
def _build_html_slider(slices_range, slides_klass, slider_id):
    """Build an html slider for a given slices range and a slices klass.

    The slider starts on the middle slice of ``slices_range``.
    """
    midpoint = slices_range[len(slices_range) // 2]
    return slider_template.substitute(
        slider_id=slider_id,
        klass=slides_klass,
        step=slices_range[1] - slices_range[0],
        minvalue=slices_range[0],
        maxvalue=slices_range[-1],
        startvalue=midpoint)
###############################################################################
# HTML scan renderer
header_template = Template(u"""
<!DOCTYPE html>
<html lang="fr">
<head>
{{include}}
<script type="text/javascript">
function togglebutton(class_name){
$(class_name).toggle();
if ($(class_name + '-btn').hasClass('active'))
$(class_name + '-btn').removeClass('active');
else
$(class_name + '-btn').addClass('active');
}
/* Scroll down on click to #id so that caption is not hidden
by navbar */
var shiftWindow = function() { scrollBy(0, -60) };
if (location.hash) shiftWindow();
window.addEventListener("hashchange", shiftWindow);
</script>
<style type="text/css">
body {
line-height: 1.5em;
font-family: arial, sans-serif;
}
h1 {
font-size: 30px;
text-align: center;
}
h4 {
text-align: center;
}
@link-color: @brand-primary;
@link-hover-color: darken(@link-color, 15%);
a{
color: @link-color;
&:hover {
color: @link-hover-color;
text-decoration: underline;
}
}
li{
list-style-type:none;
}
#wrapper {
text-align: left;
margin: 5em auto;
width: 700px;
}
#container{
position: relative;
}
#content{
margin-left: 22%;
margin-top: 60px;
width: 75%;
}
#toc {
margin-top: navbar-height;
position: fixed;
width: 20%;
height: 90%;
overflow: auto;
}
#toc li {
overflow: hidden;
padding-bottom: 2px;
margin-left: 20px;
}
#toc span {
float: left;
padding: 0 2px 3px 0;
}
div.footer {
background-color: #C0C0C0;
color: #000000;
padding: 3px 8px 3px 0;
clear: both;
font-size: 0.8em;
text-align: right;
}
</style>
</head>
<body>
<nav class="navbar navbar-inverse navbar-fixed-top" role="navigation">
<div class="container-fluid">
<div class="navbar-header navbar-left">
<ul class="nav nav-pills"><li class="active">
<a class="navbar-btn" data-toggle="collapse"
data-target="#viewnavbar" href="javascript:void(0)">
></a></li></ul>
</div>
<h3 class="navbar-text" style="color:white">{{title}}</h3>
<ul class="nav nav-pills navbar-right" style="margin-top: 7px;"
id="viewnavbar">
{{for section in sections}}
<li class="active {{sectionvars[section]}}-btn">
<a href="javascript:void(0)"
onclick="togglebutton('.{{sectionvars[section]}}')">
{{section if section != 'mri' else 'MRI'}}
</a>
</li>
{{endfor}}
</ul>
</div>
</nav>
""")
footer_template = HTMLTemplate(u"""
</div></body>
<div class="footer">
© Copyright 2012-2013, MNE Developers.
Created on {{date}}.
Powered by <a href="http://martinos.org/mne">MNE.
</div>
</html>
""")
image_template = Template(u"""
{{default interactive = False}}
{{default width = 50}}
{{default id = False}}
{{default image_format = 'png'}}
{{default scale = None}}
{{default comment = None}}
<li class="{{div_klass}}" {{if id}}id="{{id}}"{{endif}}
{{if not show}}style="display: none"{{endif}}>
{{if caption}}
<h4>{{caption}}</h4>
{{endif}}
<div class="thumbnail">
{{if not interactive}}
{{if image_format == 'png'}}
{{if scale is not None}}
<img alt="" style="width:{{width}}%;"
src="data:image/png;base64,{{img}}">
{{else}}
<img alt=""
src="data:image/png;base64,{{img}}">
{{endif}}
{{elif image_format == 'svg'}}
<div style="text-align:center;">
{{img}}
</div>
{{endif}}
{{if comment is not None}}
<br><br>
<div style="text-align:center;">
<style>
p.test {word-wrap: break-word;}
</style>
<p class="test">
{{comment}}
</p>
</div>
{{endif}}
{{else}}
<center>{{interactive}}</center>
{{endif}}
</div>
</li>
""")
repr_template = Template(u"""
<li class="{{div_klass}}" id="{{id}}">
<h4>{{caption}}</h4><hr>
{{repr}}
<hr></li>
""")
raw_template = Template(u"""
<li class="{{div_klass}}" id="{{id}}">
<h4>{{caption}}</h4>
<table class="table table-hover">
<tr>
<th>Measurement date</th>
{{if meas_date is not None}}
<td>{{meas_date}}</td>
{{else}}<td>Unknown</td>{{endif}}
</tr>
<tr>
<th>Experimenter</th>
{{if info['experimenter'] is not None}}
<td>{{info['experimenter']}}</td>
{{else}}<td>Unknown</td>{{endif}}
</tr>
<tr>
<th>Digitized points</th>
{{if info['dig'] is not None}}
<td>{{len(info['dig'])}} points</td>
{{else}}
<td>Not available</td>
{{endif}}
</tr>
<tr>
<th>Good channels</th>
<td>{{n_mag}} magnetometer, {{n_grad}} gradiometer,
and {{n_eeg}} EEG channels</td>
</tr>
<tr>
<th>Bad channels</th>
{{if info['bads'] is not None}}
<td>{{', '.join(info['bads'])}}</td>
{{else}}<td>None</td>{{endif}}
</tr>
<tr>
<th>EOG channels</th>
<td>{{eog}}</td>
</tr>
<tr>
<th>ECG channels</th>
<td>{{ecg}}</td>
<tr>
<th>Measurement time range</th>
<td>{{u'%0.2f' % tmin}} to {{u'%0.2f' % tmax}} sec.</td>
</tr>
<tr>
<th>Sampling frequency</th>
<td>{{u'%0.2f' % info['sfreq']}} Hz</td>
</tr>
<tr>
<th>Highpass</th>
<td>{{u'%0.2f' % info['highpass']}} Hz</td>
</tr>
<tr>
<th>Lowpass</th>
<td>{{u'%0.2f' % info['lowpass']}} Hz</td>
</tr>
</table>
</li>
""")
toc_list = Template(u"""
<li class="{{div_klass}}">
{{if id}}
<a href="javascript:void(0)" onclick="window.location.hash={{id}};">
{{endif}}
<span title="{{tooltip}}" style="color:{{color}}"> {{text}}</span>
{{if id}}</a>{{endif}}
</li>
""")
def _check_scale(scale):
"""Helper to ensure valid scale value is passed"""
if np.isscalar(scale) and scale <= 0:
raise ValueError('scale must be positive, not %s' % scale)
class Report(object):
"""Object for rendering HTML
Parameters
----------
info_fname : str
Name of the file containing the info dictionary.
subjects_dir : str | None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
subject : str | None
Subject name.
title : str
Title of the report.
cov_fname : str
Name of the file containing the noise covariance.
baseline : None or tuple of length 2 (default (None, 0))
The time interval to apply baseline correction for evokeds.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
The baseline (a, b) includes both endpoints, i.e. all
timepoints t such that a <= t <= b.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
    def __init__(self, info_fname=None, subjects_dir=None,
                 subject=None, title=None, cov_fname=None, baseline=None,
                 verbose=None):
        self.info_fname = info_fname
        self.cov_fname = cov_fname
        self.baseline = baseline
        # Resolve SUBJECTS_DIR from the environment when not given.
        self.subjects_dir = get_subjects_dir(subjects_dir, raise_error=False)
        self.subject = subject
        self.title = title
        self.verbose = verbose

        self.initial_id = 0  # running counter backing _get_id()
        self.html = []  # rendered html snippets
        self.fnames = []  # List of file names rendered
        self.sections = []  # List of sections
        self._sectionlabels = []  # Section labels
        self._sectionvars = {}  # Section variable names in js
        # boolean to specify if sections should be ordered in natural
        # order of processing (raw -> events ... -> inverse)
        self._sort_sections = False

        self._init_render()  # Initialize the renderer
def _get_id(self):
"""Get id of plot.
"""
self.initial_id += 1
return self.initial_id
    def _validate_input(self, items, captions, section, comments=None):
        """Normalize user input to three equally long lists.

        Scalars are wrapped in single-element lists and ``comments=None``
        is expanded to one ``None`` per caption.  Also registers *section*
        in ``self.sections`` on first use.

        Raises
        ------
        ValueError
            If items, captions and comments end up with different lengths.
        """
        if not isinstance(items, (list, tuple)):
            items = [items]
        if not isinstance(captions, (list, tuple)):
            captions = [captions]
        if not isinstance(comments, (list, tuple)):
            if comments is None:
                # One None comment per caption so zip() below stays aligned.
                comments = [comments] * len(captions)
            else:
                comments = [comments]
        if len(comments) != len(items):
            raise ValueError('Comments and report items must have the same '
                             'length or comments should be None.')
        elif len(captions) != len(items):
            raise ValueError('Captions and report items must have the same '
                             'length.')
        # Book-keeping of section names
        if section not in self.sections:
            self.sections.append(section)
            self._sectionvars[section] = _clean_varnames(section)
        return items, captions, comments
    def _add_figs_to_section(self, figs, captions, section='custom',
                             image_format='png', scale=None, comments=None):
        """Auxiliary method for `add_section` and `add_figs_to_section`.

        Converts each figure (matplotlib or mayavi) to an embedded image
        and appends the rendered html snippet to the report.
        """
        from scipy.misc import imread
        import matplotlib.pyplot as plt
        mayavi = None
        try:
            # on some version mayavi.core won't be exposed unless ...
            from mayavi import mlab  # noqa, mlab imported
            import mayavi
        except:  # on some systems importing Mayavi raises SystemExit (!)
            warnings.warn('Could not import mayavi. Trying to render '
                          '`mayavi.core.scene.Scene` figure instances'
                          ' will throw an error.')
        figs, captions, comments = self._validate_input(figs, captions,
                                                        section, comments)
        _check_scale(scale)
        for fig, caption, comment in zip(figs, captions, comments):
            caption = 'custom plot' if caption == '' else caption
            sectionvar = self._sectionvars[section]
            global_id = self._get_id()
            div_klass = self._sectionvars[section]
            img_klass = self._sectionvars[section]
            if mayavi is not None and isinstance(fig, mayavi.core.scene.Scene):
                # Screenshot the mayavi scene to a temp file, then wrap the
                # image in a matplotlib figure so the common _fig_to_img
                # path below can be reused.
                tempdir = _TempDir()
                temp_fname = op.join(tempdir, 'test')
                if fig.scene is not None:
                    fig.scene.save_png(temp_fname)
                    img = imread(temp_fname)
                else:  # Testing mode
                    img = np.zeros((2, 2, 3))
                mayavi.mlab.close(fig)
                fig = plt.figure()
                plt.imshow(img)
                plt.axis('off')
            img = _fig_to_img(fig=fig, scale=scale,
                              image_format=image_format)
            html = image_template.substitute(img=img, id=global_id,
                                             div_klass=div_klass,
                                             img_klass=img_klass,
                                             caption=caption,
                                             show=True,
                                             image_format=image_format,
                                             comment=comment)
            self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
            self._sectionlabels.append(sectionvar)
            self.html.append(html)
def add_figs_to_section(self, figs, captions, section='custom',
scale=None, image_format='png', comments=None):
"""Append custom user-defined figures.
Parameters
----------
figs : list of figures.
Each figure in the list can be an instance of
matplotlib.pyplot.Figure, mayavi.core.scene.Scene,
or np.ndarray (images read in using scipy.imread).
captions : list of str
A list of captions to the figures.
section : str
Name of the section. If section already exists, the figures
will be appended to the end of the section
scale : float | None | callable
Scale the images maintaining the aspect ratio.
If None, no scaling is applied. If float, scale will determine
the relative scaling (might not work for scale <= 1 depending on
font sizes). If function, should take a figure object as input
parameter. Defaults to None.
image_format : {'png', 'svg'}
The image format to be used for the report. Defaults to 'png'.
comments : None | str | list of str
A string of text or a list of strings of text to be appended after
the figure.
"""
return self._add_figs_to_section(figs=figs, captions=captions,
section=section, scale=scale,
image_format=image_format,
comments=comments)
    def add_images_to_section(self, fnames, captions, scale=None,
                              section='custom', comments=None):
        """Append custom user-defined images.

        Parameters
        ----------
        fnames : str | list of str
            A filename or a list of filenames from which images are read.
        captions : str | list of str
            A caption or a list of captions to the images.
        scale : float | None
            Scale the images maintaining the aspect ratio.
            Defaults to None. If None, no scaling will be applied.
        section : str
            Name of the section. If section already exists, the images
            will be appended to the end of the section.
        comments : None | str | list of str
            A string of text or a list of strings of text to be appended after
            the image.
        """
        # Note: using scipy.misc is equivalent because scipy internally
        # imports PIL anyway. It's not possible to redirect image output
        # to binary string using scipy.misc.
        from PIL import Image
        fnames, captions, comments = self._validate_input(fnames, captions,
                                                          section, comments)
        _check_scale(scale)
        # Each image is re-encoded as base64 png so the saved report is a
        # single self-contained html file.
        for fname, caption, comment in zip(fnames, captions, comments):
            caption = 'custom plot' if caption == '' else caption
            sectionvar = self._sectionvars[section]
            global_id = self._get_id()
            div_klass = self._sectionvars[section]
            img_klass = self._sectionvars[section]
            # Convert image to binary string.
            im = Image.open(fname)
            output = BytesIO()
            im.save(output, format='png')
            img = base64.b64encode(output.getvalue()).decode('ascii')
            html = image_template.substitute(img=img, id=global_id,
                                             div_klass=div_klass,
                                             img_klass=img_klass,
                                             caption=caption,
                                             width=scale,
                                             comment=comment,
                                             show=True)
            self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
            self._sectionlabels.append(sectionvar)
            self.html.append(html)
def add_htmls_to_section(self, htmls, captions, section='custom'):
"""Append htmls to the report.
Parameters
----------
htmls : str | list of str
An html str or a list of html str.
captions : str | list of str
A caption or a list of captions to the htmls.
section : str
Name of the section. If section already exists, the images
will be appended to the end of the section.
Notes
-----
.. versionadded:: 0.9.0
"""
htmls, captions, _ = self._validate_input(htmls, captions, section)
for html, caption in zip(htmls, captions):
caption = 'custom plot' if caption == '' else caption
sectionvar = self._sectionvars[section]
self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
self._sectionlabels.append(sectionvar)
self.html.append(html)
def add_bem_to_section(self, subject, caption='BEM', section='bem',
decim=2, n_jobs=1, subjects_dir=None):
"""Renders a bem slider html str.
Parameters
----------
subject : str
Subject name.
caption : str
A caption for the bem.
section : str
Name of the section. If section already exists, the bem
will be appended to the end of the section.
decim : int
Use this decimation factor for generating MRI/BEM images
(since it can be time consuming).
n_jobs : int
Number of jobs to run in parallel.
subjects_dir : str | None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
Notes
-----
.. versionadded:: 0.9.0
"""
caption = 'custom plot' if caption == '' else caption
html = self._render_bem(subject=subject, subjects_dir=subjects_dir,
decim=decim, n_jobs=n_jobs, section=section,
caption=caption)
html, caption = self._validate_input(html, caption, section)
sectionvar = self._sectionvars[section]
self.fnames.append('%s-#-%s-#-custom' % (caption[0], sectionvar))
self._sectionlabels.append(sectionvar)
self.html.extend(html)
###########################################################################
# HTML rendering
    def _render_one_axis(self, slices_iter, name, global_id, cmap,
                         n_elements, n_jobs):
        """Render one axis of the array.

        Renders the slices in parallel and wraps them together with a
        jquery-ui slider into one html column div.
        """
        # Fall back to the axis name when no id was supplied.
        global_id = global_id or name
        html = []
        slices, slices_range = [], []
        html.append(u'<div class="col-xs-6 col-md-4">')
        slides_klass = '%s-%s' % (name, global_id)
        # Never spawn more jobs than there are slices to render.
        use_jobs = min(n_jobs, max(1, n_elements))
        parallel, p_fun, _ = parallel_func(_iterate_mri_slices, use_jobs)
        r = parallel(p_fun(name, ind, global_id, slides_klass, data, cmap)
                     for ind, data in slices_iter)
        slices_range, slices = zip(*r)
        # Render the slider
        slider_id = 'select-%s-%s' % (name, global_id)
        html.append(u'<div id="%s"></div>' % slider_id)
        html.append(u'<ul class="thumbnails">')
        # Render the slices
        html.append(u'\n'.join(slices))
        html.append(u'</ul>')
        html.append(_build_html_slider(slices_range, slides_klass, slider_id))
        html.append(u'</div>')
        return '\n'.join(html)
###########################################################################
# global rendering functions
@verbose
def _init_render(self, verbose=None):
"""Initialize the renderer.
"""
inc_fnames = ['jquery-1.10.2.min.js', 'jquery-ui.min.js',
'bootstrap.min.js', 'jquery-ui.min.css',
'bootstrap.min.css']
include = list()
for inc_fname in inc_fnames:
logger.info('Embedding : %s' % inc_fname)
f = open(op.join(op.dirname(__file__), 'html', inc_fname),
'r')
if inc_fname.endswith('.js'):
include.append(u'<script type="text/javascript">' +
f.read() + u'</script>')
elif inc_fname.endswith('.css'):
include.append(u'<style type="text/css">' +
f.read() + u'</style>')
f.close()
self.include = ''.join(include)
@verbose
def parse_folder(self, data_path, pattern='*.fif', n_jobs=1, mri_decim=2,
sort_sections=True, on_error='warn', verbose=None):
"""Renders all the files in the folder.
Parameters
----------
data_path : str
Path to the folder containing data whose HTML report will be
created.
pattern : str | list of str
Filename pattern(s) to include in the report.
Example: [\*raw.fif, \*ave.fif] will include Raw as well as Evoked
files.
n_jobs : int
Number of jobs to run in parallel.
mri_decim : int
Use this decimation factor for generating MRI/BEM images
(since it can be time consuming).
sort_sections : bool
If True, sort sections in the order: raw -> events -> epochs
-> evoked -> covariance -> trans -> mri -> forward -> inverse.
on_error : str
What to do if a file cannot be rendered. Can be 'ignore',
'warn' (default), or 'raise'.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
valid_errors = ['ignore', 'warn', 'raise']
if on_error not in valid_errors:
raise ValueError('on_error must be one of %s, not %s'
% (valid_errors, on_error))
self._sort = sort_sections
n_jobs = check_n_jobs(n_jobs)
self.data_path = data_path
if self.title is None:
self.title = 'MNE Report for ...%s' % self.data_path[-20:]
if not isinstance(pattern, (list, tuple)):
pattern = [pattern]
# iterate through the possible patterns
fnames = list()
for p in pattern:
fnames.extend(_recursive_search(self.data_path, p))
if self.info_fname is not None:
info = read_info(self.info_fname)
sfreq = info['sfreq']
else:
warnings.warn('`info_fname` not provided. Cannot render'
'-cov.fif(.gz) and -trans.fif(.gz) files.')
info, sfreq = None, None
cov = None
if self.cov_fname is not None:
cov = read_cov(self.cov_fname)
baseline = self.baseline
# render plots in parallel; check that n_jobs <= # of files
logger.info('Iterating over %s potential files (this may take some '
'time)' % len(fnames))
use_jobs = min(n_jobs, max(1, len(fnames)))
parallel, p_fun, _ = parallel_func(_iterate_files, use_jobs)
r = parallel(p_fun(self, fname, info, cov, baseline, sfreq, on_error)
for fname in np.array_split(fnames, use_jobs))
htmls, report_fnames, report_sectionlabels = zip(*r)
# combine results from n_jobs discarding plots not rendered
self.html = [html for html in sum(htmls, []) if html is not None]
self.fnames = [fname for fname in sum(report_fnames, []) if
fname is not None]
self._sectionlabels = [slabel for slabel in
sum(report_sectionlabels, [])
if slabel is not None]
# find unique section labels
self.sections = sorted(set(self._sectionlabels))
self._sectionvars = dict(zip(self.sections, self.sections))
# render mri
if self.subjects_dir is not None and self.subject is not None:
logger.info('Rendering BEM')
self.html.append(self._render_bem(self.subject, self.subjects_dir,
mri_decim, n_jobs))
self.fnames.append('bem')
self._sectionlabels.append('mri')
else:
warnings.warn('`subjects_dir` and `subject` not provided.'
' Cannot render MRI and -trans.fif(.gz) files.')
def save(self, fname=None, open_browser=True, overwrite=False):
"""Save html report and open it in browser.
Parameters
----------
fname : str
File name of the report.
open_browser : bool
Open html browser after saving if True.
overwrite : bool
If True, overwrite report if it already exists.
"""
if fname is None:
if not hasattr(self, 'data_path'):
self.data_path = op.dirname(__file__)
warnings.warn('`data_path` not provided. Using %s instead'
% self.data_path)
fname = op.realpath(op.join(self.data_path, 'report.html'))
else:
fname = op.realpath(fname)
self._render_toc()
html = footer_template.substitute(date=time.strftime("%B %d, %Y"))
self.html.append(html)
if not overwrite and op.isfile(fname):
msg = ('Report already exists at location %s. '
'Overwrite it (y/[n])? '
% fname)
answer = moves.input(msg)
if answer.lower() == 'y':
overwrite = True
if overwrite or not op.isfile(fname):
logger.info('Saving report to location %s' % fname)
fobj = codecs.open(fname, 'w', 'utf-8')
fobj.write(_fix_global_ids(u''.join(self.html)))
fobj.close()
# remove header, TOC and footer to allow more saves
self.html.pop(0)
self.html.pop(0)
self.html.pop()
if open_browser:
import webbrowser
webbrowser.open_new_tab('file://' + fname)
return fname
    @verbose
    def _render_toc(self, verbose=None):
        """Render the Table of Contents.

        Reorders ``self.html``/``self.fnames``/``self._sectionlabels`` by
        section and prepends the page header and TOC to ``self.html``.
        The numeric ids written here are renumbered later by
        _fix_global_ids() when the report is saved.
        """
        logger.info('Rendering : Table of Contents')
        html_toc = u'<div id="container">'
        html_toc += u'<div id="toc"><center><h4>CONTENTS</h4></center>'
        global_id = 1
        # Reorder self.sections to reflect natural ordering
        if self._sort_sections:
            # Known sections follow SECTION_ORDER; unknown ones go last.
            sections = list(set(self.sections) & set(SECTION_ORDER))
            custom = [section for section in self.sections if section
                      not in SECTION_ORDER]
            order = [sections.index(section) for section in SECTION_ORDER if
                     section in sections]
            self.sections = np.array(sections)[order].tolist() + custom
        # Sort by section
        html, fnames, sectionlabels = [], [], []
        for section in self.sections:
            logger.info('%s' % section)
            for sectionlabel, this_html, fname in (zip(self._sectionlabels,
                                                   self.html, self.fnames)):
                if self._sectionvars[section] == sectionlabel:
                    html.append(this_html)
                    fnames.append(fname)
                    sectionlabels.append(sectionlabel)
                    logger.info('\t... %s' % fname[-20:])
                    color = _is_bad_fname(fname)
                    div_klass, tooltip, text = _get_toc_property(fname)
                    # loop through conditions for evoked
                    if fname.endswith(('-ave.fif', '-ave.fif.gz',
                                       '(whitened)')):
                        text = os.path.basename(fname)
                        if fname.endswith('(whitened)'):
                            fname = fname[:-11]
                        # XXX: remove redundant read_evokeds
                        evokeds = read_evokeds(fname, verbose=False)
                        html_toc += toc_list.substitute(
                            div_klass=div_klass, id=None, tooltip=fname,
                            color='#428bca', text=text)
                        html_toc += u'<li class="evoked"><ul>'
                        for ev in evokeds:
                            html_toc += toc_list.substitute(
                                div_klass=div_klass, id=global_id,
                                tooltip=fname, color=color, text=ev.comment)
                            global_id += 1
                        html_toc += u'</ul></li>'
                    elif fname.endswith(tuple(VALID_EXTENSIONS +
                                        ['bem', 'custom'])):
                        html_toc += toc_list.substitute(div_klass=div_klass,
                                                        id=global_id,
                                                        tooltip=tooltip,
                                                        color=color,
                                                        text=text)
                        global_id += 1
        html_toc += u'\n</ul></div>'
        html_toc += u'<div id="content">'
        # The sorted html (according to section)
        self.html = html
        self.fnames = fnames
        self._sectionlabels = sectionlabels
        html_header = header_template.substitute(title=self.title,
                                                 include=self.include,
                                                 sections=self.sections,
                                                 sectionvars=self._sectionvars)
        self.html.insert(0, html_header)  # Insert header at position 0
        self.html.insert(1, html_toc)  # insert TOC
    def _render_array(self, array, global_id=None, cmap='gray',
                      limits=None, n_jobs=1):
        """Render mri without bem contours.

        Produces two bootstrap rows: axial + sagittal on the first,
        coronal on the second.
        """
        html = []
        html.append(u'<div class="row">')
        # Axial
        limits = limits or {}
        axial_limit = limits.get('axial')
        axial_slices_gen = _iterate_axial_slices(array, axial_limit)
        html.append(
            self._render_one_axis(axial_slices_gen, 'axial',
                                  global_id, cmap, array.shape[1], n_jobs))
        # Sagittal
        # NOTE(review): array.shape[1] is passed as n_elements for all
        # three axes; shape[0]/shape[2] would seem more natural for
        # sagittal/coronal.  It only caps the parallel job count, so it is
        # harmless, but confirm it is intentional.
        sagittal_limit = limits.get('sagittal')
        sagittal_slices_gen = _iterate_sagittal_slices(array, sagittal_limit)
        html.append(
            self._render_one_axis(sagittal_slices_gen, 'sagittal',
                                  global_id, cmap, array.shape[1], n_jobs))
        html.append(u'</div>')
        html.append(u'<div class="row">')
        # Coronal
        coronal_limit = limits.get('coronal')
        coronal_slices_gen = _iterate_coronal_slices(array, coronal_limit)
        html.append(
            self._render_one_axis(coronal_slices_gen, 'coronal',
                                  global_id, cmap, array.shape[1], n_jobs))
        # Close section
        html.append(u'</div>')
        return '\n'.join(html)
    def _render_one_bem_axis(self, mri_fname, surf_fnames, global_id,
                             shape, orientation='coronal', decim=2, n_jobs=1):
        """Render one axis of bem contours.

        Renders every decim-th slice along *orientation* with the BEM
        surfaces overlaid and wraps the result in a slider column div.
        """
        orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
        orientation_axis = orientation_name2axis[orientation]
        n_slices = shape[orientation_axis]
        # Image size of the remaining two dimensions after rolling the
        # slicing axis to the front.
        orig_size = np.roll(shape, orientation_axis)[[1, 2]]
        name = orientation
        html = []
        html.append(u'<div class="col-xs-6 col-md-4">')
        slides_klass = '%s-%s' % (name, global_id)
        # Slice indices, decimated to keep rendering time reasonable.
        sl = np.arange(0, n_slices, decim)
        kwargs = dict(mri_fname=mri_fname, surf_fnames=surf_fnames, show=False,
                      orientation=orientation, img_output=orig_size)
        imgs = _figs_to_mrislices(sl, n_jobs, **kwargs)
        slices = []
        img_klass = 'slideimg-%s' % name
        div_klass = 'span12 %s' % slides_klass
        for ii, img in enumerate(imgs):
            slice_id = '%s-%s-%s' % (name, global_id, sl[ii])
            caption = u'Slice %s %s' % (name, sl[ii])
            # Only the first slice is initially visible.
            first = True if ii == 0 else False
            slices.append(_build_html_image(img, slice_id, div_klass,
                                            img_klass, caption, first))
        # Render the slider
        slider_id = 'select-%s-%s' % (name, global_id)
        html.append(u'<div id="%s"></div>' % slider_id)
        html.append(u'<ul class="thumbnails">')
        # Render the slices
        html.append(u'\n'.join(slices))
        html.append(u'</ul>')
        html.append(_build_html_slider(sl, slides_klass, slider_id))
        html.append(u'</div>')
        return '\n'.join(html)
    def _render_image(self, image, cmap='gray', n_jobs=1):
        """Render one slice of mri without bem.

        Loads the MRI volume from *image* and renders every second slice
        along each axis via _render_array.
        """
        import nibabel as nib
        global_id = self._get_id()
        # Make sure the 'mri' section exists before adding to it.
        if 'mri' not in self.sections:
            self.sections.append('mri')
            self._sectionvars['mri'] = 'mri'
        nim = nib.load(image)
        data = nim.get_data()
        shape = data.shape
        # Decimate by two along each axis.
        limits = {'sagittal': range(0, shape[0], 2),
                  'axial': range(0, shape[1], 2),
                  'coronal': range(0, shape[2], 2)}
        name = op.basename(image)
        html = u'<li class="mri" id="%d">\n' % global_id
        html += u'<h2>%s</h2>\n' % name
        html += self._render_array(data, global_id=global_id,
                                   cmap=cmap, limits=limits,
                                   n_jobs=n_jobs)
        html += u'</li>\n'
        return html
    def _render_raw(self, raw_fname):
        """Render raw.

        Summarizes channel counts, EOG/ECG channel names, measurement
        date and time span of a raw file into the raw html template.
        """
        global_id = self._get_id()
        div_klass = 'raw'
        caption = u'Raw : %s' % raw_fname
        raw = Raw(raw_fname)
        n_eeg = len(pick_types(raw.info, meg=False, eeg=True))
        n_grad = len(pick_types(raw.info, meg='grad'))
        n_mag = len(pick_types(raw.info, meg='mag'))
        pick_eog = pick_types(raw.info, meg=False, eog=True)
        if len(pick_eog) > 0:
            eog = ', '.join(np.array(raw.info['ch_names'])[pick_eog])
        else:
            eog = 'Not available'
        pick_ecg = pick_types(raw.info, meg=False, ecg=True)
        if len(pick_ecg) > 0:
            ecg = ', '.join(np.array(raw.info['ch_names'])[pick_ecg])
        else:
            ecg = 'Not available'
        meas_date = raw.info['meas_date']
        if meas_date is not None:
            # meas_date[0] holds the seconds part of the timestamp.
            meas_date = dt.fromtimestamp(meas_date[0]).strftime("%B %d, %Y")
        tmin = raw.first_samp / raw.info['sfreq']
        tmax = raw.last_samp / raw.info['sfreq']
        html = raw_template.substitute(div_klass=div_klass,
                                       id=global_id,
                                       caption=caption,
                                       info=raw.info,
                                       meas_date=meas_date,
                                       n_eeg=n_eeg, n_grad=n_grad,
                                       n_mag=n_mag, eog=eog,
                                       ecg=ecg, tmin=tmin, tmax=tmax)
        return html
def _render_forward(self, fwd_fname):
"""Render forward.
"""
div_klass = 'forward'
caption = u'Forward: %s' % fwd_fname
fwd = read_forward_solution(fwd_fname)
repr_fwd = re.sub('>', '', re.sub('<', '', repr(fwd)))
global_id = self._get_id()
html = repr_template.substitute(div_klass=div_klass,
id=global_id,
caption=caption,
repr=repr_fwd)
return html
def _render_inverse(self, inv_fname):
"""Render inverse.
"""
div_klass = 'inverse'
caption = u'Inverse: %s' % inv_fname
inv = read_inverse_operator(inv_fname)
repr_inv = re.sub('>', '', re.sub('<', '', repr(inv)))
global_id = self._get_id()
html = repr_template.substitute(div_klass=div_klass,
id=global_id,
caption=caption,
repr=repr_inv)
return html
    def _render_evoked(self, evoked_fname, baseline=None, figsize=None):
        """Render evoked.

        For each condition in the file: one butterfly plot plus one
        topomap per available channel type (eeg/grad/mag).
        """
        evokeds = read_evokeds(evoked_fname, baseline=baseline, verbose=False)
        html = []
        for ev in evokeds:
            global_id = self._get_id()
            kwargs = dict(show=False)
            img = _fig_to_img(ev.plot, **kwargs)
            caption = u'Evoked : %s (%s)' % (evoked_fname, ev.comment)
            div_klass = 'evoked'
            img_klass = 'evoked'
            show = True
            html.append(image_template.substitute(img=img, id=global_id,
                                                  div_klass=div_klass,
                                                  img_klass=img_klass,
                                                  caption=caption,
                                                  show=show))
            has_types = []
            if len(pick_types(ev.info, meg=False, eeg=True)) > 0:
                has_types.append('eeg')
            if len(pick_types(ev.info, meg='grad', eeg=False)) > 0:
                has_types.append('grad')
            if len(pick_types(ev.info, meg='mag', eeg=False)) > 0:
                has_types.append('mag')
            for ch_type in has_types:
                kwargs.update(ch_type=ch_type)
                img = _fig_to_img(ev.plot_topomap, **kwargs)
                caption = u'Topomap (ch_type = %s)' % ch_type
                # NOTE(review): unlike the butterfly plot above, this
                # substitution passes no id= to the template -- confirm
                # the template tolerates a missing id.
                html.append(image_template.substitute(img=img,
                                                      div_klass=div_klass,
                                                      img_klass=img_klass,
                                                      caption=caption,
                                                      show=show))
        return '\n'.join(html)
    def _render_eve(self, eve_fname, sfreq=None):
        """Render events.

        Embeds a plot_events figure for the events file.
        """
        global_id = self._get_id()
        events = read_events(eve_fname)
        kwargs = dict(events=events, sfreq=sfreq, show=False)
        img = _fig_to_img(plot_events, **kwargs)
        caption = 'Events : ' + eve_fname
        div_klass = 'events'
        img_klass = 'events'
        show = True
        html = image_template.substitute(img=img, id=global_id,
                                         div_klass=div_klass,
                                         img_klass=img_klass,
                                         caption=caption,
                                         show=show)
        return html

    def _render_epochs(self, epo_fname):
        """Render epochs.

        Embeds the drop-log plot of the epochs file.
        """
        global_id = self._get_id()
        epochs = read_epochs(epo_fname)
        kwargs = dict(subject=self.subject, show=False)
        img = _fig_to_img(epochs.plot_drop_log, **kwargs)
        caption = 'Epochs : ' + epo_fname
        div_klass = 'epochs'
        img_klass = 'epochs'
        show = True
        html = image_template.substitute(img=img, id=global_id,
                                         div_klass=div_klass,
                                         img_klass=img_klass,
                                         caption=caption,
                                         show=show)
        return html

    def _render_cov(self, cov_fname, info_fname):
        """Render cov.

        Embeds a covariance matrix image for the covariance file.
        """
        global_id = self._get_id()
        cov = read_cov(cov_fname)
        fig, _ = plot_cov(cov, info_fname, show=False)
        img = _fig_to_img(fig=fig)
        caption = 'Covariance : %s (n_samples: %s)' % (cov_fname, cov.nfree)
        div_klass = 'covariance'
        img_klass = 'covariance'
        show = True
        html = image_template.substitute(img=img, id=global_id,
                                         div_klass=div_klass,
                                         img_klass=img_klass,
                                         caption=caption,
                                         show=show)
        return html
    def _render_whitened_evoked(self, evoked_fname, noise_cov, baseline):
        """Show whitened evoked.

        Renders one plot_white figure per condition in the evoked file.
        """
        # This first id is discarded -- global_id is reassigned per
        # condition inside the loop below.
        global_id = self._get_id()
        evokeds = read_evokeds(evoked_fname, verbose=False)
        html = []
        for ev in evokeds:
            # Re-read the single condition with baseline applied; the loop
            # variable is deliberately rebound to the single Evoked.
            ev = read_evokeds(evoked_fname, ev.comment, baseline=baseline,
                              verbose=False)
            global_id = self._get_id()
            kwargs = dict(noise_cov=noise_cov, show=False)
            img = _fig_to_img(ev.plot_white, **kwargs)
            caption = u'Whitened evoked : %s (%s)' % (evoked_fname, ev.comment)
            div_klass = 'evoked'
            img_klass = 'evoked'
            show = True
            html.append(image_template.substitute(img=img, id=global_id,
                                                  div_klass=div_klass,
                                                  img_klass=img_klass,
                                                  caption=caption,
                                                  show=show))
        return '\n'.join(html)
    def _render_trans(self, trans, path, info, subject,
                      subjects_dir, image_format='png'):
        """Render trans.

        Returns the html snippet, or implicitly None if no image could be
        produced (callers must handle a None result).
        """
        kwargs = dict(info=info, trans=trans, subject=subject,
                      subjects_dir=subjects_dir)
        try:
            img = _iterate_trans_views(function=plot_trans, **kwargs)
        except IOError:
            # Retry using the head surface as the source.
            img = _iterate_trans_views(function=plot_trans, source='head',
                                       **kwargs)
        if img is not None:
            global_id = self._get_id()
            caption = 'Trans : ' + trans
            div_klass = 'trans'
            img_klass = 'trans'
            show = True
            html = image_template.substitute(img=img, id=global_id,
                                             div_klass=div_klass,
                                             img_klass=img_klass,
                                             caption=caption,
                                             width=75,
                                             show=show)
            return html
    def _render_bem(self, subject, subjects_dir, decim, n_jobs,
                    section='mri', caption='BEM'):
        """Render mri+bem.

        Falls back to rendering the MRI without contours when the bem
        directory or any of the three surfaces is missing.
        """
        import nibabel as nib
        subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
        # Get the MRI filename
        mri_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
        if not op.isfile(mri_fname):
            warnings.warn('MRI file "%s" does not exist' % mri_fname)
        # Get the BEM surface filenames
        bem_path = op.join(subjects_dir, subject, 'bem')
        if not op.isdir(bem_path):
            warnings.warn('Subject bem directory "%s" does not exist' %
                          bem_path)
            return self._render_image(mri_fname, cmap='gray', n_jobs=n_jobs)
        surf_fnames = []
        for surf_name in ['*inner_skull', '*outer_skull', '*outer_skin']:
            surf_fname = glob(op.join(bem_path, surf_name + '.surf'))
            if len(surf_fname) > 0:
                surf_fname = surf_fname[0]
            else:
                warnings.warn('No surface found for %s.' % surf_name)
                return self._render_image(mri_fname, cmap='gray')
            surf_fnames.append(surf_fname)
        # XXX : find a better way to get max range of slices
        nim = nib.load(mri_fname)
        data = nim.get_data()
        shape = data.shape
        del data  # free up memory
        # NOTE: html is a list, but += with a unicode string extends it
        # character by character; ''.join below reassembles the text, so
        # this works -- just do not replace += with append here.
        html = []
        global_id = self._get_id()
        if section == 'mri' and 'mri' not in self.sections:
            self.sections.append('mri')
            self._sectionvars['mri'] = 'mri'
        name = caption
        html += u'<li class="mri" id="%d">\n' % global_id
        html += u'<h2>%s</h2>\n' % name
        html += u'<div class="row">'
        html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
                                          shape, 'axial', decim, n_jobs)
        html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
                                          shape, 'sagittal', decim, n_jobs)
        html += u'</div><div class="row">'
        html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
                                          shape, 'coronal', decim, n_jobs)
        html += u'</div>'
        html += u'</li>\n'
        return ''.join(html)
def _clean_varnames(s):
# Remove invalid characters
s = re.sub('[^0-9a-zA-Z_]', '', s)
# add report_ at the beginning so that the javascript class names
# are valid ones
return 'report_' + s
def _recursive_search(path, pattern):
    """Recursively collect files under *path* that match *pattern*.

    Only the supported file types (VALID_EXTENSIONS) are returned; this
    also ensures an equitable distribution of jobs across workers.
    """
    return [op.realpath(op.join(dirpath, f))
            for dirpath, dirnames, files in os.walk(path)
            for f in fnmatch.filter(files, pattern)
            if f.endswith(tuple(VALID_EXTENSIONS))]
def _fix_global_ids(html):
"""Auxiliary function for fixing the global_ids after reordering in
_render_toc().
"""
html = re.sub('id="\d+"', 'id="###"', html)
global_id = 1
while len(re.findall('id="###"', html)) > 0:
html = re.sub('id="###"', 'id="%s"' % global_id, html, count=1)
global_id += 1
return html
| bsd-3-clause |
aolindahl/aolPyModules | tof.py | 1 | 31988 | #from setupEnvironment import *
import numpy as np
from configuration import loadConfiguration, load_configuration_dict
import time
import wiener
from scipy.sparse import coo_matrix
import sys
import simplepsana
import aolUtil
_useWavelet = True
if _useWavelet:
try:
from wavelet_filter import wavelet_filt as wavelet
except:
print 'Wavelet filtering not avaliable. pywt not found.'
_useWavelet = False
m_e_eV = 0.510998928e6 # http://physics.nist.gov/cgi-bin/cuu/Value?me 2014-04-21
c_0_mps = 299792458 # http://physics.nist.gov/cgi-bin/cuu/Value?c|search_for=universal_in! 2014-04-21
def edges_from_centers(centers):
    """Compute bin edges from (approximately) equally spaced bin centers.

    Parameters
    ----------
    centers : array-like
        Bin centers, assumed equally spaced (the mean spacing is used).

    Returns
    -------
    np.ndarray
        ``len(centers) + 1`` edges: every center shifted half a step to
        the left, plus one edge half a step beyond the last center.
    """
    centers = np.asarray(centers)
    step = np.diff(centers).mean()
    # Bug fix: the original used the undefined name `s` (NameError) and
    # passed a bare scalar to np.concatenate; the last edge must be
    # wrapped in a sequence.
    return np.concatenate([centers - step / 2, [centers[-1] + step / 2]])
def get_acqiris_scales(env, source_string, channel, verbose=False):
    """Return (time_scale_us, vert_scaling_V, vert_offset_V) for a channel.

    Falls back to (None, 1, 0) when no acqiris configuration can be
    obtained from the psana environment.
    """
    # Try to get the time scale from the env object.
    if verbose:
        print 'Get the time scale of the acqiris using:'
        print '\tenv =', env
        print '\tsource_string =', source_string
    time_scale_us = simplepsana.get_acqiris_time_scale_us(env, source_string,
                                                          verbose = verbose)
    if time_scale_us is None:
        print ('WARNING: No acqiris configuration obtained,' +
               ' no scales aquired.')
        return None, 1, 0
    if verbose:
        print 'Get the vertical scaling of the acqiris channel.'
    vert_scaling_V, vert_offset_V = \
        simplepsana.get_acqiris_signal_scaling(env, source_string, channel,
                                               verbose = verbose)
    return time_scale_us, vert_scaling_V, vert_offset_V
def energy_from_time_physical(time, D_mm=None, prompt_us=None, t_offset_us=0,
                              E_offset_eV=0, verbose=False):
    """Convert electron flight time (us) to kinetic energy (eV).

    D_mm and prompt_us must be provided; the None defaults will raise a
    TypeError in the arithmetic below if left unset -- TODO confirm that
    is the intended contract.
    """
    # The conversion is given by:
    # E = D^2 m_e 10^6 / ( 2 c_0^2 (t - t_p)^2 ) + E0,
    # where:
    # D is the flight distance in mm
    # m_e is the electron rest mass expressed in eV
    # the 10^6 factor accounts for the otherwise
    # mismatching prefixes
    # c_0 is the speed of light in m/s
    # E is the energy in eV
    # E0 is an energy offset in eV, should be
    # determined in a calibration fit
    # t_p is the arrival time of the prompt signal in microseconds
    if verbose:
        print 'In tof.energy_from_time_physical()'
    return (D_mm**2 * m_e_eV * 1e6 /
            (c_0_mps**2 * 2 * (time - prompt_us - t_offset_us)**2) + E_offset_eV)
def get_time_to_energy_conversion(time_scale_us, energy_scale_eV, verbose=False,
                                  D_mm=None, prompt_us=None, t_offset_us=0,
                                  E_offset_eV=0):
    """Build the sparse time-to-energy rebinning matrix and energy scale.

    Returns
    -------
    conversion_mat : scipy.sparse.coo_matrix
        Shape (num_energy_bins, num_time_bins); multiplying it with a
        time-domain amplitude vector yields energy-domain amplitudes.
    raw_energy_scale_eV : np.ndarray
        Energy corresponding to each time sample (times before the
        prompt are clamped to the maximum energy).
    """
    if verbose:
        print 'In "tof.get_energy_scale_and conversion()."'
    # Get basic data about the time scale
    dt = np.diff(time_scale_us).mean()
    t0 = time_scale_us[0]  # (unused at the moment)
    num_time_bins = len(time_scale_us)
    # Calculate bin edges in the time domain
    time_scale_t_edges = aolUtil.limits_from_centers(time_scale_us)
    # and the corresponding bin edges in the energy domain.
    time_scale_E_edges = energy_from_time_physical(time_scale_t_edges,
                                                   D_mm=D_mm,
                                                   prompt_us=prompt_us,
                                                   t_offset_us=t_offset_us,
                                                   E_offset_eV=E_offset_eV)
    if verbose:
        print 'Time scale E edges are:', time_scale_E_edges
    # Number of energy bins
    #print energy_scale_eV
    num_energy_bins = len(energy_scale_eV)
    # Energy bin size
    dE = np.diff(energy_scale_eV).mean()
    # energy scale bin limits
    energy_scale_E_edges = aolUtil.limits_from_centers(energy_scale_eV)
    # Make matrixes out of the edges vectors in energy domain
    mat_time_E = np.concatenate([time_scale_E_edges.reshape(1,-1)] *
                                num_energy_bins)
    mat_energy_E= np.concatenate([energy_scale_E_edges.reshape(-1,1)] *
                                 num_time_bins, axis=1)
    # Compute the start and end energies for the conversion from the time axis
    # to energy axis
    high_E_limit = ( np.minimum( mat_time_E[:,:-1], mat_energy_E[1:,:] ) )
    low_E_limit = ( np.maximum( mat_time_E[:,1:], mat_energy_E[:-1,:] ) )
    # Only where the high energy is more than the low energy the conversion
    # makes any sense
    I = low_E_limit < high_E_limit
    # Allocate a temporary (dense) conversion matrix
    temp_conversion_mat = np.zeros((num_energy_bins, num_time_bins))
    # Calculate the elements of the conversion matrix
    # For each time bin the measured amplitude is multiplied by the bin size
    # in order to arrive at the integral of the signal. Then it is
    # determined how much of each time bin contributes to each energy bin.
    # This is done by comparing the edge positions of the time and energy
    # bins and assigning the correct proportion of the integral in time
    # domain to integral in the energy domain. Finally the total integral is
    # divided by the energy bin size in order to return to an amplitude.
    # Summation over all time bins is performed in the matrix multiplication
    # of the conversion matrix with the time domain amplitude vector.
    temp_conversion_mat[I] = (dt * (high_E_limit[I] - low_E_limit[I]) /
                              ( mat_time_E[:,:-1] - mat_time_E[:,1:] )[I] / dE)
    # The conversion matrix is highly sparse, thus make a sparse matrix to
    # speed up the calculations
    conversion_mat = coo_matrix(temp_conversion_mat)
    # Create the energy scale
    # Find out which part of the time scale is after the prompt peak
    # The calibration information is used for this
    I = time_scale_us > prompt_us + t_offset_us
    # Allocate an energy scale with -1 value (unphysical).
    raw_energy_scale_eV = -np.ones_like(time_scale_us)
    raw_energy_scale_eV[I] = energy_from_time_physical(time_scale_us[I],
                                                       D_mm=D_mm,
                                                       prompt_us=prompt_us,
                                                       t_offset_us=t_offset_us,
                                                       E_offset_eV=E_offset_eV)
    # Set the Energies that correspond to times before the prompt to the
    # maximum energy.
    raw_energy_scale_eV[~I] = np.max(raw_energy_scale_eV)
    return conversion_mat, raw_energy_scale_eV
def get_acqiris_data(evt, source_string, channel, scaling=1., offset=0,
                     invert=True, selection=slice(None), verbose=False):
    """Fetch one acqiris waveform from a psana event and rescale it.

    Returns ``factor * (waveform[selection] * scaling - offset)`` where
    ``factor`` is -1.0 when *invert* is True (negative-going pulses become
    positive) and 1.0 otherwise.  Returns None when no waveform is
    available for the given source/channel.
    """
    waveform = simplepsana.get_acqiris_waveform(evt, source_string, channel,
                                                verbose=verbose)
    if waveform is None:
        return None
    # Keep the float multiplication so the result dtype matches the
    # original behaviour even for integer inputs.
    factor = -1. if invert else 1.
    return factor * (waveform[selection] * scaling - offset)
class TofData(object):
    """Handle time-of-flight (TOF) detector data.

    Wraps a single acqiris channel: reads raw waveforms, optionally
    filters them in the time domain, subtracts a baseline, and converts
    the time-domain trace to an energy-domain spectrum using the
    calibration loaded at construction time.
    """
    def __init__(self, config, verbose=False):
        """Initialize the TofData object from a configuration object.

        config : dict-like; must provide 'detectorSource', 'acqCh' and
            'calib_file'.  Optional time-domain filter settings are read
            from 'filterMethod' and its companion keys.
        verbose : bool, print progress information when True.
        """
        # Extract the data source
        self._source_string = config['detectorSource']
        self._source = simplepsana.get_source(config['detectorSource'])
        self._acqiris_source_string = config['detectorSource']
        # Extract the acqiris channel
        self._acqiris_channel = config['acqCh']
        # Load the calibration file pointed to in the configuration
        if verbose:
            print 'Load the calibration from file "{}".'.format(
                config['calib_file'])
        self._calibration= load_configuration_dict(config['calib_file'],
                                                   verbose=verbose)
        # Store the configuration
        self._config = config
        # Setup the basic rescaling; real values are filled in by
        # setup_scales() when they can be read from the environment.
        self._acq_vert_scaling = 1
        self._acq_vert_offset = 0
        # Initially the class does not contain any data or any scales
        self._no_data = True
        self._no_scales = True
        # Basic info about filtering
        self._filter_time_domain = False
        self._filter_method = None
        self._time_amplitude_filtered = None
        self._verbose = verbose
        # Configure time-domain filtering from the configuration, if requested
        if 'filterMethod' in self._config.keys():
            if self._config['filterMethod'] == "wienerDeconv":
                self.setup_time_domain_filtering(method='wienerDeconv',
                        SNR=self._config['filterWienerSNR'],
                        response=self._config['filterWienerResponse'])
            elif self._config['filterMethod'] == 'wavelet':
                self.setup_time_domain_filtering(method='wavelet',
                        levels=self._config["filterWaveletLevels"])
            elif self._config['filterMethod'] == 'average':
                if self._verbose:
                    print 'Using averaging with {} points'.format(
                            self._config['filterAverageNumPoints'])
                self.setup_time_domain_filtering(method='average',
                        numPoints=self._config['filterAverageNumPoints'])
        # Raw time-domain amplitude and per-ROI slice holders (4 ROIs each)
        self._time_amplitude = None
        self._time_roi_slice = [None, None, None, None]
        self._energy_roi_slice =[None, None, None, None]
        # NOTE(review): _bgWeight is only ever set here (to None); the
        # rolling-background branch in set_raw_data() that tests it looks
        # unreachable -- confirm whether set_baseline_subtraction_averaging
        # was supposed to set it.
        self._bgWeight = None

    def setup_scales(self, energy_scale_eV, env=None,
                     time_scale_us=None, retardation=0):
        """Set up the time and energy scales for this channel.

        Reads the scale factors for the raw acqiris data (from *env*, or
        uses *time_scale_us* directly when given), computes the
        time-to-energy conversion matrix and configures time slicing,
        ROIs and the baseline-subtraction slice.
        """
        if self._verbose:
            print 'Seting up the scales.'
        # Time scale: read from the environment unless given explicitly
        if time_scale_us is None:
            self._time_scale_us, self._acq_vert_scaling, self._acq_vert_offset = \
                get_acqiris_scales(env, self._acqiris_source_string,
                                   self._acqiris_channel,
                                   verbose=self._verbose)
        else:
            self._time_scale_us = time_scale_us.copy()
        if self._time_scale_us is None:
            if self._verbose:
                print 'No scales found.'
            self._no_scales = True
            return
        # Time slicing: restrict the trace to [t_min_us, t_max_us] if enabled
        if self._verbose:
            print 'Seting up the time slicing.'
        if ('t_slice' in self._config) and (self._config['t_slice'] == True):
            if 't_min_us' in self._config:
                slice_start = self._time_scale_us.searchsorted(
                    self._config['t_min_us'])
            else:
                slice_start = None
            if 't_max_us' in self._config:
                slice_end = self._time_scale_us.searchsorted(
                    self._config['t_max_us'])
            else:
                slice_end = None
            self._time_slice = slice(slice_start, slice_end)
        else:
            self._time_slice = slice(None)
        if self._verbose:
            print 'Time slice is: {}.'.format(self._time_slice)
        # Adjust the time scale
        self._time_scale_us = self._time_scale_us[self._time_slice]
        # Energy scale and conversion matrix; the retardation voltage shifts
        # the calibration energy offset.
        self._calibration['E_offset_eV'] += retardation
        self._energy_scale_eV = energy_scale_eV
        #self._energy_scale_eV = energy_scale_eV[
        #        energy_scale_eV.searchsorted(self._calibration['E_offset_eV']):]
        self._energy_bin_size = np.diff(energy_scale_eV).mean()
        self._time_to_energy_matrix, self._raw_energy_scale_eV = \
            get_time_to_energy_conversion(self._time_scale_us,
                                          self._energy_scale_eV,
                                          verbose=self._verbose,
                                          **self._calibration)
        # Set the region of interest slices from the configuration
        if self._verbose:
            print 'Looking for ROIs'
        for domain, roiBase in zip(
                ['Time', 'Energy'],
                ['time_roi_{}_us', 'energy_roi_{}_eV']):
            for iRoi in range(4):
                roi = roiBase.format(iRoi)
                if self._verbose:
                    print 'Looking for {}.'.format(roi)
                if roi in self._config:
                    self.set_base_roi(
                        min = self._config[roi][0],
                        max = self._config[roi][1],
                        roi = iRoi,
                        domain = domain)
                    if self._verbose:
                        print '{} found'.format(roi),
                        print self._config[roi],
                        print self._time_roi_slice[iRoi]
        # Make a background slice for baseline subtraction
        if self._verbose:
            print 'Make background slice.'
        if self._config['baselineSubtraction'] == 'early':
            # Everything before baselineEnd_us counts as background
            self._bgSlice = slice(self._time_scale_us.searchsorted(
                self._config['baselineEnd_us']))
        elif self._config['baselineSubtraction'] == 'roi':
            # Background is the gap between the last ROI stop and the first
            # ROI start.
            # NOTE(review): bare except also swallows unrelated errors
            # (e.g. if any ROI slice is still None) -- confirm intent.
            try:
                self._bgSlice = slice(
                    min( [i.stop for i in self._time_roi_slice] ),
                    max( [i.start for i in self._time_roi_slice] ) )
            except:
                print "Could not set the gsSlice from the roi's."
                print "Attempted slice({}, {})".format(
                    min( [i.stop for i in self._time_roi_slice] ),
                    max( [i.start for i in self._time_roi_slice] ) )
                self._bgSlice = slice(0,0)
        else:
            self._bgSlice = slice(0,0)
        # Check if there is actually something to calculate the background
        # from; if not, disable baseline subtraction altogether.
        if len(self._time_scale_us[self._bgSlice]) < 1:
            self._config['baselineSubtraction'] = 'none'
            if self._verbose:
                print 'Background slice is empty.'
        # There are scales, change the flag
        self._no_scales = False

    def set_base_roi(self, min, max, roi=0, domain='Time'):
        """Define ROI number *roi* as the half-open index range covering
        [min, max] on the time (us) or energy (eV) scale."""
        if domain=='Time':
            a = self._time_scale_us.searchsorted(min)
            b = a + self._time_scale_us[a:].searchsorted(max)
            self._time_roi_slice[roi] = slice(a,b)
        elif domain=='Energy':
            a = self._energy_scale_eV.searchsorted(min)
            b = a + self._energy_scale_eV[a:].searchsorted(max)
            self._energy_roi_slice[roi] = slice(a,b)

    def set_baseline_subtraction_averaging(self, weight_last):
        """Configure exponential averaging of the baseline estimate:
        new baseline = weight_last * current + (1 - weight_last) * history."""
        self._bg_weight_last = weight_last
        self._bg_weight_history = 1-weight_last
        self._bg_history = None

    def set_raw_data(self, evt=None, timeAmplitude_V=None, newDataFactor=None):
        """
        Set waveform data and compute the scaled data.

        Either *evt* (a psana event; the waveform is read from the
        acqiris) or *timeAmplitude_V* (an amplitude vector) must be
        given.  *newDataFactor* in (0, 1) blends the new waveform into a
        rolling average with the previously stored one.
        """
        if self._verbose:
            print 'In tof.TofData.set_raw_data().'
        # If a psana event was passed as a parameter
        if evt is not None:
            if self._verbose:
                print 'Event object given.'
            new = get_acqiris_data(evt, self._source_string,
                                   self._acqiris_channel,
                                   scaling=self._acq_vert_scaling,
                                   offset=self._acq_vert_offset,
                                   invert=True, selection=self._time_slice,
                                   verbose=self._verbose)
            if new is None:
                if self._verbose:
                    print 'Could not extract any valide data.'
                self._no_data = True
                return
            # Either take the new trace as-is or blend it into the
            # running average.
            if (self._time_amplitude is None or newDataFactor == 1 or
                    newDataFactor is None):
                if self._verbose:
                    print 'Using the new data.'
                self._time_amplitude = new
            else:
                if self._verbose:
                    print 'Updating rolling average.'
                self._time_amplitude = (self._time_amplitude * (1.0-newDataFactor)
                                        + new * newDataFactor)
            # If set up to do that, subtract the baseline
            if self._config['baselineSubtraction'] != 'none':
                if self._verbose:
                    print 'Performing baseling subtraction.'
                if self._bgWeight is None:
                    # Simple per-shot baseline: mean over the background slice
                    self._time_amplitude -= \
                        self._time_amplitude[self._bgSlice].mean()
                else:
                    # Exponentially averaged baseline over shots.
                    # NOTE(review): _bgWeight is never set to anything but
                    # None in this class, so this branch looks unreachable.
                    if self._bg_history is None:
                        self._bg_history = \
                            self._time_amplitude[self._bgSlice].mean()
                    else:
                        self._bg_history *= self._bg_weight_history
                        self._bg_history += self._bg_weight_last \
                            * self._time_amplitude[self._bgSlice].mean()
                    self._time_amplitude -= self._bg_history
        elif timeAmplitude_V is None:
            if self._verbose:
                print 'Niether event nor time scale given.'
            # If neither psana event, nor amplitude was given
            self._no_data = True
            return
        else:
            # If no psana event was given but there is an amplitude
            self._time_amplitude = timeAmplitude_V[self._time_slice]
        if self._verbose:
            print 'Apply time domain filter.'
        self._filter_time_domain_data()
        if self._verbose:
            print 'Calculate energy amplitudes.'
        self.calc_energy_amplitude()
        self._no_data = False

    def calc_energy_amplitude(self, filtered=None):
        """Convert the (optionally filtered) time-domain trace into the
        energy domain via the sparse conversion matrix.

        filtered=False forces the unfiltered trace; any other value uses
        the filtered one when filtering is enabled.
        """
        if self._filter_time_domain and filtered is not False:
            tAmp = self._time_amplitude_filtered
        else:
            tAmp = self._time_amplitude
        # Calculate the signal amplitude in the energy domain.
        self._energy_amplitude = self._time_to_energy_matrix.dot(tAmp)
        #self._energy_amplitude = np.ones(len(self._energy_scale_eV))
        return

    def get_time_scale_us(self, roi=None):
        """Return the time scale in us, optionally restricted to ROI *roi*."""
        if self._no_scales:
            return None
        if self._verbose:
            print 'TofData._time_roi_slice =', self._time_roi_slice
            print 'TofData._time_scale_us =', self._time_scale_us
        if roi!=None and self._time_roi_slice!=None:
            return self._time_scale_us[self._time_roi_slice[roi]]
        return self._time_scale_us

    def get_time_amplitude(self, roi=None):
        """Return the raw (unfiltered) time-domain amplitude, optionally
        restricted to ROI *roi*; None when no data has been set."""
        #if self._verbose:
        #    print 'In tof.TofData.get_time_amplitude.'
        #    print 'Has', 'no' if self._no_data else None, 'data.'
        #    print self._time_amplitude
        if self._no_data:
            return None
        if (roi is not None) and (self._time_roi_slice is not None):
            return self._time_amplitude[self._time_roi_slice[roi]]
        return self._time_amplitude

    def get_time_amplitude_filtered(self, roi=None):
        """Return the filtered time-domain amplitude, optionally restricted
        to ROI *roi*; None when no data has been set."""
        if self._no_data:
            return None
        if roi!=None: #and self._time_roi_slice!=None:
            return self._time_amplitude_filtered[self._time_roi_slice[roi]]
        return self._time_amplitude_filtered

    def get_energy_amplitude(self, roi=None):
        """Return the energy-domain amplitude, optionally restricted to
        energy ROI *roi*; None when no data has been set."""
        if self._no_data:
            if self._verbose:
                print 'No data in TofData object.'
            return None
        if roi!=None and self._energy_roi_slice!=None:
            return self._energy_amplitude[self._energy_roi_slice[roi]]
        return self._energy_amplitude

    def get_energy_scale_eV(self, roi=None):
        """Return the (uniform) energy scale in eV, optionally restricted
        to energy ROI *roi*."""
        if self._no_scales:
            return None
        if roi!=None and self._energy_roi_slice!=None:
            return self._energy_scale_eV[self._energy_roi_slice[roi]]
        return self._energy_scale_eV

    def get_raw_energy_scale(self, roi=None):
        """Return the raw (non-uniform) energy scale aligned with the time
        scale, or a ROI slice of it."""
        if self._no_scales:
            return None
        # NOTE(review): this indexes _energy_scale_eV with a *time* ROI
        # slice while the no-ROI path returns _raw_energy_scale_eV; the
        # ROI path probably intended _raw_energy_scale_eV -- confirm.
        if roi!=None and self._time_roi_slice!=None:
            return self._energy_scale_eV[self._time_roi_slice[roi]]
        return self._raw_energy_scale_eV

    def setup_time_domain_filtering(self, method='wienerDeconv', numPoints=4, levels=6,
                                    SNR=1, response=None):
        """Select and parameterize the time-domain filter.

        method : 'wienerDeconv', 'wavelet', 'average', or False to
            disable filtering.
        numPoints : window length for 'average'.
        levels : decomposition levels for 'wavelet'.
        SNR, response : arrays (or file paths to read with np.fromfile)
            for 'wienerDeconv'.
        """
        if self._verbose:
            # NOTE(review): this bare string is a no-op statement; it was
            # presumably meant to be a print -- confirm.
            'In tof.TofData.setup_time_domain_filtering().'
        if method is False:
            self._filter_time_domain = False
            return
        self._filter_method = method
        self._filterNumPoints = numPoints
        self._filterLevels = levels
        self._filter_time_domain = True
        if method == 'wienerDeconv':
            # Strings are treated as file names to load the arrays from
            if type(SNR) == str:
                self._SNR = np.fromfile(SNR)
            else:
                self._SNR = SNR
            if type(response) == str:
                self._response = np.fromfile(response)
            else:
                self._response = response

    def _filter_time_domain_data(self):
        """Apply the configured time-domain filter to the current trace,
        storing the result in _time_amplitude_filtered."""
        if self._verbose:
            print ('In tof.TofData._filter_time_domain_data(),' +
                   ' with method = {}.'.format(self._filter_method))
        if self._filter_time_domain is False:
            # Filtering disabled: filtered trace is just the raw trace
            self._time_amplitude_filtered = self._time_amplitude
            return
        if self._filter_method == 'average':
            # Running mean over _filterNumPoints samples
            self._time_amplitude_filtered = \
                np.convolve(self._time_amplitude,
                            np.ones((self._filterNumPoints,))
                            / self._filterNumPoints, mode='same')
            return
        if self._filter_method == 'wavelet' and _useWavelet:
            #print 'Wavelet filteing'
            self._time_amplitude_filtered = wavelet(self._time_amplitude,
                                                    levels=self._filterLevels)
            return
        if self._filter_method == 'wienerDeconv':
            self._time_amplitude_filtered = wiener.deconvolution(
                self._time_amplitude, self._SNR, self._response)
            return
        print '{} is not a valid filtering method when "_useWavelet" is {}.'\
            .format(self._filter_method, _useWavelet)

    def get_trace_bounds(self, threshold_V=0.02, min_width_eV=2,
                         energy_offset=0,
                         useRel=False, threshold_rel=0.5,
                         roi=None):
        """Find the first and last energies where the spectrum stays above
        threshold for at least min_width_eV.

        Returns (E_min - energy_offset, E_max - energy_offset, threshold)
        or [nan, nan, nan] when no sufficiently wide peak is found.
        With useRel=True the threshold is threshold_rel times the trace
        maximum (but never below threshold_V).
        """
        if self._no_data:
            return [np.nan] * 3
        amp = self.get_energy_amplitude(roi=roi)
        if useRel:
            threshold_temp = threshold_rel * \
                np.max(amp[np.isfinite(amp)])
            if threshold_temp < threshold_V:
                return [np.nan for i in range(3)]
            else:
                threshold_V = threshold_temp
        # Required number of consecutive above-threshold bins
        nPoints = np.round(min_width_eV/self._energy_bin_size)
        # Scan forward for the rising edge; the for-else fires when no
        # run of nPoints above-threshold bins is found.
        min = 0
        for i in range(1, amp.size):
            if amp[i] < threshold_V:
                min = i
                continue
            if i-min >= nPoints:
                break
        else:
            return [np.nan] * 3
        # Scan backward for the falling edge
        max = amp.size - 1
        for i in range(amp.size-1, -1, -1):
            if amp[i] < threshold_V:
                max = i
                continue
            if max-i >= nPoints:
                break
        else:
            return [np.nan] * 3
        # Whole trace above threshold is treated as no valid bounds
        if min == 0 and max == amp.size - 1:
            return [np.nan] * 3
        #print 'min =', min, 'max =', max
        # Convert bin indices to energies
        min = self.get_energy_scale_eV(roi=roi)[min] - energy_offset
        max = self.get_energy_scale_eV(roi=roi)[max] - energy_offset
        return min, max, threshold_V

    def get_pulse_duration(self, lo, hi):
        """Estimate the pulse duration (fs) from the streaking range
        [lo, hi] in eV using the arccos mapping of the streaking cycle.

        NOTE(review): reads config via attribute access
        (self._config.tof_maxStreaking_eV) while the rest of the class
        uses dict-style access -- confirm which config type is expected.
        """
        if self._no_data:
            return None
        amplitude = self._config.tof_maxStreaking_eV
        cutoff = self._config.tof_streakingCutoff_eV
        if hi > cutoff or lo < -cutoff:
            return None
        dur = (np.arccos(lo/amplitude) - np.arccos(hi/amplitude)) / np.pi * \
            self._config.tof_quarterCycle_fs
        return dur

    def get_moments(self, domain='Time', roi=None):
        """Return (center, width) of the trace in the given domain:
        the amplitude-weighted mean and the weighted standard deviation
        (weights shifted by the trace minimum for the width)."""
        if domain == 'Time':
            x = self.get_time_scale_us(roi=roi)
            y = self.get_time_amplitude_filtered(roi=roi)
        elif domain == 'Energy':
            x = self.get_energy_scale_eV(roi=roi)
            y = self.get_energy_amplitude(roi=roi)
        else:
            print 'Error: {} is not a valid domain'.format(domain)
            return None
        if y.sum() == 0:
            return np.nan, np.nan
        center = np.average(x, weights=y)
        width = np.sqrt( np.average((x-center)**2, weights=y-y.min()) )
        return center, width
def psanaTester(do_plot=False):
    """Interactive smoke test of TofData against a live psana data source.

    Reads events from the configured source, feeds each one to a TofData
    instance, finds the trace bounds and (when do_plot is True) shows
    the traces locally with matplotlib and/or remotely over zmq.
    Blocks at the end waiting for a key press.
    """
    import scipy.signal
    import time
    if do_plot:
        from ZmqSender import zmqSender
        import matplotlib.pyplot as plt
    # Load the config file
    import cookiebox_default_config as config
    # Create the sender, but only if zmq should be used
    if do_plot:
        if config.useZmq:
            sender = zmqSender()
        else:
            plt.ion()
    # Create the TofData object
    tof = TofData(config.basic_tof_config, verbose=True)
    tof.setup_time_domain_filtering(method='wavelet', numPoints=4)
    EMin = config.minE_eV
    EMax = config.maxE_eV
    threshold = 0.02
    min_width_eV = 3
    # y pattern for the three threshold marker bars (min, center, max)
    yValuesBar = np.array((0,2,1,1,2,0,1,1,2,0))
    xValuesBar = np.zeros(np.size(yValuesBar))
    # Connect to data source
    print 'Connecting to data soutrce:', config.dataSource
    ds = simplepsana.get_data_source(config.offline_source)
    print 'done'
    for num, evt in enumerate(ds.events()):
        print 'event ', num
        if num >= config.nEvents:
            break
        # NOTE(review): `num is 0` relies on CPython small-int caching;
        # should be `num == 0`.
        if num is 0:
            # Initialize the scales on the first event only
            print '\nInitialize the scales in the tof object.'
            tof.setup_scales(np.linspace(EMin, EMax, config.nEnergyBins),
                             env=ds.env())
            # Get the x scales
            print '\nGet the time scale.'
            t = tof.get_time_scale_us()
            print '\nGet the energy scale.'
            E = tof.get_energy_scale_eV()
            print '\nGet the raw energy scale.'
            rawE = tof.get_raw_energy_scale()
            # setup local plotting
            if do_plot:
                print 'Initialize plotting.'
                f1 = plt.figure(1)
                f1.clf()
                a1 = f1.add_subplot(211)
                l11, l12, = a1.plot(t,t, t,t, 'r')
                a1.autoscale_view(scalex=False)
                a2 = f1.add_subplot(212)
                l21, l22, l23= a2.plot(rawE,rawE, E,E, 'r.', yValuesBar, yValuesBar,
                                       'k')
                a2.set_xlim(EMin, EMax)
                f1.show()
        # Get the y data
        print '\nSet raw data ov event to tof object.'
        tof.set_raw_data(evt)
        print '\nGet some vectors.'
        tAmpRaw = tof.get_time_amplitude()
        tAmpF = tof.get_time_amplitude_filtered()
        EAmp = tof.get_energy_amplitude()
        t_timing = time.time()
        min, max, th = tof.get_trace_bounds(threshold, min_width_eV)
        print 'thresholding time:', time.time()-t_timing
        print 'min =', min, 'max =', max, 'threshold = ', th
        print 'bar y:', yValuesBar
        # Position the marker bars at the detected bounds and their center
        xValuesBar[:3] = min
        xValuesBar[3:7] = (min+max)/2
        xValuesBar[7:] = max
        if do_plot:
            # local plotting
            l11.set_ydata(tAmpRaw)
            l12.set_ydata(tAmpF)
            a1.relim()
            a1.autoscale_view(scalex=False)
            l21.set_ydata(tAmpF)
            l22.set_ydata(EAmp)
            l23.set_ydata(yValuesBar*th)
            l23.set_xdata(xValuesBar)
            a2.relim()
            a2.autoscale_view(scalex=False)
            #a2.xlim(EMin, EMax)
            f1.canvas.draw()
            # Remote plotting over zmq; linePlot/line are presumably
            # provided by the plotting protocol module -- not defined here.
            if config.useZmq:
                packet = []
                if num is 0:
                    plot1 = linePlot((line(t, tAmpRaw),
                                      line(t, tAmpF)))
                    plot2 = linePlot((line(E, EAmp), line(xValuesBar, yValuesBar*th)))
                else:
                    plot1 = linePlot((line(y=tAmpRaw), line(y=tAmpF)))
                    plot2 = linePlot((line(y=EAmp), line(x=xValuesBar, y=yValuesBar*th)))
                packet.append(plot1)
                packet.append(plot2)
                sender.sendObject(packet)
        time.sleep(1)
    raw_input('Press enter to exit...')
    if config.useZmq and do_plot:
        del sender
def nonPsanaTester():
    """Offline smoke test of TofData fed from a recorded hdf5 file.

    Replays stored waveforms through a TofData instance and plots the
    raw/filtered traces and the energy-domain result with matplotlib.
    """
    import matplotlib.pyplot as plt
    import scipy.signal
    import h5py
    # Load the config file.
    # NOTE(review): loadConfig is not defined or imported in this module
    # -- confirm where it comes from.
    config = \
        loadConfig('/reg/neh/home/alindahl/amoc8114/configFiles/configTofDataModuleTester.json')
    plt.ion()
    # Open an hdf5 file
    fileName = ('/reg/neh/home/alindahl/amoc8114/output/keepers' +
                '/amoc8114_run109_2014-6-16_1.hdf5')
    file = h5py.File(fileName, 'r')
    # Make references to some of the data in the h5 file
    tAx = file['tof_time_scale_us']
    tAmpVec = file['tof_timeAmplitude_V']
    # Create the TofData object
    tof = TofData(config, verbose=True)
    #tof.setup_time_domain_filtering(False)
    tof.setup_time_domain_filtering(method='average')
    #tof.setup_time_domain_filtering(method='wienerDeconv',
    #        SNR=np.fromfile('../h5Analysis/SNRrun109.npy'),
    #        response=np.fromfile('../h5Analysis/responseFunction108.npy'))
    EMin = config.tof_minE_eV
    EMax = config.tof_maxE_eV
    threshold = 0.02
    min_width_eV = 3
    # y pattern for the three threshold marker bars (min, center, max)
    yValuesBar = np.array((0,2,1,1,2,0,1,1,2,0))
    xValuesBar = np.zeros(np.size(yValuesBar))
    for num, tAmpI in enumerate(tAmpVec):
        if num >= config.nEvents:
            break
        print 'event ', num
        # NOTE(review): `num is 0` relies on small-int caching; use `num == 0`.
        if num is 0:
            # Initialize the scales on the first event only.
            # NOTE(review): setup_scales(energy_scale_eV, env, time_scale_us)
            # is called with None as the energy scale and the linspace as
            # env; the arguments look shifted by one -- confirm against the
            # TofData.setup_scales signature.
            tof.setup_scales(None,
                             np.linspace(EMin, EMax, config.tof_nEnergyBins),
                             tAx[:])
            # Get the x scales
            t = tof.get_time_scale_us()
            E = tof.get_energy_scale_eV()
            rawE = tof.get_raw_energy_scale()
            # setup local plotting
            f1 = plt.figure(1)
            f1.clf()
            a1 = f1.add_subplot(211)
            a1.plot(t,t, label='raw signal')
            a1.plot(t, t, 'r', label='filtered signal')
            l11, l12 = a1.get_lines()
            a1.autoscale_view(scalex=False)
            a1.legend()
            a2 = f1.add_subplot(212)
            a2.plot(rawE,rawE, label='raw energy data')
            a2.plot(E,E, 'r.', label='energy rescaled data')
            a2.plot(yValuesBar, yValuesBar, 'k', label='peak finding results')
            l21, l22, l23 = a2.get_lines()
            a2.set_xlim(EMin, EMax)
            a2.legend()
            f1.show()
        # Get the y data
        tof.set_raw_data(timeAmplitude_V=tAmpI[:])
        tAmpRaw = tof.get_time_amplitude()
        # NOTE(review): `== None` on a numpy array is elementwise and would
        # make this `if` raise for real data; should be `is None`.
        if tAmpRaw == None:
            continue
        tAmpF = tof.get_time_amplitude_filtered()
        EAmp = tof.get_energy_amplitude()
        # NOTE(review): `time` is not imported in this function, so this
        # line raises NameError -- add `import time` with the other imports.
        t_timing = time.time()
        min, max, th = tof.get_trace_bounds(threshold, min_width_eV)
        print 'thresholding time:', time.time()-t_timing
        print 'min =', min, 'max =', max, 'threshold = ', th
        print 'bar y:', yValuesBar
        # Position the marker bars at the detected bounds and their center
        xValuesBar[:3] = min
        xValuesBar[3:7] = (min+max)/2
        xValuesBar[7:] = max
        # local plotting
        l11.set_ydata(tAmpRaw)
        l12.set_ydata(tAmpF)
        a1.relim()
        a1.autoscale_view(scalex=False)
        l21.set_ydata(tAmpF / tAmpF.max() * EAmp.max())
        l22.set_ydata(EAmp)
        l23.set_ydata(yValuesBar*th)
        l23.set_xdata(xValuesBar)
        a2.relim()
        a2.autoscale_view(scalex=False)
        #a2.xlim(EMin, EMax)
        f1.canvas.draw()
        time.sleep(1)
if __name__ == '__main__':
    # Run the psana-based interactive tester when executed as a script;
    # switch to nonPsanaTester() to replay recorded hdf5 data instead.
    psanaTester()
    #nonPsanaTester()
| gpl-2.0 |
ltiao/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 341 | 2620 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
# Shared fixture: 60 points drawn from three well-separated Gaussian
# blobs, so affinity propagation should recover exactly n_clusters
# clusters on X.
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
                  cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
    """Check the functional and estimator APIs of affinity propagation."""
    # Similarity matrix; this preference is known to yield n_clusters
    # clusters on the fixture data.
    similarity = -euclidean_distances(X, squared=True)
    pref = np.median(similarity) * 10

    exemplar_idx, func_labels = affinity_propagation(similarity,
                                                     preference=pref)
    assert_equal(n_clusters, len(exemplar_idx))

    # Precomputed-affinity estimator must agree with the euclidean one.
    model = AffinityPropagation(preference=pref, affinity="precomputed")
    labels_from_S = model.fit(similarity).labels_
    model = AffinityPropagation(preference=pref, verbose=True)
    labels_from_X = model.fit(X).labels_
    assert_array_equal(labels_from_X, labels_from_S)

    exemplar_idx = model.cluster_centers_indices_
    assert_equal(np.unique(labels_from_X).size, len(exemplar_idx))
    assert_equal(n_clusters, len(exemplar_idx))

    # copy=False must not change the result.
    _, labels_inplace = affinity_propagation(similarity, preference=pref,
                                             copy=False)
    assert_array_equal(labels_from_X, labels_inplace)

    # Input validation.
    assert_raises(ValueError, affinity_propagation, similarity[:, :-1])
    assert_raises(ValueError, affinity_propagation, similarity, damping=0)
    model = AffinityPropagation(affinity="unknown")
    assert_raises(ValueError, model.fit, X)
def test_affinity_propagation_predict():
    """predict() must agree with the labels produced by fit_predict()."""
    model = AffinityPropagation(affinity="euclidean")
    fit_labels = model.fit_predict(X)
    predicted = model.predict(X)
    assert_array_equal(fit_labels, predicted)
def test_affinity_propagation_predict_error():
    """predict() must raise when unfitted or when affinity is precomputed."""
    # Calling predict before fit is an error.
    model = AffinityPropagation(affinity="euclidean")
    assert_raises(ValueError, model.predict, X)

    # predict is not supported for a precomputed affinity matrix.
    gram = np.dot(X, X.T)
    model = AffinityPropagation(affinity="precomputed")
    model.fit(gram)
    assert_raises(ValueError, model.predict, X)
| bsd-3-clause |
pv/scikit-learn | sklearn/ensemble/forest.py | 176 | 62555 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.validation import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
# Public API of this module.
__all__ = ["RandomForestClassifier",
           "RandomForestRegressor",
           "ExtraTreesClassifier",
           "ExtraTreesRegressor",
           "RandomTreesEmbedding"]
# Largest value usable as a per-tree random seed (int32 range).
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
    """Draw the bootstrap sample (with replacement) for one tree.

    Private helper for ``_parallel_build_trees``.
    """
    rng = check_random_state(random_state)
    return rng.randint(0, n_samples, n_samples)
def _generate_unsampled_indices(random_state, n_samples):
    """Return the out-of-bag indices for one tree.

    Re-draws the tree's bootstrap sample from the same seed and returns
    the indices that were never drawn.  Private helper for
    ``forest._set_oob_score``.
    """
    drawn = _generate_sample_indices(random_state, n_samples)
    draw_counts = bincount(drawn, minlength=n_samples)
    never_drawn = draw_counts == 0
    return np.arange(n_samples)[never_drawn]
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
                          verbose=0, class_weight=None):
    """Private function used to fit a single tree in parallel.

    When the forest bootstraps, the bootstrap draw is encoded as
    per-sample weights (each sample weighted by how many times it was
    drawn) rather than by materializing a resampled X, and the
    class_weight presets may rescale those weights per draw.
    Returns the fitted tree.
    """
    if verbose > 1:
        print("building tree %d of %d" % (tree_idx + 1, n_trees))
    if forest.bootstrap:
        n_samples = X.shape[0]
        if sample_weight is None:
            curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
        else:
            curr_sample_weight = sample_weight.copy()
        # Weight each sample by its multiplicity in the bootstrap draw
        indices = _generate_sample_indices(tree.random_state, n_samples)
        sample_counts = bincount(indices, minlength=n_samples)
        curr_sample_weight *= sample_counts
        if class_weight == 'subsample':
            # Deprecated alias of 'balanced_subsample'; silence its own
            # deprecation warning while computing the weights.
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', DeprecationWarning)
                curr_sample_weight *= compute_sample_weight('auto', y, indices)
        elif class_weight == 'balanced_subsample':
            # Balance classes within this tree's bootstrap sample
            curr_sample_weight *= compute_sample_weight('balanced', y, indices)
        tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
    else:
        tree.fit(X, y, sample_weight=sample_weight, check_input=False)
    return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
                                    _LearntSelectorMixin)):
    """Base class for forests of trees.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        super(BaseForest, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params)
        # Store hyper-parameters verbatim (sklearn convention: no
        # validation in __init__, so get_params/set_params round-trip).
        self.bootstrap = bootstrap
        self.oob_score = oob_score
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose
        self.warm_start = warm_start
        self.class_weight = class_weight

    def apply(self, X):
        """Apply trees in the forest to X, return leaf indices.

        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        X_leaves : array_like, shape = [n_samples, n_estimators]
            For each datapoint x in X and for each tree in the forest,
            return the index of the leaf x ends up in.
        """
        X = self._validate_X_predict(X)
        # Threading backend: tree.apply releases the GIL in Cython.
        results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                           backend="threading")(
            delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
            for tree in self.estimators_)
        return np.array(results).T

    def fit(self, X, y, sample_weight=None):
        """Build a forest of trees from the training set (X, y).

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.
        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (class labels in classification, real numbers in
            regression).
        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        Returns
        -------
        self : object
            Returns self.
        """
        # Validate or convert input data
        X = check_array(X, dtype=DTYPE, accept_sparse="csc")
        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()
        # Remap output
        n_samples, self.n_features_ = X.shape
        y = np.atleast_1d(y)
        if y.ndim == 2 and y.shape[1] == 1:
            warn("A column-vector y was passed when a 1d array was"
                 " expected. Please change the shape of y to "
                 "(n_samples,), for example using ravel().",
                 DataConversionWarning, stacklevel=2)
        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))
        self.n_outputs_ = y.shape[1]
        y, expanded_class_weight = self._validate_y_class_weight(y)
        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)
        # Fold class weights into the sample weights
        if expanded_class_weight is not None:
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight
        # Check parameters
        self._validate_estimator()
        if not self.bootstrap and self.oob_score:
            raise ValueError("Out of bag estimation only available"
                             " if bootstrap=True")
        random_state = check_random_state(self.random_state)
        if not self.warm_start:
            # Free allocated memory, if any
            self.estimators_ = []
        n_more_estimators = self.n_estimators - len(self.estimators_)
        if n_more_estimators < 0:
            raise ValueError('n_estimators=%d must be larger or equal to '
                             'len(estimators_)=%d when warm_start==True'
                             % (self.n_estimators, len(self.estimators_)))
        elif n_more_estimators == 0:
            warn("Warm-start fitting without increasing n_estimators does not "
                 "fit new trees.")
        else:
            if self.warm_start and len(self.estimators_) > 0:
                # We draw from the random state to get the random state we
                # would have got if we hadn't used a warm_start.
                random_state.randint(MAX_INT, size=len(self.estimators_))
            # Seed each new tree from the forest's random state
            trees = []
            for i in range(n_more_estimators):
                tree = self._make_estimator(append=False)
                tree.set_params(random_state=random_state.randint(MAX_INT))
                trees.append(tree)
            # Parallel loop: we use the threading backend as the Cython code
            # for fitting the trees is internally releasing the Python GIL
            # making threading always more efficient than multiprocessing in
            # that case.
            trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             backend="threading")(
                delayed(_parallel_build_trees)(
                    t, self, X, y, sample_weight, i, len(trees),
                    verbose=self.verbose, class_weight=self.class_weight)
                for i, t in enumerate(trees))
            # Collect newly grown trees
            self.estimators_.extend(trees)
        if self.oob_score:
            self._set_oob_score(X, y)
        # Decapsulate classes_ attributes
        if hasattr(self, "classes_") and self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]
        return self

    @abstractmethod
    def _set_oob_score(self, X, y):
        """Calculate out of bag predictions and score."""

    def _validate_y_class_weight(self, y):
        # Default implementation (regressors): no class weighting.
        return y, None

    def _validate_X_predict(self, X):
        """Validate X whenever one tries to predict, apply, predict_proba"""
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise NotFittedError("Estimator not fitted, "
                                 "call `fit` before exploiting the model.")
        # Delegate to the first tree; all trees share the same input checks.
        return self.estimators_[0]._validate_X_predict(X, check_input=True)

    @property
    def feature_importances_(self):
        """Return the feature importances (the higher, the more important the
        feature).

        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise NotFittedError("Estimator not fitted, "
                                 "call `fit` before `feature_importances_`.")
        # Average the per-tree importances
        all_importances = Parallel(n_jobs=self.n_jobs,
                                   backend="threading")(
            delayed(getattr)(tree, 'feature_importances_')
            for tree in self.estimators_)
        return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
                                          ClassifierMixin)):
    """Base class for forest of trees-based classifiers.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        super(ForestClassifier, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight)

    def _set_oob_score(self, X, y):
        """Compute out-of-bag score"""
        X = check_array(X, dtype=DTYPE, accept_sparse='csr')

        n_classes_ = self.n_classes_
        n_samples = y.shape[0]

        oob_decision_function = []
        oob_score = 0.0
        predictions = []

        # One vote accumulator per output, sized by that output's classes.
        for k in range(self.n_outputs_):
            predictions.append(np.zeros((n_samples, n_classes_[k])))

        for estimator in self.estimators_:
            # OOB samples are those not drawn for this tree's bootstrap;
            # accumulate the tree's class-probability votes for them only.
            unsampled_indices = _generate_unsampled_indices(
                estimator.random_state, n_samples)
            p_estimator = estimator.predict_proba(X[unsampled_indices, :],
                                                  check_input=False)

            if self.n_outputs_ == 1:
                p_estimator = [p_estimator]

            for k in range(self.n_outputs_):
                predictions[k][unsampled_indices, :] += p_estimator[k]

        for k in range(self.n_outputs_):
            # A zero row means no tree ever left this sample out of its
            # bootstrap; the normalized decision will contain NaN there.
            if (predictions[k].sum(axis=1) == 0).any():
                warn("Some inputs do not have OOB scores. "
                     "This probably means too few trees were used "
                     "to compute any reliable oob estimates.")

            decision = (predictions[k] /
                        predictions[k].sum(axis=1)[:, np.newaxis])
            oob_decision_function.append(decision)
            oob_score += np.mean(y[:, k] ==
                                 np.argmax(predictions[k], axis=1), axis=0)

        if self.n_outputs_ == 1:
            self.oob_decision_function_ = oob_decision_function[0]
        else:
            self.oob_decision_function_ = oob_decision_function

        # Accuracy averaged over outputs.
        self.oob_score_ = oob_score / self.n_outputs_

    def _validate_y_class_weight(self, y):
        """Label-encode ``y`` per output and expand ``class_weight``.

        Returns the encoded targets and, when ``class_weight`` is set, the
        per-sample weights computed from the *original* labels.
        """
        y = np.copy(y)
        expanded_class_weight = None

        if self.class_weight is not None:
            # Keep the raw labels: sample weights must be computed on them,
            # not on the encoded indices.
            y_original = np.copy(y)

        self.classes_ = []
        self.n_classes_ = []

        # ``np.int`` was a deprecated alias of the builtin ``int`` (removed
        # in NumPy 1.24); the builtin gives the identical default int dtype.
        y_store_unique_indices = np.zeros(y.shape, dtype=int)
        for k in range(self.n_outputs_):
            classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
            self.classes_.append(classes_k)
            self.n_classes_.append(classes_k.shape[0])
        y = y_store_unique_indices

        if self.class_weight is not None:
            # Fix: 'auto' was listed twice; once suffices for membership.
            valid_presets = ('auto', 'balanced', 'balanced_subsample',
                             'subsample')
            if isinstance(self.class_weight, six.string_types):
                if self.class_weight not in valid_presets:
                    raise ValueError('Valid presets for class_weight include '
                                     '"balanced" and "balanced_subsample". Given "%s".'
                                     % self.class_weight)
                if self.class_weight == "subsample":
                    warn("class_weight='subsample' is deprecated and will be removed in 0.18."
                         " It was replaced by class_weight='balanced_subsample' "
                         "using the balanced strategy.", DeprecationWarning)
                if self.warm_start:
                    warn('class_weight presets "balanced" or "balanced_subsample" are '
                         'not recommended for warm_start if the fitted data '
                         'differs from the full dataset. In order to use '
                         '"balanced" weights, use compute_class_weight("balanced", '
                         'classes, y). In place of y you can use a large '
                         'enough sample of the full training set target to '
                         'properly estimate the class frequency '
                         'distributions. Pass the resulting weights as the '
                         'class_weight parameter.')

            # Subsample-style presets with bootstrapping are expanded per
            # tree at fit time instead of here.
            if (self.class_weight not in ['subsample', 'balanced_subsample'] or
                    not self.bootstrap):
                if self.class_weight == 'subsample':
                    class_weight = 'auto'
                elif self.class_weight == "balanced_subsample":
                    class_weight = "balanced"
                else:
                    class_weight = self.class_weight
                with warnings.catch_warnings():
                    if class_weight == "auto":
                        # 'auto' is itself deprecated; silence the nested
                        # warning since we already warned above.
                        warnings.simplefilter('ignore', DeprecationWarning)
                    expanded_class_weight = compute_sample_weight(class_weight,
                                                                  y_original)

        return y, expanded_class_weight

    def predict(self, X):
        """Predict class for X.

        The predicted class of an input sample is a vote by the trees in
        the forest, weighted by their probability estimates. That is,
        the predicted class is the one with highest mean probability
        estimate across the trees.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes.
        """
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            # Map argmax indices back to the original class labels.
            return self.classes_.take(np.argmax(proba, axis=1), axis=0)

        else:
            n_samples = proba[0].shape[0]
            predictions = np.zeros((n_samples, self.n_outputs_))

            for k in range(self.n_outputs_):
                predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
                                                                    axis=1),
                                                          axis=0)

            return predictions

    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample is computed as
        the mean predicted class probabilities of the trees in the forest. The
        class probability of a single tree is the fraction of samples of the same
        class in a leaf.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        # Check data
        X = self._validate_X_predict(X)

        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)

        # Parallel loop
        all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                             backend="threading")(
            delayed(_parallel_helper)(e, 'predict_proba', X,
                                      check_input=False)
            for e in self.estimators_)

        # Reduce: average the per-tree probabilities (in place, starting
        # from the first tree's output).
        proba = all_proba[0]

        if self.n_outputs_ == 1:
            for j in range(1, len(all_proba)):
                proba += all_proba[j]

            proba /= len(self.estimators_)

        else:
            for j in range(1, len(all_proba)):
                for k in range(self.n_outputs_):
                    proba[k] += all_proba[j][k]

            # NOTE(review): this branch divides by ``self.n_estimators``
            # while the single-output branch uses ``len(self.estimators_)``;
            # identical after a completed fit, but worth confirming for
            # warm-started forests.
            for k in range(self.n_outputs_):
                proba[k] /= self.n_estimators

        return proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.

        The predicted class log-probabilities of an input sample is computed as
        the log of the mean predicted class probabilities of the trees in the
        forest.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return np.log(proba)

        else:
            for k in range(self.n_outputs_):
                proba[k] = np.log(proba[k])

            return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
    """Base class for forest of trees-based regressors.
    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        # Regressors take no ``class_weight``; everything else is forwarded
        # unchanged to BaseForest.
        super(ForestRegressor, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)
    def predict(self, X):
        """Predict regression target for X.
        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the trees in the forest.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted values.
        """
        # Check data
        X = self._validate_X_predict(X)
        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
        # Parallel loop: each tree predicts independently (threading backend;
        # tree prediction releases the GIL).
        all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                             backend="threading")(
            delayed(_parallel_helper)(e, 'predict', X, check_input=False)
            for e in self.estimators_)
        # Reduce: the forest prediction is the mean of the per-tree
        # predictions.
        y_hat = sum(all_y_hat) / len(self.estimators_)
        return y_hat
    def _set_oob_score(self, X, y):
        """Compute out-of-bag scores"""
        X = check_array(X, dtype=DTYPE, accept_sparse='csr')
        n_samples = y.shape[0]
        # Running sum of OOB predictions and, per sample, a count of how
        # many trees contributed one (needed for the average below).
        predictions = np.zeros((n_samples, self.n_outputs_))
        n_predictions = np.zeros((n_samples, self.n_outputs_))
        for estimator in self.estimators_:
            # OOB samples are those not drawn for this tree's bootstrap.
            unsampled_indices = _generate_unsampled_indices(
                estimator.random_state, n_samples)
            p_estimator = estimator.predict(
                X[unsampled_indices, :], check_input=False)
            if self.n_outputs_ == 1:
                p_estimator = p_estimator[:, np.newaxis]
            predictions[unsampled_indices, :] += p_estimator
            n_predictions[unsampled_indices, :] += 1
        if (n_predictions == 0).any():
            warn("Some inputs do not have OOB scores. "
                 "This probably means too few trees were used "
                 "to compute any reliable oob estimates.")
            # Avoid dividing by zero for samples that no tree left out.
            n_predictions[n_predictions == 0] = 1
        predictions /= n_predictions
        self.oob_prediction_ = predictions
        if self.n_outputs_ == 1:
            self.oob_prediction_ = \
                self.oob_prediction_.reshape((n_samples, ))
        self.oob_score_ = 0.0
        # OOB R^2, averaged over outputs.
        for k in range(self.n_outputs_):
            self.oob_score_ += r2_score(y[:, k],
                                        predictions[:, k])
        self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
    """A random forest classifier.
    A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and use averaging to
    improve the predictive accuracy and control over-fitting.
    The sub-sample size is always the same as the original
    input sample size but the samples are drawn with replacement if
    `bootstrap=True` (default).
    Read more in the :ref:`User Guide <forest>`.
    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.
        Note: this parameter is tree-specific.
    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
        Note: this parameter is tree-specific.
    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
        Note: this parameter is tree-specific.
    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less then
        ``min_samples_leaf`` samples.
        Note: this parameter is tree-specific.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
        Note: this parameter is tree-specific.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
        Note: this parameter is tree-specific.
    bootstrap : boolean, optional (default=True)
        Whether bootstrap samples are used when building trees.
    oob_score : bool
        Whether to use out-of-bag samples to estimate
        the generalization error.
    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.
    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.
    class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
        The "balanced_subsample" mode is the same as "balanced" except that weights are
        computed based on the bootstrap sample for every tree grown.
        For multi-output, the weights of each column of y will be multiplied.
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
    Attributes
    ----------
    estimators_ : list of DecisionTreeClassifier
        The collection of fitted sub-estimators.
    classes_ : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem), or a list of arrays of
        class labels (multi-output problem).
    n_classes_ : int or list
        The number of classes (single output problem), or a list containing the
        number of classes for each output (multi-output problem).
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
    oob_decision_function_ : array of shape = [n_samples, n_classes]
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN.
    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
    See also
    --------
    DecisionTreeClassifier, ExtraTreesClassifier
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        # Each name listed in ``estimator_params`` is copied from the forest
        # onto every tree at fit time, so the attributes assigned below must
        # keep the exact constructor-parameter names (also required for
        # ``get_params``/cloning to work).
        super(RandomForestClassifier, self).__init__(
            base_estimator=DecisionTreeClassifier(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight)
        # Tree hyper-parameters (forwarded via ``estimator_params``).
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
class RandomForestRegressor(ForestRegressor):
    """A random forest regressor.
    A random forest is a meta estimator that fits a number of classifying
    decision trees on various sub-samples of the dataset and use averaging
    to improve the predictive accuracy and control over-fitting.
    The sub-sample size is always the same as the original
    input sample size but the samples are drawn with replacement if
    `bootstrap=True` (default).
    Read more in the :ref:`User Guide <forest>`.
    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.
    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error.
        Note: this parameter is tree-specific.
    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
        Note: this parameter is tree-specific.
    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
        Note: this parameter is tree-specific.
    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less then
        ``min_samples_leaf`` samples.
        Note: this parameter is tree-specific.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
        Note: this parameter is tree-specific.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
        Note: this parameter is tree-specific.
    bootstrap : boolean, optional (default=True)
        Whether bootstrap samples are used when building trees.
    oob_score : bool
        whether to use out-of-bag samples to estimate
        the generalization error.
    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.
    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.
    Attributes
    ----------
    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.
    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
    oob_prediction_ : array of shape = [n_samples]
        Prediction computed with out-of-bag estimate on the training set.
    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
    See also
    --------
    DecisionTreeRegressor, ExtraTreesRegressor
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        # Each name listed in ``estimator_params`` is copied from the forest
        # onto every tree at fit time; the attributes assigned below must
        # keep the exact constructor-parameter names (also required for
        # ``get_params``/cloning to work).
        super(RandomForestRegressor, self).__init__(
            base_estimator=DecisionTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)
        # Tree hyper-parameters (forwarded via ``estimator_params``).
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesClassifier(ForestClassifier):
    """An extra-trees classifier.
    This class implements a meta estimator that fits a number of
    randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and use averaging to improve the predictive accuracy
    and control over-fitting.
    Read more in the :ref:`User Guide <forest>`.
    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.
        Note: this parameter is tree-specific.
    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
        Note: this parameter is tree-specific.
    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
        Note: this parameter is tree-specific.
    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less then
        ``min_samples_leaf`` samples.
        Note: this parameter is tree-specific.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
        Note: this parameter is tree-specific.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
        Note: this parameter is tree-specific.
    bootstrap : boolean, optional (default=False)
        Whether bootstrap samples are used when building trees.
    oob_score : bool
        Whether to use out-of-bag samples to estimate
        the generalization error.
    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.
    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.
    class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
        The "balanced_subsample" mode is the same as "balanced" except that weights are
        computed based on the bootstrap sample for every tree grown.
        For multi-output, the weights of each column of y will be multiplied.
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
    Attributes
    ----------
    estimators_ : list of DecisionTreeClassifier
        The collection of fitted sub-estimators.
    classes_ : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem), or a list of arrays of
        class labels (multi-output problem).
    n_classes_ : int or list
        The number of classes (single output problem), or a list containing the
        number of classes for each output (multi-output problem).
    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
    oob_decision_function_ : array of shape = [n_samples, n_classes]
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN.
    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    See also
    --------
    sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
    RandomForestClassifier : Ensemble Classifier based on trees with optimal
        splits.
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        # Identical wiring to RandomForestClassifier except the base tree is
        # an ExtraTreeClassifier and bootstrap defaults to False. Attribute
        # names below must match the constructor-parameter names so they can
        # be forwarded via ``estimator_params`` and found by ``get_params``.
        super(ExtraTreesClassifier, self).__init__(
            base_estimator=ExtraTreeClassifier(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes", "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight)
        # Tree hyper-parameters (forwarded via ``estimator_params``).
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and use averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less then
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
    def __init__(self,
                 n_estimators=10,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        # scikit-learn convention: __init__ only stores parameters, with no
        # validation. BaseForest later forwards the names listed in
        # ``estimator_params`` to each ExtraTreeRegressor sub-estimator.
        super(ExtraTreesRegressor, self).__init__(
            base_estimator=ExtraTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)
        # Tree-specific parameters are kept as public attributes so that
        # get_params / set_params (and estimator_params above) can find them.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
class RandomTreesEmbedding(BaseForest):
    """An ensemble of totally random trees.
    An unsupervised transformation of a dataset to a high-dimensional
    sparse representation. A datapoint is coded according to which leaf of
    each tree it is sorted into. Using a one-hot encoding of the leaves,
    this leads to a binary coding with as many ones as there are trees in
    the forest.
    The dimensionality of the resulting representation is
    ``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
    the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
    Read more in the :ref:`User Guide <random_trees_embedding>`.
    Parameters
    ----------
    n_estimators : int
        Number of trees in the forest.
    max_depth : int
        The maximum depth of each tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less then
        ``min_samples_leaf`` samples.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
    sparse_output : bool, optional (default=True)
        Whether or not to return a sparse CSR matrix, as default behavior,
        or to return a dense array compatible with dense pipeline operators.
    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.
    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.
    Attributes
    ----------
    estimators_ : list of DecisionTreeClassifier
        The collection of fitted sub-estimators.
    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    .. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
           visual codebooks using randomized clustering forests"
           NIPS 2007
    """
    def __init__(self,
                 n_estimators=10,
                 max_depth=5,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_leaf_nodes=None,
                 sparse_output=True,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        super(RandomTreesEmbedding, self).__init__(
            base_estimator=ExtraTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=False,
            oob_score=False,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)
        # criterion and max_features are deliberately fixed (not exposed as
        # parameters): the trees are fit on random targets (see fit_transform),
        # so the split quality measure is irrelevant to the embedding.
        self.criterion = 'mse'
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = 1
        self.max_leaf_nodes = max_leaf_nodes
        self.sparse_output = sparse_output
    def _set_oob_score(self, X, y):
        # Out-of-bag scoring is meaningless here: bootstrap is forced to
        # False and there is no supervised target to score against.
        raise NotImplementedError("OOB score not supported by tree embedding")
    def fit(self, X, y=None, sample_weight=None):
        """Fit estimator.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            The input samples. Use ``dtype=np.float32`` for maximum
            efficiency. Sparse matrices are also supported, use sparse
            ``csc_matrix`` for maximum efficiency.
        Returns
        -------
        self : object
            Returns self.
        """
        # Delegates to fit_transform and discards the transformed output.
        self.fit_transform(X, y, sample_weight=sample_weight)
        return self
    def fit_transform(self, X, y=None, sample_weight=None):
        """Fit estimator and transform dataset.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Input data used to build forests. Use ``dtype=np.float32`` for
            maximum efficiency.
        Returns
        -------
        X_transformed : sparse matrix, shape=(n_samples, n_out)
            Transformed dataset.
        """
        # ensure_2d=False because there are actually unit test checking we fail
        # for 1d.
        X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()
        rnd = check_random_state(self.random_state)
        # Fit the regression forest on uniformly random targets: the
        # supervised machinery then produces completely random splits.
        y = rnd.uniform(size=X.shape[0])
        super(RandomTreesEmbedding, self).fit(X, y,
                                              sample_weight=sample_weight)
        # apply(X) yields the leaf index reached in each tree; one-hot
        # encoding those indices gives the final (sparse) embedding.
        self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
        return self.one_hot_encoder_.fit_transform(self.apply(X))
    def transform(self, X):
        """Transform dataset.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Input data to be transformed. Use ``dtype=np.float32`` for maximum
            efficiency. Sparse matrices are also supported, use sparse
            ``csr_matrix`` for maximum efficiency.
        Returns
        -------
        X_transformed : sparse matrix, shape=(n_samples, n_out)
            Transformed dataset.
        """
        # Requires fit/fit_transform to have been called (one_hot_encoder_).
        return self.one_hot_encoder_.transform(self.apply(X))
| bsd-3-clause |
NicovincX2/Python-3.5 | Problèmes divers/probleme_du_chien.py | 1 | 1908 | # -*- coding: utf-8 -*-
"""
Pursuit-curve problem: a dog C chases its owner M, who jogs at constant
speed v0 along the x axis, while the dog runs at speed v, always aiming
straight at the owner (along the unit vector CM/|CM|).

The parameters below (notably the speed ratio v/v0) can be varied to see
their impact on the possible trajectories.
"""
import os

import numpy as np
import scipy as sp
import scipy.integrate
import matplotlib.pyplot as plt
from matplotlib import animation  # hoisted from mid-script: imports belong at the top

v = 1   # dog speed
v0 = 1  # owner speed

# Initial position of the dog (polar coordinates of the exercise)
theta0 = np.pi / 2
rho0 = 4
y0 = [rho0, theta0]

# Time grid
tmin, tmax, dt = 0, 20, 0.1
nb_t = int((tmax - tmin) / dt)
t = np.linspace(tmin, tmax, nb_t)


def f(y, t):
    """Right-hand side of the ODE system (owner seen from the dog):
        drho/dt       = -v + v0*cos(theta)
        rho*dtheta/dt = -v0*sin(theta)
    """
    rho, theta = y
    return [-v + v0 * np.cos(theta), -v0 * np.sin(theta) / rho]


sol = sp.integrate.odeint(f, y0, t)
# Owner position expressed in the dog's (relative) coordinates.
rho = sol[:, 0]
theta = sol[:, 1]
# Dog trajectory in the fixed (lab) frame.
XC = v0 * t - rho * np.cos(theta)
YC = - rho * np.sin(theta)
fig = plt.figure()
# The owner's path is drawn slightly above y=0 (offset 0.1) so it does not
# overlap the dog's trajectory once the dog has caught up.
maitre_traj, = plt.plot(v0 * t, t * 0 + 0.1, 'k')
chien_traj, = plt.plot(XC, YC, 'k')
maitre_pos, = plt.plot([0], [0], 'ob')
chien_pos, = plt.plot([XC[-1]], [YC[-1]], 'or')
plt.title('Un chien (en rouge) court apres son maitre (en bleu)')


def animate(i):
    """Update the four artists for animation frame *i*."""
    # BUG FIX: the original reset the owner's path to y=0 here, making the
    # line jump away from the 0.1 offset used by the static plot above.
    maitre_traj.set_data(v0 * t[:i], t[:i] * 0 + 0.1)
    chien_traj.set_data(XC[:i], YC[:i])
    maitre_pos.set_xdata([v0 * t[i]])
    chien_pos.set_data([XC[i]], [YC[i]])


# The animation itself (blit=False, so animate() need not return artists).
anim = animation.FuncAnimation(
    fig, animate, frames=nb_t, interval=20, blit=False)
plt.show()
os.system("pause")  # Windows-only "press a key" pause; harmless no-op elsewhere
| gpl-3.0 |
ueser/FIDDLE | _deprecated/TORCHmodels/dataPrep/data4predictionYAC.py | 2 | 8510 | #!/usr/bin/env python
import os
import sys
sys.path.append('/Users/umut/Projects/genome/python/lib')
import genome.db
from optparse import OptionParser
import h5py
import pandas as pd
import numpy as np
################################################################################
# data4predictionYAC.py
#
# Make an HDF5 file for Torch input using a pyTable wrapper called genome: https://github.com/gmcvicker/genome
#
################################################################################
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <annotation_file> <out_file>'
parser = OptionParser(usage)
parser.add_option('-d', dest='rootDir', type='str', default='.', help='Root directory of the project [Default: %default]')
parser.add_option('-p', dest='species', type='str', default='Scer', help='Species? [Default: %default]')
parser.add_option('-b', dest='chunkSize', default=100, type='int', help='Align sizes with batch size')
parser.add_option('-e', dest='width', type='int', default=500, help='Extend all sequences to this length [Default: %default]')
parser.add_option('-r', dest='stride', default=20, type='int', help='Stride sequences [Default: %default]')
(options,args) = parser.parse_args()
if len(args) !=2 :
print(args)
print(options)
print(len(args))
parser.error('Must provide annotation file, and an output prefix')
else:
annot_file = args[0]
out_file = args[1]
#### <-- to be generalized ---> ####
annot = pd.read_table(annot_file,sep="\t")
# annot.drop(annot.columns[[range(6,11)]],axis=1,inplace=True)
print annot.head()
annot.columns = ['chr','start','end','strand']
# switch start and end positions into tss for negative strand
annot['tss'] = np.zeros([annot.shape[0]],dtype=int)
tf = (annot.strand=='+')
annot.loc[tf,'tss'] = annot[tf].start
tf = (annot.strand=='-')
annot.loc[tf,'tss'] = annot[tf].end
#### --> to be generalized <--- ####
# Make directory for the project
directory = "../../data/hdf5datasets/"
if not os.path.exists(directory):
os.makedirs(directory)
totLen=0
chrRange = annot['chr'].unique()
for chname in chrRange:
st = min(annot[annot['chr']==chname].start)
en = max(annot[annot['chr']==chname].end-options.width)
totLen += (en-st-options.width)/options.stride
trainSize = 2*np.ceil(totLen+options.chunkSize)
print >> sys.stderr, '%d training sequences ' % trainSize
print os.path.join(directory,out_file)
# open the hdf5 file to write
f = h5py.File(os.path.join(directory,out_file), "w")
# note that we have 1 channel and 4xoptions.width matrices for dna sequence.
NStrainData = f.create_dataset("NStrainInp", (trainSize,2,1,options.width))
MStrainData = f.create_dataset("MStrainInp", (trainSize,2,1,options.width))
DStrainData = f.create_dataset("DStrainInp", (trainSize,4,1,options.width))
RStrainData = f.create_dataset("RStrainInp", (trainSize,1,1,options.width))
TFtrainData = f.create_dataset("TFtrainInp", (trainSize,2,1,options.width))
info = f.create_dataset("info", (trainSize,4)) # chromosome no, strand, index of the annotation, genomic position
# Use 4 channel and 1xoptions.width
NSdata = np.zeros([options.chunkSize,2,1,options.width])
MSdata = np.zeros([options.chunkSize,2,1,options.width])
DSdata = np.zeros([options.chunkSize,4,1,options.width])
RSdata = np.zeros([options.chunkSize,1,1,options.width])
TFdata = np.zeros([options.chunkSize,2,1,options.width])
infodata = np.zeros([options.chunkSize,4])
if options.species in ['Scer','YSC001']:
assembly = 'sacCer3'
elif options.species in ['YJ160']:
assembly = 'Klla'
elif options.species in ['YJ177']:
assembly = 'DeHa2'
elif options.species in ['YJ167','YJ168','YJ169']:
assembly = 'KllaYAC'
elif options.species in ['YJ170','YJ71']:
assembly = 'DehaYAC'
else:
raise('unknown species')
print assembly
gdb = genome.db.GenomeDB(path='/Users/umut/Projects/genome/data/share/genome_db',assembly=assembly)
NSpos = gdb.open_track('NSpos')
NSneg = gdb.open_track('NSneg')
MSpos = gdb.open_track('MSpos')
MSneg = gdb.open_track('MSneg')
TFpos = gdb.open_track('TFpos')
TFneg = gdb.open_track('TFneg')
RS = gdb.open_track('RS')
seq = gdb.open_track("seq")
qq=0;
cc=0;
ps =0;
debugMode = True
for chname in chrRange:
cc+=1
st = min(annot[annot['chr']==chname].start)
en = max(annot[annot['chr']==chname].end)-options.width
xran = np.arange(st,en,options.stride)
for pos in xran:
seqVec = seq.get_seq_str(chname,pos+1,(pos+options.width))
dsdata = vectorizeSequence(seqVec.lower())
nsP = NSpos.get_nparray(chname,pos+1,(pos+options.width))
nsN = NSneg.get_nparray(chname,pos+1,(pos+options.width))
msP = MSpos.get_nparray(chname,pos+1,(pos+options.width))
msN = MSneg.get_nparray(chname,pos+1,(pos+options.width))
tfP = TFpos.get_nparray(chname,pos+1,(pos+options.width))
tfN = TFneg.get_nparray(chname,pos+1,(pos+options.width))
rs = RS.get_nparray(chname,pos+1,(pos+options.width))
if debugMode:
if not checkData(np.r_[nsP,nsN,msP,msN,rs,tfP,tfN]):
print('NaN detected in chr' + chname + ' and at the position:' + str(pos))
continue
NSdata[qq,0,0,:] = nsP.T
NSdata[qq,1,0,:] = nsN.T
MSdata[qq,0,0,:] = msP.T
MSdata[qq,1,0,:] = msN.T
TFdata[qq,0,0,:] = tfP.T
TFdata[qq,1,0,:] = tfN.T
DSdata[qq,:,0,:] = dsdata.T
RSdata[qq,0,0,:] = rs.T
infodata[qq,:] = [cc, 1,0,pos]
qq+=1
NSdata[qq,0,0,:] = np.flipud(nsN).T
NSdata[qq,1,0,:] = np.flipud(nsP).T
MSdata[qq,0,0,:] = np.flipud(msN).T
MSdata[qq,1,0,:] = np.flipud(msP).T
TFdata[qq,0,0,:] = np.flipud(tfN).T
TFdata[qq,1,0,:] = np.flipud(tfP).T
RSdata[qq,0,0,:] = np.flipud(rs).T
DSdata[qq,:,0,:] = np.flipud(np.fliplr(dsdata)).T
infodata[qq,:] = [cc, -1,0,pos]
qq+=1
if (ps < trainSize) and (qq>=options.chunkSize):
stp = options.chunkSize
NStrainData[range(ps,ps+stp),:,:,:] = NSdata
MStrainData[range(ps,ps+stp),:,:,:] = MSdata
TFtrainData[range(ps,ps+stp),:,:,:] = TFdata
DStrainData[range(ps,ps+stp),:,:,:] = DSdata
RStrainData[range(ps,ps+stp),:,:,:] = RSdata
info[range(ps,ps+stp),:] = infodata
# Use 4 channel and 1xoptions.width
NSdata = np.zeros([options.chunkSize,2,1,options.width])
MSdata = np.zeros([options.chunkSize,2,1,options.width])
TFdata = np.zeros([options.chunkSize,2,1,options.width])
DSdata = np.zeros([options.chunkSize,4,1,options.width])
RSdata = np.zeros([options.chunkSize,1,1,options.width])
infodata = np.zeros([options.chunkSize,4])
ps+=stp
qq=0
print >> sys.stderr, '%d training chunk saved ' % ps
if ps >=(trainSize):
nestVar = 1;
break
print ps
f.close()
NSpos.close()
NSneg.close()
MSpos.close()
MSneg.close()
TFpos.close()
TFneg.close()
RS.close()
seq.close()
def vectorizeSequence(seq):
    """One-hot encode a lower-case DNA string, one row per base.

    'n' spreads its probability mass uniformly over the four bases. The
    column order is not arbitrary: it pairs a<->t and c<->g, so flipping the
    matrix up-down and left-right yields the reverse complement.
    """
    one_hot = {
        'a': [1, 0, 0, 0],
        'c': [0, 1, 0, 0],
        'g': [0, 0, 1, 0],
        't': [0, 0, 0, 1],
        'n': [0.25, 0.25, 0.25, 0.25],
    }
    rows = [one_hot[base] for base in seq]
    return np.array(rows)
def checkData(Xdata):
    """Return False if any entry of *Xdata* is NaN, True otherwise."""
    if np.isnan(Xdata).any():
        return False
    return True
################################################################################
# __main__
################################################################################
# Standard script guard: run main() only when executed directly, so the
# helper functions above stay importable without side effects.
if __name__ == '__main__':
    main()
| gpl-3.0 |
dingocuster/scikit-learn | sklearn/tests/test_naive_bayes.py | 70 | 17509 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
# (the fixed seed keeps these "random" fixtures deterministic across runs)
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
    # Gaussian Naive Bayes classification.
    # This checks that GaussianNB implements fit and predict and returns
    # correct values for a simple toy dataset.
    clf = GaussianNB()
    y_pred = clf.fit(X, y).predict(X)
    assert_array_equal(y_pred, y)
    y_pred_proba = clf.predict_proba(X)
    y_pred_log_proba = clf.predict_log_proba(X)
    assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
    # Test whether label mismatch between target y and classes raises
    # an Error
    # FIXME Remove this test once the more general partial_fit tests are merged
    assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
    # Test whether class priors are properly set.
    # The toy X/y has 3 samples per class, so the prior must be [0.5, 0.5].
    clf = GaussianNB().fit(X, y)
    assert_array_almost_equal(np.array([3, 3]) / 6.0,
                              clf.class_prior_, 8)
    clf.fit(X1, y1)
    # Check that the class priors sum to 1
    assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
    """Test whether sample weights are properly used in GNB. """
    # Sample weights all being 1 should not change results
    sw = np.ones(6)
    clf = GaussianNB().fit(X, y)
    clf_sw = GaussianNB().fit(X, y, sw)
    assert_array_almost_equal(clf.theta_, clf_sw.theta_)
    assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
    # Fitting twice with half sample-weights should result
    # in same result as fitting once with full weights
    sw = rng.rand(y.shape[0])
    clf1 = GaussianNB().fit(X, y, sample_weight=sw)
    clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
    clf2.partial_fit(X, y, sample_weight=sw / 2)
    assert_array_almost_equal(clf1.theta_, clf2.theta_)
    assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
    # Check that duplicate entries and correspondingly increased sample
    # weights yield the same result
    ind = rng.randint(0, X.shape[0], 20)
    sample_weight = np.bincount(ind, minlength=X.shape[0])
    clf_dupl = GaussianNB().fit(X[ind], y[ind])
    clf_sw = GaussianNB().fit(X, y, sample_weight)
    assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
    assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
    # Test whether class priors are properly set.
    # y2 has 2 samples per class, so each log-prior must be log(1/3).
    for cls in [BernoulliNB, MultinomialNB]:
        clf = cls().fit(X2, y2)
        assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
                                  clf.class_log_prior_, 8)
def test_mnnb():
    # Test Multinomial Naive Bayes classification.
    # This checks that MultinomialNB implements fit and predict and returns
    # correct values for a simple toy dataset.
    # NOTE: the loop variable X deliberately shadows the module-level X
    # fixture so both the dense and sparse representations are exercised.
    for X in [X2, scipy.sparse.csr_matrix(X2)]:
        # Check the ability to predict the learning set.
        clf = MultinomialNB()
        assert_raises(ValueError, clf.fit, -X, y2)
        y_pred = clf.fit(X, y2).predict(X)
        assert_array_equal(y_pred, y2)
        # Verify that np.log(clf.predict_proba(X)) gives the same results as
        # clf.predict_log_proba(X)
        y_pred_proba = clf.predict_proba(X)
        y_pred_log_proba = clf.predict_log_proba(X)
        assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
        # Check that incremental fitting yields the same results
        clf2 = MultinomialNB()
        clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
        clf2.partial_fit(X[2:5], y2[2:5])
        clf2.partial_fit(X[5:], y2[5:])
        y_pred2 = clf2.predict(X)
        assert_array_equal(y_pred2, y2)
        y_pred_proba2 = clf2.predict_proba(X)
        y_pred_log_proba2 = clf2.predict_log_proba(X)
        assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
        assert_array_almost_equal(y_pred_proba2, y_pred_proba)
        assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
        # Partial fit on the whole data at once should be the same as fit too
        clf3 = MultinomialNB()
        clf3.partial_fit(X, y2, classes=np.unique(y2))
        y_pred3 = clf3.predict(X)
        assert_array_equal(y_pred3, y2)
        y_pred_proba3 = clf3.predict_proba(X)
        y_pred_log_proba3 = clf3.predict_log_proba(X)
        assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
        assert_array_almost_equal(y_pred_proba3, y_pred_proba)
        assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
    # fit, a single-call partial_fit and a two-call partial_fit must all
    # end up with identical class and feature counts.
    clf1 = cls()
    clf1.fit([[0, 1], [1, 0]], [0, 1])
    clf2 = cls()
    clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
    assert_array_equal(clf1.class_count_, clf2.class_count_)
    assert_array_equal(clf1.feature_count_, clf2.feature_count_)
    clf3 = cls()
    clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
    clf3.partial_fit([[1, 0]], [1])
    assert_array_equal(clf1.class_count_, clf3.class_count_)
    assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
    # nose-style generator test: yields one check per classifier class.
    for cls in [MultinomialNB, BernoulliNB]:
        yield check_partial_fit, cls
def test_gnb_partial_fit():
    # GaussianNB: partial_fit on all data, and partial_fit in two
    # interleaved halves, must both match a plain fit.
    clf = GaussianNB().fit(X, y)
    clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
    assert_array_almost_equal(clf.theta_, clf_pf.theta_)
    assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
    assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
    clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
    clf_pf2.partial_fit(X[1::2], y[1::2])
    assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
    assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
    assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
    # Test picklability of discrete naive Bayes classifiers
    # Round-trip each fitted estimator through pickle and check that the
    # restored object predicts identically.
    for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
        clf = cls().fit(X2, y2)
        y_pred = clf.predict(X2)
        store = BytesIO()
        pickle.dump(clf, store)
        clf = pickle.load(BytesIO(store.getvalue()))
        assert_array_equal(y_pred, clf.predict(X2))
        if cls is not GaussianNB:
            # TODO re-enable me when partial_fit is implemented for GaussianNB
            # Test pickling of estimator trained with partial_fit
            clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
            clf2.partial_fit(X2[3:], y2[3:])
            store = BytesIO()
            pickle.dump(clf2, store)
            clf2 = pickle.load(BytesIO(store.getvalue()))
            assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
    # Test input checks for the fit method
    for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
        # check shape consistency for number of samples at fit time
        assert_raises(ValueError, cls().fit, X2, y2[:-1])
        # check shape consistency for number of input features at predict time
        clf = cls().fit(X2, y2)
        assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
    for cls in [BernoulliNB, MultinomialNB]:
        # check shape consistency
        assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
                      classes=np.unique(y2))
        # classes is required for first call to partial fit
        assert_raises(ValueError, cls().partial_fit, X2, y2)
        # check consistency of consecutive classes values
        clf = cls()
        clf.partial_fit(X2, y2, classes=np.unique(y2))
        assert_raises(ValueError, clf.partial_fit, X2, y2,
                      classes=np.arange(42))
        # check consistency of input shape for partial_fit
        assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
        # check consistency of input shape for predict
        assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
    # Test discrete NB classes' probability scores
    # The 100s below distinguish Bernoulli from multinomial.
    # FIXME: write a test to show this.
    X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
    X_multinomial = [[0, 1], [1, 3], [4, 0]]
    # test binary case (1-d output)
    y = [0, 0, 2] # 2 is regression test for binary case, 02e673
    for cls, X in zip([BernoulliNB, MultinomialNB],
                      [X_bernoulli, X_multinomial]):
        clf = cls().fit(X, y)
        assert_equal(clf.predict(X[-1:]), 2)
        assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
        assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
                                  np.array([1., 1.]), 6)
    # test multiclass case (2-d output, must sum to one)
    y = [0, 1, 2]
    for cls, X in zip([BernoulliNB, MultinomialNB],
                      [X_bernoulli, X_multinomial]):
        clf = cls().fit(X, y)
        assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
        assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
        assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
        assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
        assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
        assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
    # Test whether discrete NB classes fit a uniform prior
    # when fit_prior=False and class_prior=None
    for cls in [BernoulliNB, MultinomialNB]:
        clf = cls()
        clf.set_params(fit_prior=False)
        clf.fit([[0], [0], [1]], [0, 0, 1])
        prior = np.exp(clf.class_log_prior_)
        assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
    # Test whether discrete NB classes use provided prior
    for cls in [BernoulliNB, MultinomialNB]:
        clf = cls(class_prior=[0.5, 0.5])
        clf.fit([[0], [0], [1]], [0, 0, 1])
        prior = np.exp(clf.class_log_prior_)
        assert_array_equal(prior, np.array([.5, .5]))
        # Inconsistent number of classes with prior
        assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
        assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
                      classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
    # Test whether discrete NB classes use provided prior
    # when using partial_fit
    iris = load_iris()
    iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
        iris.data, iris.target, test_size=0.4, random_state=415)
    # With or without an explicit prior, fitting on all data at once and
    # fitting in two partial_fit batches must agree on the class log-prior.
    for cls in [BernoulliNB, MultinomialNB]:
        for prior in [None, [0.3, 0.3, 0.4]]:
            clf_full = cls(class_prior=prior)
            clf_full.fit(iris.data, iris.target)
            clf_partial = cls(class_prior=prior)
            clf_partial.partial_fit(iris_data1, iris_target1,
                                    classes=[0, 1, 2])
            clf_partial.partial_fit(iris_data2, iris_target2)
            assert_array_almost_equal(clf_full.class_log_prior_,
                                      clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
    # nose-style generator test: one check per discrete NB class.
    for cls in [BernoulliNB, MultinomialNB]:
        # check shape consistency for number of samples at fit time
        yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
    X = [
        [0, 0, 1],
        [0, 1, 1],
        [0, 1, 1],
        [1, 0, 0],
    ]
    y = [0, 0, 1, 2]
    sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
    sample_weight /= sample_weight.sum()
    # The doubled weight on the class-1 sample tips the duplicated row
    # [0, 1, 1] towards class 1 in the expected predictions below.
    clf = cls().fit(X, y, sample_weight=sample_weight)
    assert_array_equal(clf.predict(X), [0, 1, 1, 2])
    # Check sample weight using the partial_fit method
    clf = cls()
    clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
                    sample_weight=sample_weight[:2])
    clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
    clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
    assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
    # A heavy weight on the single class-1 sample must dominate the fit.
    clf = MultinomialNB()
    clf.fit([[1, 2], [1, 2], [1, 0]],
            [0, 0, 1],
            sample_weight=[1, 1, 4])
    assert_array_equal(clf.predict([[1, 0]]), [1])
    positive_prior = np.exp(clf.intercept_[0])
    assert_array_almost_equal([1 - positive_prior, positive_prior],
                              [1 / 3., 2 / 3.])
def test_coef_intercept_shape():
    # coef_ and intercept_ should have shapes as in other linear models.
    # Non-regression test for issue #2127.
    X = [[1, 0, 0], [1, 1, 1]]
    y = [1, 2] # binary classification
    for clf in [MultinomialNB(), BernoulliNB()]:
        clf.fit(X, y)
        assert_equal(clf.coef_.shape, (1, 3))
        assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
    # Non regression test to make sure that any further refactoring / optim
    # of the NB models do not harm the performance on a slightly non-linearly
    # separable dataset
    digits = load_digits()
    X, y = digits.data, digits.target
    # The 3-vs-8 subset is the hardest binary sub-problem of digits.
    binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
    X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
    # Multinomial NB
    scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
    assert_greater(scores.mean(), 0.86)
    scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.94)
    # Bernoulli NB
    scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
    assert_greater(scores.mean(), 0.83)
    scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.92)
    # Gaussian NB
    scores = cross_val_score(GaussianNB(), X, y, cv=10)
    assert_greater(scores.mean(), 0.77)
    scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
    # Test for issue #4268.
    # Tests that the feature log prob value computed by BernoulliNB when
    # alpha=1.0 is equal to the expression given in Manning, Raghavan,
    # and Schuetze's "Introduction to Information Retrieval" book:
    # http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
    X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
    Y = np.array([0, 0, 1, 2, 2])
    # Fit Bernoulli NB w/ alpha = 1.0
    clf = BernoulliNB(alpha=1.0)
    clf.fit(X, Y)
    # Manually form the (log) numerator and denominator that
    # constitute P(feature presence | class)
    num = np.log(clf.feature_count_ + 1.0)
    denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
    # Check manual estimate matches
    assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
    # Tests that BernoulliNB when alpha=1.0 gives the same values as
    # those given for the toy example in Manning, Raghavan, and
    # Schuetze's "Introduction to Information Retrieval" book:
    # http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
    # Training data points are:
    # Chinese Beijing Chinese (class: China)
    # Chinese Chinese Shanghai (class: China)
    # Chinese Macao (class: China)
    # Tokyo Japan Chinese (class: Japan)
    # Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
    X = np.array([[1, 1, 0, 0, 0, 0],
                  [0, 1, 0, 0, 1, 0],
                  [0, 1, 0, 1, 0, 0],
                  [0, 1, 1, 0, 0, 1]])
    # Classes are China (0), Japan (1)
    Y = np.array([0, 0, 0, 1])
    # Fit BernoulliNB w/ alpha = 1.0
    clf = BernoulliNB(alpha=1.0)
    clf.fit(X, Y)
    # Check the class prior is correct
    class_prior = np.array([0.75, 0.25])
    assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
    # Check the feature probabilities are correct
    feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
                             [1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
    assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
    # Testing data point is:
    # Chinese Chinese Chinese Tokyo Japan
    X_test = np.array([[0, 1, 1, 0, 0, 1]])
    # Check the predictive probabilities are correct
    unnorm_predict_proba = np.array([[0.005183999999999999,
                                      0.02194787379972565]])
    predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
    assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-0.18.1/sklearn/manifold/isomap.py | 50 | 7515 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
    """Isomap Embedding
    Non-linear dimensionality reduction through Isometric Mapping
    Read more in the :ref:`User Guide <isomap>`.
    Parameters
    ----------
    n_neighbors : integer
        number of neighbors to consider for each point.
    n_components : integer
        number of coordinates for the manifold
    eigen_solver : ['auto'|'arpack'|'dense']
        'auto' : Attempt to choose the most efficient solver
        for the given problem.
        'arpack' : Use Arnoldi decomposition to find the eigenvalues
        and eigenvectors.
        'dense' : Use a direct solver (i.e. LAPACK)
        for the eigenvalue decomposition.
    tol : float
        Convergence tolerance passed to arpack or lobpcg.
        not used if eigen_solver == 'dense'.
    max_iter : integer
        Maximum number of iterations for the arpack solver.
        not used if eigen_solver == 'dense'.
    path_method : string ['auto'|'FW'|'D']
        Method to use in finding shortest path.
        'auto' : attempt to choose the best algorithm automatically.
        'FW' : Floyd-Warshall algorithm.
        'D' : Dijkstra's algorithm.
    neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
        Algorithm to use for nearest neighbors search,
        passed to neighbors.NearestNeighbors instance.
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
    Attributes
    ----------
    embedding_ : array-like, shape (n_samples, n_components)
        Stores the embedding vectors.
    kernel_pca_ : object
        `KernelPCA` object used to implement the embedding.
    training_data_ : array-like, shape (n_samples, n_features)
        Stores the training data.
    nbrs_ : sklearn.neighbors.NearestNeighbors instance
        Stores nearest neighbors instance, including BallTree or KDtree
        if applicable.
    dist_matrix_ : array-like, shape (n_samples, n_samples)
        Stores the geodesic distance matrix of training data.
    References
    ----------
    .. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
        framework for nonlinear dimensionality reduction. Science 290 (5500)
    """
    def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
                 tol=0, max_iter=None, path_method='auto',
                 neighbors_algorithm='auto', n_jobs=1):
        # parameters are stored verbatim (sklearn estimator convention:
        # no validation or work in __init__)
        self.n_neighbors = n_neighbors
        self.n_components = n_components
        self.eigen_solver = eigen_solver
        self.tol = tol
        self.max_iter = max_iter
        self.path_method = path_method
        self.neighbors_algorithm = neighbors_algorithm
        self.n_jobs = n_jobs
    def _fit_transform(self, X):
        # Full isomap pipeline: kNN graph -> geodesic distances ->
        # kernel PCA on the isomap kernel.
        X = check_array(X)
        self.nbrs_ = NearestNeighbors(n_neighbors=self.n_neighbors,
                                      algorithm=self.neighbors_algorithm,
                                      n_jobs=self.n_jobs)
        self.nbrs_.fit(X)
        self.training_data_ = self.nbrs_._fit_X
        self.kernel_pca_ = KernelPCA(n_components=self.n_components,
                                     kernel="precomputed",
                                     eigen_solver=self.eigen_solver,
                                     tol=self.tol, max_iter=self.max_iter,
                                     n_jobs=self.n_jobs)
        # k-nearest-neighbor graph weighted by euclidean distance
        kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
                               mode='distance', n_jobs=self.n_jobs)
        # geodesic distances = shortest paths through the kNN graph
        self.dist_matrix_ = graph_shortest_path(kng,
                                                method=self.path_method,
                                                directed=False)
        # isomap kernel K = -0.5 * D**2 (centering is done by KernelPCA)
        G = self.dist_matrix_ ** 2
        G *= -0.5
        self.embedding_ = self.kernel_pca_.fit_transform(G)
    def reconstruction_error(self):
        """Compute the reconstruction error for the embedding.
        Returns
        -------
        reconstruction_error : float
        Notes
        -------
        The cost function of an isomap embedding is
        ``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
        Where D is the matrix of distances for the input data X,
        D_fit is the matrix of distances for the output embedding X_fit,
        and K is the isomap kernel:
        ``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
        """
        G = -0.5 * self.dist_matrix_ ** 2
        G_center = KernelCenterer().fit_transform(G)
        # lambdas_ are the eigenvalues retained by KernelPCA; the error is
        # the kernel "energy" not captured by the kept components
        evals = self.kernel_pca_.lambdas_
        return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
    def fit(self, X, y=None):
        """Compute the embedding vectors for data X
        Parameters
        ----------
        X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
            Sample data, shape = (n_samples, n_features), in the form of a
            numpy array, precomputed tree, or NearestNeighbors
            object.
        Returns
        -------
        self : returns an instance of self.
        """
        self._fit_transform(X)
        return self
    def fit_transform(self, X, y=None):
        """Fit the model from data in X and transform X.
        Parameters
        ----------
        X: {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.
        Returns
        -------
        X_new: array-like, shape (n_samples, n_components)
        """
        self._fit_transform(X)
        return self.embedding_
    def transform(self, X):
        """Transform X.
        This is implemented by linking the points X into the graph of geodesic
        distances of the training data. First the `n_neighbors` nearest
        neighbors of X are found in the training data, and from these the
        shortest geodesic distances from each point in X to each point in
        the training data are computed in order to construct the kernel.
        The embedding of X is the projection of this kernel onto the
        embedding vectors of the training set.
        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
        Returns
        -------
        X_new: array-like, shape (n_samples, n_components)
        """
        X = check_array(X)
        distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
        # Create the graph of shortest distances from X to self.training_data_
        # via the nearest neighbors of X.
        # This can be done as a single array operation, but it potentially
        # takes a lot of memory.  To avoid that, use a loop:
        G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
        for i in range(X.shape[0]):
            G_X[i] = np.min(self.dist_matrix_[indices[i]] +
                            distances[i][:, None], 0)
        # same isomap kernel transform as in _fit_transform
        G_X **= 2
        G_X *= -0.5
        return self.kernel_pca_.transform(G_X)
| bsd-3-clause |
suyashbire1/pyhton_scripts_mom6 | plot_eb_transport.py | 1 | 1522 | import sys
import readParams_moreoptions as rdp1
import matplotlib.pyplot as plt
from mom_plot import m6plot
import numpy as np
from netCDF4 import MFDataset as mfdset, Dataset as dset
import time
import pyximport
pyximport.install()
from getvaratzc import getvaratzc
def plot_eb_transport(geofil,vgeofil,fil,xstart,xend,ystart,yend,zs,ze,meanax,savfil=None):
    """Plot the mean meridional transport (vh) near the eastern boundary.

    NOTE(review): depends on project helpers (readParams_moreoptions,
    mom_plot.m6plot and the cython getvaratzc); the meaning of the index
    arguments is taken from those helpers and not verified here.
    If ``savfil`` is given the figure is written to ``savfil + '.eps'``,
    otherwise it is shown interactively.
    """
    # axes of the 4-D field that are kept (i.e. not averaged over)
    keepax = ()
    for i in range(4):
        if i not in meanax:
            keepax += (i,)
    fh = mfdset(fil)
    (xs,xe),(ys,ye),dimv = rdp1.getlatlonindx(fh,wlon=xstart,elon=xend,
            slat=ystart, nlat=yend,zs=zs,ze=ze,yhyq='yq')
    fhgeo = dset(geofil)
    D = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye)[0]
    fhgeo.close()
    nt_const = dimv[0].size
    vh = fh.variables['vh'][0:,zs:ze,ys:ye,xs:xe]
    e = fh.variables['e'][0:,zs:ze,ys:ye,xs:xe]
    fh.close()
    # masked (land) points contribute zero transport
    vh = vh.filled(0)
    X = dimv[keepax[1]]
    Y = dimv[keepax[0]]
    if 1 in keepax:
        # interpolate onto a fixed z grid before averaging
        # NOTE(review): P is only defined in this branch; if 1 is averaged
        # over (1 in meanax), the apply_over_axes below raises NameError.
        z = np.linspace(-np.nanmax([2000]),-1,num=50)
        Y = z
        P = getvaratzc(vh,z,e)
    P = np.ma.apply_over_axes(np.mean, P, meanax)
    P = P.squeeze()
    #P = np.ma.apply_over_axes(np.mean, vh, meanax).squeeze()
    im = m6plot((X,Y,P),titl='Transport near EB', ylab='z (m)', xlab='y (Deg)')
    if savfil:
        plt.savefig(savfil+'.eps', dpi=300, facecolor='w', edgecolor='w',
                    format='eps', transparent=False, bbox_inches='tight')
    else:
        im = m6plot((X,Y,P),titl='Transport near EB', ylab='z (m)', xlab='y (Deg)')
        plt.show()
| gpl-3.0 |
dhhagan/opcsim | opcsim/metrics.py | 1 | 2962 | """Contains the scoring algorithms used in the model.
"""
import numpy as np
import pandas as pd
from .models import OPC
from .utils import k_kohler, ri_eff
from .mie import cscat
def compute_bin_assessment(opc, refr, kappa, rh_values=(0., 35., 95.)):
    """Assess the ability of an OPC to assign particles to their correct bin.

    Parameters
    ----------
    opc: opcsim.OPC
        The OPC model under assessment.
    refr: complex
        The complex refractive index of the (dry) material to assess.
    kappa: float
        The kappa value to use for hygroscopic growth.
    rh_values: list-like
        A list of relative humidities (in percent) to assess the OPC at.
        The default is now an immutable tuple (same values as before) to
        avoid the shared-mutable-default pitfall.

    Returns
    -------
    rv: pd.DataFrame
        One row per (rh, bin) with columns: bin_true, bin_lo, bin_hi
        (-99 means "no bin assigned"), refr_eff, rh, cscat_hi_ratio,
        cscat_lo_ratio.

    Examples
    --------
    """
    assert(isinstance(opc, OPC)), "opc must be an instance of the opcsim.OPC class"
    # Accumulate plain dicts and build the frame once at the end.
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0,
    # and per-row append was quadratic anyway.
    records = []
    for rh in rh_values:
        for i, _bins in enumerate(opc.bins):
            # compute the wet (hygroscopically grown) diameters at both edges
            wet_diam_lo = k_kohler(diam_dry=_bins[0], kappa=kappa, rh=rh)
            wet_diam_hi = k_kohler(diam_dry=_bins[-1], kappa=kappa, rh=rh)
            # volume fraction of the wet particle that is dry material
            pct_dry = (_bins[0]**3) / (wet_diam_lo**3)
            # effective RI: volume-weighted mix of dry material and water
            ri = ri_eff(species=[refr, complex(1.333, 0)], weights=[pct_dry, 1-pct_dry])
            # scattering cross-sections: expected (dry particle) vs actual (wet)
            cscat_lo_exp = cscat(
                dp=_bins[0], wl=opc.wl, refr=refr, theta1=opc.theta[0], theta2=opc.theta[1])
            cscat_hi_exp = cscat(
                dp=_bins[-1], wl=opc.wl, refr=refr, theta1=opc.theta[0], theta2=opc.theta[1])
            cscat_lo = cscat(
                dp=wet_diam_lo, wl=opc.wl, refr=ri, theta1=opc.theta[0], theta2=opc.theta[1])
            cscat_hi = cscat(
                dp=wet_diam_hi, wl=opc.wl, refr=ri, theta1=opc.theta[0], theta2=opc.theta[1])
            # map the scattering signals back to bin numbers; empty -> -99
            bin_assign_lo = opc.calibration_function(values=[cscat_lo])
            bin_assign_hi = opc.calibration_function(values=[cscat_hi])
            records.append({
                "bin_true": i,
                "bin_lo": bin_assign_lo[0] if len(bin_assign_lo) > 0 else -99,
                "bin_hi": bin_assign_hi[0] if len(bin_assign_hi) > 0 else -99,
                "refr_eff": ri,
                "rh": rh,
                "cscat_hi_ratio": cscat_hi / cscat_hi_exp,
                "cscat_lo_ratio": cscat_lo / cscat_lo_exp,
            })
    rv = pd.DataFrame(records)
    # force datatypes to be correct
    rv["bin_true"] = rv["bin_true"].astype(int)
    rv["bin_lo"] = rv["bin_lo"].astype(int)
    rv["bin_hi"] = rv["bin_hi"].astype(int)
    rv["rh"] = rv["rh"].astype(float)
    rv["cscat_hi_ratio"] = rv["cscat_hi_ratio"].astype(float)
    rv["cscat_lo_ratio"] = rv["cscat_lo_ratio"].astype(float)
    return rv
| mit |
ch3ll0v3k/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 276 | 3790 | # Authors: Lars Buitinck <L.J.Buitinck@uva.nl>
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
    """Smoke-test DictVectorizer over sparse/dtype/sort/iterable combos."""
    # 5 distinct features overall: bar, baz, foo, quuux, quux
    D = [{"foo": 1, "bar": 3},
         {"bar": 4, "baz": 2},
         {"bar": 1, "quux": 1, "quuux": 2}]
    for sparse in (True, False):
        for dtype in (int, np.float32, np.int16):
            for sort in (True, False):
                for iterable in (True, False):
                    v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
                    X = v.fit_transform(iter(D) if iterable else D)
                    assert_equal(sp.issparse(X), sparse)
                    assert_equal(X.shape, (3, 5))
                    # 14 is the sum of all values in D
                    assert_equal(X.sum(), 14)
                    assert_equal(v.inverse_transform(X), D)
                    if sparse:
                        # CSR matrices can't be compared for equality
                        assert_array_equal(X.A, v.transform(iter(D) if iterable
                                                            else D).A)
                    else:
                        assert_array_equal(X, v.transform(iter(D) if iterable
                                                          else D))
                    if sort:
                        assert_equal(v.feature_names_,
                                     sorted(v.feature_names_))
def test_feature_selection():
    """restrict() must keep exactly the chi2-selected informative features."""
    # two dicts agreeing on 20 useless features and differing only on the
    # two informative ones, so chi2 ranks useful1/useful2 on top
    noise = {"useless%d" % i: 10 for i in range(20)}
    d1 = dict(noise, useful1=1, useful2=20)
    d2 = dict(noise, useful1=20, useful2=1)
    for indices in (True, False):
        v = DictVectorizer().fit([d1, d2])
        X = v.transform([d1, d2])
        sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
        v.restrict(sel.get_support(indices=indices), indices=indices)
        assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
    """String-valued features are one-hot ("one-of-K") expanded."""
    D_in = [{"version": "1", "ham": 2},
            {"version": "2", "spam": .3},
            {"version=3": True, "spam": -1}]
    vectorizer = DictVectorizer()
    X = vectorizer.fit_transform(D_in)
    assert_equal(X.shape, (3, 5))
    D_out = vectorizer.inverse_transform(X)
    assert_equal(D_out[0], {"version=1": 1, "ham": 2})
    feature_names = vectorizer.get_feature_names()
    # the expanded "version=2" exists; the bare "version" name does not
    assert_true("version=2" in feature_names)
    assert_false("version" in feature_names)
def test_unseen_or_no_features():
    """Unseen features vectorize to zeros; an empty batch raises ValueError."""
    D = [{"camelot": 0, "spamalot": 1}]
    for sparse in [True, False]:
        v = DictVectorizer(sparse=sparse).fit(D)
        # a sample with only unseen features maps to the zero vector
        X = v.transform({"push the pram a lot": 2})
        if sparse:
            X = X.toarray()
        assert_array_equal(X, np.zeros((1, 2)))
        # an empty sample also maps to the zero vector
        X = v.transform({})
        if sparse:
            X = X.toarray()
        assert_array_equal(X, np.zeros((1, 2)))
        # an empty *batch*, however, must raise a ValueError mentioning "empty"
        try:
            v.transform([])
        except ValueError as e:
            assert_in("empty", str(e))
def test_deterministic_vocabulary():
    """The learned vocabulary must not depend on dict memory layout."""
    # build two equal dicts whose insertion orders differ
    pairs = [("%03d" % i, i) for i in range(1000)]
    rng = Random(42)
    ordered = dict(pairs)
    rng.shuffle(pairs)
    shuffled = dict(pairs)
    vocab_a = DictVectorizer().fit([ordered]).vocabulary_
    vocab_b = DictVectorizer().fit([shuffled]).vocabulary_
    assert_equal(vocab_a, vocab_b)
| bsd-3-clause |
thirdwing/mxnet | example/speech_recognition/stt_utils.py | 44 | 5892 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import os.path
import numpy as np
import soundfile
from numpy.lib.stride_tricks import as_strided
logger = logging.getLogger(__name__)
def calc_feat_dim(window, max_freq):
    """Return the spectrogram feature dimension for a `window`-ms FFT
    window truncated at `max_freq` Hz (number of retained frequency bins)."""
    n_bins = int(0.001 * window * max_freq)
    return n_bins + 1
def conv_output_length(input_length, filter_size, border_mode, stride,
                       dilation=1):
    """ Compute the length of the output sequence after 1D convolution along
    time. Note that this function is in line with the function used in
    Convolution1D class from Keras.
    Params:
        input_length (int): Length of the input sequence.
        filter_size (int): Width of the convolution kernel.
        border_mode (str): Only support `same` or `valid`.
        stride (int): Stride size used in 1D convolution.
        dilation (int)
    """
    if input_length is None:
        return None
    assert border_mode in {'same', 'valid'}
    # effective kernel width once dilation gaps are included
    effective_size = (filter_size - 1) * dilation + 1
    if border_mode == 'valid':
        length = input_length - effective_size + 1
    else:  # 'same' keeps the temporal length before striding
        length = input_length
    # ceil-divide by the stride
    return (length + stride - 1) // stride
def spectrogram(samples, fft_length=256, sample_rate=2, hop_length=128):
    """
    Compute the spectrogram for a real signal.
    The parameters follow the naming convention of
    matplotlib.mlab.specgram
    Args:
        samples (1D array): input audio signal
        fft_length (int): number of elements in fft window
        sample_rate (scalar): sample rate
        hop_length (int): hop length (relative offset between neighboring
            fft windows).
    Returns:
        x (2D array): spectrogram [frequency x time]
        freq (1D array): frequency of each row in x
    Note:
        This is a truncating computation e.g. if fft_length=10,
        hop_length=5 and the signal has 23 elements, then the
        last 3 elements will be truncated.
    """
    assert not np.iscomplexobj(samples), "Must not pass in complex numbers"
    # Hann window shaped (fft_length, 1) so it broadcasts over frames below
    window = np.hanning(fft_length)[:, None]
    window_norm = np.sum(window ** 2)
    # The scaling below follows the convention of
    # matplotlib.mlab.specgram which is the same as
    # matlabs specgram.
    scale = window_norm * sample_rate
    # drop trailing samples that don't fill a whole hop
    trunc = (len(samples) - fft_length) % hop_length
    x = samples[:len(samples) - trunc]
    # "stride trick" reshape to include overlap
    # (columns are overlapping frames; no data is copied)
    nshape = (fft_length, (len(x) - fft_length) // hop_length + 1)
    nstrides = (x.strides[0], x.strides[0] * hop_length)
    x = as_strided(x, shape=nshape, strides=nstrides)
    # window stride sanity check
    assert np.all(x[:, 1] == samples[hop_length:(hop_length + fft_length)])
    # broadcast window, compute fft over columns and square mod
    # This function computes the one-dimensional n-point discrete Fourier Transform (DFT) of a real-valued array by means of an efficient algorithm called the Fast Fourier Transform (FFT).
    x = np.fft.rfft(x * window, axis=0)
    x = np.absolute(x) ** 2
    # scale, 2.0 for everything except dc and fft_length/2
    x[1:-1, :] *= (2.0 / scale)
    x[(0, -1), :] /= scale
    freqs = float(sample_rate) / fft_length * np.arange(x.shape[0])
    return x, freqs
def spectrogram_from_file(filename, step=10, window=20, max_freq=None,
                          eps=1e-14, overwrite=False, save_feature_as_csvfile=False):
    """ Calculate the log of linear spectrogram from FFT energy
    Params:
        filename (str): Path to the audio file
        step (int): Step size in milliseconds between windows
        window (int): FFT window size in milliseconds
        max_freq (int): Only FFT bins corresponding to frequencies between
            [0, max_freq] are returned
        eps (float): Small value to ensure numerical stability (for ln(x))
        overwrite (bool): Recompute even if a cached .csv file exists
        save_feature_as_csvfile (bool): Cache the result next to the .wav
    Returns:
        2D array [time x frequency] of log spectrogram features.
    """
    # results are cached in a sibling .csv file; reuse it unless overwriting
    csvfilename = filename.replace(".wav", ".csv")
    if (os.path.isfile(csvfilename) is False) or overwrite:
        with soundfile.SoundFile(filename) as sound_file:
            audio = sound_file.read(dtype='float32')
            sample_rate = sound_file.samplerate
            # downmix multi-channel audio to mono
            if audio.ndim >= 2:
                audio = np.mean(audio, 1)
            # default cutoff is the Nyquist frequency
            if max_freq is None:
                max_freq = sample_rate / 2
            if max_freq > sample_rate / 2:
                raise ValueError("max_freq must not be greater than half of "
                                 " sample rate")
            if step > window:
                raise ValueError("step size must not be greater than window size")
            # convert milliseconds to sample counts
            hop_length = int(0.001 * step * sample_rate)
            fft_length = int(0.001 * window * sample_rate)
            pxx, freqs = spectrogram(
                audio, fft_length=fft_length, sample_rate=sample_rate,
                hop_length=hop_length)
            # keep only bins up to max_freq, transpose to [time x freq]
            ind = np.where(freqs <= max_freq)[0][-1] + 1
            res = np.transpose(np.log(pxx[:ind, :] + eps))
            if save_feature_as_csvfile:
                np.savetxt(csvfilename, res)
            return res
    else:
        return np.loadtxt(csvfilename)
| apache-2.0 |
bartslinger/paparazzi | sw/tools/calibration/calibration_utils.py | 27 | 12769 |
# Copyright (C) 2010 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import print_function, division
import re
import numpy as np
from numpy import sin, cos
from scipy import linalg, stats
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def get_ids_in_log(filename):
    """Return the distinct ac_id values found in a log, in order of appearance.

    Each log line is expected to look like "<timestamp> <ac_id> ...";
    the second whitespace-separated token is the aircraft id.

    Fixes over the previous version: the file handle is now closed (with
    statement), the regex is a raw string (avoids invalid-escape warnings
    on Python >= 3.12), and a blank line inside the file no longer aborts
    parsing early (the old readline loop stopped at the first blank line).
    """
    pattern = re.compile(r"\S+ (\S+)")
    ids = []
    with open(filename, 'r') as f:
        for line in f:
            m = re.match(pattern, line.strip())
            if m:
                ac_id = m.group(1)
                if ac_id not in ids:
                    ids.append(ac_id)
    return ids
def read_log(ac_id, filename, sensor):
    """Extract raw sensor measurements from a log.

    Matches lines of the form "<ts> <ac_id> IMU_<sensor>_RAW x y z" and
    returns the x/y/z triplets as a float ndarray of shape (n, 3).

    Fixes over the previous version: closes the file, uses raw regex
    strings, and no longer stops at the first blank line in the file.
    """
    pattern = re.compile(r"(\S+) " + ac_id + r" IMU_" + sensor + r"_RAW (\S+) (\S+) (\S+)")
    list_meas = []
    with open(filename, 'r') as f:
        for line in f:
            m = re.match(pattern, line.strip())
            if m:
                list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4))])
    return np.array(list_meas)
def read_log_scaled(ac_id, filename, sensor, t_start, t_end):
    """Extract scaled sensor measurements from a log.

    Matches "<ts> <ac_id> IMU_<sensor>_SCALED x y z" lines and keeps only
    those with t_start <= ts < t_end + 1.0 (the extra second mirrors the
    original behavior).  Returns an ndarray of [ts, x, y, z] rows.

    Fixes over the previous version: closes the file, uses raw regex
    strings, and no longer stops at the first blank line in the file.
    """
    pattern = re.compile(r"(\S+) " + ac_id + r" IMU_" + sensor + r"_SCALED (\S+) (\S+) (\S+)")
    list_meas = []
    with open(filename, 'r') as f:
        for line in f:
            m = re.match(pattern, line.strip())
            if m:
                if (float(m.group(1)) >= float(t_start)) and (float(m.group(1)) < (float(t_end)+1.0)):
                    list_meas.append([float(m.group(1)), float(m.group(2)), float(m.group(3)), float(m.group(4))])
    return np.array(list_meas)
def read_log_mag_current(ac_id, filename):
    """Extract raw magnetometer and current measurements from a log.

    Matches "<ts> <ac_id> IMU_MAG_CURRENT_CALIBRATION mx my mz current"
    lines and returns an ndarray of [mx, my, mz, current] rows.

    Fixes over the previous version: closes the file, uses raw regex
    strings, and no longer stops at the first blank line in the file.
    """
    pattern = re.compile(r"(\S+) " + ac_id + r" IMU_MAG_CURRENT_CALIBRATION (\S+) (\S+) (\S+) (\S+)")
    list_meas = []
    with open(filename, 'r') as f:
        for line in f:
            m = re.match(pattern, line.strip())
            if m:
                list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4)), float(m.group(5))])
    return np.array(list_meas)
def filter_meas(meas, window_size, noise_threshold):
    """Keep only measurements whose surrounding window is quiet.

    A sample i is kept when the norm of the per-axis standard deviation
    over meas[i-window_size : i+window_size] is below noise_threshold.
    Returns (kept_rows_as_ndarray, kept_indices_as_list).
    """
    kept = []
    kept_idx = []
    for idx in range(window_size, len(meas) - window_size):
        neighborhood = meas[idx - window_size:idx + window_size, :]
        if linalg.norm(neighborhood.std(axis=0)) < noise_threshold:
            kept.append(meas[idx, :])
            kept_idx.append(idx)
    return np.array(kept), kept_idx
def get_min_max_guess(meas, scale):
    """Initial boundary-based calibration guess.

    Parameters
    ----------
    meas : ndarray, shape (n, 3)
        Raw measurements, one x/y/z triplet per row.
    scale : float
        Magnitude the calibrated sensor should read (e.g. gravity norm).

    Returns
    -------
    ndarray, shape (6,)
        [neutral_x, neutral_y, neutral_z, sens_x, sens_y, sens_z];
        all zeros when any axis has zero spread (degenerate data).
    """
    max_meas = meas[:, :].max(axis=0)
    min_meas = meas[:, :].min(axis=0)
    spread = max_meas - min_meas  # renamed: previously shadowed builtin `range`
    # check if we would get division by zero
    if spread.all():
        n = (max_meas + min_meas) / 2
        sf = 2*scale/spread
        return np.array([n[0], n[1], n[2], sf[0], sf[1], sf[2]])
    # BUGFIX: the degenerate branch used to return only 4 zeros, which
    # broke the 6-element [neutral, sens] layout that consumers such as
    # scale_measurements() slice as p[0:3] / p[3:6].
    return np.zeros(6)
def scale_measurements(meas, p):
    """Apply the neutral/sensitivity calibration p to every measurement.

    p[0:3] are the per-axis neutral offsets, p[3:6] the sensitivities.
    Returns (scaled_rows, per_row_norms) as two ndarrays.
    """
    neutral = p[0:3]
    sens = p[3:6]
    scaled = [(row - neutral) * sens for row in meas[:, ]]
    norms = [linalg.norm(s) for s in scaled]
    return np.array(scaled), np.array(norms)
def estimate_mag_current_relation(meas):
    """Least-squares slope of each magnetometer axis versus current.

    meas columns 0..2 are the mag axes, column 3 is the current draw.
    Returns a list of three linear-regression gradients.
    """
    current = meas[:, 3]
    slopes = []
    for axis in range(3):
        regression = stats.linregress(current, meas[:, axis])
        slopes.append(regression[0])  # gradient only; intercept etc. unused
    return slopes
def print_xml(p, sensor, res):
    """Print airframe-file xml defines for a calibration result.

    p[0:3] are the neutral offsets (rounded to int), p[3:6] the
    sensitivities (scaled by 2**res for fixed-point storage).
    """
    axes = ('X', 'Y', 'Z')
    print("")
    for i, axis in enumerate(axes):
        print("<define name=\"" + sensor + "_" + axis + "_NEUTRAL\" value=\"" + str(int(round(p[i]))) + "\"/>")
    for i, axis in enumerate(axes):
        print("<define name=\"" + sensor + "_" + axis + "_SENS\" value=\"" + str(p[3 + i]*2**res) + "\" integer=\"16\"/>")
    print("")
def print_imu_scaled(sensor, measurements, attrs):
    """Print a min/max/mean/stddev summary of scaled IMU measurements.

    measurements[:, 0] is time; the remaining columns are the channels.
    attrs = [scale, unit, label_x, label_y, label_z].
    Note: mutates numpy's global print options (as the original did).
    """
    scale = attrs[0]
    unit = attrs[1]
    times = measurements[:, 0]
    channels = measurements[:, 1:]
    print("")
    print(sensor + " : Time Range(" + str(times.min(axis=0)) + " : " + str(times.max(axis=0)) + ")")
    np.set_printoptions(formatter={'float': '{:-7.3f}'.format})
    print(" " + attrs[2] + " " + attrs[3] + " " + attrs[4])
    for label, stat in (("Min ", channels.min(axis=0)),
                        ("Max ", channels.max(axis=0)),
                        ("Mean ", channels.mean(axis=0)),
                        ("StDev ", channels.std(axis=0))):
        print(label + str(stat * scale) + " " + unit)
def plot_measurements(sensor, measurements):
    """Show the three raw ADC channels of a sensor log (blocking window)."""
    plt.plot(measurements[:, 0])
    plt.plot(measurements[:, 1])
    plt.plot(measurements[:, 2])
    plt.ylabel('ADC')
    plt.title("Raw %s measurements" % sensor)
    plt.show()
def plot_results(sensor, measurements, flt_idx, flt_meas, cp0, np0, cp1, np1, sensor_ref, blocking=True):
    """Plot calibration results.

    Top row: raw measurements, with the filtered (actually used) samples
    marked as red dots.  Middle row: components/norm scaled with the
    initial guess (cp0/np0).  Bottom row: the same after optimization
    (cp1/np1).  sensor_ref is the expected norm (e.g. gravity).
    """
    # plot raw measurements with filtered ones marked as red circles
    plt.subplot(3, 1, 1)
    plt.plot(measurements[:, 0])
    plt.plot(measurements[:, 1])
    plt.plot(measurements[:, 2])
    plt.plot(flt_idx, flt_meas[:, 0], 'ro')
    plt.plot(flt_idx, flt_meas[:, 1], 'ro')
    plt.plot(flt_idx, flt_meas[:, 2], 'ro')
    plt.ylabel('ADC')
    plt.title('Raw '+sensor+', red dots are actually used measurements')
    plt.tight_layout()
    # show scaled measurements with initial guess
    plt.subplot(3, 2, 3)
    plt.plot(cp0[:, 0])
    plt.plot(cp0[:, 1])
    plt.plot(cp0[:, 2])
    # reference band: calibrated components should stay within +/- sensor_ref
    plt.plot(-sensor_ref*np.ones(len(flt_meas)))
    plt.plot(sensor_ref*np.ones(len(flt_meas)))
    plt.title('scaled '+sensor+' (initial guess)')
    plt.xticks([])
    plt.subplot(3, 2, 4)
    plt.plot(np0)
    plt.plot(sensor_ref*np.ones(len(flt_meas)))
    plt.title('norm of '+sensor+' (initial guess)')
    plt.xticks([])
    # show scaled measurements after optimization
    plt.subplot(3, 2, 5)
    plt.plot(cp1[:, 0])
    plt.plot(cp1[:, 1])
    plt.plot(cp1[:, 2])
    plt.plot(-sensor_ref*np.ones(len(flt_meas)))
    plt.plot(sensor_ref*np.ones(len(flt_meas)))
    plt.title('scaled '+sensor+' (optimized)')
    plt.xticks([])
    plt.subplot(3, 2, 6)
    plt.plot(np1)
    plt.plot(sensor_ref*np.ones(len(flt_meas)))
    plt.title('norm of '+sensor+' (optimized)')
    plt.xticks([])
    # if we want to have another plot we only draw the figure (non-blocking)
    # also in matplotlib before 1.0.0 there is only one call to show possible
    if blocking:
        plt.show()
    else:
        plt.draw()
def plot_imu_scaled(sensor, measurements, attrs):
    """Plot imu scaled results.

    measurements[:, 0] is time; columns 1..3 are the channels.
    attrs = [scale, unit, label_all, label_x, label_y, label_z]-style list
    where attrs[0] scales the raw values and attrs[1:] provide axis labels.
    Blocking: ends with plt.show().
    """
    plt.figure("Sensor Scaled")
    # top panel: all three channels together
    plt.subplot(4, 1, 1)
    plt.plot(measurements[:, 0], measurements[:, 1]*attrs[0])
    plt.plot(measurements[:, 0], measurements[:, 2]*attrs[0])
    plt.plot(measurements[:, 0], measurements[:, 3]*attrs[0])
    #plt.xlabel('Time (s)')
    plt.ylabel(attrs[1])
    plt.title(sensor)
    # one panel per individual channel below
    plt.subplot(4, 1, 2)
    plt.plot(measurements[:, 0], measurements[:, 1]*attrs[0], 'b')
    #plt.xlabel('Time (s)')
    plt.ylabel(attrs[2])
    plt.subplot(4, 1, 3)
    plt.plot(measurements[:, 0], measurements[:, 2]*attrs[0], 'g')
    #plt.xlabel('Time (s)')
    plt.ylabel(attrs[3])
    plt.subplot(4, 1, 4)
    plt.plot(measurements[:, 0], measurements[:, 3]*attrs[0], 'r')
    plt.xlabel('Time (s)')
    plt.ylabel(attrs[4])
    plt.show()
def plot_imu_scaled_fft(sensor, measurements, attrs):
    """Plot imu scaled fft results.

    Shows the magnitude spectrum of each scaled channel.  The sample rate
    is hard-coded to 26 Hz (see Fs below).  Blocking: ends with plt.show().
    """
    #dt = 0.0769
    #Fs = 1/dt
    # hard-coded sample rate of the logged telemetry stream
    Fs = 26.0
    plt.figure("Sensor Scaled - FFT")
    plt.subplot(3, 1, 1)
    plt.magnitude_spectrum(measurements[:, 1]*attrs[0], Fs=Fs, scale='linear')
    plt.ylabel(attrs[2])
    plt.title(sensor)
    plt.subplot(3, 1, 2)
    plt.magnitude_spectrum(measurements[:, 2]*attrs[0], Fs=Fs, scale='linear')
    plt.ylabel(attrs[3])
    plt.subplot(3, 1, 3)
    plt.magnitude_spectrum(measurements[:, 3]*attrs[0], Fs=Fs, scale='linear')
    plt.xlabel('Frequency')
    plt.ylabel(attrs[4])
    plt.show()
def plot_mag_3d(measured, calibrated, p):
    """Plot magnetometer measurements on 3D sphere.

    Left: raw measurements with the fitted ellipsoid (parameters p) and the
    offset from the origin to its center.  Right: calibrated measurements
    on the unit sphere.  Blocking: ends with plt.show().

    FIX: the two ``plt.hold(True)`` calls were removed — hold-on has always
    been the default behavior, and ``pyplot.hold`` was deprecated in
    matplotlib 2.0 and removed in 3.0, so the calls crashed on modern
    installs without changing the output on old ones.
    """
    # set up points for sphere and ellipsoid wireframes
    u = np.r_[0:2 * np.pi:20j]
    v = np.r_[0:np.pi:20j]
    wx = np.outer(cos(u), sin(v))
    wy = np.outer(sin(u), sin(v))
    wz = np.outer(np.ones(np.size(u)), cos(v))
    # ellipsoid centered at p[0:3] with semi-axes 1/p[3:6]
    ex = p[0] * np.ones(np.size(u)) + np.outer(cos(u), sin(v)) / p[3]
    ey = p[1] * np.ones(np.size(u)) + np.outer(sin(u), sin(v)) / p[4]
    ez = p[2] * np.ones(np.size(u)) + np.outer(np.ones(np.size(u)), cos(v)) / p[5]
    # measurements
    mx = measured[:, 0]
    my = measured[:, 1]
    mz = measured[:, 2]
    # calibrated values
    cx = calibrated[:, 0]
    cy = calibrated[:, 1]
    cz = calibrated[:, 2]
    # axes size
    left = 0.02
    bottom = 0.05
    width = 0.46
    height = 0.9
    rect_l = [left, bottom, width, height]
    rect_r = [left/2+0.5, bottom, width, height]
    fig = plt.figure(figsize=plt.figaspect(0.5))
    if matplotlib.__version__.startswith('0'):
        ax = Axes3D(fig, rect=rect_l)
    else:
        ax = fig.add_subplot(1, 2, 1, position=rect_l, projection='3d')
    # plot measurements
    ax.scatter(mx, my, mz)
    # plot line from center to ellipsoid center
    ax.plot([0.0, p[0]], [0.0, p[1]], [0.0, p[2]], color='black', marker='+', markersize=10)
    # plot ellipsoid
    ax.plot_wireframe(ex, ey, ez, color='grey', alpha=0.5)
    # Create cubic bounding box to simulate equal aspect ratio
    max_range = np.array([mx.max() - mx.min(), my.max() - my.min(), mz.max() - mz.min()]).max()
    Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (mx.max() + mx.min())
    Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (my.max() + my.min())
    Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (mz.max() + mz.min())
    # add the fake bounding box:
    for xb, yb, zb in zip(Xb, Yb, Zb):
        ax.plot([xb], [yb], [zb], 'w')
    ax.set_title('MAG raw with fitted ellipsoid and center offset')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    if matplotlib.__version__.startswith('0'):
        ax = Axes3D(fig, rect=rect_r)
    else:
        ax = fig.add_subplot(1, 2, 2, position=rect_r, projection='3d')
    ax.plot_wireframe(wx, wy, wz, color='grey', alpha=0.5)
    ax.scatter(cx, cy, cz)
    ax.set_title('MAG calibrated on unit sphere')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    ax.set_xlim3d(-1, 1)
    ax.set_ylim3d(-1, 1)
    ax.set_zlim3d(-1, 1)
    plt.show()
def read_turntable_log(ac_id, tt_id, filename, _min, _max):
    """ Read a turntable log.

    Returns an array whose first column is the most recent turntable
    reading and the next three are the raw gyro values, for every gyro
    line whose preceding turntable reading lies strictly in (_min, _max).

    NOTE(review): as in the original, a turntable reading of exactly 0.0
    is treated as "no reading yet" (``if m and last_tt and ...``) and is
    therefore skipped; preserved for compatibility.

    Fixes over the previous version: closes the file, uses raw regex
    strings, and no longer stops at the first blank line in the file.
    """
    pattern_g = re.compile(r"(\S+) " + str(ac_id) + r" IMU_GYRO_RAW (\S+) (\S+) (\S+)")
    pattern_t = re.compile(r"(\S+) " + str(tt_id) + r" IMU_TURNTABLE (\S+)")
    last_tt = None
    list_tt = []
    with open(filename, 'r') as f:
        for line in f:
            line = line.strip()
            m = re.match(pattern_t, line)
            if m:
                last_tt = float(m.group(2))
            m = re.match(pattern_g, line)
            if m and last_tt and _min < last_tt < _max:
                list_tt.append([last_tt, float(m.group(2)), float(m.group(3)), float(m.group(4))])
    return np.array(list_tt)
| gpl-2.0 |
miloharper/neural-network-animation | matplotlib/image.py | 10 | 49749 | """
The image module supports basic image loading, rescaling and display
operations.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import warnings
import numpy as np
from matplotlib import rcParams
import matplotlib.artist as martist
from matplotlib.artist import allow_rasterization
import matplotlib.colors as mcolors
import matplotlib.cm as cm
import matplotlib.cbook as cbook
# For clarity, names from _image are given explicitly in this module:
import matplotlib._image as _image
import matplotlib._png as _png
# For user convenience, the names from _image are also imported into
# the image namespace:
from matplotlib._image import *
from matplotlib.transforms import BboxBase, Bbox, IdentityTransform
import matplotlib.transforms as mtransforms
class _AxesImageBase(martist.Artist, cm.ScalarMappable):
zorder = 0
# map interpolation strings to module constants
_interpd = {
'none': _image.NEAREST, # fall back to nearest when not supported
'nearest': _image.NEAREST,
'bilinear': _image.BILINEAR,
'bicubic': _image.BICUBIC,
'spline16': _image.SPLINE16,
'spline36': _image.SPLINE36,
'hanning': _image.HANNING,
'hamming': _image.HAMMING,
'hermite': _image.HERMITE,
'kaiser': _image.KAISER,
'quadric': _image.QUADRIC,
'catrom': _image.CATROM,
'gaussian': _image.GAUSSIAN,
'bessel': _image.BESSEL,
'mitchell': _image.MITCHELL,
'sinc': _image.SINC,
'lanczos': _image.LANCZOS,
'blackman': _image.BLACKMAN,
}
# reverse interp dict
_interpdr = dict([(v, k) for k, v in six.iteritems(_interpd)])
interpnames = list(six.iterkeys(_interpd))
    def __str__(self):
        # summarize position/size from the parent axes' bounding box
        return "AxesImage(%g,%g;%gx%g)" % tuple(self.axes.bbox.bounds)
    def __init__(self, ax,
                 cmap=None,
                 norm=None,
                 interpolation=None,
                 origin=None,
                 filternorm=1,
                 filterrad=4.0,
                 resample=False,
                 **kwargs
                 ):
        """
        interpolation and cmap default to their rc settings
        cmap is a colors.Colormap instance
        norm is a colors.Normalize instance to map luminance to 0-1
        extent is data axes (left, right, bottom, top) for making image plots
        registered with data plots.  Default is to label the pixel
        centers with the zero-based row and column indices.
        Additional kwargs are matplotlib.artist properties
        """
        martist.Artist.__init__(self)
        cm.ScalarMappable.__init__(self, norm, cmap)
        # fall back to the rc default when no origin is given
        if origin is None:
            origin = rcParams['image.origin']
        self.origin = origin
        self.set_filternorm(filternorm)
        self.set_filterrad(filterrad)
        self._filterrad = filterrad
        self.set_interpolation(interpolation)
        self.set_resample(resample)
        self.axes = ax
        # cache of the rendered image; None means "needs re-render"
        self._imcache = None
        # this is an experimental attribute, if True, unsampled image
        # will be drawn using the affine transform that are
        # appropriately skewed so that the given position
        # corresponds to the actual position in the coordinate. -JJL
        self._image_skew_coordinate = None
        self.update(kwargs)
def __getstate__(self):
state = super(_AxesImageBase, self).__getstate__()
# We can't pickle the C Image cached object.
state['_imcache'] = None
return state
def get_size(self):
"""Get the numrows, numcols of the input image"""
if self._A is None:
raise RuntimeError('You must first set the image array')
return self._A.shape[:2]
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
ACCEPTS: float
"""
martist.Artist.set_alpha(self, alpha)
self._imcache = None
def changed(self):
"""
Call this whenever the mappable is changed so observers can
update state
"""
self._imcache = None
self._rgbacache = None
cm.ScalarMappable.changed(self)
def make_image(self, magnification=1.0):
raise RuntimeError('The make_image method must be overridden.')
def _get_unsampled_image(self, A, image_extents, viewlim):
"""
convert numpy array A with given extents ([x1, x2, y1, y2] in
data coordinate) into the Image, given the viewlim (should be a
bbox instance). Image will be clipped if the extents is
significantly larger than the viewlim.
"""
xmin, xmax, ymin, ymax = image_extents
dxintv = xmax-xmin
dyintv = ymax-ymin
# the viewport scale factor
if viewlim.width == 0.0 and dxintv == 0.0:
sx = 1.0
else:
sx = dxintv/viewlim.width
if viewlim.height == 0.0 and dyintv == 0.0:
sy = 1.0
else:
sy = dyintv/viewlim.height
numrows, numcols = A.shape[:2]
if sx > 2:
x0 = (viewlim.x0-xmin)/dxintv * numcols
ix0 = max(0, int(x0 - self._filterrad))
x1 = (viewlim.x1-xmin)/dxintv * numcols
ix1 = min(numcols, int(x1 + self._filterrad))
xslice = slice(ix0, ix1)
xmin_old = xmin
xmin = xmin_old + ix0*dxintv/numcols
xmax = xmin_old + ix1*dxintv/numcols
dxintv = xmax - xmin
sx = dxintv/viewlim.width
else:
xslice = slice(0, numcols)
if sy > 2:
y0 = (viewlim.y0-ymin)/dyintv * numrows
iy0 = max(0, int(y0 - self._filterrad))
y1 = (viewlim.y1-ymin)/dyintv * numrows
iy1 = min(numrows, int(y1 + self._filterrad))
if self.origin == 'upper':
yslice = slice(numrows-iy1, numrows-iy0)
else:
yslice = slice(iy0, iy1)
ymin_old = ymin
ymin = ymin_old + iy0*dyintv/numrows
ymax = ymin_old + iy1*dyintv/numrows
dyintv = ymax - ymin
sy = dyintv/viewlim.height
else:
yslice = slice(0, numrows)
if xslice != self._oldxslice or yslice != self._oldyslice:
self._imcache = None
self._oldxslice = xslice
self._oldyslice = yslice
if self._imcache is None:
if self._A.dtype == np.uint8 and self._A.ndim == 3:
im = _image.frombyte(self._A[yslice, xslice, :], 0)
im.is_grayscale = False
else:
if self._rgbacache is None:
x = self.to_rgba(self._A, bytes=False)
# Avoid side effects: to_rgba can return its argument
# unchanged.
if np.may_share_memory(x, self._A):
x = x.copy()
# premultiply the colors
x[..., 0:3] *= x[..., 3:4]
x = (x * 255).astype(np.uint8)
self._rgbacache = x
else:
x = self._rgbacache
im = _image.frombyte(x[yslice, xslice, :], 0)
if self._A.ndim == 2:
im.is_grayscale = self.cmap.is_gray()
else:
im.is_grayscale = False
self._imcache = im
if self.origin == 'upper':
im.flipud_in()
else:
im = self._imcache
return im, xmin, ymin, dxintv, dyintv, sx, sy
@staticmethod
def _get_rotate_and_skew_transform(x1, y1, x2, y2, x3, y3):
"""
Retuen a transform that does
(x1, y1) -> (x1, y1)
(x2, y2) -> (x2, y2)
(x2, y1) -> (x3, y3)
It was intended to derive a skew transform that preserve the
lower-left corner (x1, y1) and top-right corner(x2,y2), but
change the the lower-right-corner(x2, y1) to a new position
(x3, y3).
"""
tr1 = mtransforms.Affine2D()
tr1.translate(-x1, -y1)
x2a, y2a = tr1.transform_point((x2, y2))
x3a, y3a = tr1.transform_point((x3, y3))
inv_mat = 1. / (x2a*y3a-y2a*x3a) * np.mat([[y3a, -y2a], [-x3a, x2a]])
a, b = (inv_mat * np.mat([[x2a], [x2a]])).flat
c, d = (inv_mat * np.mat([[y2a], [0]])).flat
tr2 = mtransforms.Affine2D.from_values(a, c, b, d, 0, 0)
tr = (tr1 + tr2 +
mtransforms.Affine2D().translate(x1, y1)).inverted().get_affine()
return tr
def _draw_unsampled_image(self, renderer, gc):
"""
draw unsampled image. The renderer should support a draw_image method
with scale parameter.
"""
trans = self.get_transform() # axes.transData
# convert the coordinates to the intermediate coordinate (ic).
# The transformation from the ic to the canvas is a pure
# affine transform.
# A straight-forward way is to use the non-affine part of the
# original transform for conversion to the ic.
# firs, convert the image extent to the ic
x_llc, x_trc, y_llc, y_trc = self.get_extent()
xy = trans.transform(np.array([(x_llc, y_llc),
(x_trc, y_trc)]))
_xx1, _yy1 = xy[0]
_xx2, _yy2 = xy[1]
extent_in_ic = _xx1, _xx2, _yy1, _yy2
# define trans_ic_to_canvas : unless _image_skew_coordinate is
# set, it is simply a affine part of the original transform.
if self._image_skew_coordinate:
# skew the image when required.
x_lrc, y_lrc = self._image_skew_coordinate
xy2 = trans.transform(np.array([(x_lrc, y_lrc)]))
_xx3, _yy3 = xy2[0]
tr_rotate_skew = self._get_rotate_and_skew_transform(_xx1, _yy1,
_xx2, _yy2,
_xx3, _yy3)
trans_ic_to_canvas = tr_rotate_skew
else:
trans_ic_to_canvas = IdentityTransform()
# Now, viewLim in the ic. It can be rotated and can be
# skewed. Make it big enough.
x1, y1, x2, y2 = self.axes.bbox.extents
trans_canvas_to_ic = trans_ic_to_canvas.inverted()
xy_ = trans_canvas_to_ic.transform(np.array([(x1, y1),
(x2, y1),
(x2, y2),
(x1, y2)]))
x1_, x2_ = min(xy_[:, 0]), max(xy_[:, 0])
y1_, y2_ = min(xy_[:, 1]), max(xy_[:, 1])
viewLim_in_ic = Bbox.from_extents(x1_, y1_, x2_, y2_)
# get the image, sliced if necessary. This is done in the ic.
im, xmin, ymin, dxintv, dyintv, sx, sy = \
self._get_unsampled_image(self._A, extent_in_ic, viewLim_in_ic)
if im is None:
return # I'm not if this check is required. -JJL
fc = self.axes.patch.get_facecolor()
bg = mcolors.colorConverter.to_rgba(fc, 0)
im.set_bg(*bg)
# image input dimensions
im.reset_matrix()
numrows, numcols = im.get_size()
if numrows <= 0 or numcols <= 0:
return
im.resize(numcols, numrows) # just to create im.bufOut that
# is required by backends. There
# may be better solution -JJL
im._url = self.get_url()
im._gid = self.get_gid()
renderer.draw_image(gc, xmin, ymin, im, dxintv, dyintv,
trans_ic_to_canvas)
def _check_unsampled_image(self, renderer):
"""
return True if the image is better to be drawn unsampled.
The derived class needs to override it.
"""
return False
@allow_rasterization
def draw(self, renderer, *args, **kwargs):
if not self.get_visible():
return
if (self.axes.get_xscale() != 'linear' or
self.axes.get_yscale() != 'linear'):
warnings.warn("Images are not supported on non-linear axes.")
l, b, widthDisplay, heightDisplay = self.axes.bbox.bounds
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_alpha(self.get_alpha())
if self._check_unsampled_image(renderer):
self._draw_unsampled_image(renderer, gc)
else:
if self._image_skew_coordinate is not None:
warnings.warn("Image will not be shown"
" correctly with this backend.")
im = self.make_image(renderer.get_image_magnification())
if im is None:
return
im._url = self.get_url()
im._gid = self.get_gid()
renderer.draw_image(gc, l, b, im)
gc.restore()
def contains(self, mouseevent):
"""
Test whether the mouse event occured within the image.
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
# TODO: make sure this is consistent with patch and patch
# collection on nonlinear transformed coordinates.
# TODO: consider returning image coordinates (shouldn't
# be too difficult given that the image is rectilinear
x, y = mouseevent.xdata, mouseevent.ydata
xmin, xmax, ymin, ymax = self.get_extent()
if xmin > xmax:
xmin, xmax = xmax, xmin
if ymin > ymax:
ymin, ymax = ymax, ymin
#print x, y, xmin, xmax, ymin, ymax
if x is not None and y is not None:
inside = ((x >= xmin) and (x <= xmax) and
(y >= ymin) and (y <= ymax))
else:
inside = False
return inside, {}
def write_png(self, fname, noscale=False):
"""Write the image to png file with fname"""
im = self.make_image()
if im is None:
return
if noscale:
numrows, numcols = im.get_size()
im.reset_matrix()
im.set_interpolation(0)
im.resize(numcols, numrows)
im.flipud_out()
rows, cols, buffer = im.as_rgba_str()
_png.write_png(buffer, cols, rows, fname)
def set_data(self, A):
"""
Set the image array
ACCEPTS: numpy/PIL Image A
"""
# check if data is PIL Image without importing Image
if hasattr(A, 'getpixel'):
self._A = pil_to_array(A)
else:
self._A = cbook.safe_masked_invalid(A)
if (self._A.dtype != np.uint8 and
not np.can_cast(self._A.dtype, np.float)):
raise TypeError("Image data can not convert to float")
if (self._A.ndim not in (2, 3) or
(self._A.ndim == 3 and self._A.shape[-1] not in (3, 4))):
raise TypeError("Invalid dimensions for image data")
self._imcache = None
self._rgbacache = None
self._oldxslice = None
self._oldyslice = None
def set_array(self, A):
"""
Retained for backwards compatibility - use set_data instead
ACCEPTS: numpy array A or PIL Image"""
# This also needs to be here to override the inherited
# cm.ScalarMappable.set_array method so it is not invoked
# by mistake.
self.set_data(A)
def get_interpolation(self):
"""
Return the interpolation method the image uses when resizing.
One of 'nearest', 'bilinear', 'bicubic', 'spline16', 'spline36',
'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom',
'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos', or 'none'.
"""
return self._interpolation
def set_interpolation(self, s):
"""
Set the interpolation method the image uses when resizing.
if None, use a value from rc setting. If 'none', the image is
shown as is without interpolating. 'none' is only supported in
agg, ps and pdf backends and will fall back to 'nearest' mode
for other backends.
ACCEPTS: ['nearest' | 'bilinear' | 'bicubic' | 'spline16' |
'spline36' | 'hanning' | 'hamming' | 'hermite' | 'kaiser' |
'quadric' | 'catrom' | 'gaussian' | 'bessel' | 'mitchell' |
'sinc' | 'lanczos' | 'none' |]
"""
if s is None:
s = rcParams['image.interpolation']
s = s.lower()
if s not in self._interpd:
raise ValueError('Illegal interpolation string')
self._interpolation = s
def set_resample(self, v):
"""
Set whether or not image resampling is used
ACCEPTS: True|False
"""
if v is None:
v = rcParams['image.resample']
self._resample = v
def get_resample(self):
"""Return the image resample boolean"""
return self._resample
def set_filternorm(self, filternorm):
"""
Set whether the resize filter norms the weights -- see
help for imshow
ACCEPTS: 0 or 1
"""
if filternorm:
self._filternorm = 1
else:
self._filternorm = 0
def get_filternorm(self):
"""Return the filternorm setting"""
return self._filternorm
def set_filterrad(self, filterrad):
"""
Set the resize filter radius only applicable to some
interpolation schemes -- see help for imshow
ACCEPTS: positive float
"""
r = float(filterrad)
assert(r > 0)
self._filterrad = r
def get_filterrad(self):
"""return the filterrad setting"""
return self._filterrad
class AxesImage(_AxesImageBase):
    """
    The standard Axes image: data on a regular grid, optionally
    positioned by an *extent* rectangle in data coordinates.
    """
    def __str__(self):
        return "AxesImage(%g,%g;%gx%g)" % tuple(self.axes.bbox.bounds)
    def __init__(self, ax,
                 cmap=None,
                 norm=None,
                 interpolation=None,
                 origin=None,
                 extent=None,
                 filternorm=1,
                 filterrad=4.0,
                 resample=False,
                 **kwargs
                 ):
        """
        interpolation and cmap default to their rc settings

        cmap is a colors.Colormap instance
        norm is a colors.Normalize instance to map luminance to 0-1

        extent is data axes (left, right, bottom, top) for making image plots
        registered with data plots.  Default is to label the pixel
        centers with the zero-based row and column indices.

        Additional kwargs are matplotlib.artist properties
        """
        self._extent = extent
        _AxesImageBase.__init__(self, ax,
                                cmap=cmap,
                                norm=norm,
                                interpolation=interpolation,
                                origin=origin,
                                filternorm=filternorm,
                                filterrad=filterrad,
                                resample=resample,
                                **kwargs
                                )
    def make_image(self, magnification=1.0):
        """
        Rasterize the data array for the current view.

        Transforms the extent corners into display space, gets the
        (possibly view-sliced) unsampled image, then applies the
        translation/scaling that maps it onto the axes bbox.
        Returns None when the source slice is empty.
        """
        if self._A is None:
            raise RuntimeError('You must first set the image'
                               ' array or the image attribute')
        # image is created in the canvas coordinate.
        x1, x2, y1, y2 = self.get_extent()
        trans = self.get_transform()
        xy = trans.transform(np.array([(x1, y1),
                                       (x2, y2),
                                       ]))
        _x1, _y1 = xy[0]
        _x2, _y2 = xy[1]
        transformed_viewLim = mtransforms.TransformedBbox(self.axes.viewLim,
                                                          trans)
        im, xmin, ymin, dxintv, dyintv, sx, sy = \
            self._get_unsampled_image(self._A, [_x1, _x2, _y1, _y2],
                                      transformed_viewLim)
        fc = self.axes.patch.get_facecolor()
        bg = mcolors.colorConverter.to_rgba(fc, 0)
        im.set_bg(*bg)
        # image input dimensions
        im.reset_matrix()
        numrows, numcols = im.get_size()
        if numrows < 1 or numcols < 1:   # out of range
            return None
        im.set_interpolation(self._interpd[self._interpolation])
        im.set_resample(self._resample)
        # the viewport translation
        if dxintv == 0.0:
            tx = 0.0
        else:
            tx = (xmin-transformed_viewLim.x0)/dxintv * numcols
        if dyintv == 0.0:
            ty = 0.0
        else:
            ty = (ymin-transformed_viewLim.y0)/dyintv * numrows
        im.apply_translation(tx, ty)
        l, b, r, t = self.axes.bbox.extents
        # half-open pixel extents at the requested magnification
        widthDisplay = ((round(r*magnification) + 0.5) -
                        (round(l*magnification) - 0.5))
        heightDisplay = ((round(t*magnification) + 0.5) -
                         (round(b*magnification) - 0.5))
        # resize viewport to display
        rx = widthDisplay / numcols
        ry = heightDisplay / numrows
        im.apply_scaling(rx*sx, ry*sy)
        im.resize(int(widthDisplay+0.5), int(heightDisplay+0.5),
                  norm=self._filternorm, radius=self._filterrad)
        return im
    def _check_unsampled_image(self, renderer):
        """
        return True if the image is better to be drawn unsampled.
        """
        if self.get_interpolation() == "none":
            if renderer.option_scale_image():
                return True
            else:
                warnings.warn("The backend (%s) does not support "
                              "interpolation='none'. The image will be "
                              "interpolated with 'nearest` "
                              "mode." % renderer.__class__)
        return False
    def set_extent(self, extent):
        """
        extent is data axes (left, right, bottom, top) for making image plots

        This updates ax.dataLim, and, if autoscaling, sets viewLim
        to tightly fit the image, regardless of dataLim.  Autoscaling
        state is not changed, so following this with ax.autoscale_view
        will redo the autoscaling in accord with dataLim.
        """
        self._extent = extent
        xmin, xmax, ymin, ymax = extent
        corners = (xmin, ymin), (xmax, ymax)
        self.axes.update_datalim(corners)
        if self.axes._autoscaleXon:
            self.axes.set_xlim((xmin, xmax), auto=None)
        if self.axes._autoscaleYon:
            self.axes.set_ylim((ymin, ymax), auto=None)
    def get_extent(self):
        """Get the image extent: left, right, bottom, top"""
        if self._extent is not None:
            return self._extent
        else:
            # default: label pixel centers with zero-based indices,
            # flipping the rows for origin='upper'
            sz = self.get_size()
            numrows, numcols = sz
            if self.origin == 'upper':
                return (-0.5, numcols-0.5, numrows-0.5, -0.5)
            else:
                return (-0.5, numcols-0.5, -0.5, numrows-0.5)
class NonUniformImage(AxesImage):
    """
    An image whose pixel centers lie on an irregular rectangular grid,
    given by 1-D *x* and *y* arrays (see :meth:`set_data`).  Rendered
    through ``_image.pcolor``; colormapping parameters become fixed
    once data is loaded (see :meth:`set_norm` / :meth:`set_cmap`).
    """
    def __init__(self, ax, **kwargs):
        """
        kwargs are identical to those for AxesImage, except
        that 'interpolation' defaults to 'nearest', and 'bilinear'
        is the only alternative.
        """
        interp = kwargs.pop('interpolation', 'nearest')
        AxesImage.__init__(self, ax,
                           **kwargs)
        self.set_interpolation(interp)
    def _check_unsampled_image(self, renderer):
        """
        return False. Do not use unsampled image.
        """
        return False
    def make_image(self, magnification=1.0):
        if self._A is None:
            raise RuntimeError('You must first set the image array')
        A = self._A
        if len(A.shape) == 2:
            if A.dtype != np.uint8:
                # colormap scalar data to RGBA bytes
                A = self.to_rgba(A, bytes=True)
                self.is_grayscale = self.cmap.is_gray()
            else:
                # uint8 luminance: replicate into RGB, opaque alpha
                A = np.repeat(A[:, :, np.newaxis], 4, 2)
                A[:, :, 3] = 255
                self.is_grayscale = True
        else:
            if A.dtype != np.uint8:
                A = (255*A).astype(np.uint8)
            if A.shape[2] == 3:
                # RGB -> RGBA with opaque alpha
                B = np.zeros(tuple(list(A.shape[0:2]) + [4]), np.uint8)
                B[:, :, 0:3] = A
                B[:, :, 3] = 255
                A = B
            self.is_grayscale = False
        x0, y0, v_width, v_height = self.axes.viewLim.bounds
        l, b, r, t = self.axes.bbox.extents
        width = (round(r) + 0.5) - (round(l) - 0.5)
        height = (round(t) + 0.5) - (round(b) - 0.5)
        width *= magnification
        height *= magnification
        im = _image.pcolor(self._Ax, self._Ay, A,
                           height, width,
                           (x0, x0+v_width, y0, y0+v_height),
                           self._interpd[self._interpolation])
        fc = self.axes.patch.get_facecolor()
        bg = mcolors.colorConverter.to_rgba(fc, 0)
        im.set_bg(*bg)
        im.is_grayscale = self.is_grayscale
        return im
    def set_data(self, x, y, A):
        """
        Set the grid for the pixel centers, and the pixel values.

        *x* and *y* are 1-D ndarrays of lengths N and M, respectively,
        specifying pixel centers

        *A* is an (M,N) ndarray or masked array of values to be
        colormapped, or a (M,N,3) RGB array, or a (M,N,4) RGBA
        array.
        """
        x = np.asarray(x, np.float32)
        y = np.asarray(y, np.float32)
        A = cbook.safe_masked_invalid(A)
        if len(x.shape) != 1 or len(y.shape) != 1\
           or A.shape[0:2] != (y.shape[0], x.shape[0]):
            raise TypeError("Axes don't match array shape")
        if len(A.shape) not in [2, 3]:
            raise TypeError("Can only plot 2D or 3D data")
        if len(A.shape) == 3 and A.shape[2] not in [1, 3, 4]:
            raise TypeError("3D arrays must have three (RGB) "
                            "or four (RGBA) color components")
        if len(A.shape) == 3 and A.shape[2] == 1:
            # squeeze a trailing singleton channel to 2D luminance
            A.shape = A.shape[0:2]
        self._A = A
        self._Ax = x
        self._Ay = y
        self._imcache = None
        # I am adding this in accor with _AxesImageBase.set_data --
        # examples/pylab_examples/image_nonuniform.py was breaking on
        # the call to _get_unsampled_image when the oldxslice attr was
        # accessed - JDH 3/3/2010
        self._oldxslice = None
        self._oldyslice = None
    def set_array(self, *args):
        raise NotImplementedError('Method not supported')
    def set_interpolation(self, s):
        # only the two modes implemented by _image.pcolor are allowed
        if s is not None and s not in ('nearest', 'bilinear'):
            raise NotImplementedError('Only nearest neighbor and '
                                      'bilinear interpolations are supported')
        AxesImage.set_interpolation(self, s)
    def get_extent(self):
        if self._A is None:
            raise RuntimeError('Must set data first')
        return self._Ax[0], self._Ax[-1], self._Ay[0], self._Ay[-1]
    def set_filternorm(self, s):
        # not applicable to pcolor rendering; intentionally a no-op
        pass
    def set_filterrad(self, s):
        # not applicable to pcolor rendering; intentionally a no-op
        pass
    def set_norm(self, norm):
        if self._A is not None:
            raise RuntimeError('Cannot change colors after loading data')
        cm.ScalarMappable.set_norm(self, norm)
    def set_cmap(self, cmap):
        if self._A is not None:
            raise RuntimeError('Cannot change colors after loading data')
        cm.ScalarMappable.set_cmap(self, cmap)
class PcolorImage(martist.Artist, cm.ScalarMappable):
    """
    Make a pcolor-style plot with an irregular rectangular grid.

    This uses a variation of the original irregular image code,
    and it is used by pcolorfast for the corresponding grid type.
    """
    def __init__(self, ax,
                 x=None,
                 y=None,
                 A=None,
                 cmap=None,
                 norm=None,
                 **kwargs
                 ):
        """
        cmap defaults to its rc setting

        cmap is a colors.Colormap instance
        norm is a colors.Normalize instance to map luminance to 0-1

        Additional kwargs are matplotlib.artist properties
        """
        martist.Artist.__init__(self)
        cm.ScalarMappable.__init__(self, norm, cmap)
        self.axes = ax
        self._rgbacache = None
        # There is little point in caching the image itself because
        # it needs to be remade if the bbox or viewlim change,
        # so caching does not help with zoom/pan/resize.
        self.update(kwargs)
        self.set_data(x, y, A)
    def make_image(self, magnification=1.0):
        """
        Rasterize the grid for the current view.

        Colormaps the data (cached in ``_rgbacache``), then calls the
        ``_image.pcolor2`` C routine sized to the axes bbox scaled by
        *magnification*.  Raises RuntimeError if no data has been set.
        """
        if self._A is None:
            raise RuntimeError('You must first set the image array')
        fc = self.axes.patch.get_facecolor()
        bg = mcolors.colorConverter.to_rgba(fc, 0)
        bg = (np.array(bg)*255).astype(np.uint8)
        l, b, r, t = self.axes.bbox.extents
        width = (round(r) + 0.5) - (round(l) - 0.5)
        height = (round(t) + 0.5) - (round(b) - 0.5)
        width = width * magnification
        height = height * magnification
        if self._rgbacache is None:
            A = self.to_rgba(self._A, bytes=True)
            self._rgbacache = A
            if self._A.ndim == 2:
                self.is_grayscale = self.cmap.is_gray()
        else:
            A = self._rgbacache
        vl = self.axes.viewLim
        im = _image.pcolor2(self._Ax, self._Ay, A,
                            height,
                            width,
                            (vl.x0, vl.x1, vl.y0, vl.y1),
                            bg)
        im.is_grayscale = self.is_grayscale
        return im
    def changed(self):
        # colormapping changed -> the RGBA cache is stale
        self._rgbacache = None
        cm.ScalarMappable.changed(self)
    @allow_rasterization
    def draw(self, renderer, *args, **kwargs):
        if not self.get_visible():
            return
        im = self.make_image(renderer.get_image_magnification())
        gc = renderer.new_gc()
        gc.set_clip_rectangle(self.axes.bbox.frozen())
        gc.set_clip_path(self.get_clip_path())
        gc.set_alpha(self.get_alpha())
        renderer.draw_image(gc,
                            round(self.axes.bbox.xmin),
                            round(self.axes.bbox.ymin),
                            im)
        gc.restore()
    def set_data(self, x, y, A):
        """
        Set the grid boundaries and the data.

        *x*, *y* : 1-D arrays of cell boundaries, of lengths N+1 and
        M+1 for an (M, N) data array, or None for 0..N / 0..M.

        *A* : (M, N) array to be colormapped, or an (M, N, 3) RGB or
        (M, N, 4) RGBA array.  Raises ValueError on shape mismatch.
        """
        A = cbook.safe_masked_invalid(A)
        if x is None:
            x = np.arange(0, A.shape[1]+1, dtype=np.float64)
        else:
            x = np.asarray(x, np.float64).ravel()
        if y is None:
            y = np.arange(0, A.shape[0]+1, dtype=np.float64)
        else:
            y = np.asarray(y, np.float64).ravel()
        if A.shape[:2] != (y.size-1, x.size-1):
            # Report the mismatch in the exception message instead of
            # printing debug output to stdout.
            raise ValueError(
                "Axes don't match array shape. "
                "Got %s, expected %s." % (A.shape[:2],
                                          (y.size - 1, x.size - 1)))
        if A.ndim not in [2, 3]:
            raise ValueError("A must be 2D or 3D")
        if A.ndim == 3 and A.shape[2] == 1:
            # squeeze a trailing singleton channel to 2D luminance
            A.shape = A.shape[:2]
        self.is_grayscale = False
        if A.ndim == 3:
            if A.shape[2] in [3, 4]:
                # an RGB(A) array whose channels are all equal is
                # effectively grayscale
                if ((A[:, :, 0] == A[:, :, 1]).all() and
                        (A[:, :, 0] == A[:, :, 2]).all()):
                    self.is_grayscale = True
            else:
                raise ValueError("3D arrays must have RGB or RGBA as last dim")
        self._A = A
        self._Ax = x
        self._Ay = y
        self._rgbacache = None
    def set_array(self, *args):
        raise NotImplementedError('Method not supported')
    def set_alpha(self, alpha):
        """
        Set the alpha value used for blending - not supported on
        all backends

        ACCEPTS: float
        """
        martist.Artist.set_alpha(self, alpha)
        self.update_dict['array'] = True
class FigureImage(martist.Artist, cm.ScalarMappable):
    """
    An image placed directly on the figure (not inside an Axes),
    positioned in pixel coordinates by *offsetx*/*offsety*.
    """
    zorder = 0
    def __init__(self, fig,
                 cmap=None,
                 norm=None,
                 offsetx=0,
                 offsety=0,
                 origin=None,
                 **kwargs
                 ):
        """
        cmap is a colors.Colormap instance
        norm is a colors.Normalize instance to map luminance to 0-1

        kwargs are an optional list of Artist keyword args
        """
        martist.Artist.__init__(self)
        cm.ScalarMappable.__init__(self, norm, cmap)
        if origin is None:
            origin = rcParams['image.origin']
        self.origin = origin
        self.figure = fig
        # pixel offsets of the image's lower-left corner in the figure
        self.ox = offsetx
        self.oy = offsety
        self.update(kwargs)
        self.magnification = 1.0
    def contains(self, mouseevent):
        """Test whether the mouse event occurred within the image."""
        if six.callable(self._contains):
            return self._contains(self, mouseevent)
        xmin, xmax, ymin, ymax = self.get_extent()
        # figure images live in display (pixel) space, so compare
        # against the raw event position, not data coordinates
        xdata, ydata = mouseevent.x, mouseevent.y
        if xdata is not None and ydata is not None:
            inside = ((xdata >= xmin) and (xdata <= xmax) and
                      (ydata >= ymin) and (ydata <= ymax))
        else:
            inside = False
        return inside, {}
    def get_size(self):
        """Get the numrows, numcols of the input image"""
        if self._A is None:
            raise RuntimeError('You must first set the image array')
        return self._A.shape[:2]
    def get_extent(self):
        """Get the image extent: left, right, bottom, top"""
        numrows, numcols = self.get_size()
        return (-0.5+self.ox, numcols-0.5+self.ox,
                -0.5+self.oy, numrows-0.5+self.oy)
    def set_data(self, A):
        """Set the image array."""
        cm.ScalarMappable.set_array(self, cbook.safe_masked_invalid(A))
    def set_array(self, A):
        """Deprecated; use set_data for consistency with other image types."""
        self.set_data(A)
    def make_image(self, magnification=1.0):
        """Colormap the data and build the C-level image, resized by
        *magnification* with nearest-neighbor interpolation."""
        if self._A is None:
            raise RuntimeError('You must first set the image array')
        x = self.to_rgba(self._A, bytes=True)
        self.magnification = magnification
        # if magnification is not one, we need to resize
        ismag = magnification != 1
        if ismag:
            isoutput = 0
        else:
            isoutput = 1
        im = _image.frombyte(x, isoutput)
        fc = self.figure.get_facecolor()
        im.set_bg(*mcolors.colorConverter.to_rgba(fc, 0))
        im.is_grayscale = (self.cmap.name == "gray" and
                           len(self._A.shape) == 2)
        if ismag:
            numrows, numcols = self.get_size()
            numrows *= magnification
            numcols *= magnification
            im.set_interpolation(_image.NEAREST)
            im.resize(numcols, numrows)
        if self.origin == 'upper':
            im.flipud_out()
        return im
    @allow_rasterization
    def draw(self, renderer, *args, **kwargs):
        if not self.get_visible():
            return
        # todo: we should be able to do some cacheing here
        im = self.make_image(renderer.get_image_magnification())
        gc = renderer.new_gc()
        gc.set_clip_rectangle(self.figure.bbox)
        gc.set_clip_path(self.get_clip_path())
        gc.set_alpha(self.get_alpha())
        renderer.draw_image(gc, round(self.ox), round(self.oy), im)
        gc.restore()
    def write_png(self, fname):
        """Write the image to png file with fname"""
        im = self.make_image()
        rows, cols, buffer = im.as_rgba_str()
        _png.write_png(buffer, cols, rows, fname)
class BboxImage(_AxesImageBase):
    """The Image class whose size is determined by the given bbox."""
    def __init__(self, bbox,
                 cmap=None,
                 norm=None,
                 interpolation=None,
                 origin=None,
                 filternorm=1,
                 filterrad=4.0,
                 resample=False,
                 interp_at_native=True,
                 **kwargs
                 ):
        """
        cmap is a colors.Colormap instance
        norm is a colors.Normalize instance to map luminance to 0-1

        interp_at_native is a flag that determines whether or not
        interpolation should still be applied when the image is
        displayed at its native resolution.  A common use case for this
        is when displaying an image for annotational purposes; it is
        treated similarly to Photoshop (interpolation is only used when
        displaying the image at non-native resolutions).

        kwargs are an optional list of Artist keyword args
        """
        # note: no axes -- the position comes from *bbox* instead
        _AxesImageBase.__init__(self, ax=None,
                                cmap=cmap,
                                norm=norm,
                                interpolation=interpolation,
                                origin=origin,
                                filternorm=filternorm,
                                filterrad=filterrad,
                                resample=resample,
                                **kwargs
                                )
        # a BboxBase, or a callable(renderer) -> BboxBase
        self.bbox = bbox
        self.interp_at_native = interp_at_native
    def get_window_extent(self, renderer=None):
        if renderer is None:
            renderer = self.get_figure()._cachedRenderer
        if isinstance(self.bbox, BboxBase):
            return self.bbox
        elif six.callable(self.bbox):
            return self.bbox(renderer)
        else:
            raise ValueError("unknown type of bbox")
    def contains(self, mouseevent):
        """Test whether the mouse event occurred within the image."""
        if six.callable(self._contains):
            return self._contains(self, mouseevent)
        if not self.get_visible():  # or self.get_figure()._renderer is None:
            return False, {}
        x, y = mouseevent.x, mouseevent.y
        inside = self.get_window_extent().contains(x, y)
        return inside, {}
    def get_size(self):
        """Get the numrows, numcols of the input image"""
        if self._A is None:
            raise RuntimeError('You must first set the image array')
        return self._A.shape[:2]
    def make_image(self, renderer, magnification=1.0):
        # NOTE(review): signature differs from the base class --
        # this variant needs the renderer to resolve a callable bbox.
        if self._A is None:
            raise RuntimeError('You must first set the image '
                               'array or the image attribute')
        if self._imcache is None:
            if self._A.dtype == np.uint8 and len(self._A.shape) == 3:
                # RGB(A) uint8 data can go to the C extension directly
                im = _image.frombyte(self._A, 0)
                im.is_grayscale = False
            else:
                if self._rgbacache is None:
                    x = self.to_rgba(self._A, bytes=True)
                    self._rgbacache = x
                else:
                    x = self._rgbacache
                im = _image.frombyte(x, 0)
                if len(self._A.shape) == 2:
                    im.is_grayscale = self.cmap.is_gray()
                else:
                    im.is_grayscale = False
            self._imcache = im
            if self.origin == 'upper':
                im.flipud_in()
        else:
            im = self._imcache
        # image input dimensions
        im.reset_matrix()
        im.set_interpolation(self._interpd[self._interpolation])
        im.set_resample(self._resample)
        l, b, r, t = self.get_window_extent(renderer).extents  # bbox.extents
        widthDisplay = abs(round(r) - round(l))
        heightDisplay = abs(round(t) - round(b))
        widthDisplay *= magnification
        heightDisplay *= magnification
        numrows, numcols = self._A.shape[:2]
        # drawn at native resolution: optionally skip interpolation
        if (not self.interp_at_native and
            widthDisplay == numcols and heightDisplay == numrows):
            im.set_interpolation(0)
        # resize viewport to display
        rx = widthDisplay / numcols
        ry = heightDisplay / numrows
        im.apply_scaling(rx, ry)
        im.resize(int(widthDisplay), int(heightDisplay),
                  norm=self._filternorm, radius=self._filterrad)
        return im
    @allow_rasterization
    def draw(self, renderer, *args, **kwargs):
        if not self.get_visible():
            return
        # todo: we should be able to do some cacheing here
        image_mag = renderer.get_image_magnification()
        im = self.make_image(renderer, image_mag)
        x0, y0, x1, y1 = self.get_window_extent(renderer).extents
        gc = renderer.new_gc()
        self._set_gc_clip(gc)
        gc.set_alpha(self.get_alpha())
        # anchor at the lower-left corner regardless of bbox orientation
        l = np.min([x0, x1])
        b = np.min([y0, y1])
        renderer.draw_image(gc, round(l), round(b), im)
        gc.restore()
def imread(fname, format=None):
    """
    Read an image from a file into an array.

    *fname* may be a string path or a Python file-like object.  If
    using a file object, it must be opened in binary mode.

    If *format* is provided, will try to read file of that type,
    otherwise the format is deduced from the filename.  If nothing can
    be deduced, PNG is tried.

    Return value is a :class:`numpy.array`.  For grayscale images, the
    return array is MxN.  For RGB images, the return value is MxNx3.
    For RGBA images the return value is MxNx4.

    matplotlib can only read PNGs natively, but if `PIL
    <http://www.pythonware.com/products/pil/>`_ is installed, it will
    use it to load the image and return an array (if possible) which
    can be used with :func:`~matplotlib.pyplot.imshow`.
    """
    def pilread(path_or_file):
        """Try to load the image with PIL; return None if PIL is absent."""
        try:
            from PIL import Image
        except ImportError:
            return None
        if not cbook.is_string_like(path_or_file):
            return pil_to_array(Image.open(path_or_file))
        # force close the file after reading the image
        with open(path_or_file, "rb") as fh:
            return pil_to_array(Image.open(fh))
    # formats we can decode without PIL
    handlers = {'png': _png.read_png, }
    # work out the extension/format to dispatch on
    if format is not None:
        ext = format
    elif cbook.is_string_like(fname):
        ext = os.path.splitext(fname)[1].lower()[1:]
    elif hasattr(fname, 'name'):
        ext = os.path.splitext(fname.name)[1].lower()[1:]
    else:
        ext = 'png'
    try:
        handler = handlers[ext]
    except KeyError:
        # not a natively-supported format: fall back to PIL
        im = pilread(fname)
        if im is None:
            raise ValueError('Only know how to handle extensions: %s; '
                             'with PIL installed matplotlib can handle '
                             'more images' % list(six.iterkeys(handlers)))
        return im
    # To handle Unicode filenames, we pass a file object to the PNG
    # reader extension, since Python handles them quite well, but it's
    # tricky in C.
    if cbook.is_string_like(fname):
        with open(fname, 'rb') as fd:
            return handler(fd)
    return handler(fname)
def imsave(fname, arr, vmin=None, vmax=None, cmap=None, format=None,
           origin=None, dpi=100):
    """
    Save an array as in image file.

    The output formats available depend on the backend being used.

    Arguments:
      *fname*:
        A string containing a path to a filename, or a Python file-like object.
        If *format* is *None* and *fname* is a string, the output
        format is deduced from the extension of the filename.
      *arr*:
        An MxN (luminance), MxNx3 (RGB) or MxNx4 (RGBA) array.
    Keyword arguments:
      *vmin*/*vmax*: [ None | scalar ]
        *vmin* and *vmax* set the color scaling for the image by fixing the
        values that map to the colormap color limits.  If either *vmin*
        or *vmax* is None, that limit is determined from the *arr*
        min/max value.
      *cmap*:
        cmap is a colors.Colormap instance, e.g., cm.jet.
        If None, default to the rc image.cmap value.
      *format*:
        One of the file extensions supported by the active
        backend.  Most backends support png, pdf, ps, eps and svg.
      *origin*
        [ 'upper' | 'lower' ] Indicates where the [0,0] index of
        the array is in the upper left or lower left corner of
        the axes.  Defaults to the rc image.origin value.
      *dpi*
        The DPI to store in the metadata of the file.  This does not affect the
        resolution of the output image.
    """
    from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
    from matplotlib.figure import Figure
    # A frameless figure sized so the array maps one element per pixel
    # at the requested dpi.
    nrows, ncols = arr.shape[0], arr.shape[1]
    fig = Figure(figsize=(ncols / float(dpi), nrows / float(dpi)),
                 dpi=dpi, frameon=False)
    # Instantiating the canvas attaches it to the figure, which is what
    # makes fig.savefig work below.
    canvas = FigureCanvas(fig)
    fig.figimage(arr, cmap=cmap, vmin=vmin, vmax=vmax, origin=origin)
    fig.savefig(fname, dpi=dpi, format=format, transparent=True)
def pil_to_array(pilImage):
    """
    Load a PIL image and return it as a numpy array.  For grayscale
    images, the return array is MxN.  For RGB images, the return value
    is MxNx3.  For RGBA images the return value is MxNx4.

    Raises RuntimeError if the image mode is not recognized and cannot
    be converted to RGBA.
    """
    def toarray(im, dtype=np.uint8):
        """Return a writable 1D array of *dtype* from the raw image bytes."""
        # Pillow wants us to use "tobytes"; old PIL only has "tostring"
        if hasattr(im, 'tobytes'):
            x_str = im.tobytes('raw', im.mode)
        else:
            x_str = im.tostring('raw', im.mode)
        # np.fromstring is deprecated for binary data; frombuffer is the
        # supported replacement.  Copy so the result is writable, since
        # callers may fill in channels (frombuffer returns a read-only
        # view).
        return np.frombuffer(x_str, dtype=dtype).copy()
    if pilImage.mode in ('RGBA', 'RGBX'):
        im = pilImage  # already RGBA-shaped; handled by the tail below
    elif pilImage.mode == 'L':
        im = pilImage  # no need to luminance images
        # return MxN luminance array
        x = toarray(im)
        x.shape = im.size[1], im.size[0]
        return x
    elif pilImage.mode == 'RGB':
        # return MxNx3 RGB array
        im = pilImage  # no need to RGB images
        x = toarray(im)
        x.shape = im.size[1], im.size[0], 3
        return x
    elif pilImage.mode.startswith('I;16'):
        # return MxN luminance array of uint16
        im = pilImage
        if im.mode.endswith('B'):
            x = toarray(im, '>u2')
        else:
            x = toarray(im, '<u2')
        x.shape = im.size[1], im.size[0]
        return x.astype('=u2')
    else:  # try to convert to an rgba image
        try:
            im = pilImage.convert('RGBA')
        except ValueError:
            raise RuntimeError('Unknown image mode')
    # BUGFIX: this tail must sit outside the else-branch so that images
    # already in 'RGBA'/'RGBX' mode reach it too -- previously they fell
    # through the if/elif chain and the function silently returned None.
    # return MxNx4 RGBA array
    x = toarray(im)
    x.shape = im.size[1], im.size[0], 4
    return x
def thumbnail(infile, thumbfile, scale=0.1, interpolation='bilinear',
              preview=False):
    """
    make a thumbnail of image in *infile* with output filename
    *thumbfile*.

    *infile* the image file -- must be PNG or PIL readable if you
       have `PIL <http://www.pythonware.com/products/pil/>`_ installed

    *thumbfile*
      the thumbnail filename

    *scale*
      the scale factor for the thumbnail

    *interpolation*
      the interpolation scheme used in the resampling

    *preview*
      if True, the default backend (presumably a user interface
      backend) will be used which will cause a figure to be raised
      if :func:`~matplotlib.pyplot.show` is called.  If it is False,
      a pure image backend will be used depending on the extension,
      'png'->FigureCanvasAgg, 'pdf'->FigureCanvasPdf,
      'svg'->FigureCanvasSVG

    See examples/misc/image_thumbnail.py.

    .. htmlonly::

        :ref:`misc-image_thumbnail`

    Return value is the figure instance containing the thumbnail

    :raises ValueError: if *thumbfile* has an extension other than
        png/pdf/svg and *preview* is False.
    """
    im = imread(infile)
    rows, cols, depth = im.shape

    # the dpi value cancels out in the figsize computation below, but the
    # mpl API requires one
    dpi = 100

    height = float(rows) / dpi * scale
    width = float(cols) / dpi * scale

    # output format is selected from the thumbnail file's extension
    extension = os.path.splitext(thumbfile)[1].lower()

    if preview:
        # let the UI backend do everything
        import matplotlib.pyplot as plt
        fig = plt.figure(figsize=(width, height), dpi=dpi)
    else:
        if extension == '.png':
            from matplotlib.backends.backend_agg \
                import FigureCanvasAgg as FigureCanvas
        elif extension == '.pdf':
            from matplotlib.backends.backend_pdf \
                import FigureCanvasPdf as FigureCanvas
        elif extension == '.svg':
            from matplotlib.backends.backend_svg \
                import FigureCanvasSVG as FigureCanvas
        else:
            raise ValueError("Can only handle "
                             "extensions 'png', 'svg' or 'pdf'")

        from matplotlib.figure import Figure
        fig = Figure(figsize=(width, height), dpi=dpi)
        FigureCanvas(fig)  # registers itself on fig as fig.canvas

    ax = fig.add_axes([0, 0, 1, 1], aspect='auto',
                      frameon=False, xticks=[], yticks=[])

    ax.imshow(im, aspect='auto', resample=True, interpolation=interpolation)
    fig.savefig(thumbfile, dpi=dpi)
    return fig
| mit |
m-rossi/matplotlib2tikz | test/test_legend_best_location.py | 1 | 2236 | import matplotlib.pyplot as plt
import numpy as np
from helpers import assert_equality
def plot():
    """Build a 3x3 grid of subplots, each constructed so matplotlib's
    automatic legend placement (``loc=0``, i.e. 'best') resolves to a
    different location; used to exercise legend-position translation."""
    fig, ax = plt.subplots(3, 3, sharex="col", sharey="row")
    # flatten the 3x3 axes array into a single list, row-major
    axes = [ax[i][j] for i in range(len(ax)) for j in range(len(ax[i]))]
    t = np.arange(0.0, 2.0 * np.pi, 0.4)

    # Legend best location is "upper right"
    (l,) = axes[0].plot(t, np.cos(t) * np.exp(-t), linewidth=0.5)
    axes[0].legend((l,), ("UR",), loc=0)

    # Legend best location is "upper left"
    (l,) = axes[1].plot(t, np.cos(t) * np.exp(0.15 * t), linewidth=0.5)
    axes[1].legend((l,), ("UL",), loc=0)

    # Legend best location is "lower left"
    (l,) = axes[2].plot(t, np.cos(5.0 * t) + 1, linewidth=0.5)
    axes[2].legend((l,), ("LL",), loc=0)

    # Legend best location is "lower right"
    (l,) = axes[3].plot(
        t, 2 * np.cos(5.0 * t) * np.exp(-0.5 * t) + 0.2 * t, linewidth=0.5
    )
    axes[3].legend((l,), ("LR",), loc=0)

    # Legend best location is "center left"
    (l,) = axes[4].plot(t[30:], 2 * np.cos(10 * t[30:]), linewidth=0.5)
    axes[4].plot(t, -1.5 * np.ones_like(t), t, 1.5 * np.ones_like(t))
    axes[4].legend((l,), ("CL",), loc=0)

    # Legend best location is "center right"
    (l,) = axes[5].plot(t[:30], 2 * np.cos(10 * t[:30]), linewidth=0.5)
    axes[5].plot(t, -1.5 * np.ones_like(t), t, 1.5 * np.ones_like(t))
    axes[5].legend((l,), ("CR",), loc=0)

    # Legend best location is "lower center"
    (l,) = axes[6].plot(t, -3 * np.cos(t) * np.exp(-0.1 * t), linewidth=0.5)
    axes[6].legend((l,), ("LC",), loc=0)

    # Legend best location is "upper center"
    (l,) = axes[7].plot(t, 3 * np.cos(t) * np.exp(-0.1 * t), linewidth=0.5)
    axes[7].legend((l,), ("UC",), loc=0)

    # Legend best location is "center"
    # NOTE(review): unlike the other panels, `loc` here is the full list of
    # Line2D objects returned by plot() (two lines), so legend() receives a
    # one-element tuple containing a list -- presumably intentional for the
    # reference output; confirm.
    loc = axes[8].plot(
        t[:10],
        2 * np.cos(10 * t[:10]),
        t[-10:],
        2 * np.cos(10 * t[-10:]),
        linewidth=0.5,
    )
    axes[8].plot(t, -2 * np.ones_like(t), t, 2 * np.ones_like(t))
    axes[8].legend((loc,), ("C",), loc=0)

    return fig
def test():
    """Check that the rendered TikZ matches the stored reference file."""
    assert_equality(plot, "test_legend_best_location_reference.tex")
if __name__ == "__main__":
    # When run as a script, render the figure for manual side-by-side
    # inspection instead of comparing against the reference file.
    import helpers

    helpers.compare_mpl_tex(plot)
    # helpers.print_tree(plot())
| mit |
pyrocko/pyrocko | src/gui/snuffling.py | 1 | 56563 | # http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------
'''
Snuffling infrastructure
This module provides the base class :py:class:`Snuffling` for user-defined
snufflings and some utilities for their handling.
'''
from __future__ import absolute_import
import os
import sys
import logging
import traceback
import tempfile
from .qt_compat import qc, qw, getSaveFileName, use_pyqt5
from pyrocko import pile, config
from pyrocko.util import quote
from .util import (ValControl, LinValControl, FigureFrame, WebKitFrame,
VTKFrame, PixmapFrame, Marker, EventMarker, PhaseMarker,
load_markers, save_markers)
if sys.version_info >= (3, 0):
from importlib import reload
Marker, load_markers, save_markers # noqa
logger = logging.getLogger('pyrocko.gui.snuffling')
def fnpatch(x):
    # Normalize file-dialog return values: under PyQt5 the helpers already
    # return the (value, filter) pair, so pass the result through; under
    # PyQt4 wrap the bare value in a pair to give callers a uniform shape.
    return x if use_pyqt5 else (x, None)
class MyFrame(qw.QFrame):
    # QFrame that reports its own show/hide transitions, so the owning
    # snuffling can react to panel visibility changes.
    widgetVisibilityChanged = qc.pyqtSignal(bool)

    def showEvent(self, ev):
        # Qt show hook: signal that the widget became visible.
        self.widgetVisibilityChanged.emit(True)

    def hideEvent(self, ev):
        # Qt hide hook: signal that the widget was hidden.
        self.widgetVisibilityChanged.emit(False)
class Param(object):
    '''
    Definition of an adjustable floating point parameter for the
    snuffling. The snuffling may display controls for user input for
    such parameters.

    :param name: labels the parameter on the snuffling's control panel
    :param ident: identifier of the parameter
    :param default: default value
    :param minimum: minimum value for the parameter
    :param maximum: maximum value for the parameter
    :param low_is_none: if ``True``: parameter is set to None at lowest value
        of parameter range (optional)
    :param high_is_none: if ``True``: parameter is set to None at highest value
        of parameter range (optional)
    :param low_is_zero: if ``True``: parameter is set to value 0 at lowest
        value of parameter range (optional)
    '''

    def __init__(
            self, name, ident, default, minimum, maximum,
            low_is_none=None,
            high_is_none=None,
            low_is_zero=False):

        # A default sitting exactly on a range limit that maps to "none"
        # starts out as None.
        hits_none_limit = (
            (low_is_none and default == minimum)
            or (high_is_none and default == maximum))

        self.name = name
        self.ident = ident
        self.default = None if hits_none_limit else default
        self.minimum = minimum
        self.maximum = maximum
        self.low_is_none = low_is_none
        self.high_is_none = high_is_none
        self.low_is_zero = low_is_zero
        self._control = None  # GUI control widget, attached later
class Switch(object):
    '''
    Definition of a boolean switch for the snuffling. The snuffling
    may display a checkbox for such a switch.

    :param name: labels the switch on the snuffling's control panel
    :param ident: identifier of the parameter
    :param default: default value
    '''

    def __init__(self, name, ident, default):
        # plain value container; the GUI layer builds the checkbox from it
        self.name, self.ident, self.default = name, ident, default
class Choice(object):
    '''
    Definition of a string choice for the snuffling. The snuffling
    may display a menu for such a choice.

    :param name: labels the menu on the snuffling's control panel
    :param ident: identifier of the parameter
    :param default: default value
    :param choices: tuple of other options
    '''

    def __init__(self, name, ident, default, choices):
        # plain value container; the GUI layer builds the menu from it
        self.name, self.ident = name, ident
        self.default, self.choices = default, choices
class Snuffling(object):
'''Base class for user snufflings.
Snufflings are plugins for snuffler (and other applications using the
:py:class:`pyrocko.pile_viewer.PileOverview` class defined in
``pile_viewer.py``). They can be added, removed and reloaded at runtime and
should provide a simple way of extending the functionality of snuffler.
A snuffling has access to all data available in a pile viewer, can process
this data and can create and add new traces and markers to the viewer.
'''
def __init__(self):
    # All attributes are private bookkeeping for the snuffling
    # infrastructure; GUI handles are filled in later by init_gui().
    self._path = None
    self._name = 'Untitled Snuffling'
    self._viewer = None
    self._tickets = []      # handles of traces added to the viewer
    self._markers = []      # markers added to the viewer
    self._delete_panel = None
    self._delete_menuitem = None

    self._panel_parent = None
    self._menu_parent = None

    self._panel = None
    self._menuitem = None
    self._helpmenuitem = None
    self._parameters = []   # Param/Switch/Choice definitions
    self._param_controls = {}

    self._triggers = []     # (name, method) pairs for extra panel buttons

    self._live_update = True
    self._previous_output_filename = None
    self._previous_input_filename = None
    self._previous_input_directory = None

    self._tempdir = None
    self._iplot = 0         # counter used to label figure/browser tabs

    self._have_pre_process_hook = False
    self._have_post_process_hook = False
    self._pre_process_hook_enabled = False
    self._post_process_hook_enabled = False

    self._no_viewer_pile = None     # lazily created pile for headless use
    self._cli_params = {}
    self._filename = None
    self._force_panel = False
def setup(self):
    '''
    Setup the snuffling.

    This method should be implemented in subclass and contain e.g. calls to
    :py:meth:`set_name` and :py:meth:`add_parameter`.
    '''

    # intentionally a no-op in the base class
    pass
def module_dir(self):
    '''
    Returns the path of the directory where snufflings are stored.

    The default path is ``$HOME/.snufflings``.
    '''

    # _path is not set in __init__'s defaults beyond None; presumably the
    # snuffling loader assigns it before use -- confirm against callers.
    return self._path
def init_gui(self, viewer, panel_parent, menu_parent, reloaded=False):
    '''
    Set parent viewer and hooks to add panel and menu entry.

    This method is called from the
    :py:class:`pyrocko.pile_viewer.PileOverview` object. Calls
    :py:meth:`setup_gui`.

    :param viewer: parent viewer widget
    :param panel_parent: object providing ``add_panel``/``remove_panel``
    :param menu_parent: object providing the snuffling menu-item hooks
    :param reloaded: forwarded to :py:meth:`setup_gui`
    '''

    self._viewer = viewer
    self._panel_parent = panel_parent
    self._menu_parent = menu_parent

    self.setup_gui(reloaded=reloaded)
def setup_gui(self, reloaded=False):
    '''
    Create and add gui elements to the viewer.

    This method is initially called from :py:meth:`init_gui`. It is also
    called, e.g. when new parameters have been added or if the name of the
    snuffling has been changed.

    :param reloaded: passed to the panel parent when re-adding the panel
    '''

    if self._panel_parent is not None:
        # make_panel() returns None when the snuffling needs no panel
        self._panel = self.make_panel(self._panel_parent)
        if self._panel:
            self._panel_parent.add_panel(
                self.get_name(), self._panel, reloaded)

    if self._menu_parent is not None:
        self._menuitem = self.make_menuitem(self._menu_parent)
        self._helpmenuitem = self.make_helpmenuitem(self._menu_parent)
        if self._menuitem:
            self._menu_parent.add_snuffling_menuitem(self._menuitem)

        if self._helpmenuitem:
            self._menu_parent.add_snuffling_help_menuitem(
                self._helpmenuitem)
def set_force_panel(self, bool=True):
    '''
    Force to create a panel.

    :param bool: if ``True`` will create a panel with Help, Clear and Run
        button.
    '''

    # parameter name shadows the builtin ``bool``; kept for API stability
    self._force_panel = bool
def make_cli_parser1(self):
    '''
    Build the optparse parser for standalone (command line) usage.

    Adds the generic ``--format``/``--pattern`` options, one option per
    float :py:class:`Param`, and any options the subclass contributes via
    :py:meth:`configure_cli_parser`.
    '''
    import optparse

    class MyOptionParser(optparse.OptionParser):
        # log parse errors instead of optparse's default stderr dump
        def error(self, msg):
            logger.error(msg)
            self.exit(1)

    parser = MyOptionParser()

    parser.add_option(
        '--format',
        dest='format',
        default='from_extension',
        choices=(
            'mseed', 'sac', 'kan', 'segy', 'seisan', 'seisan.l',
            'seisan.b', 'gse1', 'gcf', 'yaff', 'datacube',
            'from_extension', 'detect'),
        help='assume files are of given FORMAT [default: \'%default\']')

    parser.add_option(
        '--pattern',
        dest='regex',
        metavar='REGEX',
        help='only include files whose paths match REGEX')

    self.add_params_to_cli_parser(parser)
    self.configure_cli_parser(parser)
    return parser
def configure_cli_parser(self, parser):
    # Hook for subclasses to add custom command line options; the base
    # class adds none.
    pass
def cli_usage(self):
    # Hook for subclasses to supply a custom command line usage string;
    # ``None`` keeps the parser's default.
    return None
def add_params_to_cli_parser(self, parser):
    # Expose each float Param as a ``--<ident>`` option; Switch and Choice
    # parameters are not mapped to command line options here.
    for param in self._parameters:
        if isinstance(param, Param):
            parser.add_option(
                '--' + param.ident,
                dest=param.ident,
                default=param.default,
                type='float',
                help=param.name)
def setup_cli(self):
    '''
    Run the snuffling's setup and parse command line arguments.

    Parsed Param option values are assigned directly as instance
    attributes; remaining positional arguments are stored as data sources.

    :returns: ``(options, args, parser)``
    '''
    self.setup()
    parser = self.make_cli_parser1()
    (options, args) = parser.parse_args()

    for param in self._parameters:
        if isinstance(param, Param):
            setattr(self, param.ident, getattr(options, param.ident))

    self._cli_params['regex'] = options.regex
    self._cli_params['format'] = options.format
    self._cli_params['sources'] = args

    return options, args, parser
def delete_gui(self):
    '''
    Remove the gui elements of the snuffling.

    This removes the panel and menu entry of the widget from the viewer and
    also removes all traces and markers added with the
    :py:meth:`add_traces` and :py:meth:`add_markers` methods.
    '''

    self.cleanup()

    if self._panel is not None:
        self._panel_parent.remove_panel(self._panel)
        self._panel = None

    if self._menuitem is not None:
        self._menu_parent.remove_snuffling_menuitem(self._menuitem)
        self._menuitem = None

    if self._helpmenuitem is not None:
        self._menu_parent.remove_snuffling_help_menuitem(
            self._helpmenuitem)
        # reset the handle like _panel/_menuitem above, so a repeated
        # delete_gui() call does not try to remove the item twice
        self._helpmenuitem = None
def set_name(self, name):
    '''
    Set the snuffling's name.

    The snuffling's name is shown as a menu entry and in the panel header.
    '''

    self._name = name
    # rebuild panel/menu so the new name is displayed
    self.reset_gui()
def get_name(self):
    '''
    Get the snuffling's name.

    :returns: the name set via :py:meth:`set_name` (string)
    '''

    return self._name
def set_have_pre_process_hook(self, bool):
    # parameter shadows the builtin ``bool``; kept for API stability.
    # Enabling the hook turns live updates off until re-enabled by the user.
    self._have_pre_process_hook = bool
    self._live_update = False
    self._pre_process_hook_enabled = False
    self.reset_gui()
def set_have_post_process_hook(self, bool):
    # parameter shadows the builtin ``bool``; kept for API stability.
    # Enabling the hook turns live updates off until re-enabled by the user.
    self._have_post_process_hook = bool
    self._live_update = False
    self._post_process_hook_enabled = False
    self.reset_gui()
def set_have_pile_changed_hook(self, bool):
    # NOTE(review): the argument is ignored and ``_pile_`` looks like a
    # truncated attribute name (no other code here reads it) -- possibly
    # dead or broken code; confirm against callers before relying on it.
    self._pile_ = False
def enable_pile_changed_notifications(self):
    '''
    Get informed when pile changed.

    When activated, the :py:meth:`pile_changed` method is called on every
    update in the viewer's pile.
    '''

    viewer = self.get_viewer()
    viewer.pile_has_changed_signal.connect(
        self.pile_changed)
def disable_pile_changed_notifications(self):
    '''
    Stop getting informed about changes in viewer's pile.
    '''

    viewer = self.get_viewer()
    viewer.pile_has_changed_signal.disconnect(
        self.pile_changed)
def pile_changed(self):
    '''
    Called when the connected viewer's pile has changed.

    Must be activated with a call to
    :py:meth:`enable_pile_changed_notifications`.
    '''

    # default implementation: do nothing; override in subclass
    pass
def reset_gui(self, reloaded=False):
    '''
    Delete and recreate the snuffling's panel.

    Current parameter settings are preserved across the rebuild.
    '''

    if self._panel or self._menuitem:
        sett = self.get_settings()
        self.delete_gui()
        self.setup_gui(reloaded=reloaded)
        self.set_settings(sett)
def show_message(self, kind, message):
    '''
    Display a message box.

    :param kind: string defining kind of message
    :param message: the message to be displayed
    '''

    try:
        box = qw.QMessageBox(self.get_viewer())
        box.setText('%s: %s' % (kind.capitalize(), message))
        box.exec_()
    except NoViewerSet:
        # headless use: the message was already logged by the caller,
        # so silently skip the dialog
        pass
def error(self, message):
    '''
    Show an error message box.

    :param message: specifying the error
    '''

    # log first so the message is preserved even without a viewer
    logger.error('%s: %s' % (self._name, message))
    self.show_message('error', message)
def warn(self, message):
    '''
    Display a warning message.

    :param message: specifying the warning
    '''

    # log first so the message is preserved even without a viewer
    logger.warning('%s: %s' % (self._name, message))
    self.show_message('warning', message)
def fail(self, message):
    '''
    Show an error message box and raise :py:exc:`SnufflingCallFailed`
    exception.

    :param message: specifying the error
    :raises SnufflingCallFailed: always
    '''

    self.error(message)
    raise SnufflingCallFailed(message)
def pylab(self, name=None, get='axes'):
    '''
    Create a :py:class:`FigureFrame` and return either the frame,
    a :py:class:`matplotlib.figure.Figure` instance or a
    :py:class:`matplotlib.axes.Axes` instance.

    :param name: labels the figure frame's tab
    :param get: 'axes'|'figure'|'figure_frame' (optional)
    :raises ValueError: if *get* is not one of the supported values
    '''

    if name is None:
        self._iplot += 1
        name = 'Plot %i (%s)' % (self._iplot, self.get_name())

    fframe = FigureFrame()
    self._panel_parent.add_tab(name, fframe)
    if get == 'axes':
        return fframe.gca()
    elif get == 'figure':
        return fframe.gcf()
    elif get == 'figure_frame':
        return fframe
    else:
        # previously an unknown value silently returned None; make the
        # misuse explicit instead
        raise ValueError('invalid value for argument "get": %s' % get)
def figure(self, name=None):
    '''
    Returns a :py:class:`matplotlib.figure.Figure` instance
    which can be displayed within snuffler by calling
    :py:meth:`canvas.draw`.

    :param name: labels the tab of the figure
    '''

    # convenience wrapper around pylab()
    return self.pylab(name=name, get='figure')
def axes(self, name=None):
    '''
    Returns a :py:class:`matplotlib.axes.Axes` instance.

    :param name: labels the tab of axes
    '''

    # convenience wrapper around pylab()
    return self.pylab(name=name, get='axes')
def figure_frame(self, name=None):
    '''
    Create a :py:class:`pyrocko.gui.util.FigureFrame`.

    :param name: labels the tab figure frame
    '''

    # convenience wrapper around pylab()
    return self.pylab(name=name, get='figure_frame')
def pixmap_frame(self, filename=None, name=None):
    '''
    Create a :py:class:`pyrocko.gui.util.PixmapFrame`.

    :param name: labels the tab; defaults to "Pixmap"
    :param filename: name of file to be displayed
    '''

    f = PixmapFrame(filename)

    # embed in a scroll area so large pixmaps remain navigable
    scroll_area = qw.QScrollArea()
    scroll_area.setWidget(f)
    scroll_area.setWidgetResizable(True)

    self._panel_parent.add_tab(name or "Pixmap", scroll_area)
    return f
def web_frame(self, url=None, name=None):
    '''
    Creates a :py:class:`WebKitFrame` which can be used as a browser
    within snuffler.

    :param url: url to open
    :param name: labels the tab; auto-numbered if omitted
    '''

    if name is None:
        self._iplot += 1
        name = 'Web browser %i (%s)' % (self._iplot, self.get_name())

    f = WebKitFrame(url)
    self._panel_parent.add_tab(name, f)
    return f
def vtk_frame(self, name=None, actors=None):
    '''
    Create a :py:class:`pyrocko.gui.util.VTKFrame` to render interactive 3D
    graphics.

    :param actors: list of VTKActors
    :param name: labels the tab; auto-numbered if omitted

    Initialize the interactive rendering by calling the frames'
    :py:meth`initialize` method after having added all actors to the frames
    renderer.

    Requires installation of vtk including python wrapper.
    '''
    if name is None:
        self._iplot += 1
        name = 'VTK %i (%s)' % (self._iplot, self.get_name())

    try:
        f = VTKFrame(actors=actors)
    except ImportError as e:
        # fail() raises SnufflingCallFailed, so execution stops here
        # when vtk is not installed
        self.fail(e)

    self._panel_parent.add_tab(name, f)
    return f
def tempdir(self):
    '''
    Create a temporary directory and return its absolute path.

    The directory and all its contents are removed when the Snuffling
    instance is deleted.
    '''

    # created lazily and cached; cleanup presumably happens in the
    # snuffling teardown path (not visible here)
    if self._tempdir is None:
        self._tempdir = tempfile.mkdtemp('', 'snuffling-tmp-')

    return self._tempdir
def set_live_update(self, live_update):
    '''
    Enable/disable live updating.

    When live updates are enabled, the :py:meth:`call` method is called
    whenever the user changes a parameter. If it is disabled, the user has
    to initiate such a call manually by triggering the snuffling's menu
    item or pressing the call button.
    '''

    self._live_update = live_update
    # pre/post process hooks follow the live-update state
    if self._have_pre_process_hook:
        self._pre_process_hook_enabled = live_update
    if self._have_post_process_hook:
        self._post_process_hook_enabled = live_update

    try:
        self.get_viewer().clean_update()
    except NoViewerSet:
        # headless use: nothing to refresh
        pass
def add_parameter(self, param):
    '''
    Add an adjustable parameter to the snuffling.

    :param param: object of type :py:class:`Param`, :py:class:`Switch`, or
        :py:class:`Choice`.

    For each parameter added, controls are added to the snuffling's panel,
    so that the parameter can be adjusted from the gui.
    '''

    self._parameters.append(param)
    self._set_parameter_value(param.ident, param.default)

    # rebuild the panel so the new control appears
    if self._panel is not None:
        self.delete_gui()
        self.setup_gui()
def add_trigger(self, name, method):
    '''
    Add a button to the snuffling's panel.

    :param name: string that labels the button
    :param method: method associated with the button
    '''

    self._triggers.append((name, method))

    # rebuild the panel so the new button appears
    if self._panel is not None:
        self.delete_gui()
        self.setup_gui()
def get_parameters(self):
    '''
    Get the snuffling's adjustable parameter definitions.

    Returns a list of objects of type :py:class:`Param`
    (and :py:class:`Switch`/:py:class:`Choice`).
    '''

    return self._parameters
def get_parameter(self, ident):
    '''
    Get one of the snuffling's adjustable parameter definitions.

    :param ident: identifier of the parameter

    Returns an object of type :py:class:`Param` or ``None``.
    '''
    matches = (p for p in self._parameters if p.ident == ident)
    return next(matches, None)
def set_parameter(self, ident, value):
    '''
    Set one of the snuffling's adjustable parameters.

    :param ident: identifier of the parameter
    :param value: new value of the parameter

    Adjusts the control of a parameter without calling :py:meth:`call`.
    '''

    self._set_parameter_value(ident, value)

    # keep the GUI control in sync, if one exists
    control = self._param_controls.get(ident, None)
    if control:
        control.set_value(value)
def set_parameter_range(self, ident, vmin, vmax):
    '''
    Set the range of one of the snuffling's adjustable parameters.

    :param ident: identifier of the parameter
    :param vmin,vmax: new minimum and maximum value for the parameter

    Adjusts the control of a parameter without calling :py:meth:`call`.
    '''

    control = self._param_controls.get(ident, None)
    if control:
        control.set_range(vmin, vmax)
def set_parameter_choices(self, ident, choices):
    '''
    Update the choices of a Choice parameter.

    :param ident: identifier of the parameter
    :param choices: list of strings
    '''

    control = self._param_controls.get(ident, None)
    if control:
        # the control reports which choice remains selected after the
        # update; mirror that into the parameter value
        selected_choice = control.set_choices(choices)
        self._set_parameter_value(ident, selected_choice)
def _set_parameter_value(self, ident, value):
    # parameter values live as plain instance attributes named by ident
    setattr(self, ident, value)
def get_parameter_value(self, ident):
    '''
    Get the current value of a parameter.

    :param ident: identifier of the parameter
    '''

    # counterpart of _set_parameter_value()
    return getattr(self, ident)
def get_settings(self):
    '''
    Returns a dictionary with identifiers of all parameters as keys and
    their values as the dictionaries values.
    '''
    return dict(
        (param.ident, self.get_parameter_value(param.ident))
        for param in self.get_parameters())
def set_settings(self, settings):
    '''
    Restore parameter values from a dict as produced by
    :py:meth:`get_settings`; unknown keys are silently ignored.
    '''
    params = self.get_parameters()
    dparams = dict([(param.ident, param) for param in params])
    for k, v in settings.items():
        if k in dparams:
            self._set_parameter_value(k, v)
            # also push the value into the GUI control, if present
            if k in self._param_controls:
                control = self._param_controls[k]
                control.set_value(v)
def get_viewer(self):
    '''
    Get the parent viewer.

    Returns a reference to an object of type :py:class:`PileOverview`,
    which is the main viewer widget.

    If no gui has been initialized for the snuffling, a
    :py:exc:`NoViewerSet` exception is raised.
    '''

    if self._viewer is None:
        raise NoViewerSet()
    return self._viewer
def get_pile(self):
    '''
    Get the pile.

    If a gui has been initialized, a reference to the viewer's internal
    pile is returned. If not, the :py:meth:`make_pile` method (which may be
    overloaded in subclass) is called to create a pile. This can be
    utilized to make hybrid snufflings, which may work also in a standalone
    mode.
    '''

    try:
        p = self.get_viewer().get_pile()
    except NoViewerSet:
        # headless mode: build (and cache) our own pile
        if self._no_viewer_pile is None:
            self._no_viewer_pile = self.make_pile()

        p = self._no_viewer_pile

    return p
def get_active_event_and_stations(
        self, trange=(-3600., 3600.), missing='warn'):
    '''
    Get event and stations with available data for active event.

    :param trange: (begin, end), time range around event origin time to
        query for available data
    :param missing: string, what to do in case of missing station
        information: ``'warn'``, ``'raise'`` or ``'ignore'``.

    :returns: ``(event, stations)`` where stations is a de-duplicated list
    '''

    p = self.get_pile()
    v = self.get_viewer()

    event = v.get_active_event()
    if event is None:
        self.fail(
            'No active event set. Select an event and press "e" to make '
            'it the "active event"')

    stations = {}
    # scan all traces in the time window around the event; headers only
    # (load_data=False), gaps are irrelevant here (degap=False)
    for traces in p.chopper(
            event.time+trange[0],
            event.time+trange[1],
            load_data=False,
            degap=False):

        for tr in traces:
            try:
                for skey in v.station_keys(tr):
                    if skey in stations:
                        continue

                    station = v.get_station(skey)
                    stations[skey] = station

            except KeyError:
                s = 'No station information for station key "%s".' \
                    % '.'.join(skey)

                if missing == 'warn':
                    logger.warning(s)
                elif missing == 'raise':
                    raise MissingStationInformation(s)
                elif missing == 'ignore':
                    pass
                else:
                    assert False, 'invalid argument to "missing"'

                # remember the key so the message is not repeated
                stations[skey] = None

    return event, list(set(
        st for st in stations.values() if st is not None))
def get_stations(self):
    '''
    Get all stations known to the viewer.

    :returns: list of station objects
    '''

    v = self.get_viewer()
    stations = list(v.stations.values())
    return stations
def get_markers(self):
    '''
    Get all markers from the viewer.
    '''

    return self.get_viewer().get_markers()
def get_event_markers(self):
    '''
    Get all event markers from the viewer.
    '''

    # filter the full marker list down to EventMarker instances
    return [m for m in self.get_viewer().get_markers()
            if isinstance(m, EventMarker)]
def get_selected_markers(self):
    '''
    Get all selected markers from the viewer.
    '''

    return self.get_viewer().selected_markers()
def get_selected_event_markers(self):
    '''
    Get all selected event markers from the viewer.
    '''

    # filter the current selection down to EventMarker instances
    return [m for m in self.get_viewer().selected_markers()
            if isinstance(m, EventMarker)]
def get_active_event_and_phase_markers(self):
    '''
    Get the marker of the active event and any associated phase markers.

    :returns: ``(event_marker, phase_markers)``
    :raises SnufflingCallFailed: (via :py:meth:`fail`) if no active event
        is set
    '''

    viewer = self.get_viewer()
    markers = viewer.get_markers()
    event_marker = viewer.get_active_event_marker()
    if event_marker is None:
        self.fail(
            'No active event set. '
            'Select an event and press "e" to make it the "active event"')

    event = event_marker.get_event()

    # Previously a `selection` list was built here with `is` comparison and
    # then discarded; only the `==`-based comprehension below was actually
    # returned, so the dead loop has been removed.
    phase_markers = [
        m for m in markers
        if isinstance(m, PhaseMarker) and m.get_event() == event]

    return (event_marker, phase_markers)
def get_viewer_trace_selector(self, mode='inview'):
    '''
    Get currently active trace selector from viewer.

    :param mode: set to ``'inview'`` (default) to only include selections
        currently shown in the viewer, ``'visible'`` to include all traces
        not currenly hidden by hide or quick-select commands, or ``'all'``
        to disable any restrictions.
    :raises Exception: for any other *mode* value
    '''

    viewer = self.get_viewer()

    def rtrue(tr):
        # accept-everything fallback when the viewer has no selector set
        return True

    if mode == 'inview':
        return viewer.trace_selector or rtrue
    elif mode == 'visible':
        return viewer.trace_filter or rtrue
    elif mode == 'all':
        return rtrue
    else:
        raise Exception('invalid mode argument')
def chopper_selected_traces(self, fallback=False, marker_selector=None,
                            mode='inview', main_bandpass=False,
                            *args, **kwargs):
    '''
    Iterate over selected traces.

    Shortcut to get all trace data contained in the selected markers in the
    running snuffler. For each selected marker,
    :py:meth:`pyrocko.pile.Pile.chopper` is called with the arguments
    *tmin*, *tmax*, and *trace_selector* set to values according to the
    marker. Additional arguments to the chopper are handed over from
    *\\*args* and *\\*\\*kwargs*.

    :param fallback: if ``True``, if no selection has been marked, use the
        content currently visible in the viewer.
    :param marker_selector: if not ``None`` a callback to filter markers.
    :param mode: set to ``'inview'`` (default) to only include selections
        currently shown in the viewer (excluding traces accessible
        through vertical scrolling), ``'visible'`` to include all
        traces not currenly hidden by hide or quick-select commands
        (including traces accessible through vertical scrolling), or
        ``'all'`` to disable any restrictions.
    :param main_bandpass: if ``True``, apply main control high- and lowpass
        filters to traces.  (The default used to be the *string*
        ``'False'``, which is truthy and therefore always enabled the
        filters; fixed to the boolean ``False`` to match the documented
        behavior.)
    '''
    try:
        viewer = self.get_viewer()
        markers = [
            m for m in viewer.selected_markers()
            if not isinstance(m, EventMarker)]

        if marker_selector is not None:
            markers = [
                marker for marker in markers if marker_selector(marker)]

        pile = self.get_pile()

        def rtrue(tr):
            return True

        trace_selector_arg = kwargs.pop('trace_selector', rtrue)
        trace_selector_viewer = self.get_viewer_trace_selector(mode)

        if main_bandpass:
            def apply_filters(traces):
                # 4th-order filters, corners taken from the main controls
                for tr in traces:
                    if viewer.highpass is not None:
                        tr.highpass(4, viewer.highpass)
                    if viewer.lowpass is not None:
                        tr.lowpass(4, viewer.lowpass)
                return traces
        else:
            def apply_filters(traces):
                return traces

        if markers:
            for marker in markers:
                if not marker.nslc_ids:
                    trace_selector_marker = rtrue
                else:
                    def trace_selector_marker(tr):
                        return marker.match_nslc(tr.nslc_id)

                def trace_selector(tr):
                    # all three selectors must accept the trace
                    return trace_selector_arg(tr) \
                        and trace_selector_viewer(tr) \
                        and trace_selector_marker(tr)

                for traces in pile.chopper(
                        tmin=marker.tmin,
                        tmax=marker.tmax,
                        trace_selector=trace_selector,
                        *args,
                        **kwargs):

                    yield apply_filters(traces)

        elif fallback:
            def trace_selector(tr):
                return trace_selector_arg(tr) \
                    and trace_selector_viewer(tr)

            tmin, tmax = viewer.get_time_range()
            for traces in pile.chopper(
                    tmin=tmin,
                    tmax=tmax,
                    trace_selector=trace_selector,
                    *args,
                    **kwargs):

                yield apply_filters(traces)

        else:
            raise NoTracesSelected()

    except NoViewerSet:
        # headless mode: no marker selection exists; chop the whole pile
        pile = self.get_pile()
        for traces in pile.chopper(*args, **kwargs):
            yield traces
def get_selected_time_range(self, fallback=False):
    '''
    Get the time range spanning all selected markers.

    :param fallback: if ``True`` and no marker is selected return begin and
        end of visible time range
    :raises NoTracesSelected: if nothing is selected and *fallback* is off
    '''

    viewer = self.get_viewer()
    markers = viewer.selected_markers()
    mins = [marker.tmin for marker in markers]
    maxs = [marker.tmax for marker in markers]

    if mins and maxs:
        tmin = min(mins)
        tmax = max(maxs)
    elif fallback:
        tmin, tmax = viewer.get_time_range()
    else:
        raise NoTracesSelected()

    return tmin, tmax
def panel_visibility_changed(self, bool):
    '''
    Called when the snuffling's panel becomes visible or is hidden.

    Can be overloaded in subclass, e.g. to perform additional setup actions
    when the panel is activated the first time.
    '''

    # parameter shadows the builtin ``bool``; kept for API stability
    pass
def make_pile(self):
    '''
    Create a pile.

    To be overloaded in subclass. The default implementation just calls
    :py:func:`pyrocko.pile.make_pile` to create a pile from command line
    arguments.
    '''

    cachedirname = config.config().cache_dir
    # prefer sources parsed by setup_cli(); fall back to raw argv
    sources = self._cli_params.get('sources', sys.argv[1:])

    return pile.make_pile(
        sources,
        cachedirname=cachedirname,
        regex=self._cli_params['regex'],
        fileformat=self._cli_params['format'])
def make_panel(self, parent):
    '''
    Create a widget for the snuffling's control panel.

    Normally called from the :py:meth:`setup_gui` method. Returns ``None``
    if no panel is needed (e.g. if the snuffling has no adjustable
    parameters).
    '''

    params = self.get_parameters()
    self._param_controls = {}
    if params or self._force_panel:
        sarea = MyScrollArea(parent.get_panel_parent_widget())
        sarea.setFrameStyle(qw.QFrame.NoFrame)
        sarea.setSizePolicy(qw.QSizePolicy(
            qw.QSizePolicy.Expanding, qw.QSizePolicy.Expanding))
        frame = MyFrame(sarea)
        frame.widgetVisibilityChanged.connect(
            self.panel_visibility_changed)

        frame.setSizePolicy(qw.QSizePolicy(
            qw.QSizePolicy.Expanding, qw.QSizePolicy.Minimum))
        frame.setFrameStyle(qw.QFrame.NoFrame)
        sarea.setWidget(frame)
        sarea.setWidgetResizable(True)
        layout = qw.QGridLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(0)
        frame.setLayout(layout)

        parlayout = qw.QGridLayout()

        irow = 0
        ipar = 0
        have_switches = False
        have_params = False
        # first pass: sliders (Param) and menus (Choice); switches are
        # collected into their own grid in a second pass below
        for iparam, param in enumerate(params):
            if isinstance(param, Param):
                if param.minimum <= 0.0:
                    # zero/negative ranges cannot use the log-scale control
                    param_control = LinValControl(
                        high_is_none=param.high_is_none,
                        low_is_none=param.low_is_none)
                else:
                    param_control = ValControl(
                        high_is_none=param.high_is_none,
                        low_is_none=param.low_is_none,
                        low_is_zero=param.low_is_zero)

                param_control.setup(
                    param.name,
                    param.minimum,
                    param.maximum,
                    param.default,
                    iparam)

                param_control.valchange.connect(
                    self.modified_snuffling_panel)

                self._param_controls[param.ident] = param_control
                for iw, w in enumerate(param_control.widgets()):
                    parlayout.addWidget(w, ipar, iw)

                ipar += 1
                have_params = True

            elif isinstance(param, Choice):
                param_widget = ChoiceControl(
                    param.ident, param.default, param.choices, param.name)
                param_widget.choosen.connect(
                    self.choose_on_snuffling_panel)
                self._param_controls[param.ident] = param_widget
                parlayout.addWidget(param_widget, ipar, 0, 1, 3)
                ipar += 1
                have_params = True

            elif isinstance(param, Switch):
                have_switches = True

        if have_params:
            parframe = qw.QFrame(sarea)
            parframe.setSizePolicy(qw.QSizePolicy(
                qw.QSizePolicy.Expanding, qw.QSizePolicy.Minimum))
            parframe.setLayout(parlayout)
            layout.addWidget(parframe, irow, 0)
            irow += 1

        if have_switches:
            swlayout = qw.QGridLayout()
            isw = 0
            for iparam, param in enumerate(params):
                if isinstance(param, Switch):
                    param_widget = SwitchControl(
                        param.ident, param.default, param.name)
                    param_widget.sw_toggled.connect(
                        self.switch_on_snuffling_panel)
                    self._param_controls[param.ident] = param_widget
                    # 10 checkboxes per row; integer division is required
                    # here -- `isw/10` is a float under Python 3 and Qt's
                    # addWidget() expects int row/column indices
                    swlayout.addWidget(param_widget, isw // 10, isw % 10)
                    isw += 1

            swframe = qw.QFrame(sarea)
            swframe.setSizePolicy(qw.QSizePolicy(
                qw.QSizePolicy.Expanding, qw.QSizePolicy.Minimum))
            swframe.setLayout(swlayout)
            layout.addWidget(swframe, irow, 0)
            irow += 1

        butframe = qw.QFrame(sarea)
        butframe.setSizePolicy(qw.QSizePolicy(
            qw.QSizePolicy.Expanding, qw.QSizePolicy.Minimum))
        butlayout = qw.QHBoxLayout()
        butframe.setLayout(butlayout)

        live_update_checkbox = qw.QCheckBox('Auto-Run')
        if self._live_update:
            live_update_checkbox.setCheckState(qc.Qt.Checked)

        butlayout.addWidget(live_update_checkbox)
        live_update_checkbox.toggled.connect(
            self.live_update_toggled)

        help_button = qw.QPushButton('Help')
        butlayout.addWidget(help_button)
        help_button.clicked.connect(
            self.help_button_triggered)

        clear_button = qw.QPushButton('Clear')
        butlayout.addWidget(clear_button)
        clear_button.clicked.connect(
            self.clear_button_triggered)

        call_button = qw.QPushButton('Run')
        butlayout.addWidget(call_button)
        call_button.clicked.connect(
            self.call_button_triggered)

        for name, method in self._triggers:
            but = qw.QPushButton(name)

            def call_and_update(method):
                # bind `method` at definition time so every button calls
                # its own trigger (avoids the late-binding closure pitfall)
                def f():
                    try:
                        method()
                    except SnufflingError as e:
                        if not isinstance(e, SnufflingCallFailed):
                            # those have logged within error()
                            logger.error('%s: %s' % (self._name, e))
                        logger.error(
                            '%s: Snuffling action failed' % self._name)

                    self.get_viewer().update()

                return f

            but.clicked.connect(
                call_and_update(method))

            butlayout.addWidget(but)

        layout.addWidget(butframe, irow, 0)
        irow += 1

        # push everything to the top of the scroll area
        spacer = qw.QSpacerItem(
            0, 0, qw.QSizePolicy.Expanding, qw.QSizePolicy.Expanding)
        layout.addItem(spacer, irow, 0)

        return sarea

    else:
        return None
def make_helpmenuitem(self, parent):
'''
Create the help menu item for the snuffling.
'''
item = qw.QAction(self.get_name(), None)
item.triggered.connect(
self.help_button_triggered)
return item
def make_menuitem(self, parent):
'''
Create the menu item for the snuffling.
This method may be overloaded in subclass and return ``None``, if no
menu entry is wanted.
'''
item = qw.QAction(self.get_name(), None)
item.setCheckable(
self._have_pre_process_hook or self._have_post_process_hook)
item.triggered.connect(
self.menuitem_triggered)
return item
def output_filename(
self,
caption='Save File',
dir='',
filter='',
selected_filter=None):
'''
Query user for an output filename.
This is currently a wrapper to :py:func:`QFileDialog.getSaveFileName`.
A :py:exc:`UserCancelled` exception is raised if the user cancels the
dialog.
'''
if not dir and self._previous_output_filename:
dir = self._previous_output_filename
fn = getSaveFileName(
self.get_viewer(), caption, dir, filter, selected_filter)
if not fn:
raise UserCancelled()
self._previous_output_filename = fn
return str(fn)
def input_directory(self, caption='Open Directory', dir=''):
'''
Query user for an input directory.
This is a wrapper to :py:func:`QFileDialog.getExistingDirectory`.
A :py:exc:`UserCancelled` exception is raised if the user cancels the
dialog.
'''
if not dir and self._previous_input_directory:
dir = self._previous_input_directory
dn = qw.QFileDialog.getExistingDirectory(
None, caption, dir, qw.QFileDialog.ShowDirsOnly)
if not dn:
raise UserCancelled()
self._previous_input_directory = dn
return str(dn)
    def input_filename(self, caption='Open File', dir='', filter='',
                       selected_filter=None):
        '''
        Query user for an input filename.

        This is currently a wrapper to :py:func:`QFileDialog.getOpenFileName`.
        A :py:exc:`UserCancelled` exception is raised if the user cancels the
        dialog.
        '''
        # Default to the directory of the last chosen input file.
        if not dir and self._previous_input_filename:
            dir = self._previous_input_filename

        # NOTE(review): ``selected_filter`` is accepted but currently not
        # forwarded to the dialog (see the commented-out argument below) —
        # confirm whether this is intentional.
        fn, _ = fnpatch(qw.QFileDialog.getOpenFileName(
            self.get_viewer(),
            caption,
            dir,
            filter))  # selected_filter)

        if not fn:
            raise UserCancelled()

        self._previous_input_filename = fn
        return str(fn)
    def input_dialog(self, caption='', request='', directory=False):
        '''
        Query user for a text input.

        This is currently a wrapper to :py:func:`QInputDialog.getText`.
        A :py:exc:`UserCancelled` exception is raised if the user cancels the
        dialog.
        '''
        # NOTE(review): ``request`` and ``directory`` are accepted but not
        # used here; ``caption`` is shown as the input label while the
        # window title is hard-coded to 'Input'. Confirm against callers.
        inp, ok = qw.QInputDialog.getText(self.get_viewer(), 'Input', caption)

        if not ok:
            raise UserCancelled()

        return inp
def modified_snuffling_panel(self, value, iparam):
'''
Called when the user has played with an adjustable parameter.
The default implementation sets the parameter, calls the snuffling's
:py:meth:`call` method and finally triggers an update on the viewer
widget.
'''
param = self.get_parameters()[iparam]
self._set_parameter_value(param.ident, value)
if self._live_update:
self.check_call()
self.get_viewer().update()
def switch_on_snuffling_panel(self, ident, state):
'''
Called when the user has toggled a switchable parameter.
'''
self._set_parameter_value(ident, state)
if self._live_update:
self.check_call()
self.get_viewer().update()
def choose_on_snuffling_panel(self, ident, state):
'''
Called when the user has made a choice about a choosable parameter.
'''
self._set_parameter_value(ident, state)
if self._live_update:
self.check_call()
self.get_viewer().update()
    def menuitem_triggered(self, arg):
        '''
        Called when the user has triggered the snuffling's menu.

        The default implementation calls the snuffling's :py:meth:`call`
        method and triggers an update on the viewer widget.
        '''
        self.check_call()

        # For snufflings with processing hooks the menu item is checkable;
        # ``arg`` carries the checked state and enables/disables the hooks.
        if self._have_pre_process_hook:
            self._pre_process_hook_enabled = arg

        if self._have_post_process_hook:
            self._post_process_hook_enabled = arg

        # A clean update is needed when hooks may change the displayed data.
        if self._have_pre_process_hook or self._have_post_process_hook:
            self.get_viewer().clean_update()
        else:
            self.get_viewer().update()
    def call_button_triggered(self):
        '''
        Called when the user has clicked the snuffling's call button.

        The default implementation calls the snuffling's :py:meth:`call`
        method and triggers an update on the viewer widget.
        '''
        self.check_call()
        self.get_viewer().update()
    def clear_button_triggered(self):
        '''
        Called when the user has clicked the snuffling's clear button.

        This calls the :py:meth:`cleanup` method and triggers an update on
        the viewer widget.
        '''
        self.cleanup()
        self.get_viewer().update()
def help_button_triggered(self):
'''
Creates a :py:class:`QLabel` which contains the documentation as
given in the snufflings' __doc__ string.
'''
if self.__doc__:
if self.__doc__.strip().startswith('<html>'):
doc = qw.QLabel(self.__doc__)
else:
try:
import markdown
doc = qw.QLabel(markdown.markdown(self.__doc__))
except ImportError:
logger.error(
'Install Python module "markdown" for pretty help '
'formatting.')
doc = qw.QLabel(self.__doc__)
else:
doc = qw.QLabel('This snuffling does not provide any online help.')
labels = [doc]
if self._filename:
from html import escape
code = open(self._filename, 'r').read()
doc_src = qw.QLabel(
'''<html><body>
<hr />
<center><em>May the source be with you, young Skywalker!</em><br /><br />
<a href="file://%s"><code>%s</code></a></center>
<br />
<p style="margin-left: 2em; margin-right: 2em; background-color:#eed;">
<pre style="white-space: pre-wrap"><code>%s
</code></pre></p></body></html>'''
% (
quote(self._filename),
escape(self._filename),
escape(code)))
labels.append(doc_src)
for h in labels:
h.setAlignment(qc.Qt.AlignTop | qc.Qt.AlignLeft)
h.setWordWrap(True)
self._viewer.show_doc('Help: %s' % self._name, labels, target='panel')
    def live_update_toggled(self, on):
        '''
        Called when the checkbox for live-updates has been toggled.

        :param on: new checkbox state (bool)
        '''
        self.set_live_update(on)
    def add_traces(self, traces):
        '''
        Add traces to the viewer.

        :param traces: list of objects of type :py:class:`pyrocko.trace.Trace`

        The traces are put into a :py:class:`pyrocko.pile.MemTracesFile` and
        added to the viewer's internal pile for display. Note, that unlike
        with the traces from the files given on the command line, these
        traces are kept in memory and so may quickly occupy a lot of ram if
        a lot of traces are added.

        This method should be preferred over modifying the viewer's internal
        pile directly, because this way, the snuffling has a chance to
        automatically remove its private traces again (see
        :py:meth:`cleanup` method).

        :returns: the data ticket handed out by the viewer
        '''
        ticket = self.get_viewer().add_traces(traces)
        # Remember the ticket so cleanup() can release the data again.
        self._tickets.append(ticket)
        return ticket
def add_trace(self, tr):
'''
Add a trace to the viewer.
See :py:meth:`add_traces`.
'''
self.add_traces([tr])
    def add_markers(self, markers):
        '''
        Add some markers to the display.

        Takes a list of objects of type :py:class:`pyrocko.gui.util.Marker`
        and adds these to the viewer.
        '''
        self.get_viewer().add_markers(markers)
        # Track the markers so cleanup() can remove them again.
        self._markers.extend(markers)
    def add_marker(self, marker):
        '''
        Add a single marker to the display.

        See :py:meth:`add_markers`.
        '''
        self.add_markers([marker])
def cleanup(self):
'''
Remove all traces and markers which have been added so far by the
snuffling.
'''
try:
viewer = self.get_viewer()
viewer.release_data(self._tickets)
viewer.remove_markers(self._markers)
except NoViewerSet:
pass
self._tickets = []
self._markers = []
def check_call(self):
try:
self.call()
return 0
except SnufflingError as e:
if not isinstance(e, SnufflingCallFailed):
# those have logged within error()
logger.error('%s: %s' % (self._name, e))
logger.error('%s: Snuffling action failed' % self._name)
return 1
except Exception:
logger.exception(
'%s: Snuffling action raised an exception' % self._name)
    def call(self):
        '''
        Main work routine of the snuffling.

        This method is called when the snuffling's menu item has been
        triggered or when the user has played with the panel controls. To be
        overloaded in subclass. The default implementation does nothing
        useful.
        '''
        pass
    def pre_process_hook(self, traces):
        '''
        Hook applied to traces before processing; the default
        implementation passes them through unchanged. May be overloaded
        in subclass.
        '''
        return traces
    def post_process_hook(self, traces):
        '''
        Hook applied to traces after processing; the default
        implementation passes them through unchanged. May be overloaded
        in subclass.
        '''
        return traces
    def get_tpad(self):
        '''
        Return current amount of extra padding needed by live processing
        hooks.
        '''
        return 0.0
def pre_destroy(self):
'''
Called when the snuffling instance is about to be deleted.
Can be overloaded to do user-defined cleanup actions. The
default implementation calls :py:meth:`cleanup` and deletes
the snuffling`s tempory directory, if needed.
'''
self.cleanup()
if self._tempdir is not None:
import shutil
shutil.rmtree(self._tempdir)
class SnufflingError(Exception):
    '''Base class for errors raised by snufflings.'''
    pass


class NoViewerSet(SnufflingError):
    '''
    Raised when an operation requires a viewer but none has been set.
    '''

    def __str__(self):
        return (
            'No GUI available. '
            'Maybe this Snuffling cannot be run in command line mode?')


class MissingStationInformation(SnufflingError):
    '''
    Raised when station information is missing.
    '''


class NoTracesSelected(SnufflingError):
    '''
    Raised when no traces are selected in the viewer and falling back to
    the current view is not possible.
    '''

    def __str__(self):
        return 'No traces have been selected / are available.'


class UserCancelled(SnufflingError):
    '''
    Raised when the user cancels a snuffling dialog.
    '''

    def __str__(self):
        return 'The user has cancelled a dialog.'


class SnufflingCallFailed(SnufflingError):
    '''
    Raised when :py:meth:`Snuffling.fail` is called from
    :py:meth:`Snuffling.call`.
    '''


class InvalidSnufflingFilename(Exception):
    '''Raised when a snuffling file name clashes with a loaded module.'''
    pass
class SnufflingModule(object):
    '''
    Utility class to load/reload snufflings from a file.

    The snufflings are created by user modules which have the special
    function :py:func:`__snufflings__` which return the snuffling instances
    to be exported. The snuffling module is attached to a handler class,
    which makes use of the snufflings (e.g.
    :py:class:`pyrocko.pile_viewer.PileOverview` from ``pile_viewer.py``).
    The handler class must implement the methods ``add_snuffling()`` and
    ``remove_snuffling()`` which are used as callbacks. The callbacks are
    utilized from the methods :py:meth:`load_if_needed` and
    :py:meth:`remove_snufflings` which may be called from the handler
    class, when needed.
    '''

    # Class-level mtime cache (shared by all instances).
    mtimes = {}

    def __init__(self, path, name, handler):
        self._path = path          # directory containing the module file
        self._name = name          # module name (filename without '.py')
        self._mtime = None         # file mtime at last (re)load
        self._module = None        # the imported python module object
        self._snufflings = []      # snuffling instances exported by module
        self._handler = handler    # object receiving add/remove callbacks

    def load_if_needed(self):
        '''
        Import the module on first call; reload it when the file changed.

        Raises :py:exc:`BrokenSnufflingModule` when the file is missing or
        importing/reloading it fails.
        '''
        filename = os.path.join(self._path, self._name+'.py')

        try:
            # stat() field 8 is st_mtime.
            mtime = os.stat(filename)[8]
        except OSError as e:
            if e.errno == 2:
                # errno 2: no such file or directory
                logger.error(e)

            raise BrokenSnufflingModule(filename)

        if self._module is None:
            # First load: import with the module's directory temporarily
            # prepended to sys.path.
            sys.path[0:0] = [self._path]
            try:
                logger.debug('Loading snuffling module %s' % filename)
                if self._name in sys.modules:
                    raise InvalidSnufflingFilename(self._name)

                self._module = __import__(self._name)
                # Unregister immediately; we keep our own reference so the
                # name cannot clash with other modules.
                del sys.modules[self._name]

                for snuffling in self._module.__snufflings__():
                    snuffling._filename = filename
                    self.add_snuffling(snuffling)

            except Exception:
                logger.error(traceback.format_exc())
                raise BrokenSnufflingModule(filename)

            finally:
                sys.path[0:1] = []

        elif self._mtime != mtime:
            # File changed on disk: tear down the old snufflings, reload
            # the module and try to restore the per-snuffling settings.
            logger.warning('Reloading snuffling module %s' % filename)
            settings = self.remove_snufflings()
            sys.path[0:0] = [self._path]
            try:
                # Temporarily re-register so reload() can find the module.
                sys.modules[self._name] = self._module
                reload(self._module)
                del sys.modules[self._name]

                for snuffling in self._module.__snufflings__():
                    snuffling._filename = filename
                    self.add_snuffling(snuffling, reloaded=True)

                # Settings can only be restored when the number of
                # exported snufflings is unchanged.
                if len(self._snufflings) == len(settings):
                    for sett, snuf in zip(settings, self._snufflings):
                        snuf.set_settings(sett)

            except Exception:
                logger.error(traceback.format_exc())
                raise BrokenSnufflingModule(filename)

            finally:
                sys.path[0:1] = []

        self._mtime = mtime

    def add_snuffling(self, snuffling, reloaded=False):
        '''
        Register one snuffling instance with this module and its handler.
        '''
        snuffling._path = self._path
        snuffling.setup()
        self._snufflings.append(snuffling)
        self._handler.add_snuffling(snuffling, reloaded=reloaded)

    def remove_snufflings(self):
        '''
        Unregister all snufflings; return their settings for later restore.
        '''
        settings = []
        for snuffling in self._snufflings:
            settings.append(snuffling.get_settings())
            self._handler.remove_snuffling(snuffling)

        self._snufflings = []
        return settings
class BrokenSnufflingModule(Exception):
    '''Raised when a snuffling module cannot be loaded or reloaded.'''
    pass
class MyScrollArea(qw.QScrollArea):
    '''Scroll area whose size hint follows its inner widget's size hint.'''

    def sizeHint(self):
        inner = self.widget().sizeHint()
        hint = qc.QSize()
        hint.setWidth(inner.width())
        hint.setHeight(inner.height())
        return hint
class SwitchControl(qw.QCheckBox):
    # Re-emitted toggle signal carrying (parameter ident, new state).
    sw_toggled = qc.pyqtSignal(object, bool)

    def __init__(self, ident, default, *args):
        '''
        Checkbox bound to a switchable snuffling parameter.

        :param ident: parameter identifier forwarded with the signal
        :param default: initial checked state
        '''
        qw.QCheckBox.__init__(self, *args)
        self.ident = ident
        self.setChecked(default)
        self.toggled.connect(self._sw_toggled)

    def _sw_toggled(self, state):
        # Forward Qt's toggled signal, tagged with our parameter ident.
        self.sw_toggled.emit(self.ident, state)

    def set_value(self, state):
        # Programmatic update; block signals to avoid feedback loops.
        self.blockSignals(True)
        self.setChecked(state)
        self.blockSignals(False)
class ChoiceControl(qw.QFrame):
    # Emitted when the user picks an entry: (ident, chosen value).
    choosen = qc.pyqtSignal(object, object)

    def __init__(self, ident, default, choices, name, *args):
        '''
        Labelled combo box bound to a choosable snuffling parameter.

        :param ident: parameter identifier forwarded with the signal
        :param default: initially selected choice
        :param choices: list of selectable string values
        :param name: label text shown next to the box
        '''
        qw.QFrame.__init__(self, *args)
        self.label = qw.QLabel(name, self)
        self.label.setMinimumWidth(120)
        self.cbox = qw.QComboBox(self)
        self.layout = qw.QHBoxLayout(self)
        self.layout.addWidget(self.label)
        self.layout.addWidget(self.cbox)
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.layout.setSpacing(0)
        self.ident = ident
        self.choices = choices
        for choice in choices:
            self.cbox.addItem(choice)

        self.set_value(default)
        self.cbox.activated.connect(self.emit_choosen)

    def set_choices(self, choices):
        '''
        Replace the available choices, keeping the selection if possible.

        :returns: the choice that ends up selected
        '''
        icur = self.cbox.currentIndex()
        if icur != -1:
            # Fix: look up the current selection in the *old* choice list;
            # ``choices`` is the new list, so indexing it with the old
            # index was wrong and could raise IndexError.
            selected_choice = self.choices[icur]
        else:
            selected_choice = None

        self.choices = choices
        self.cbox.clear()
        for choice in choices:
            # Fix: add the plain string, matching the constructor above.
            # qc.QString does not exist in PyQt5 (API 2).
            self.cbox.addItem(choice)

        if selected_choice is not None and selected_choice in choices:
            self.set_value(selected_choice)
            return selected_choice
        else:
            self.set_value(choices[0])
            return choices[0]

    def emit_choosen(self, i):
        self.choosen.emit(
            self.ident,
            self.choices[i])

    def set_value(self, v):
        # Block signals so programmatic changes don't re-trigger callbacks.
        self.cbox.blockSignals(True)
        for i, choice in enumerate(self.choices):
            if choice == v:
                self.cbox.setCurrentIndex(i)

        self.cbox.blockSignals(False)
| gpl-3.0 |
mhue/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 134 | 7452 | """
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows to reduce drastically the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousands dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples

# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))

# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)

# One curve per eps: minimal n_components vs n_samples on log-log axes.
plt.figure()
for eps, color in zip(eps_range, colors):
    min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
    plt.loglog(n_samples_range, min_n_components, color=color)

plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")

# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)

# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))

# One curve per n_samples: minimal n_components vs eps on semilog-y axes.
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
    min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
    plt.semilogy(eps_range, min_n_components, color=color)

plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse

if '--twenty-newsgroups' in sys.argv:
    # Need an internet connection hence not enabled by default
    data = fetch_20newsgroups_vectorized().data[:500]
else:
    data = load_digits().data[:500]

n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
      % (n_samples, n_features))

n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()

# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]

for n_components in n_components_range:
    t0 = time()
    rp = SparseRandomProjection(n_components=n_components)
    projected_data = rp.fit_transform(data)
    print("Projected %d samples from %d to %d in %0.3fs"
          % (n_samples, n_features, n_components, time() - t0))
    if hasattr(rp, 'components_'):
        # Report the memory footprint of the sparse projection matrix.
        n_bytes = rp.components_.data.nbytes
        n_bytes += rp.components_.indices.nbytes
        print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))

    projected_dists = euclidean_distances(
        projected_data, squared=True).ravel()[nonzero]

    # 2D density of original vs projected pairwise squared distances.
    plt.figure()
    plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
    plt.xlabel("Pairwise squared distances in original space")
    plt.ylabel("Pairwise squared distances in projected space")
    plt.title("Pairwise distances distribution for n_components=%d" %
              n_components)
    cb = plt.colorbar()
    cb.set_label('Sample pairs counts')

    rates = projected_dists / dists
    print("Mean distances rate: %0.2f (%0.2f)"
          % (np.mean(rates), np.std(rates)))

    # Histogram of the distance ratios (projected / original).
    # NOTE(review): the ``normed`` keyword was removed in matplotlib >= 3.1;
    # modern matplotlib requires ``density=True`` here.
    plt.figure()
    plt.hist(rates, bins=50, normed=True, range=(0., 2.))
    plt.xlabel("Squared distances rate: projected / original")
    plt.ylabel("Distribution of samples pairs")
    plt.title("Histogram of pairwise distance rates for n_components=%d" %
              n_components)

# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region

plt.show()
| bsd-3-clause |
Datawheel/datausa-site | mobilitycovid19.py | 2 | 4125 | import pandas as pd
import os
# Two-letter state abbreviation -> Data USA geography ID
# (FIPS-based "04000US<state fips>" codes).
stateToFips = {"AL": "04000US01", "AK": "04000US02", "AZ": "04000US04", "AR": "04000US05", "CA": "04000US06",
               "CO": "04000US08", "CT": "04000US09", "DE": "04000US10", "DC": "04000US11", "FL": "04000US12",
               "GA": "04000US13", "HI": "04000US15", "ID": "04000US16", "IL": "04000US17", "IN": "04000US18",
               "IA": "04000US19", "KS": "04000US20", "KY": "04000US21", "LA": "04000US22", "ME": "04000US23",
               "MD": "04000US24", "MA": "04000US25", "MI": "04000US26", "MN": "04000US27", "MS": "04000US28",
               "MO": "04000US29", "MT": "04000US30", "NE": "04000US31", "NV": "04000US32", "NH": "04000US33",
               "NJ": "04000US34", "NM": "04000US35", "NY": "04000US36", "NC": "04000US37", "ND": "04000US38",
               "OH": "04000US39", "OK": "04000US40", "OR": "04000US41", "PA": "04000US42", "RI": "04000US44",
               "SC": "04000US45", "SD": "04000US46", "TN": "04000US47", "TX": "04000US48", "UT": "04000US49",
               "VT": "04000US50", "VA": "04000US51", "WA": "04000US53", "WV": "04000US54", "WI": "04000US55",
               "WY": "04000US56"}

# Full state name -> two-letter abbreviation. Note: "Chicago" is mapped
# to IL so that Google's Chicago rows fold into Illinois.
states = {"Alabama": "AL", "Alaska": "AK", "Arizona": "AZ", "Arkansas": "AR", "California": "CA", "Colorado": "CO",
          "Connecticut": "CT", "District of Columbia": "DC", "Delaware": "DE", "Florida": "FL", "Georgia": "GA",
          "Hawaii": "HI", "Idaho": "ID", "Illinois": "IL", "Indiana": "IN", "Iowa": "IA", "Kansas": "KS",
          "Kentucky": "KY", "Louisiana": "LA", "Maine": "ME", "Maryland": "MD", "Massachusetts": "MA", "Michigan": "MI",
          "Minnesota": "MN", "Mississippi": "MS", "Missouri": "MO", "Montana": "MT", "Nebraska": "NE", "Nevada": "NV",
          "New Hampshire": "NH", "New Jersey": "NJ", "New Mexico": "NM", "New York": "NY", "North Carolina": "NC",
          "North Dakota": "ND", "Ohio": "OH", "Oklahoma": "OK", "Oregon": "OR", "Pennsylvania": "PA",
          "Rhode Island": "RI", "South Carolina": "SC", "South Dakota": "SD", "Tennessee": "TN", "Texas": "TX",
          "Utah": "UT", "Vermont": "VT", "Virginia": "VA", "Washington": "WA", "West Virginia": "WV",
          "Wisconsin": "WI", "Wyoming": "WY", "Chicago": "IL"}
# Download Google's COVID-19 Community Mobility Report and keep only
# US state-level rows (sub_region_1 set, sub_region_2 empty).
df_google = pd.read_csv("https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv", low_memory=False)
df_google = df_google[df_google["country_region_code"] == "US"]
df_google = df_google[(~df_google["sub_region_1"].isna()) & (df_google["sub_region_2"].isna())]

# Reshape wide per-category columns to long format:
# one row per (state, date, mobility category).
df_google = df_google.melt(
    id_vars=["country_region", "sub_region_1", "date"],
    value_vars=[
        "retail_and_recreation_percent_change_from_baseline",
        "grocery_and_pharmacy_percent_change_from_baseline",
        "parks_percent_change_from_baseline",
        "transit_stations_percent_change_from_baseline",
        "workplaces_percent_change_from_baseline",
        "residential_percent_change_from_baseline"
    ]
)

# Human-readable category labels.
df_google["variable"] = df_google["variable"].replace({
    "retail_and_recreation_percent_change_from_baseline": "Retail and Recreation",
    "grocery_and_pharmacy_percent_change_from_baseline": "Grocery and Pharmacy",
    "parks_percent_change_from_baseline": "Parks",
    "transit_stations_percent_change_from_baseline": "Transit Stations",
    "workplaces_percent_change_from_baseline": "Workplaces",
    "residential_percent_change_from_baseline": "Residential"
})

df_google = df_google.drop(columns=["country_region"])
df_google = df_google.rename(columns={
    "sub_region_1": "Geography",
    "date": "Date",
    "variable": "Type",
    "value": "Percent Change from Baseline"
})
df_google = df_google[~df_google["Geography"].isna()]

# State name -> abbreviation -> Data USA geography ID.
df_google["ID Geography"] = df_google["Geography"].replace(states).replace(stateToFips)
# Normalize dates from ISO "YYYY-MM-DD" to "YYYY/MM/DD".
df_google["Date"] = df_google["Date"].str.replace("-", "/")
# Resolve the output path relative to this script's own directory.
# Fix: use the __file__ object, not the literal string "__file__",
# which made the path depend on the current working directory.
path = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "static", "mobilitycovid19.json")
previous = pd.read_json(path) if os.path.exists(path) else pd.DataFrame([])

# Only rewrite the file when the upstream report gained rows.
if len(df_google) > len(previous):
    df_google.to_json(path, orient="records")
| mit |
averagehat/biopandas | tests/testbiopandas.py | 2 | 4473 | import mock
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import unittest
#from bioframes import bioframes as bf
from bioframes import sequenceframes
import sys
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_index_equal
from numpy.testing import assert_array_equal, assert_array_almost_equal
from bioframes import bioframes
import numpy as np
from operator import itemgetter
if sys.version[0] == '2':
import __builtin__ as builtins # pylint:disable=import-error
else:
import builtins # pylint:disable=import-error
def mock_file(func, read_data, *args, **kwargs):
    '''Call *func* with a fake file handle whose reads yield *read_data*.

    Extra positional and keyword arguments are forwarded to *func*.
    '''
    patched_open = mock.mock_open(read_data=read_data)
    with mock.patch.object(builtins, 'open', patched_open):
        with open('_') as handle:
            return func(handle, *args, **kwargs)
class TestFastq(unittest.TestCase):
    '''Tests for loading FASTQ records into a bioframes DataFrame.'''

    def setUp(self):
        # Single record with descending qualities, used by the per-row
        # tests below.
        self.r = SeqRecord(Seq("ACGTA"), id="Test", letter_annotations = {"phred_quality":[50, 40, 30, 20, 10]})
        # Four-read FASTQ fixture; read1 and read4 are duplicates.
        self.fastq_string = '''@read1
TTTCGAATC
+
FFFFFFFFF
@read2
CTTCGATC
+
AFFDDDDD
@read3
CCGATCAA
+
FF@@@F@F
@read4
TTTCGAATC
+
FFFFFFFFF
'''
        with open('tmp.fq', 'w') as tmp: tmp.write(self.fastq_string)
        fq = sequenceframes.fqframe(open('tmp.fq'))
        #self.df = sequenceframes.load_fastq(open('tmp.fq'))
        self.df = bioframes.makeframe(fq)
        # NOTE(review): ``self.series`` is never assigned (the get_row
        # code below is commented out), so the two test_sanger_* tests
        # will error with AttributeError — confirm intent.
#        self.df = fq.load_fastq()
#        r_dict = fq.get_row(self.r)
#        self.series = pd.Series(r_dict)
        #r_dict = sequenceframes.get_row(self.r)
        #TODO: somehow SeqIO broke when I tried to mock_open
#        with mock.patch.object(builtins, 'open', mock.mock_open(read_data=self.fastq_string)): #, create = True) as m:
#            with open('_') as handle:
#                self.df = bf.load_fastq(handle)
        #self.df = mock_file(bf.load_fastq, read_data=self.fastq_string)

    def test_sanger_quality_error(self):
        # Phred 10..50 corresponds to error rates .1 down to .00001.
        expected = np.array([.1, .01, .001, .0001, .00001][::-1])
        assert_array_almost_equal(self.series['error'], expected)

    def test_sanger_quality_string(self):
        self.assertEquals(self.series['quality'], 'SI?5+')

    def test_data_frame_lengths(self):
        # One DataFrame row per 4-line FASTQ record.
        expected_len = len(self.fastq_string.split('\n')) / 4
        self.assertEquals( expected_len, len(self.df))

    def test_dupe_reads(self):
        # read1 and read4 share the same sequence.
        dupe_reads = self.df[self.df.seq == 'TTTCGAATC']
        self.assertEquals(2, len(dupe_reads))

    def test_dataframe_index(self):
        expected_index = pd.Index(['read1', 'read2', 'read3', 'read4'])
        assert_index_equal(expected_index, self.df.index)

    def test_dataframe_contents(self):
        columns = itemgetter( 'description','seq', 'quality', 'qual_ints', 'error')
        qlen=len( 'TTTCGAATC')
        # 'F' is phred 37 in Sanger encoding -> error rate 1e-4 (approx).
        expected1 = pd.Series(['read1', 'TTTCGAATC', 'FFFFFFFFF', np.array([37]*qlen), np.array( [0.0001]*qlen)])
        expected3 = pd.Series(['read4', 'TTTCGAATC', 'FFFFFFFFF', np.array([37]*qlen), np.array( [0.0001]*qlen)])
        r1, r4, r2 = map(pd.Series, [columns(self.df.ix['read1']), columns(self.df.ix['read4']), columns(self.df.ix['read2'])])
        assert_series_equal( expected1, r1)
        assert_series_equal( expected3, r4)
        expected_error = np.array( [0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001])
        expected_qual_ints = np.array( [32, 37, 37, 35, 35, 35, 35, 35])
        expected2 = pd.Series(['read2', 'CTTCGATC', 'AFFDDDDD', expected_qual_ints, expected_error])
        assert_series_equal(expected2, r2)

#    def test_join_non_unique_dataframes(self):
#        '''
#        df1 and df2 share an index with duplicates, check that it is aligned correctly
#        '''
#        rows1 = [('A', 'A1'), ('B', 'B1'), ('A', 'A2'), ('C', 'C1')]
#        rows2 = [('A', '0A', False), ('B', '0B', True), ('A', '00A', False), ('C', '00C', True)]
#        self.df1, self.df2 = map(make_df_index0, (rows1, rows2))
#        self.df1.columns = ['0', '1']
#        self.df2.columns = ['0', '1', '2']
#        self.df1, self.df2 = self.df1.set_index('0'), self.df2.set_index('0')
#        result = a2f.join_non_unique_dataframes(self.df1, self.df2)
#        expected = pd.DataFrame(
#            [('A', 0, 'A1', '0A', True), ('B', 0, 'B1', '0B', True),
#             ('A', 1, 'A2', '00A', False), ('C', 0, 'C1', '00C', True)]
#        ).set_index(0).set_index(1, append=True)
#        assert_frame_equal(result, expected)
| gpl-2.0 |
M-R-Houghton/euroscipy_2015 | bokeh/bokeh/charts/builder/tests/test_scatter_builder.py | 33 | 2895 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Scatter
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestScatter(unittest.TestCase):
    '''Checks Scatter chart building from dict, DataFrame, list and array.'''

    def test_supported_input(self):
        xyvalues = OrderedDict()
        xyvalues['python'] = [(1, 2), (3, 3), (4, 7), (5, 5), (8, 26)]
        xyvalues['pypy'] = [(1, 12), (2, 23), (4, 47), (5, 15), (8, 46)]
        xyvalues['jython'] = [(1, 22), (2, 43), (4, 10), (6, 25), (8, 26)]
        xyvaluesdf = pd.DataFrame(xyvalues)

        # Expected x/y columns, derived once from the point lists.
        expected = {
            name: ([x for x, _ in pts], [y for _, y in pts])
            for name, pts in xyvalues.items()
        }

        # Keyed inputs: group names come from the dict/DataFrame keys.
        for _xy in [xyvalues, xyvaluesdf]:
            builder = create_chart(Scatter, _xy)._builders[0]
            self.assertEqual(
                sorted(builder._groups), sorted(xyvalues.keys()))
            for name, (xs, ys) in expected.items():
                assert_array_equal(builder._data['x_%s' % name], xs)
                assert_array_equal(builder._data['y_%s' % name], ys)

        # Unkeyed inputs: groups are auto-numbered '0', '1', '2'.
        lvalues = [xyvalues['python'], xyvalues['pypy'], xyvalues['jython']]
        for _xy in [lvalues, np.array(lvalues)]:
            builder = create_chart(Scatter, _xy)._builders[0]
            self.assertEqual(builder._groups, ['0', '1', '2'])
            for idx, name in enumerate(['python', 'pypy', 'jython']):
                xs, ys = expected[name]
                assert_array_equal(builder._data['x_%d' % idx], xs)
                assert_array_equal(builder._data['y_%d' % idx], ys)
| mit |
andrewchenshx/vnpy | vnpy/app/cta_strategy/backtesting.py | 1 | 37272 | from collections import defaultdict
from datetime import date, datetime, timedelta
from typing import Callable
from itertools import product
from functools import lru_cache
from time import time
import multiprocessing
import random
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pandas import DataFrame
from deap import creator, base, tools, algorithms
from vnpy.trader.constant import (Direction, Offset, Exchange,
Interval, Status)
from vnpy.trader.database import database_manager
from vnpy.trader.object import OrderData, TradeData, BarData, TickData
from vnpy.trader.utility import round_to
from .base import (
BacktestingMode,
EngineType,
STOPORDER_PREFIX,
StopOrder,
StopOrderStatus,
)
from .template import CtaTemplate
sns.set_style("whitegrid")
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)
class OptimizationSetting:
    """
    Setting for running optimization.

    Collects candidate values for each strategy parameter and the name of
    the statistic (from ``calculate_statistics``) that should be maximized.
    """

    def __init__(self):
        """Start with no parameters and no optimization target."""
        self.params = {}        # parameter name -> list of candidate values
        self.target_name = ""   # statistic key to optimize, e.g. "sharpe_ratio"

    def add_parameter(
        self, name: str, start: float, end: float = None, step: float = None
    ):
        """Register one parameter.

        With only ``start`` given, the parameter is fixed at that value.
        Otherwise candidates run from ``start`` to ``end`` (inclusive) in
        increments of ``step``.
        """
        if not end and not step:
            # Fixed parameter: a single candidate value.
            self.params[name] = [start]
            return

        # Bug fix: supplying only one of end/step used to fall through and
        # raise TypeError on the comparisons below; report it instead.
        if end is None or step is None:
            print("参数优化终止点和步进必须同时提供")
            return

        if start >= end:
            print("参数优化起始点必须小于终止点")
            return

        if step <= 0:
            print("参数优化步进必须大于0")
            return

        # Enumerate candidates; note float steps may accumulate rounding.
        value = start
        value_list = []

        while value <= end:
            value_list.append(value)
            value += step

        self.params[name] = value_list

    def set_target(self, target_name: str):
        """Choose which statistic the optimizer should maximize."""
        self.target_name = target_name

    def generate_setting(self):
        """Return the cartesian product of all parameter values as dicts."""
        keys = self.params.keys()
        values = self.params.values()
        products = list(product(*values))

        settings = []
        for p in products:
            setting = dict(zip(keys, p))
            settings.append(setting)

        return settings

    def generate_setting_ga(self):
        """Return each setting as a list of (name, value) tuples.

        The GA needs hashable parameter items so evaluations can be cached.
        """
        settings_ga = []
        settings = self.generate_setting()
        for d in settings:
            param = [tuple(i) for i in d.items()]
            settings_ga.append(param)
        return settings_ga
class BacktestingEngine:
""""""
engine_type = EngineType.BACKTESTING
gateway_name = "BACKTESTING"
    def __init__(self):
        """Initialize an empty backtesting engine; call set_parameters next."""
        # Contract / instrument configuration.
        self.vt_symbol = ""
        self.symbol = ""
        self.exchange = None
        self.start = None           # backtest start datetime
        self.end = None             # backtest end datetime
        self.rate = 0               # commission rate (per turnover)
        self.slippage = 0           # slippage per unit traded
        self.size = 1               # contract multiplier
        self.pricetick = 0          # minimum price increment
        self.capital = 1_000_000    # starting capital
        self.mode = BacktestingMode.BAR

        # Strategy under test.
        self.strategy_class = None
        self.strategy = None
        self.tick: TickData
        self.bar: BarData
        self.datetime = None        # simulated "now", advanced by new_bar/new_tick

        self.interval = None
        self.days = 0               # init-data days requested via load_bar/load_tick
        self.callback = None        # strategy callback used during initialization
        self.history_data = []

        # Simulated order books: stop orders and limit orders kept separately.
        self.stop_order_count = 0
        self.stop_orders = {}
        self.active_stop_orders = {}

        self.limit_order_count = 0
        self.limit_orders = {}
        self.active_limit_orders = {}

        # Trades and per-day mark-to-market results.
        self.trade_count = 0
        self.trades = {}

        self.logs = []

        self.daily_results = {}
        self.daily_df = None
def clear_data(self):
"""
Clear all data of last backtesting.
"""
self.strategy = None
self.tick = None
self.bar = None
self.datetime = None
self.stop_order_count = 0
self.stop_orders.clear()
self.active_stop_orders.clear()
self.limit_order_count = 0
self.limit_orders.clear()
self.active_limit_orders.clear()
self.trade_count = 0
self.trades.clear()
self.logs.clear()
self.daily_results.clear()
def set_parameters(
self,
vt_symbol: str,
interval: Interval,
start: datetime,
rate: float,
slippage: float,
size: float,
pricetick: float,
capital: int = 0,
end: datetime = None,
mode: BacktestingMode = BacktestingMode.BAR,
):
""""""
self.mode = mode
self.vt_symbol = vt_symbol
self.interval = Interval(interval)
self.rate = rate
self.slippage = slippage
self.size = size
self.pricetick = pricetick
self.start = start
self.symbol, exchange_str = self.vt_symbol.split(".")
self.exchange = Exchange(exchange_str)
if capital:
self.capital = capital
if end:
self.end = end
if mode:
self.mode = mode
def add_strategy(self, strategy_class: type, setting: dict):
""""""
self.strategy_class = strategy_class
self.strategy = strategy_class(
self, strategy_class.__name__, self.vt_symbol, setting
)
def load_data(self):
""""""
self.output("开始加载历史数据")
if not self.end:
self.end = datetime.now()
if self.start >= self.end:
self.output("起始日期必须小于结束日期")
return
self.history_data.clear() # Clear previously loaded history data
# Load 30 days of data each time and allow for progress update
progress_delta = timedelta(days=30)
total_delta = self.end - self.start
start = self.start
end = self.start + progress_delta
progress = 0
while start < self.end:
if self.mode == BacktestingMode.BAR:
data = load_bar_data(
self.symbol,
self.exchange,
self.interval,
start,
end
)
else:
data = load_tick_data(
self.symbol,
self.exchange,
start,
end
)
self.history_data.extend(data)
progress += progress_delta / total_delta
progress = min(progress, 1)
progress_bar = "#" * int(progress * 10)
self.output(f"加载进度:{progress_bar} [{progress:.0%}]")
start = end
end += progress_delta
self.output(f"历史数据加载完成,数据量:{len(self.history_data)}")
    def run_backtesting(self):
        """Replay loaded history: first to warm up the strategy, then to trade.

        The first self.days of data feed the strategy's init callback; the
        remainder is replayed through new_bar/new_tick with trading enabled.
        """
        if self.mode == BacktestingMode.BAR:
            func = self.new_bar
        else:
            func = self.new_tick

        self.strategy.on_init()

        # Use the first [days] of history data for initializing strategy
        day_count = 0
        ix = 0

        for ix, data in enumerate(self.history_data):
            # NOTE(review): day rollover is detected by comparing day-of-month
            # only; a gap of exactly one month would not register — confirm
            # this is acceptable for the data being replayed.
            if self.datetime and data.datetime.day != self.datetime.day:
                day_count += 1
                if day_count >= self.days:
                    break

            self.datetime = data.datetime
            self.callback(data)

        self.strategy.inited = True
        self.output("策略初始化完成")

        self.strategy.on_start()
        self.strategy.trading = True
        self.output("开始回放历史数据")

        # Use the rest of history data for running backtesting
        for data in self.history_data[ix:]:
            func(data)

        self.output("历史数据回放结束")
    def calculate_result(self):
        """Aggregate trades into per-day results and build self.daily_df.

        Returns the daily DataFrame (indexed by date), or None when there
        are no trades to evaluate.
        """
        self.output("开始计算逐日盯市盈亏")

        if not self.trades:
            self.output("成交记录为空,无法计算")
            return

        # Add trade data into daily result.
        for trade in self.trades.values():
            # NOTE(review): assumes a DailyResult already exists for every
            # trade date (created by update_daily_close during replay);
            # a missing date would raise KeyError here.
            d = trade.datetime.date()
            daily_result = self.daily_results[d]
            daily_result.add_trade(trade)

        # Calculate daily result by iteration, carrying the previous close
        # and end-of-day position forward into the next day.
        pre_close = 0
        start_pos = 0

        for daily_result in self.daily_results.values():
            daily_result.calculate_pnl(
                pre_close, start_pos, self.size, self.rate, self.slippage
            )

            pre_close = daily_result.close_price
            start_pos = daily_result.end_pos

        # Generate dataframe: one column per DailyResult attribute.
        results = defaultdict(list)

        for daily_result in self.daily_results.values():
            for key, value in daily_result.__dict__.items():
                results[key].append(value)

        self.daily_df = DataFrame.from_dict(results).set_index("date")

        self.output("逐日盯市盈亏计算完成")
        return self.daily_df
def calculate_statistics(self, df: DataFrame = None, output=True):
""""""
self.output("开始计算策略统计指标")
# Check DataFrame input exterior
if df is None:
df = self.daily_df
# Check for init DataFrame
if df is None:
# Set all statistics to 0 if no trade.
start_date = ""
end_date = ""
total_days = 0
profit_days = 0
loss_days = 0
end_balance = 0
max_drawdown = 0
max_ddpercent = 0
total_net_pnl = 0
daily_net_pnl = 0
total_commission = 0
daily_commission = 0
total_slippage = 0
daily_slippage = 0
total_turnover = 0
daily_turnover = 0
total_trade_count = 0
daily_trade_count = 0
total_return = 0
annual_return = 0
daily_return = 0
return_std = 0
sharpe_ratio = 0
return_drawdown_ratio = 0
else:
# Calculate balance related time series data
df["balance"] = df["net_pnl"].cumsum() + self.capital
df["return"] = np.log(df["balance"] / df["balance"].shift(1)).fillna(0)
df["highlevel"] = (
df["balance"].rolling(
min_periods=1, window=len(df), center=False).max()
)
df["drawdown"] = df["balance"] - df["highlevel"]
df["ddpercent"] = df["drawdown"] / df["highlevel"] * 100
# Calculate statistics value
start_date = df.index[0]
end_date = df.index[-1]
total_days = len(df)
profit_days = len(df[df["net_pnl"] > 0])
loss_days = len(df[df["net_pnl"] < 0])
end_balance = df["balance"].iloc[-1]
max_drawdown = df["drawdown"].min()
max_ddpercent = df["ddpercent"].min()
total_net_pnl = df["net_pnl"].sum()
daily_net_pnl = total_net_pnl / total_days
total_commission = df["commission"].sum()
daily_commission = total_commission / total_days
total_slippage = df["slippage"].sum()
daily_slippage = total_slippage / total_days
total_turnover = df["turnover"].sum()
daily_turnover = total_turnover / total_days
total_trade_count = df["trade_count"].sum()
daily_trade_count = total_trade_count / total_days
total_return = (end_balance / self.capital - 1) * 100
annual_return = total_return / total_days * 240
daily_return = df["return"].mean() * 100
return_std = df["return"].std() * 100
if return_std:
sharpe_ratio = daily_return / return_std * np.sqrt(240)
else:
sharpe_ratio = 0
return_drawdown_ratio = -total_return / max_ddpercent
# Output
if output:
self.output("-" * 30)
self.output(f"首个交易日:\t{start_date}")
self.output(f"最后交易日:\t{end_date}")
self.output(f"总交易日:\t{total_days}")
self.output(f"盈利交易日:\t{profit_days}")
self.output(f"亏损交易日:\t{loss_days}")
self.output(f"起始资金:\t{self.capital:,.2f}")
self.output(f"结束资金:\t{end_balance:,.2f}")
self.output(f"总收益率:\t{total_return:,.2f}%")
self.output(f"年化收益:\t{annual_return:,.2f}%")
self.output(f"最大回撤: \t{max_drawdown:,.2f}")
self.output(f"百分比最大回撤: {max_ddpercent:,.2f}%")
self.output(f"总盈亏:\t{total_net_pnl:,.2f}")
self.output(f"总手续费:\t{total_commission:,.2f}")
self.output(f"总滑点:\t{total_slippage:,.2f}")
self.output(f"总成交金额:\t{total_turnover:,.2f}")
self.output(f"总成交笔数:\t{total_trade_count}")
self.output(f"日均盈亏:\t{daily_net_pnl:,.2f}")
self.output(f"日均手续费:\t{daily_commission:,.2f}")
self.output(f"日均滑点:\t{daily_slippage:,.2f}")
self.output(f"日均成交金额:\t{daily_turnover:,.2f}")
self.output(f"日均成交笔数:\t{daily_trade_count}")
self.output(f"日均收益率:\t{daily_return:,.2f}%")
self.output(f"收益标准差:\t{return_std:,.2f}%")
self.output(f"Sharpe Ratio:\t{sharpe_ratio:,.2f}")
self.output(f"收益回撤比:\t{return_drawdown_ratio:,.2f}")
statistics = {
"start_date": start_date,
"end_date": end_date,
"total_days": total_days,
"profit_days": profit_days,
"loss_days": loss_days,
"capital": self.capital,
"end_balance": end_balance,
"max_drawdown": max_drawdown,
"max_ddpercent": max_ddpercent,
"total_net_pnl": total_net_pnl,
"daily_net_pnl": daily_net_pnl,
"total_commission": total_commission,
"daily_commission": daily_commission,
"total_slippage": total_slippage,
"daily_slippage": daily_slippage,
"total_turnover": total_turnover,
"daily_turnover": daily_turnover,
"total_trade_count": total_trade_count,
"daily_trade_count": daily_trade_count,
"total_return": total_return,
"annual_return": annual_return,
"daily_return": daily_return,
"return_std": return_std,
"sharpe_ratio": sharpe_ratio,
"return_drawdown_ratio": return_drawdown_ratio,
}
return statistics
    def show_chart(self, df: DataFrame = None):
        """Plot balance, drawdown, daily pnl and pnl distribution.

        Requires that calculate_statistics has already added the derived
        columns (balance/drawdown) to the DataFrame; silently returns when
        no daily results exist.
        """
        # Check DataFrame input exterior
        if df is None:
            df = self.daily_df

        # Check for init DataFrame
        if df is None:
            return

        plt.figure(figsize=(10, 16))

        balance_plot = plt.subplot(4, 1, 1)
        balance_plot.set_title("Balance")
        df["balance"].plot(legend=True)

        drawdown_plot = plt.subplot(4, 1, 2)
        drawdown_plot.set_title("Drawdown")
        drawdown_plot.fill_between(range(len(df)), df["drawdown"].values)

        pnl_plot = plt.subplot(4, 1, 3)
        pnl_plot.set_title("Daily Pnl")
        df["net_pnl"].plot(kind="bar", legend=False, grid=False, xticks=[])

        distribution_plot = plt.subplot(4, 1, 4)
        distribution_plot.set_title("Daily Pnl Distribution")
        df["net_pnl"].hist(bins=50)

        plt.show()
    def run_optimization(self, optimization_setting: OptimizationSetting, output=True):
        """Brute-force parameter search over the full cartesian product.

        Each setting is backtested in a separate process; results are
        returned sorted by the target statistic, best first, as a list of
        (setting_str, target_value, statistics) tuples.
        """
        # Get optimization setting and target
        settings = optimization_setting.generate_setting()
        target_name = optimization_setting.target_name

        if not settings:
            self.output("优化参数组合为空,请检查")
            return

        if not target_name:
            self.output("优化目标未设置,请检查")
            return

        # Use multiprocessing pool for running backtesting with different setting
        pool = multiprocessing.Pool(multiprocessing.cpu_count())

        results = []
        for setting in settings:
            result = (pool.apply_async(optimize, (
                target_name,
                self.strategy_class,
                setting,
                self.vt_symbol,
                self.interval,
                self.start,
                self.rate,
                self.slippage,
                self.size,
                self.pricetick,
                self.capital,
                self.end,
                self.mode
            )))
            results.append(result)

        pool.close()
        pool.join()

        # Sort results and output
        result_values = [result.get() for result in results]
        result_values.sort(reverse=True, key=lambda result: result[1])

        if output:
            for value in result_values:
                msg = f"参数:{value[0]}, 目标:{value[1]}"
                self.output(msg)

        return result_values
    def run_ga_optimization(self, optimization_setting: OptimizationSetting, population_size=100, ngen_size=30, output=True):
        """Genetic-algorithm parameter search using DEAP's mu+lambda scheme.

        Individuals are lists of (name, value) tuples; evaluation happens in
        ga_optimize, which reads the engine configuration from module-level
        globals because the cached evaluation function can only take hashable
        arguments. Returns [(setting, target_value, {}), ...] from the final
        Pareto front.
        """
        # Get optimization setting and target
        settings = optimization_setting.generate_setting_ga()
        target_name = optimization_setting.target_name

        if not settings:
            self.output("优化参数组合为空,请检查")
            return

        if not target_name:
            self.output("优化目标未设置,请检查")
            return

        # Define parameter generation function
        def generate_parameter():
            """Pick a random full setting to seed an individual."""
            return random.choice(settings)

        def mutate_individual(individual, indpb):
            """Replace each gene with prob. indpb by the corresponding gene
            of a freshly drawn random setting."""
            size = len(individual)
            paramlist = generate_parameter()
            for i in range(size):
                if random.random() < indpb:
                    individual[i] = paramlist[i]
            return individual,

        # Create ga object function: publish the engine configuration through
        # module globals so the (picklable, cache-friendly) ga_optimize
        # function can reconstruct an engine per evaluation.
        global ga_target_name
        global ga_strategy_class
        global ga_setting
        global ga_vt_symbol
        global ga_interval
        global ga_start
        global ga_rate
        global ga_slippage
        global ga_size
        global ga_pricetick
        global ga_capital
        global ga_end
        global ga_mode

        ga_target_name = target_name
        ga_strategy_class = self.strategy_class
        ga_setting = settings[0]
        ga_vt_symbol = self.vt_symbol
        ga_interval = self.interval
        ga_start = self.start
        ga_rate = self.rate
        ga_slippage = self.slippage
        ga_size = self.size
        ga_pricetick = self.pricetick
        ga_capital = self.capital
        ga_end = self.end
        ga_mode = self.mode

        # Set up genetic algorithm
        toolbox = base.Toolbox()
        toolbox.register("individual", tools.initIterate, creator.Individual, generate_parameter)
        toolbox.register("population", tools.initRepeat, list, toolbox.individual)
        toolbox.register("mate", tools.cxTwoPoint)
        toolbox.register("mutate", mutate_individual, indpb=1)
        toolbox.register("evaluate", ga_optimize)
        toolbox.register("select", tools.selNSGA2)

        total_size = len(settings)
        pop_size = population_size                      # number of individuals in each generation
        lambda_ = pop_size                              # number of children to produce at each generation
        mu = int(pop_size * 0.8)                        # number of individuals to select for the next generation

        cxpb = 0.95         # probability that an offspring is produced by crossover
        mutpb = 1 - cxpb    # probability that an offspring is produced by mutation
        ngen = ngen_size    # number of generation

        pop = toolbox.population(pop_size)
        hof = tools.ParetoFront()               # end result of pareto front

        stats = tools.Statistics(lambda ind: ind.fitness.values)
        np.set_printoptions(suppress=True)
        stats.register("mean", np.mean, axis=0)
        stats.register("std", np.std, axis=0)
        stats.register("min", np.min, axis=0)
        stats.register("max", np.max, axis=0)

        # Multiprocessing is not supported yet.
        # pool = multiprocessing.Pool(multiprocessing.cpu_count())
        # toolbox.register("map", pool.map)

        # Run ga optimization
        self.output(f"参数优化空间:{total_size}")
        self.output(f"每代族群总数:{pop_size}")
        self.output(f"优良筛选个数:{mu}")
        self.output(f"迭代次数:{ngen}")
        self.output(f"交叉概率:{cxpb:.0%}")
        self.output(f"突变概率:{mutpb:.0%}")

        start = time()

        algorithms.eaMuPlusLambda(
            pop,
            toolbox,
            mu,
            lambda_,
            cxpb,
            mutpb,
            ngen,
            stats,
            halloffame=hof
        )

        end = time()
        cost = int((end - start))

        self.output(f"遗传算法优化完成,耗时{cost}秒")

        # Return result list: re-evaluation here hits the lru_cache.
        results = []

        for parameter_values in hof:
            setting = dict(parameter_values)
            target_value = ga_optimize(parameter_values)[0]
            results.append((setting, target_value, {}))

        return results
def update_daily_close(self, price: float):
""""""
d = self.datetime.date()
daily_result = self.daily_results.get(d, None)
if daily_result:
daily_result.close_price = price
else:
self.daily_results[d] = DailyResult(d, price)
    def new_bar(self, bar: BarData):
        """Advance the simulation by one bar.

        Orders are matched against the new bar *before* the strategy sees it,
        so fills happen at this bar's prices, then the strategy reacts.
        """
        self.bar = bar
        self.datetime = bar.datetime

        self.cross_limit_order()
        self.cross_stop_order()
        self.strategy.on_bar(bar)

        self.update_daily_close(bar.close_price)
    def new_tick(self, tick: TickData):
        """Advance the simulation by one tick.

        Same ordering as new_bar: match outstanding orders first, then let
        the strategy react to the tick.
        """
        self.tick = tick
        self.datetime = tick.datetime

        self.cross_limit_order()
        self.cross_stop_order()
        self.strategy.on_tick(tick)

        self.update_daily_close(tick.last_price)
    def cross_limit_order(self):
        """
        Cross limit order with last bar/tick data.

        In BAR mode a long limit fills when the bar's low trades through the
        limit price; fills are improved to the bar open when the open gapped
        past the limit. In TICK mode the best ask/bid is used directly.
        """
        if self.mode == BacktestingMode.BAR:
            long_cross_price = self.bar.low_price
            short_cross_price = self.bar.high_price
            long_best_price = self.bar.open_price
            short_best_price = self.bar.open_price
        else:
            long_cross_price = self.tick.ask_price_1
            short_cross_price = self.tick.bid_price_1
            long_best_price = long_cross_price
            short_best_price = short_cross_price

        # Iterate over a copy: filled orders are popped inside the loop.
        for order in list(self.active_limit_orders.values()):
            # Push order update with status "not traded" (pending).
            if order.status == Status.SUBMITTING:
                order.status = Status.NOTTRADED
                self.strategy.on_order(order)

            # Check whether limit orders can be filled.
            # The "> 0" guard skips crossing against missing/zero quotes.
            long_cross = (
                order.direction == Direction.LONG
                and order.price >= long_cross_price
                and long_cross_price > 0
            )

            short_cross = (
                order.direction == Direction.SHORT
                and order.price <= short_cross_price
                and short_cross_price > 0
            )

            if not long_cross and not short_cross:
                continue

            # Push order update with status "all traded" (filled).
            order.traded = order.volume
            order.status = Status.ALLTRADED
            self.strategy.on_order(order)

            self.active_limit_orders.pop(order.vt_orderid)

            # Push trade update
            self.trade_count += 1

            # Fill at the better of limit price and best available price.
            if long_cross:
                trade_price = min(order.price, long_best_price)
                pos_change = order.volume
            else:
                trade_price = max(order.price, short_best_price)
                pos_change = -order.volume

            trade = TradeData(
                symbol=order.symbol,
                exchange=order.exchange,
                orderid=order.orderid,
                tradeid=str(self.trade_count),
                direction=order.direction,
                offset=order.offset,
                price=trade_price,
                volume=order.volume,
                time=self.datetime.strftime("%H:%M:%S"),
                gateway_name=self.gateway_name,
            )
            trade.datetime = self.datetime

            self.strategy.pos += pos_change
            self.strategy.on_trade(trade)

            self.trades[trade.vt_tradeid] = trade
    def cross_stop_order(self):
        """
        Cross stop order with last bar/tick data.

        A long stop triggers when price rises to its level (bar high / last
        price); a short stop when price falls to it. A triggered stop is
        converted into an already-filled limit order plus a trade, filled at
        the worse of stop price and best available price.
        """
        if self.mode == BacktestingMode.BAR:
            long_cross_price = self.bar.high_price
            short_cross_price = self.bar.low_price
            long_best_price = self.bar.open_price
            short_best_price = self.bar.open_price
        else:
            long_cross_price = self.tick.last_price
            short_cross_price = self.tick.last_price
            long_best_price = long_cross_price
            short_best_price = short_cross_price

        # Iterate over a copy: triggered stops are popped inside the loop.
        for stop_order in list(self.active_stop_orders.values()):
            # Check whether stop order can be triggered.
            long_cross = (
                stop_order.direction == Direction.LONG
                and stop_order.price <= long_cross_price
            )

            short_cross = (
                stop_order.direction == Direction.SHORT
                and stop_order.price >= short_cross_price
            )

            if not long_cross and not short_cross:
                continue

            # Create order data.
            self.limit_order_count += 1

            order = OrderData(
                symbol=self.symbol,
                exchange=self.exchange,
                orderid=str(self.limit_order_count),
                direction=stop_order.direction,
                offset=stop_order.offset,
                price=stop_order.price,
                volume=stop_order.volume,
                status=Status.ALLTRADED,
                gateway_name=self.gateway_name,
            )
            order.datetime = self.datetime

            self.limit_orders[order.vt_orderid] = order

            # Create trade data: stops fill at the worse of stop price and
            # best price (slippage direction opposite to limit orders).
            if long_cross:
                trade_price = max(stop_order.price, long_best_price)
                pos_change = order.volume
            else:
                trade_price = min(stop_order.price, short_best_price)
                pos_change = -order.volume

            self.trade_count += 1

            trade = TradeData(
                symbol=order.symbol,
                exchange=order.exchange,
                orderid=order.orderid,
                tradeid=str(self.trade_count),
                direction=order.direction,
                offset=order.offset,
                price=trade_price,
                volume=order.volume,
                time=self.datetime.strftime("%H:%M:%S"),
                gateway_name=self.gateway_name,
            )
            trade.datetime = self.datetime

            self.trades[trade.vt_tradeid] = trade

            # Update stop order.
            stop_order.vt_orderid = order.vt_orderid
            stop_order.status = StopOrderStatus.TRIGGERED

            self.active_stop_orders.pop(stop_order.stop_orderid)

            # Push update to strategy.
            self.strategy.on_stop_order(stop_order)
            self.strategy.on_order(order)

            self.strategy.pos += pos_change
            self.strategy.on_trade(trade)
    def load_bar(
        self, vt_symbol: str, days: int, interval: Interval, callback: Callable
    ):
        """Strategy-facing hook: record how much init data the strategy wants.

        In backtesting the data itself comes from run_backtesting's replay,
        so only ``days`` and ``callback`` are stored; ``vt_symbol`` and
        ``interval`` are accepted for interface compatibility and ignored.
        """
        self.days = days
        self.callback = callback
    def load_tick(self, vt_symbol: str, days: int, callback: Callable):
        """Tick-mode counterpart of load_bar: store init window and callback;
        ``vt_symbol`` is ignored in backtesting."""
        self.days = days
        self.callback = callback
    def send_order(
        self,
        strategy: CtaTemplate,
        direction: Direction,
        offset: Offset,
        price: float,
        volume: float,
        stop: bool,
        lock: bool
    ):
        """Route an order from the strategy into the simulated books.

        Returns a single-element list of order ids (live gateways may return
        several ids per request).

        NOTE(review): ``strategy`` and ``lock`` are accepted for interface
        compatibility with the live engine but are ignored here.
        """
        # Snap the requested price to the instrument's tick size.
        price = round_to(price, self.pricetick)
        if stop:
            vt_orderid = self.send_stop_order(direction, offset, price, volume)
        else:
            vt_orderid = self.send_limit_order(direction, offset, price, volume)
        return [vt_orderid]
    def send_stop_order(
        self,
        direction: Direction,
        offset: Offset,
        price: float,
        volume: float
    ):
        """Register a local stop order and return its stop_orderid."""
        self.stop_order_count += 1

        stop_order = StopOrder(
            vt_symbol=self.vt_symbol,
            direction=direction,
            offset=offset,
            price=price,
            volume=volume,
            stop_orderid=f"{STOPORDER_PREFIX}.{self.stop_order_count}",
            strategy_name=self.strategy.strategy_name,
        )

        # Tracked in both the active book (for crossing) and the full log.
        self.active_stop_orders[stop_order.stop_orderid] = stop_order
        self.stop_orders[stop_order.stop_orderid] = stop_order

        return stop_order.stop_orderid
    def send_limit_order(
        self,
        direction: Direction,
        offset: Offset,
        price: float,
        volume: float
    ):
        """Register a limit order in NOTTRADED state and return vt_orderid."""
        self.limit_order_count += 1

        order = OrderData(
            symbol=self.symbol,
            exchange=self.exchange,
            orderid=str(self.limit_order_count),
            direction=direction,
            offset=offset,
            price=price,
            volume=volume,
            status=Status.NOTTRADED,
            gateway_name=self.gateway_name,
        )
        order.datetime = self.datetime

        # Tracked in both the active book (for crossing) and the full log.
        self.active_limit_orders[order.vt_orderid] = order
        self.limit_orders[order.vt_orderid] = order

        return order.vt_orderid
def cancel_order(self, strategy: CtaTemplate, vt_orderid: str):
"""
Cancel order by vt_orderid.
"""
if vt_orderid.startswith(STOPORDER_PREFIX):
self.cancel_stop_order(strategy, vt_orderid)
else:
self.cancel_limit_order(strategy, vt_orderid)
def cancel_stop_order(self, strategy: CtaTemplate, vt_orderid: str):
""""""
if vt_orderid not in self.active_stop_orders:
return
stop_order = self.active_stop_orders.pop(vt_orderid)
stop_order.status = StopOrderStatus.CANCELLED
self.strategy.on_stop_order(stop_order)
def cancel_limit_order(self, strategy: CtaTemplate, vt_orderid: str):
""""""
if vt_orderid not in self.active_limit_orders:
return
order = self.active_limit_orders.pop(vt_orderid)
order.status = Status.CANCELLED
self.strategy.on_order(order)
def cancel_all(self, strategy: CtaTemplate):
"""
Cancel all orders, both limit and stop.
"""
vt_orderids = list(self.active_limit_orders.keys())
for vt_orderid in vt_orderids:
self.cancel_limit_order(strategy, vt_orderid)
stop_orderids = list(self.active_stop_orders.keys())
for vt_orderid in stop_orderids:
self.cancel_stop_order(strategy, vt_orderid)
def write_log(self, msg: str, strategy: CtaTemplate = None):
"""
Write log message.
"""
msg = f"{self.datetime}\t{msg}"
self.logs.append(msg)
    def send_email(self, msg: str, strategy: CtaTemplate = None):
        """
        Send email to default receiver.

        Deliberate no-op in backtesting; present for interface parity with
        the live engine.
        """
        pass
    def get_engine_type(self):
        """
        Return engine type (always EngineType.BACKTESTING for this engine).
        """
        return self.engine_type
    def put_strategy_event(self, strategy: CtaTemplate):
        """
        Put an event to update strategy status.

        Deliberate no-op in backtesting; there is no event engine to notify.
        """
        pass
def output(self, msg):
"""
Output message of backtesting engine.
"""
print(f"{datetime.now()}\t{msg}")
def get_all_trades(self):
"""
Return all trade data of current backtesting result.
"""
return list(self.trades.values())
def get_all_orders(self):
"""
Return all limit order data of current backtesting result.
"""
return list(self.limit_orders.values())
def get_all_daily_results(self):
"""
Return all daily result data.
"""
return list(self.daily_results.values())
class DailyResult:
    """Mark-to-market result of one trading day."""

    def __init__(self, date: date, close_price: float):
        """Record the day and its close price; zero every pnl component."""
        self.date = date
        self.close_price = close_price
        self.pre_close = 0

        self.trades = []
        self.trade_count = 0

        self.start_pos = 0
        self.end_pos = 0

        self.turnover = 0
        self.commission = 0
        self.slippage = 0

        self.trading_pnl = 0
        self.holding_pnl = 0
        self.total_pnl = 0
        self.net_pnl = 0

    def add_trade(self, trade: TradeData):
        """Collect one trade executed on this day."""
        self.trades.append(trade)

    def calculate_pnl(
        self,
        pre_close: float,
        start_pos: float,
        size: int,
        rate: float,
        slippage: float,
    ):
        """Mark the day to market.

        ``pre_close``/``start_pos`` carry over yesterday's close and position;
        ``size`` is the contract multiplier, ``rate`` the commission rate and
        ``slippage`` the per-unit slippage cost.
        """
        self.pre_close = pre_close
        self.start_pos = start_pos
        self.end_pos = start_pos

        # Holding pnl: overnight position marked from pre_close to close.
        overnight_move = self.close_price - self.pre_close
        self.holding_pnl = self.start_pos * overnight_move * size

        # Trading pnl: each fill marked from its trade price to the close.
        self.trade_count = len(self.trades)

        for trade in self.trades:
            if trade.direction == Direction.LONG:
                signed_volume = trade.volume
            else:
                signed_volume = -trade.volume

            turnover = trade.price * trade.volume * size

            self.trading_pnl += signed_volume * (self.close_price - trade.price) * size
            self.end_pos += signed_volume
            self.turnover += turnover
            self.commission += turnover * rate
            self.slippage += trade.volume * size * slippage

        # Net pnl takes account of commission and slippage cost.
        self.total_pnl = self.trading_pnl + self.holding_pnl
        self.net_pnl = self.total_pnl - self.commission - self.slippage
def optimize(
    target_name: str,
    strategy_class: CtaTemplate,
    setting: dict,
    vt_symbol: str,
    interval: Interval,
    start: datetime,
    rate: float,
    slippage: float,
    size: float,
    pricetick: float,
    capital: int,
    end: datetime,
    mode: BacktestingMode
):
    """
    Function for running in multiprocessing.pool

    Builds a fresh engine per call (required: engines are not picklable and
    hold per-run state), runs one complete backtest with ``setting``, and
    returns (setting_str, target_value, statistics).
    """
    engine = BacktestingEngine()
    engine.set_parameters(
        vt_symbol=vt_symbol,
        interval=interval,
        start=start,
        rate=rate,
        slippage=slippage,
        size=size,
        pricetick=pricetick,
        capital=capital,
        end=end,
        mode=mode
    )

    engine.add_strategy(strategy_class, setting)
    engine.load_data()
    engine.run_backtesting()
    engine.calculate_result()
    statistics = engine.calculate_statistics(output=False)

    target_value = statistics[target_name]
    return (str(setting), target_value, statistics)
@lru_cache(maxsize=1000000)
def _ga_optimize(parameter_values: tuple):
    """Cached single-setting evaluation for the GA.

    Reads the run configuration from the ga_* module globals (set by
    run_ga_optimization) so the cache key is just the parameter tuple.
    Returns a 1-tuple (target_value,) as DEAP fitness expects.

    NOTE(review): the cache is keyed only on parameters — rerunning the GA
    with a different configuration in the same process would return stale
    results; confirm each optimization runs in a fresh process.
    """
    setting = dict(parameter_values)

    result = optimize(
        ga_target_name,
        ga_strategy_class,
        setting,
        ga_vt_symbol,
        ga_interval,
        ga_start,
        ga_rate,
        ga_slippage,
        ga_size,
        ga_pricetick,
        ga_capital,
        ga_end,
        ga_mode
    )
    return (result[1],)
def ga_optimize(parameter_values: list):
    """Adapter for DEAP individuals: lists are unhashable, so convert to a
    tuple before hitting the lru_cache-backed _ga_optimize."""
    return _ga_optimize(tuple(parameter_values))
@lru_cache(maxsize=10)
def load_bar_data(
    symbol: str,
    exchange: Exchange,
    interval: Interval,
    start: datetime,
    end: datetime
):
    """Load bars from the database; small LRU cache so repeated optimization
    runs over the same window skip the database round-trip."""
    return database_manager.load_bar_data(
        symbol, exchange, interval, start, end
    )
@lru_cache(maxsize=10)
def load_tick_data(
    symbol: str,
    exchange: Exchange,
    start: datetime,
    end: datetime
):
    """Load ticks from the database; cached like load_bar_data."""
    return database_manager.load_tick_data(
        symbol, exchange, start, end
    )
# GA related global value
# Populated by BacktestingEngine.run_ga_optimization before evaluation;
# read by _ga_optimize, whose lru_cache key must stay a plain parameter
# tuple and therefore cannot receive the engine configuration as arguments.
ga_end = None
ga_mode = None
ga_target_name = None
ga_strategy_class = None
ga_setting = None
ga_vt_symbol = None
ga_interval = None
ga_start = None
ga_rate = None
ga_slippage = None
ga_size = None
ga_pricetick = None
ga_capital = None
| mit |
466152112/scikit-learn | sklearn/naive_bayes.py | 128 | 28358 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <vincent.michel@inria.fr>
# Minor fixes by Fabian Pedregosa
# Amit Aides <amitibo@tx.technion.ac.il>
# Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
    """Abstract base class for naive Bayes estimators"""

    @abstractmethod
    def _joint_log_likelihood(self, X):
        """Compute the unnormalized posterior log probability of X

        I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
        shape [n_classes, n_samples].

        Input is passed to _joint_log_likelihood as-is by predict,
        predict_proba and predict_log_proba.
        """

    def predict(self, X):
        """
        Perform classification on an array of test vectors X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]
            Predicted target values for X
        """
        jll = self._joint_log_likelihood(X)
        # Pick, per sample, the class index with the highest joint likelihood.
        return self.classes_[np.argmax(jll, axis=1)]

    def predict_log_proba(self, X):
        """
        Return log-probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Returns the log-probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute `classes_`.
        """
        jll = self._joint_log_likelihood(X)
        # normalize by P(x) = P(f_1, ..., f_n); logsumexp keeps this
        # numerically stable for very negative log-likelihoods.
        log_prob_x = logsumexp(jll, axis=1)
        return jll - np.atleast_2d(log_prob_x).T

    def predict_proba(self, X):
        """
        Return probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Returns the probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute `classes_`.
        """
        return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
Variances for Gaussians in original set.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit: bool
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
epsilon = 1e-9
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
self.class_count_ = np.zeros(n_classes)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
class BaseDiscreteNB(BaseNB):
    """Abstract base class for naive Bayes on discrete/categorical data.

    Any estimator based on this class should provide:

    __init__
    _joint_log_likelihood(X) as per BaseNB

    Subclasses must also implement ``_count(X, Y)`` and
    ``_update_feature_log_prob()``, which ``fit``/``partial_fit`` call here.
    """
    def _update_class_log_prior(self, class_prior=None):
        # Recompute self.class_log_prior_ from (in priority order): an
        # explicit prior, the accumulated class counts, or a uniform prior.
        n_classes = len(self.classes_)
        if class_prior is not None:
            if len(class_prior) != n_classes:
                raise ValueError("Number of priors must match number of"
                                 " classes.")
            self.class_log_prior_ = np.log(class_prior)
        elif self.fit_prior:
            # empirical prior, with sample_weight taken into account
            self.class_log_prior_ = (np.log(self.class_count_)
                                     - np.log(self.class_count_.sum()))
        else:
            # uniform prior: log(1 / n_classes) for every class
            self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incremental fit on a batch of samples.

        This method is expected to be called several times consecutively
        on different chunks of a dataset so as to implement out-of-core
        or online learning.

        This is especially useful when the whole dataset is too big to fit in
        memory at once.

        This method has some performance overhead hence it is better to call
        partial_fit on chunks of data that are as large as possible
        (as long as fitting in the memory budget) to hide the overhead.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values.
        classes : array-like, shape = [n_classes]
            List of all the classes that can possibly appear in the y vector.
            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.
        sample_weight : array-like, shape = [n_samples], optional
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : object
            Returns self.
        """
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        _, n_features = X.shape
        if _check_partial_fit_first_call(self, classes):
            # This is the first call to partial_fit:
            # initialize various cumulative counters
            n_effective_classes = len(classes) if len(classes) > 1 else 2
            self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
            self.feature_count_ = np.zeros((n_effective_classes, n_features),
                                           dtype=np.float64)
        elif n_features != self.coef_.shape[1]:
            msg = "Number of features %d does not match previous data %d."
            raise ValueError(msg % (n_features, self.coef_.shape[-1]))
        # One-hot encode y against the known classes so counts can be
        # accumulated with a single matrix product in _count().
        Y = label_binarize(y, classes=self.classes_)
        if Y.shape[1] == 1:
            # Binary problem: expand to an explicit two-column indicator.
            Y = np.concatenate((1 - Y, Y), axis=1)
        n_samples, n_classes = Y.shape
        if X.shape[0] != Y.shape[0]:
            msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
            raise ValueError(msg % (X.shape[0], y.shape[0]))
        # label_binarize() returns arrays with dtype=np.int64.
        # We convert it to np.float64 to support sample_weight consistently
        Y = Y.astype(np.float64)
        if sample_weight is not None:
            # Scale each sample's indicator row by its weight.
            Y *= check_array(sample_weight).T
        class_prior = self.class_prior
        # Count raw events from data before updating the class log prior
        # and feature log probas
        self._count(X, Y)
        # XXX: OPTIM: we could introduce a public finalization method to
        # be called by the user explicitly just once after several consecutive
        # calls to partial_fit and prior any call to predict[_[log_]proba]
        # to avoid computing the smooth log probas at each call to partial fit
        self._update_feature_log_prob()
        self._update_class_log_prior(class_prior=class_prior)
        return self

    def fit(self, X, y, sample_weight=None):
        """Fit Naive Bayes classifier according to X, y

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values.
        sample_weight : array-like, shape = [n_samples], optional
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y, 'csr')
        _, n_features = X.shape
        # One-hot encode y; the binarizer also records the observed classes.
        labelbin = LabelBinarizer()
        Y = labelbin.fit_transform(y)
        self.classes_ = labelbin.classes_
        if Y.shape[1] == 1:
            # Binary problem: expand to an explicit two-column indicator.
            Y = np.concatenate((1 - Y, Y), axis=1)
        # LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
        # We convert it to np.float64 to support sample_weight consistently;
        # this means we also don't have to cast X to floating point
        Y = Y.astype(np.float64)
        if sample_weight is not None:
            # Scale each sample's indicator row by its weight.
            Y *= check_array(sample_weight).T
        class_prior = self.class_prior
        # Count raw events from data before updating the class log prior
        # and feature log probas
        n_effective_classes = Y.shape[1]
        self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
        self.feature_count_ = np.zeros((n_effective_classes, n_features),
                                       dtype=np.float64)
        self._count(X, Y)
        self._update_feature_log_prob()
        self._update_class_log_prior(class_prior=class_prior)
        return self

    # XXX The following is a stopgap measure; we need to set the dimensions
    # of class_log_prior_ and feature_log_prob_ correctly.
    def _get_coef(self):
        # For binary problems expose only the positive class row, mimicking
        # the shape conventions of linear models.
        return (self.feature_log_prob_[1:]
                if len(self.classes_) == 2 else self.feature_log_prob_)

    def _get_intercept(self):
        return (self.class_log_prior_[1:]
                if len(self.classes_) == 2 else self.class_log_prior_)

    # Read-only linear-model-style aliases.
    coef_ = property(_get_coef)
    intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
    """Naive Bayes classifier for multinomial models.

    Suitable for classification with discrete features (e.g. word counts for
    text classification). The multinomial distribution normally requires
    integer feature counts, but fractional counts such as tf-idf may also
    work in practice.

    Read more in the :ref:`User Guide <multinomial_naive_bayes>`.

    Parameters
    ----------
    alpha : float, optional (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).
    fit_prior : boolean
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.
    class_prior : array-like, size (n_classes,)
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.

    Attributes
    ----------
    class_log_prior_ : array, shape (n_classes, )
        Smoothed empirical log probability for each class.
    intercept_ : property
        Mirrors ``class_log_prior_`` for interpreting MultinomialNB
        as a linear model.
    feature_log_prob_ : array, shape (n_classes, n_features)
        Empirical log probability of features given a class, ``P(x_i|y)``.
    coef_ : property
        Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
        as a linear model.
    class_count_ : array, shape (n_classes,)
        Number of samples encountered for each class during fitting,
        weighted by the sample weight when provided.
    feature_count_ : array, shape (n_classes, n_features)
        Number of samples encountered for each (class, feature) during
        fitting, weighted by the sample weight when provided.

    Examples
    --------
    >>> import numpy as np
    >>> X = np.random.randint(5, size=(6, 100))
    >>> y = np.array([1, 2, 3, 4, 5, 6])
    >>> from sklearn.naive_bayes import MultinomialNB
    >>> clf = MultinomialNB()
    >>> clf.fit(X, y)
    MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
    >>> print(clf.predict(X[2]))
    [3]

    Notes
    -----
    For the rationale behind the names `coef_` and `intercept_`, i.e.
    naive Bayes as a linear classifier, see J. Rennie et al. (2003),
    Tackling the poor assumptions of naive Bayes text classifiers, ICML.

    References
    ----------
    C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
    Information Retrieval. Cambridge University Press, pp. 234-265.
    http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
    """

    def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
        self.alpha = alpha
        self.fit_prior = fit_prior
        self.class_prior = class_prior

    def _count(self, X, Y):
        """Accumulate raw feature/class counts from one (weighted) batch."""
        # Multinomial counts must be non-negative; check data directly for
        # sparse input to avoid densifying.
        values = X.data if issparse(X) else X
        if np.any(values < 0):
            raise ValueError("Input X must be non-negative")
        self.feature_count_ += safe_sparse_dot(Y.T, X)
        self.class_count_ += Y.sum(axis=0)

    def _update_feature_log_prob(self):
        """Apply Lidstone smoothing and recompute feature log probabilities."""
        smoothed_counts = self.feature_count_ + self.alpha
        smoothed_totals = smoothed_counts.sum(axis=1).reshape(-1, 1)
        # log P(x_i | y) = log(count + alpha) - log(total per class)
        self.feature_log_prob_ = np.log(smoothed_counts) - np.log(smoothed_totals)

    def _joint_log_likelihood(self, X):
        """Return the posterior log probability of the samples X."""
        check_is_fitted(self, "classes_")
        X = check_array(X, accept_sparse='csr')
        # Linear decision function: X @ log P(x|y).T + log P(y)
        jll = safe_sparse_dot(X, self.feature_log_prob_.T)
        return jll + self.class_log_prior_
class BernoulliNB(BaseDiscreteNB):
    """Naive Bayes classifier for multivariate Bernoulli models.

    Like MultinomialNB, this classifier is suitable for discrete data. The
    difference is that while MultinomialNB works with occurrence counts,
    BernoulliNB is designed for binary/boolean features.

    Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.

    Parameters
    ----------
    alpha : float, optional (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).
    binarize : float or None, optional
        Threshold for binarizing (mapping to booleans) of sample features.
        If None, input is presumed to already consist of binary vectors.
    fit_prior : boolean
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.
    class_prior : array-like, size=[n_classes,]
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.

    Attributes
    ----------
    class_log_prior_ : array, shape = [n_classes]
        Log probability of each class (smoothed).
    feature_log_prob_ : array, shape = [n_classes, n_features]
        Empirical log probability of features given a class, P(x_i|y).
    class_count_ : array, shape = [n_classes]
        Number of samples encountered for each class during fitting,
        weighted by the sample weight when provided.
    feature_count_ : array, shape = [n_classes, n_features]
        Number of samples encountered for each (class, feature) during
        fitting, weighted by the sample weight when provided.

    Examples
    --------
    >>> import numpy as np
    >>> X = np.random.randint(2, size=(6, 100))
    >>> Y = np.array([1, 2, 3, 4, 4, 5])
    >>> from sklearn.naive_bayes import BernoulliNB
    >>> clf = BernoulliNB()
    >>> clf.fit(X, Y)
    BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
    >>> print(clf.predict(X[2]))
    [3]

    References
    ----------
    C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
    Information Retrieval. Cambridge University Press, pp. 234-265.
    http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html

    A. McCallum and K. Nigam (1998). A comparison of event models for naive
    Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
    Text Categorization, pp. 41-48.

    V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
    naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
    """

    def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
                 class_prior=None):
        self.alpha = alpha
        self.binarize = binarize
        self.fit_prior = fit_prior
        self.class_prior = class_prior

    def _count(self, X, Y):
        """Accumulate raw feature/class counts after optional binarization."""
        if self.binarize is not None:
            # Map features to {0, 1} before counting occurrences.
            X = binarize(X, threshold=self.binarize)
        self.feature_count_ += safe_sparse_dot(Y.T, X)
        self.class_count_ += Y.sum(axis=0)

    def _update_feature_log_prob(self):
        """Apply smoothing and recompute feature log probabilities."""
        # Each feature is a Bernoulli trial, hence the 2*alpha denominator
        # smoothing (alpha for each outcome).
        numerator = np.log(self.feature_count_ + self.alpha)
        denominator = np.log((self.class_count_ + self.alpha * 2).reshape(-1, 1))
        self.feature_log_prob_ = numerator - denominator

    def _joint_log_likelihood(self, X):
        """Return the posterior log probability of the samples X."""
        check_is_fitted(self, "classes_")
        X = check_array(X, accept_sparse='csr')

        if self.binarize is not None:
            X = binarize(X, threshold=self.binarize)

        n_classes, n_features = self.feature_log_prob_.shape
        n_samples, n_features_X = X.shape
        if n_features_X != n_features:
            raise ValueError("Expected input with %d features, got %d instead"
                             % (n_features, n_features_X))

        # log P(x_i = 0 | y), derived from log P(x_i = 1 | y).
        neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
        # Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
        jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
        return jll + self.class_log_prior_ + neg_prob.sum(axis=1)
| bsd-3-clause |
lhoang29/vowpal_wabbit | python/tests/test_sklearn_vw.py | 1 | 5425 | from collections import namedtuple
import numpy as np
import pytest
from vowpalwabbit.sklearn_vw import VW, VWClassifier, VWRegressor, tovw
from sklearn import datasets
from sklearn.exceptions import NotFittedError
from scipy.sparse import csr_matrix
"""
Test utilities to support integration of Vowpal Wabbit and scikit-learn
"""
# Lightweight container bundling features (x) and targets (y) for fixtures.
Dataset = namedtuple('Dataset', 'x, y')


@pytest.fixture(scope='module')
def data():
    """Module-scoped synthetic binary classification dataset.

    Uses a fixed random_state so the exact-value assertions in the tests
    below are reproducible.
    """
    x, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    # VW consumes 32-bit floats.
    x = x.astype(np.float32)
    return Dataset(x=x, y=y)
class TestVW:
    """Tests for the base VW sklearn wrapper (fit/predict/params/coefs)."""

    def test_validate_vw_estimator(self):
        """
        Run VW and VWClassifier through the sklearn estimator validation check

        Note: the VW estimators fail sklearn's estimator validation check. The validator creates a new
        instance of the estimator with the estimator's default args, '--quiet' in VW's case. At some point
        in the validation sequence it calls fit() with some fake data. The data gets formatted via tovw() to:
        2 1 | 0:0.5488135039273248 1:0.7151893663724195 2:0.6027633760716439 3:0.5448831829968969 4:0.4236547993389047 5:0.6458941130666561 6:0.4375872112626925 7:0.8917730007820798 8:0.9636627605010293 9:0.3834415188257777
        This gets passed into vw.learn and the python process dies with the error, "Process finished with exit code 139"
        At some point it would probably be worth while figuring out the problem this and getting the two estimators to
        pass sklearn's validation check
        """
        # Intentionally disabled -- see docstring above for the crash details.
        # check_estimator(VW)
        # check_estimator(VWClassifier)

    def test_init(self):
        assert isinstance(VW(), VW)

    def test_fit(self, data):
        # fit_ attribute only appears after a successful fit().
        model = VW(loss_function='logistic')
        assert not hasattr(model, 'fit_')
        model.fit(data.x, data.y)
        assert model.fit_

    def test_passes(self, data):
        # Multi-pass training must produce different weights than one pass.
        n_passes = 2
        model = VW(loss_function='logistic', passes=n_passes)
        assert model.passes_ == n_passes
        model.fit(data.x, data.y)
        weights = model.get_coefs()
        model = VW(loss_function='logistic')
        # first pass weights should not be the same
        model.fit(data.x, data.y)
        assert not np.allclose(weights.data, model.get_coefs().data)

    def test_predict_not_fit(self, data):
        # Predicting before fit() must raise sklearn's NotFittedError.
        model = VW(loss_function='logistic')
        with pytest.raises(NotFittedError):
            model.predict(data.x[0])

    def test_predict(self, data):
        # Expected value pinned from a deterministic VW run (fixed data seed).
        model = VW(loss_function='logistic')
        model.fit(data.x, data.y)
        assert np.isclose(model.predict(data.x[:1][:1])[0], 0.406929)

    def test_predict_no_convert(self):
        # convert_to_vw=False accepts raw VW-format example strings.
        model = VW(loss_function='logistic', convert_to_vw=False)
        model.fit(['-1 | bad', '1 | good'])
        assert np.isclose(model.predict(['| good'])[0], 0.245515)

    def test_set_params(self):
        model = VW()
        assert 'l' not in model.params
        model.set_params(l=0.1)
        assert model.params['l'] == 0.1
        # confirm model params reset with new construction
        model = VW()
        assert 'l' not in model.params

    def test_get_coefs(self, data):
        # Nonzero weight indices include the 10 features plus the constant
        # term's hashed slot.
        model = VW()
        model.fit(data.x, data.y)
        weights = model.get_coefs()
        assert np.allclose(weights.indices, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 116060])

    def test_get_intercept(self, data):
        model = VW()
        model.fit(data.x, data.y)
        intercept = model.get_intercept()
        assert isinstance(intercept, float)

    def test_oaa(self):
        # One-against-all multiclass with 3 classes on raw VW strings.
        X = ['1 | feature1:2.5',
             '2 | feature1:0.11 feature2:-0.0741',
             '3 | feature3:2.33 feature4:0.8 feature5:-3.1',
             '1 | feature2:-0.028 feature1:4.43',
             '2 | feature5:1.532 feature6:-3.2']
        model = VW(convert_to_vw=False, oaa=3)
        model.fit(X)
        assert np.allclose(model.predict(X), [ 1.,  2.,  3.,  1.,  2.])
class TestVWClassifier:
    """Tests for the VWClassifier wrapper."""

    def test_init(self):
        assert isinstance(VWClassifier(), VWClassifier)

    def test_decision_function(self, data):
        """Classifier predictions must equal sign-thresholded raw VW output."""
        classes = np.array([-1., 1.])
        # Reference: raw logistic model, threshold predictions at zero.
        raw_model = VW(loss_function='logistic')
        raw_model.fit(data.x, data.y)
        predictions = raw_model.predict(data.x)
        # BUG FIX: np.int was a deprecated alias of the builtin int and is
        # removed in NumPy >= 1.24; using the builtin is exactly equivalent.
        class_indices = (predictions > 0).astype(int)
        expected = classes[class_indices]

        model = VWClassifier()
        model.fit(data.x, data.y)
        actual = model.predict(data.x)

        assert np.allclose(expected, actual)
class TestVWRegressor:
    """Tests for the VWRegressor wrapper."""

    def test_init(self):
        assert isinstance(VWRegressor(), VWRegressor)

    def test_predict(self, data):
        # The regressor wrapper should mirror raw VW predictions exactly.
        raw_model = VW()
        raw_model.fit(data.x, data.y)

        model = VWRegressor()
        model.fit(data.x, data.y)

        assert np.allclose(raw_model.predict(data.x), model.predict(data.x))
        # ensure model can make multiple calls to predict
        assert np.allclose(raw_model.predict(data.x), model.predict(data.x))

    def test_delete(self):
        # Deleting an instance must not raise (exercises native cleanup).
        raw_model = VW()
        del raw_model
def test_tovw():
    """tovw must format dense and sparse inputs into identical VW lines."""
    features = np.array([[1.2, 3.4, 5.6, 1.0, 10], [7.8, 9.10, 11, 0, 20]])
    labels = np.array([1, -1])
    weights = [1, 2]
    # Note: zero-valued features are dropped and floats are normalized.
    expected = ['1 1 | 0:1.2 1:3.4 2:5.6 3:1 4:10',
                '-1 2 | 0:7.8 1:9.1 2:11 4:20']

    assert tovw(x=features, y=labels, sample_weight=weights) == expected
    assert tovw(x=csr_matrix(features), y=labels, sample_weight=weights) == expected
| bsd-3-clause |
dronir/EM | python/gather.py | 1 | 10232 | import Scientific.IO.NetCDF as nc
import numpy as np
import sys
import math
import pylab as pl
import matplotlib.colors as colors
from numpy import floor, sqrt, sin, cos, arccos, arctan2, pi
class gth_hemisphere:
    """Class implementing the gathering hemisphere.

    The hemisphere is divided into resTheta rings of (approximately)
    equal-solid-angle cells; per-cell data is stored flat in self.data /
    self.hsData indexed via cIdx (first cell of each ring) and nPhi
    (cells per ring).

    NOTE: this module is Python 2 code (print statements, old netCDF API).
    """
    def __init__(self, resTheta=1, nThetaI=1, nDataLevels=1, phiRange=2.0*pi, type='Hemisphere'):
        # Ring 0 is a single polar cap cell; subsequent rings get a phi
        # subdivision chosen so each cell's solid angle is close to the cap's.
        self.Type = type
        self.resTheta = resTheta
        self.dTheta = 0.5 * pi / float(resTheta)
        self.dThetaInv = 1.0 / self.dTheta
        self.nCells = 1
        self.nThetaI = nThetaI
        self.nLevels = nDataLevels
        self.type = type
        self.dPhi = np.zeros(resTheta)
        self.dPhiInv = np.zeros(resTheta)
        self.dA = np.zeros(resTheta)
        self.mTheta = np.zeros(resTheta)
        self.nPhi = np.zeros(resTheta, np.int64)
        self.cIdx = np.zeros(resTheta)
        self.phiRange = phiRange
        # Solid angle of the polar cap cell.
        dA0 = self.phiRange * (1.0 - cos(self.dTheta))
        self.nPhi [0] = 1
        self.cIdx [0] = 0
        self.dPhi [0] = self.phiRange
        self.dPhiInv [0] = 1.0 / self.phiRange
        self.dA [0] = dA0
        self.mTheta [0] = 0.5 * self.dTheta
        for i in range(1, resTheta):
            # Pick a phi step giving ~dA0 per cell, rounded to a whole number
            # of cells over phiRange.
            dPhi = dA0 / (cos(i * self.dTheta) - cos((i+1) * self.dTheta))
            rPhi = round(self.phiRange / dPhi)
            dPhi = self.phiRange / float(rPhi)
            self.nPhi [i] = rPhi
            self.dPhi [i] = dPhi
            self.dPhiInv [i] = 1.0 / dPhi
            self.dA [i] = dPhi * (cos(i * self.dTheta) - cos((i+1) * self.dTheta))
            # NOTE(review): ring 0 uses (i + 0.5)*dTheta but this is
            # (i - 0.5)*dTheta -- looks like it should be (i + 0.5); confirm.
            self.mTheta [i] = self.dTheta * (float(i) - 0.5)
            self.cIdx [i] = self.cIdx[i-1] + self.nPhi[i-1]
            self.nCells = self.nCells + rPhi
        self.dAMean = self.phiRange / float(self.nCells)
        self.hsData = {}
        self.data = np.zeros([nDataLevels, self.nCells, nThetaI])
        self.weight = np.zeros([nDataLevels, self.nCells, nThetaI])
    def load(self, fName):
        """
        Loads the hemisphere data from a netCDF file.

        Overwrites the grid geometry (nPhi, cIdx, dPhi, ...) with the values
        stored in the file.

        Returns: nothing
        """
        try:
            dFile = nc.NetCDFFile(fName, "r")
        except IOError:
            print "Error reading file, exiting."
            sys.exit()
        if "Hemisphere" not in dFile.variables.keys():
            print "Error: not a proper hemisphere file."
            sys.exit()
        if "Elements" in dir(dFile):
            self.Elements = str(dFile.Elements).split()
        self.Type = dFile.Type
        self.nPhi = np.array(dFile.nPhi)
        self.cIdx = np.array(dFile.cIdx)
        ## Convert Fortran indices to numpy indices
        if self.cIdx[0] == 1:
            self.cIdx -= 1
        self.dPhi = np.array(dFile.dPhi)
        self.dPhiInv = 1.0 / self.dPhi
        self.nThetaI = int(dFile.nThetaI)
        self.nLevels = int(dFile.nLevels)
        self.resTheta = int(dFile.nThetaE)
        self.dTheta = 0.5 * math.pi / float(self.resTheta)
        self.dThetaInv = 1.0/self.dTheta
        self.dA = dFile.dA
        self.nCells = int(dFile.nCells)
        self.type = dFile.Type
        try:
            # Optional attribute; older files may not carry it.
            self.w = float(dFile.Single_scattering_albedo)
        except:
            pass
        self.hsData['Simulation'] = np.array(dFile.variables['Hemisphere'].getValue())
        self.data = np.array(dFile.variables['Hemisphere'].getValue())
        dFile.close()
    def divideBySolidAngle(self):
        # Normalize each ring's cells by the ring's per-cell solid angle.
        for i in range(self.resTheta):
            self.data[:, self.cIdx[i] : self.cIdx[i] + self.nPhi[i], :] /= self.dA[i]
    def carDirToCell(self, D):
        # Map a Cartesian direction vector to its flat cell index.
        # NOTE(review): t/p are numpy floats here; modern numpy requires
        # integer indices for dPhiInv[t] -- confirm against the numpy in use.
        r = sqrt ( (D**2).sum() )
        theta = arccos ( D[2] / r )
        phi = arctan2 ( D[1] / r, D[0] / r )
        if( phi < 0.0 ):
            phi = 2.0*pi + phi
        t = floor( theta * self.dThetaInv )
        p = floor( phi * self.dPhiInv[t]  )
        return self.cIdx[t] + p
    def addDataCar(self, D, v, set=0, lvl=0):
        # Accumulate value v into the cell containing direction D.
        c = self.carDirToCell(D)
        self.data[set, c, lvl] += v
    def toArray(self, set=0, lvl=0, hsDataSet='Simulation'):
        """
        Unpacks the gathering hemisphere into a 2-dimensional array.

        Each ring is resampled onto the widest ring's phi resolution.

        Returns: numpy.array
        """
        resTheta = self.resTheta
        resPhi = self.nPhi.max()
        if(self.Type == 'Hemisphere'):
            dp = math.pi * 2.0 / float(resPhi)
        else:
            dp = math.pi / float(resPhi)
        data = np.zeros([resTheta, resPhi])
        for i in range(resTheta):
            dPhiI = dp * self.dPhiInv[i]
            for j in range(resPhi):
                data[i,j] = self.hsData[hsDataSet][lvl, self.cIdx[i] + int(math.floor(j * dPhiI)), set]
        return data
    def phiSlice(self, theta, set=0, lvl=0, hsDataSet='Simulation'):
        """
        Extracts one constant-theta ring as (phi, value) pairs.

        Returns: numpy.array
        """
        iTheta = int(math.floor(theta * self.dThetaInv))
        resPhi = self.nPhi[iTheta]
        dPhi = self.dPhi[iTheta]
        data = np.zeros([resPhi,2])
        for i in range(resPhi):
            data[i,0] = (i + 0.5) * dPhi
            # NOTE(review): self.rows is never assigned anywhere in this
            # class -- this method looks dead/broken; confirm before use.
            data[i,1] = self.rows[set][iTheta][i,lvl]
        return data
    def thetaSlice(self, phi, set=0, lvl=0, hsDataSet='Simulation'):
        """
        Extracts one constant-phi cut as (theta, value) pairs.

        Returns: numpy.array
        """
        data = np.zeros([self.resTheta, 2])
        for i in range(self.resTheta):
            data[i,0] = (i+0.5) * self.dTheta
            #data[i,1] = self.hsData[set][i][phi * self.dPhiInv[i],lvl]
            # NOTE(review): phi * self.dPhiInv[i] is a float index -- modern
            # numpy raises on this; likely needs int(floor(...)).
            data[i,1] = self.hsData[hsDataSet][lvl, phi * self.dPhiInv[i], set]
        return data
    def eval(self, thtI, thtE, phi):
        #NOTE: QUICK FIX! NEEDS CORRECTING!
        # Hard-codes a 10-degree incidence grid and linear interpolation over
        # thtI; thtE/phi are expected in degrees here (radians elsewhere).
        iThtI = int(math.floor(thtI/10.))
        iThtE = int(math.floor(math.radians(thtE) * self.dThetaInv))
        iPhi = int(math.floor(math.radians(phi) * self.dPhiInv[iThtE]))
        x = (thtI % 10.) / 10.
        if iThtI+1 < 9:
            return ((1.0 - x)*self.hsData['Simulation'][0, self.cIdx[iThtE]+iPhi, iThtI] +
                    x*self.hsData['Simulation'][0, self.cIdx[iThtE]+iPhi, iThtI+1]) * 4.0 * math.pi
        else:
            return self.hsData['Simulation'][0, self.cIdx[iThtE]+iPhi, iThtI] * 4.0 * math.pi
    def asArray(self, set=0, lvl=0, hsDataSet='Simulation'):
        # Flatten to rows of (value, theta, phi, dA); a quartersphere is
        # mirrored across phi to cover the full range.
        if self.type == 'Quartersphere':
            data = np.zeros([self.nCells*2,4])
            data[0:self.nCells,0] = self.hsData[hsDataSet][lvl, :, set]
            data[self.nCells:2*self.nCells,0] = self.hsData[hsDataSet][lvl, :, set]
        else:
            data = np.zeros([self.nCells,4])
            data[:,0] = self.hsData[hsDataSet][lvl, :, set]
        for iThtE in range(self.resTheta):
            if(iThtE != 0):
                data[self.cIdx[iThtE] : self.cIdx[iThtE] + self.nPhi[iThtE], 1] = (iThtE + 0.5) * self.dTheta
            else:
                data[0, 1] = 0.0
            for iPhi in range(self.nPhi[iThtE]):
                data[self.cIdx[iThtE] + iPhi, 2] = pi - (iPhi + 0.5) * self.dPhi[iThtE]
            data[self.cIdx[iThtE] : self.cIdx[iThtE] + self.nPhi[iThtE], 3] = self.dA[iThtE]
        if self.type == 'Quartersphere':
            data[self.nCells:2*self.nCells,1] = data[0:self.nCells,1]
            data[self.nCells:2*self.nCells,2] = 2*pi - data[0:self.nCells,2]
            data[self.nCells:2*self.nCells,3] = data[0:self.nCells,3]
        return data
class xrHemisphere(gth_hemisphere):
    """Gathering hemisphere for X-ray fluorescence simulations.

    Extends the base load() to also read material coefficients, spectrum
    data and medium maps from the netCDF file. (Python 2 code.)
    """
    def __init__(self, resTheta=1, nThetaI=1, nDataLevels=1):
        gth_hemisphere.__init__(self, resTheta, nThetaI, nDataLevels)
    def load(self, fName):
        """
        Loads the hemisphere data from a netCDF file.

        Required variables/attributes raise and exit on absence; spectrum and
        medium data are optional.

        Returns: nothing
        """
        try:
            dFile = nc.NetCDFFile(fName, "r")
        except IOError:
            print "Error reading file, exiting."
            sys.exit()
        if "Hemisphere" not in dFile.variables.keys():
            print "Error: not a proper hemisphere file."
            sys.exit()
        try:
            # Material / interaction coefficients (all mandatory).
            self.Elements = str(dFile.Elements).split()
            self.muPhotoIon = np.array(dFile.variables['Photoionization_coefficient'].getValue())
            self.muAbs = np.array(dFile.variables['Fluorescence_line_coefficient'].getValue())
            self.muAbsCDF = np.array(dFile.variables['Fluorescence_line_cdf'].getValue())
            self.muRay = np.array(dFile.variables['Rayleigh_coefficient'].getValue())
            self.muExt = np.array(dFile.variables['Extinction_coefficient'].getValue())
            self.lEnergy = np.array(dFile.Fluorescence_line_energy)
            self.eEnergy = np.array(dFile.Absorbtion_edge_energy)
            self.energy = np.array(dFile.variables['Material_energy'].getValue())
            self.Method = dFile.Simulation_method
            print self.lEnergy[0], self.eEnergy[0]
            print self.energy.min()
        except (KeyError, AttributeError), e:
            print "Error: Malformed input file, missing data.", e
            sys.exit(1)
        ## Spectrum data
        ##
        print "Loading spectrum data..."
        try:
            self.spcType = dFile.Spectrum_type
            self.SpectrumE = np.array(dFile.variables['Spectrum_energy'].getValue())
            self.Spectrum = np.array(dFile.variables['Spectrum_intensity'].getValue())
            if self.spcType == 'Spectrum':
                # CDF tables only exist for full-spectrum (non-line) sources.
                self.SpectrumCdf = np.array(dFile.variables['Spectrum_cdf'].getValue())
                self.SpectrumCdfInv = np.array(dFile.variables['Spectrum_inverse_cdf'].getValue())
            print "\tSpectrum data loaded."
        except KeyError, e:
            print "\tNo spectrum data found."
        ## Medium data
        ##
        print "Loading medium data..."
        try:
            self.medHeightmap = np.array(dFile.variables['Medium_heightmap'].getValue())
            self.medDensitymap = np.array(dFile.variables['Medium_densitymap'].getValue())
            print "\tMedium data loaded."
        except KeyError, e:
            print "\tNo medium data found."
        # Grid geometry, as in the base class load().
        self.Type = dFile.Type
        self.nPhi = np.array(dFile.nPhi)
        self.cIdx = np.array(dFile.cIdx)
        ## Convert Fortran indices to numpy indices
        if self.cIdx[0] == 1:
            self.cIdx -= 1
        self.dPhi = np.array(dFile.dPhi)
        self.dPhiInv = 1.0 / self.dPhi
        self.nThetaI = int(dFile.nThetaI)
        self.thetaI = np.array(dFile.Theta_in)
        self.nLevels = int(dFile.nLevels)
        self.resTheta = int(dFile.nThetaE)
        self.dTheta = 0.5 * math.pi / float(self.resTheta)
        self.dThetaInv = 1.0/self.dTheta
        self.dA = dFile.dA
        self.hsData = {}
        self.hsData['Simulation'] = np.array(dFile.variables['Hemisphere'].getValue())
        if "Hemisphere_analytic" in dFile.variables.keys():
            self.hsData['Analytic'] = np.array(dFile.variables['Hemisphere_analytic'].getValue())
        dFile.close()
| gpl-3.0 |
xavierwu/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 159 | 7852 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
# Random symmetric positive semi-definite matrix (V @ V.T), available for
# metrics that take a covariance-style parameter.
V = np.random.random((3, 3))
V = np.dot(V, V.T)

# Dimensionality of the random test data used throughout this module.
DIMENSION = 3

# Metric names mapped to the extra keyword arguments their constructor needs.
METRICS = {'euclidean': {},
           'manhattan': {},
           'chebyshev': {},
           'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
    """Reference k-nearest-neighbors via a full pairwise distance matrix.

    Returns (dist, ind): for each row of Y, the distances to and indices of
    its k nearest points in X under the given metric.
    """
    dist_matrix = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
    # Indices of the k closest X points for each query row.
    ind = np.argsort(dist_matrix, axis=1)[:, :k]
    # Gather the corresponding distances row by row.
    row_index = np.arange(Y.shape[0])[:, None]
    dist = dist_matrix[row_index, ind]
    return dist, ind
def test_kd_tree_query():
    # NOTE(review): nose-style yield test; pytest >= 4 no longer collects
    # generator tests -- confirm how the suite runs these.
    np.random.seed(0)
    X = np.random.random((40, DIMENSION))
    Y = np.random.random((10, DIMENSION))

    def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
        # KDTree results must match the brute-force reference for every
        # traversal strategy, k, and metric combination.
        kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
        dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
                                breadth_first=breadth_first)
        dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)

        # don't check indices here: if there are any duplicate distances,
        # the indices may not match.  Distances should not have this problem.
        assert_array_almost_equal(dist1, dist2)

    for (metric, kwargs) in METRICS.items():
        for k in (1, 3, 5):
            for dualtree in (True, False):
                for breadth_first in (True, False):
                    yield (check_neighbors,
                           dualtree, breadth_first,
                           k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
    """Check KDTree.query_radius indices against a direct distance filter."""
    np.random.seed(0)
    X = 2 * np.random.random(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)
    tol = 1E-15  # guard against roundoff exactly at the radius boundary
    tree = KDTree(X, leaf_size=5)
    dist_to_origin = np.sqrt(((X - query_pt) ** 2).sum(1))
    for radius in np.linspace(dist_to_origin[0], dist_to_origin[-1], 100):
        found = tree.query_radius([query_pt], radius + tol)[0]
        expected = np.where(dist_to_origin <= radius + tol)[0]
        assert_array_almost_equal(np.sort(expected), np.sort(found))
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
    """Check that distances returned by query_radius match recomputed ones."""
    np.random.seed(0)
    X = 2 * np.random.random(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)
    tol = 1E-15  # roundoff guard at the radius boundary
    tree = KDTree(X, leaf_size=5)
    dist_to_origin = np.sqrt(((X - query_pt) ** 2).sum(1))
    for radius in np.linspace(dist_to_origin[0], dist_to_origin[-1], 100):
        ind, dist = tree.query_radius([query_pt], radius + tol,
                                      return_distance=True)
        neighbors, reported = ind[0], dist[0]
        recomputed = np.sqrt(((query_pt - X[neighbors]) ** 2).sum(1))
        assert_array_almost_equal(recomputed, reported)
def compute_kernel_slow(Y, X, kernel, h):
    """Reference (brute-force) kernel density of the points X, evaluated at
    each row of Y with bandwidth h.

    Uses the same normalization (kernel_norm) as the tree-based estimator
    so results are directly comparable.
    """
    d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    norm = kernel_norm(h, X.shape[1], kernel)
    # Unnormalized per-point kernel sums, one rule per supported kernel.
    kernels = {
        'gaussian': lambda: np.exp(-0.5 * (d * d) / (h * h)).sum(-1),
        'tophat': lambda: (d < h).sum(-1),
        'epanechnikov': lambda: ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1),
        'exponential': lambda: (np.exp(-d / h)).sum(-1),
        'linear': lambda: ((1 - d / h) * (d < h)).sum(-1),
        'cosine': lambda: (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1),
    }
    try:
        unnormalized = kernels[kernel]()
    except KeyError:
        raise ValueError('kernel not recognized')
    return norm * unnormalized
def test_kd_tree_kde(n_samples=100, n_features=3):
    """Yield checks comparing KDTree.kernel_density against the brute-force
    reference (compute_kernel_slow) over all kernel/bandwidth/tolerance
    combinations."""
    np.random.seed(0)
    X = np.random.random((n_samples, n_features))
    Y = np.random.random((n_samples, n_features))
    kdt = KDTree(X, leaf_size=10)
    for kernel in ['gaussian', 'tophat', 'epanechnikov',
                   'exponential', 'linear', 'cosine']:
        for h in [0.01, 0.1, 1]:
            dens_true = compute_kernel_slow(Y, X, kernel, h)

            # NOTE(review): dens_true is captured by closure, so correctness
            # relies on the test runner consuming each yielded check before
            # the loop rebinds dens_true — confirm with the runner in use.
            def check_results(kernel, h, atol, rtol, breadth_first):
                dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
                                          kernel=kernel,
                                          breadth_first=breadth_first)
                # rtol floor of 1e-7 absorbs float roundoff when rtol == 0
                assert_allclose(dens, dens_true, atol=atol,
                                rtol=max(rtol, 1e-7))
            for rtol in [0, 1E-5]:
                for atol in [1E-6, 1E-2]:
                    for breadth_first in (True, False):
                        yield (check_results, kernel, h, atol, rtol,
                               breadth_first)
def test_gaussian_kde(n_samples=1000):
    """Cross-check KDTree gaussian kernel density against
    scipy.stats.gaussian_kde on 1-d normal data."""
    # Compare gaussian KDE results to scipy.stats.gaussian_kde
    from scipy.stats import gaussian_kde
    np.random.seed(0)
    x_in = np.random.normal(0, 1, n_samples)
    x_out = np.linspace(-5, 5, 30)
    for h in [0.01, 0.1, 1]:
        kdt = KDTree(x_in[:, None])
        try:
            # scipy's bw_method is relative to the data std, hence the rescale
            gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
        except TypeError:
            raise SkipTest("Old scipy, does not accept explicit bandwidth.")
        # kernel_density returns an unnormalized sum; divide by n_samples
        dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
        dens_gkde = gkde.evaluate(x_out)
        assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
    """Yield checks of KDTree.two_point_correlation against brute force."""
    np.random.seed(0)
    X = np.random.random((n_samples, n_features))
    Y = np.random.random((n_samples, n_features))
    radii = np.linspace(0, 1, 10)
    tree = KDTree(X, leaf_size=10)
    pairwise = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
    # brute-force pair counts within each radius
    expected_counts = []
    for radius in radii:
        expected_counts.append((pairwise <= radius).sum())

    def check_two_point(r, dualtree):
        counts = tree.two_point_correlation(Y, r=r, dualtree=dualtree)
        assert_array_almost_equal(counts, expected_counts)

    for dualtree in (True, False):
        yield check_two_point, radii, dualtree
def test_kd_tree_pickle():
    """Yield checks that a KDTree round-trips through every old pickle
    protocol and still answers queries identically."""
    import pickle
    np.random.seed(0)
    X = np.random.random((10, 3))
    original_tree = KDTree(X, leaf_size=1)
    ind_before, dist_before = original_tree.query(X)

    def check_pickle_protocol(protocol):
        payload = pickle.dumps(original_tree, protocol=protocol)
        restored_tree = pickle.loads(payload)
        ind_after, dist_after = restored_tree.query(X)
        assert_array_almost_equal(ind_before, ind_after)
        assert_array_almost_equal(dist_before, dist_after)

    for protocol in (0, 1, 2):
        yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
    """Push 2*n_nbrs random distances into each heap row and verify the
    heap retains the n_nbrs smallest, in sorted order."""
    heap = NeighborsHeap(n_pts, n_nbrs)
    for row in range(n_pts):
        dists = np.random.random(2 * n_nbrs).astype(DTYPE)
        idx = np.arange(2 * n_nbrs, dtype=ITYPE)
        for dist, ind in zip(dists, idx):
            heap.push(row, dist, ind)
        order = np.argsort(dists)
        dists = dists[order]
        idx = idx[order]
        d_heap, i_heap = heap.get_arrays(sort=True)
        assert_array_almost_equal(dists[:n_nbrs], d_heap[row])
        assert_array_almost_equal(idx[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
    """nodeheap_sort must order values exactly like np.argsort."""
    vals = np.random.random(n_nodes).astype(DTYPE)
    expected_order = np.argsort(vals)
    sorted_vals, order = nodeheap_sort(vals)
    assert_array_almost_equal(expected_order, order)
    assert_array_almost_equal(vals[expected_order], sorted_vals)
def test_simultaneous_sort(n_rows=10, n_pts=201):
    """simultaneous_sort must match a row-wise numpy argsort applied to
    both the distance and index arrays."""
    dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
    ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
    dist_ref = dist.copy()
    ind_ref = ind.copy()
    # in-place C implementation under test
    simultaneous_sort(dist, ind)
    # reference: reorder each row of both arrays by the distances
    order = np.argsort(dist_ref, axis=1)
    rows = np.arange(n_rows)[:, None]
    assert_array_almost_equal(dist, dist_ref[rows, order])
    assert_array_almost_equal(ind, ind_ref[rows, order])
| bsd-3-clause |
xflows/tf_core | tf_core/nltoolkit/lib/classification.py | 1 | 8140 | #!/usr/bin/env python
# from nltk.classify.megam import config_megam, call_megam
#from nltk.classify.weka import WekaClassifier, config_weka
from datetime import time
from nltk import ELEProbDist
from nltk.classify.naivebayes import NaiveBayesClassifier
from nltk.classify.positivenaivebayes import PositiveNaiveBayesClassifier
from nltk.classify.decisiontree import DecisionTreeClassifier
from nltk.classify.rte_classify import rte_classifier, rte_features, RTEFeatureExtractor
from nltk.classify.util import accuracy, apply_features, log_likelihood
from nltk.classify.scikitlearn import SklearnClassifier
from nltk.classify.maxent import (MaxentClassifier, BinaryMaxentFeatureEncoding,
TypedMaxentFeatureEncoding,
ConditionalExponentialClassifier)
#################
#OBSERVATIONS
#################
#GaussianNB is not a good fit for document classification at all, since tf-idf values are non-negative frequencies;
# use MultinomialNB instead, and maybe try BernoulliNB. scikit-learn comes with a document classification example that,
# incidentally, uses tf-idf weighting using the built-in TfidfTransformer.
#################
from tf_core.nltoolkit.helpers import NltkClassifier, DictionaryProbDist
def nltk_naive_bayes_classifier(input_dict):
    """Widget entry point: build an (untrained) NLTK Naive Bayes classifier.

    Naive Bayes uses the Bayes rule to express P(label|features) in terms
    of P(label) and P(features|label), then makes the 'naive' assumption
    that the features are conditionally independent given the label:

        P(label|features) = P(label) * P(f1|label) * ... * P(fn|label)
                            --------------------------------------------
                             SUM[l]( P(l) * P(f1|l) * ... * P(fn|l) )

    i.e. the denominator is computed by normalizing over all labels rather
    than evaluating P(features) directly.

    Returns a dict with the wrapped classifier under 'classifier'.
    """
    # TODO: make the probability estimator configurable
    prob_estimator = ELEProbDist
    wrapped = NltkClassifier(NaiveBayesClassifier, estimator=prob_estimator)
    return {'classifier': wrapped}
from nltk.classify import DecisionTreeClassifier, MaxentClassifier, NaiveBayesClassifier, megam
#from nltk_trainer import basestring
#from nltk_trainer.classification.multi import AvgProbClassifier
#
# classifier_choices = ['NaiveBayes', 'DecisionTree', 'Maxent'] + MaxentClassifier.ALGORITHMS
#
# dense_classifiers = set(['ExtraTreesClassifier', 'GradientBoostingClassifier',
# 'RandomForestClassifier', 'GaussianNB', 'DecisionTreeClassifier'])
# verbose_classifiers = set(['RandomForestClassifier', 'SVC'])
#
# try:
# import svmlight # do this first since svm module makes ugly errors
# from nltk.classify.svm import SvmClassifier
#
# classifier_choices.append('Svm')
# except:
# pass
#
# try:
# from nltk.classify import scikitlearn
# from sklearn.feature_extraction.text import TfidfTransformer
# from sklearn.pipeline import Pipeline
# from sklearn import ensemble, feature_selection, linear_model, naive_bayes, neighbors, svm, tree
#
# classifiers = [
# ensemble.ExtraTreesClassifier,
# ensemble.GradientBoostingClassifier,
# ensemble.RandomForestClassifier,
# linear_model.LogisticRegression,
# #linear_model.SGDClassifier, # NOTE: this seems terrible, but could just be the options
# naive_bayes.BernoulliNB,
# naive_bayes.GaussianNB,
# naive_bayes.MultinomialNB,
# neighbors.KNeighborsClassifier, # TODO: options for nearest neighbors
# svm.LinearSVC,
# svm.NuSVC,
# svm.SVC,
# tree.DecisionTreeClassifier,
# ]
# sklearn_classifiers = {}
#
# for classifier in classifiers:
# sklearn_classifiers[classifier.__name__] = classifier
#
# classifier_choices.extend(sorted(['sklearn.%s' % c.__name__ for c in classifiers]))
# except ImportError as exc:
# sklearn_classifiers = {}
def train_classifier(input_dict):
    """Train the widget's classifier on a BowDataset.

    Dispatches on the classifier's origin: NLTK wrappers use .train(),
    scikit-learn estimators use .fit(), and anything else is assumed to
    be a LATINO classifier and is delegated to the tf_latino package.
    Returns {'trained_classifier': ...}.
    """
    classifier=input_dict['classifier']
    training_bow_dataset = input_dict['training_data'] #BowDataset
    # convert the bag-of-words data into whatever format this classifier expects
    training_data=training_bow_dataset.bow_in_proper_format(classifier)
    if isinstance(classifier,NltkClassifier):
        trained_classifier=classifier.train(training_data)
        return {'trained_classifier': trained_classifier}
    elif input_dict['classifier'].__module__.startswith('sklearn'):
        classifier.fit(training_data, training_bow_dataset.labels)
        return {'trained_classifier': classifier}
    else:
        # imported lazily so tf_latino is only required when actually used
        from tf_latino.latino.library_gen import latino_train_classifier
        return latino_train_classifier(input_dict)
def convert_to_probdists(csf, y_probas):
    """Wrap each row of predicted class probabilities in a
    DictionaryProbDist keyed by the classifier's class labels."""
    labels = csf.classes_
    dists = []
    for row in y_probas:
        dists.append(DictionaryProbDist({labels[i]: p
                                         for i, p in enumerate(row)}))
    return dists
#apply_classifier already exists in orange package
def apply_bow_classifier(input_dict):
    """Apply a trained classifier to a bag-of-words test set.

    Returns {'labeled_dataset': None, 'predictions': results}, where
    results is a list of DictionaryProbDist objects, one per test example.
    Dispatches on the classifier's origin (LATINO / scikit-learn / NLTK).
    """
    trained_classifier = input_dict['trained_classifier']
    if input_dict.get('probability','true')=='true':
        try:
            # request probability estimates where the estimator supports it
            trained_classifier.set_params(probability=True)
        except (ValueError,AttributeError): #some classifiers don't have probability parameter
            pass
    testing_bow_dataset = input_dict['testing_dataset']
    testing_dataset=testing_bow_dataset.bow_in_proper_format(trained_classifier,no_labels=True)
    classifier_package=input_dict['trained_classifier'].__module__
    if trained_classifier.__class__.__name__=='LatinoObject': #check if this is a latino object
        # imported lazily so tf_latino is only required when actually used
        from tf_latino.latino.library_gen import latino_predict_classification
        return latino_predict_classification(input_dict)
    elif classifier_package.startswith("sklearn"):
        #a=trained_classifier.predict(testing_dataset)
        #example: http://scikit-learn.org/stable/auto_examples/document_classification_20newsgroups.html
        # prefer class probabilities; fall back to hard predictions when the
        # estimator has no predict_proba
        try:
            results = [DictionaryProbDist.from_probabilities_and_classes(example_predictions,trained_classifier.classes_)
                       for example_predictions in trained_classifier.predict_proba(testing_dataset)]
        except AttributeError:
            results = [DictionaryProbDist.from_prediction_and_classes(example_prediction,trained_classifier.classes_)
                       for example_prediction in trained_classifier.predict(testing_dataset)]
        #results=convert_to_probdists(trained_classifier,_results)
    elif isinstance(trained_classifier,NltkClassifier):
        results=[DictionaryProbDist(prob_dict=dpd._prob_dict,normalize=True)
                 for dpd in trained_classifier.prob_classify_many(testing_dataset)]
    else:
        raise Exception("What are you connecting me to then?")
    return {'labeled_dataset': None, 'predictions': results}
def extract_classifier_name(input_dict):
    """Build a human-readable '[library] Classifier' label for a classifier.

    LATINO objects carry their own .name; for everything else the label is
    derived from the class's module and name. The regex keeps only the
    leading run of letters, digits and dots, so a module path such as
    'sklearn.naive_bayes.GaussianNB' is truncated at the underscore to
    'sklearn.naive' — the 'naive' pretty-name entry below relies on this.
    """
    import re
    classifier = input_dict['classifier']
    if classifier.__class__.__name__ == 'LatinoObject':
        qualified = classifier.name
    else:
        qualified = classifier.__class__.__module__ + '.' + classifier.__class__.__name__
    parts = re.search(r'[A-Za-z\.0-9]+', qualified).group().split('.')
    library_names = {'sklearn': 'scikit-learn', 'Latino': 'LATINO'}
    parts[0] = library_names.get(parts[0], parts[0])
    pretty_names = {'naive': 'Gaussian Naive Bayes Classifier',
                    'LinearSVC': 'SVM Linear Classifier'}
    parts[-1] = pretty_names.get(parts[-1], parts[-1])
    return {'classifier_name': '[' + parts[0] + '] ' + parts[-1]}
def extract_actual_and_predicted_values(input_dict):
    """Pair the true labels and the predicted labels as two plain lists."""
    actual = input_dict['dataset']
    predicted = input_dict['predictions']
    pair = [list(actual.Y), list(predicted.Y)]
    return {'actual_and_predicted': pair}
Wafflespeanut/servo | tests/heartbeats/process_logs.py | 139 | 16143 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from os import path
import sys
import warnings
# Column indices of the start/end timestamp and energy fields in a
# heartbeat log row (consumed by read_heartbeat_log via np.loadtxt).
HB_LOG_IDX_START_TIME = 7
HB_LOG_IDX_END_TIME = HB_LOG_IDX_START_TIME + 1
HB_LOG_IDX_START_ENERGY = 14
HB_LOG_IDX_END_ENERGY = HB_LOG_IDX_START_ENERGY + 1
# Profiler whose heartbeats carry the energy measurements used for power.
ENERGY_PROFILER_NAME = 'ApplicationHeartbeat'
# Per-trial summary file and the line numbers of its time/energy/power
# fields (consumed by find_best_executions).
SUMMARY_OUTPUT = "summary.txt"
SUMMARY_TIME_IDX = 8
SUMMARY_ENERGY_IDX = SUMMARY_TIME_IDX + 1
SUMMARY_POWER_IDX = SUMMARY_ENERGY_IDX + 1
def autolabel(rects, ax):
    """Attach some text labels.
    """
    # NOTE(review): the label text is the empty string, so this only places
    # invisible text anchors above each bar — presumably a leftover from a
    # matplotlib example; confirm whether real value labels were intended.
    for rect in rects:
        ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * rect.get_height(), '', ha='center', va='bottom')
def plot_raw_totals(config, plot_data, max_time, max_time_std, max_energy, max_energy_std, output_dir, normalize):
    """Plot the raw totals for a configuration.

    Writes a grouped bar chart (time on the left axis, energy on the right)
    to <output_dir>/<config>.png.

    Keyword arguments:
    config -- configuration name
    plot_data -- (profiler name, total_time, total_time_std, total_energy, total_energy_std)
    max_time, max_time_std, max_energy, max_energy_std -- single values
    normalize -- True/False
    """
    plot_data = sorted(plot_data)
    keys = [p for (p, tt, tts, te, tes) in plot_data]
    total_times = [tt for (p, tt, tts, te, tes) in plot_data]
    total_times_std = [tts for (p, tt, tts, te, tes) in plot_data]
    total_energies = [te for (p, tt, tts, te, tes) in plot_data]
    total_energies_std = [tes for (p, tt, tts, te, tes) in plot_data]
    fig, ax1 = plt.subplots()
    ind = np.arange(len(keys))  # the x locations for the groups
    width = 0.35  # the width of the bars
    # add some text for labels, title and axes ticks
    ax1.set_title('Time/Energy Data for Configuration ' + config)
    ax1.set_xticks(ind + width)
    ax1.set_xticklabels(keys, rotation=45)
    fig.set_tight_layout(True)
    fig.set_size_inches(len(plot_data) / 1.5, 8)
    ax2 = ax1.twinx()
    # Normalize
    # NOTE: the /= on plain lists rebinds them to numpy arrays via the
    # scalar's __rtruediv__; both branches rely on this.
    if normalize:
        total_times_std /= np.sum(total_times)
        total_times /= np.sum(total_times)
        total_energies_std /= np.sum(total_energies)
        total_energies /= np.sum(total_energies)
        ax1.set_ylabel('Time (Normalized)')
        ax2.set_ylabel('Energy (Normalized)')
    else:
        # set time in us instead of ns
        # NOTE(review): the comment says us, the axis label says ms, and the
        # divisor is 1e6 — the units are inconsistent somewhere; confirm
        # against the heartbeat log units.
        total_times_std /= np.array(1000000.0)
        total_times /= np.array(1000000.0)
        total_energies_std /= np.array(1000000.0)
        total_energies /= np.array(1000000.0)
        ax1.set_ylabel('Time (ms)')
        ax2.set_ylabel('Energy (Joules)')
    rects1 = ax1.bar(ind, total_times, width, color='r', yerr=total_times_std)
    rects2 = ax2.bar(ind + width, total_energies, width, color='y', yerr=total_energies_std)
    ax1.legend([rects1[0], rects2[0]], ['Time', 'Energy'])
    # set axis
    x1, x2, y1, y2 = plt.axis()
    if normalize:
        ax1.set_ylim(ymin=0, ymax=1)
    else:
        ax1.set_ylim(ymin=0, ymax=((max_time + max_time_std) * 1.25 / 1000000.0))
        ax2.set_ylim(ymin=0, ymax=((max_energy + max_energy_std) * 1.25 / 1000000.0))
    autolabel(rects1, ax1)
    autolabel(rects2, ax2)
    # plt.show()
    plt.savefig(path.join(output_dir, config + ".png"))
    plt.close(fig)
def create_raw_total_data(config_data):
    """Aggregate per-trial totals into per-profiler means and stddevs.

    Return: [(profiler, time_mean, time_stddev, energy_mean, energy_stddev)]

    Keyword arguments:
    config_data -- (trial, trial_data)
    """
    # The number of heartbeats can differ between trials, so totals are
    # accumulated per profiler first and averaged afterwards.
    times_by_profiler = {}
    energies_by_profiler = {}
    for (_trial, trial_data) in config_data:
        for (profiler, ts, te, es, ee) in trial_data:
            times_by_profiler.setdefault(profiler, []).append(np.sum(te - ts))
            energies_by_profiler.setdefault(profiler, []).append(np.sum(ee - es))
    summary = []
    for profiler in times_by_profiler.keys():
        times = times_by_profiler[profiler]
        energies = energies_by_profiler[profiler]
        summary.append((profiler,
                        np.mean(times),
                        np.std(times),
                        np.mean(energies),
                        np.std(energies)))
    return summary
def plot_all_raw_totals(config_list, output_dir):
    """Plot column charts of the raw total time/energy spent in each profiler category.

    Writes one normalized and one unnormalized chart per configuration.

    Keyword arguments:
    config_list -- [(config, result of process_config_dir(...))]
    output_dir -- where to write plots to
    """
    raw_total_norm_out_dir = path.join(output_dir, 'raw_totals_normalized')
    os.makedirs(raw_total_norm_out_dir)
    raw_total_out_dir = path.join(output_dir, 'raw_totals')
    os.makedirs(raw_total_out_dir)
    # (name, (profiler, (time_mean, time_stddev, energy_mean, energy_stddev)))
    raw_totals_data = [(config, create_raw_total_data(config_data)) for (config, config_data) in config_list]
    mean_times = []
    mean_times_std = []
    mean_energies = []
    mean_energies_std = []
    for profiler_tup in [config_tup[1] for config_tup in raw_totals_data]:
        for (p, tt, tts, te, tes) in profiler_tup:
            mean_times.append(tt)
            mean_times_std.append(tts)
            mean_energies.append(te)
            mean_energies_std.append(tes)
    # get consistent max time/energy values across plots
    max_t = np.max(mean_times)
    max_t_std = np.max(mean_times_std)
    max_e = np.max(mean_energies)
    max_e_std = np.max(mean_energies_std)
    # list comprehensions used purely for their side effect (plotting)
    [plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_norm_out_dir, True)
     for data in raw_totals_data]
    [plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_out_dir, False)
     for data in raw_totals_data]
def plot_trial_time_series(config, trial, trial_data, max_end_time, max_power, output_dir):
    """Plot time series for a single trial.

    Python 2 code (uses xrange and list-returning map/filter).

    Keyword arguments:
    config -- the config name
    trial -- the trial name
    trial_data -- [(profiler, [start times], [end times], [start energies], [end energies])]
    max_end_time -- single value to use as max X axis value (for consistency across trials)
    output_dir -- the output directory
    """
    # TODO: Some profilers may have parallel tasks - need to identify this on plots
    max_end_time = max_end_time / 1000000.0
    trial_data = sorted(trial_data)
    fig, ax1 = plt.subplots()
    keys = [p for (p, ts, te, es, ee) in trial_data]
    # add some text for labels, title and axes ticks
    ax1.set_title('Profiler Activity for ' + config + ', ' + trial)
    ax1.set_xlabel('Time (ms)')
    ax1.grid(True)
    width = 8  # the width of the bars
    # one horizontal lane per profiler, spaced 10 apart
    ax1.set_yticks(10 * np.arange(1, len(keys) + 2))
    ax1.set_yticklabels(keys)
    ax1.set_ylim(ymin=0, ymax=((len(trial_data) + 1) * 10))
    ax1.set_xlim(xmin=0, xmax=max_end_time)
    fig.set_tight_layout(True)
    fig.set_size_inches(16, len(trial_data) / 3)
    i = 10
    for (p, ts, te, es, ee) in trial_data:
        # each heartbeat interval becomes one horizontal bar segment
        xranges = [(ts[j] / 1000000.0, (te[j] - ts[j]) / 1000000.0) for j in xrange(len(ts))]
        ax1.broken_barh(xranges, (i - 0.5 * width, width))
        i += 10
    # place a vbar at the final time for this trial
    last_profiler_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in trial_data]))
    plt.axvline(np.max(last_profiler_times) / 1000000.0, color='black')
    # overlay instantaneous power from the energy profiler on a second axis
    power_times = []
    power_values = []
    for (p, ts, te, es, ee) in trial_data:
        if p == ENERGY_PROFILER_NAME:
            power_times = te / 1000000.0
            power_values = (ee - es) / ((te - ts) / 1000.0)
    ax2 = ax1.twinx()
    ax2.set_xlim(xmin=0, xmax=max_end_time)
    ax2.set_ylim(ymin=0, ymax=max_power)
    ax2.set_ylabel('Power (Watts)')
    ax2.plot(power_times, power_values, color='r')
    # plt.show()
    plt.savefig(path.join(output_dir, "ts_" + config + "_" + trial + ".png"))
    plt.close(fig)
def hb_energy_times_to_power(es, ee, ts, te):
    """Compute power from start and end energy and times.

    Return: power values (energy delta over rescaled time delta)
    """
    delta_energy = ee - es
    delta_time = (te - ts) / 1000.0
    return delta_energy / delta_time
def plot_all_time_series(config_list, output_dir):
    """Plot column charts of the raw total time/energy spent in each profiler category.

    NOTE(review): docstring appears copy-pasted from plot_all_raw_totals —
    this function actually plots the per-trial activity/power time series.

    Keyword arguments:
    config_list -- [(config, result of process_config_dir(...))]
    output_dir -- where to write plots to
    """
    time_series_out_dir = path.join(output_dir, 'time_series')
    os.makedirs(time_series_out_dir)
    # first pass: find global axis maxima so all plots share scales
    max_end_times = []
    max_power_values = []
    for (c, cd) in config_list:
        for (t, td) in cd:
            trial_max_end_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in td]))
            max_end_times.append(np.nanmax(trial_max_end_times))
            for (p, ts, te, es, ee) in td:
                # We only care about the energy profiler (others aren't reliable for instant power anyway)
                if p == ENERGY_PROFILER_NAME and len(te) > 0:
                    max_power_values.append(np.nanmax(hb_energy_times_to_power(es, ee, ts, te)))
    max_time = np.nanmax(max_end_times)
    max_power = np.nanmax(np.array(max_power_values)) * 1.2  # leave a little space at the top
    # second pass: render one time-series plot per trial
    for (config, config_data) in config_list:
        [plot_trial_time_series(config, trial, trial_data, max_time, max_power, time_series_out_dir)
         for (trial, trial_data) in config_data]
def read_heartbeat_log(profiler_hb_log):
    """Read a heartbeat log file.

    Return: (profiler name, [start times], [end times], [start energies], [end energies], [instant powers])

    NOTE(review): the docstring advertises six fields but only five are
    returned — there is no "instant powers" element.

    Keyword arguments:
    profiler_hb_log -- the file to read
    """
    with warnings.catch_warnings():
        try:
            warnings.simplefilter("ignore")
            time_start, time_end, energy_start, energy_end = \
                np.loadtxt(profiler_hb_log,
                           dtype=np.dtype('uint64'),
                           skiprows=1,
                           usecols=(HB_LOG_IDX_START_TIME,
                                    HB_LOG_IDX_END_TIME,
                                    HB_LOG_IDX_START_ENERGY,
                                    HB_LOG_IDX_END_ENERGY),
                           unpack=True,
                           ndmin=1)
        except ValueError:
            # empty or malformed log: treat as "no heartbeats"
            time_start, time_end, energy_start, energy_end = [], [], [], []
    # profiler name is taken from the filename: <prefix>-<name>.<ext>
    name = path.split(profiler_hb_log)[1].split('-')[1].split('.')[0]
    return (name,
            np.atleast_1d(time_start),
            np.atleast_1d(time_end),
            np.atleast_1d(energy_start),
            np.atleast_1d(energy_end))
def process_trial_dir(trial_dir):
    """Process trial directory.

    Return: [(profiler name, [start times], [end times], [start energies], [end energies])]
    Time and energy are normalized to 0 start values.

    Keyword arguments:
    trial_dir -- the directory for this trial
    """
    # Parse every *.log heartbeat file in the trial directory.
    # NOTE(review): Python 2 code — map/filter must return lists here, since
    # log_data is iterated several times below; under Python 3 these would
    # be one-shot iterators and the later passes would see nothing.
    log_data = map(lambda h: read_heartbeat_log(path.join(trial_dir, h)),
                   filter(lambda f: f.endswith(".log"), os.listdir(trial_dir)))
    # Find the earliest timestamps and energy readings
    min_t = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [ts for (profiler, ts, te, es, ee) in log_data])))
    min_e = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [es for (profiler, ts, te, es, ee) in log_data])))
    # Normalize timing/energy data to start values of 0
    return [(profiler, ts - min_t, te - min_t, es - min_e, ee - min_e) for (profiler, ts, te, es, ee) in log_data]
def process_config_dir(config_dir):
    """Process a configuration directory.

    Return: [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])]

    Keyword arguments:
    config_dir -- the directory for this configuration - contains subdirectories for each trial
    """
    results = []
    for trial_dir in os.listdir(config_dir):
        results.append((trial_dir,
                        process_trial_dir(path.join(config_dir, trial_dir))))
    return results
def process_logs(log_dir):
    """Process log directory.

    Return: [(config, [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])])]

    Keyword arguments:
    log_dir -- the log directory to process - contains subdirectories for each configuration
    """
    results = []
    for config_dir in os.listdir(log_dir):
        # configuration name is the part after the first underscore
        config_name = config_dir.split('_')[1]
        results.append((config_name,
                        process_config_dir(path.join(log_dir, config_dir))))
    return results
def find_best_executions(log_dir):
    """Get the best time, energy, and power from the characterization summaries.

    Return: ((config, trial, min_time), (config, trial, min_energy), (config, trial, min_power))

    Keyword arguments:
    log_dir -- the log directory to process - contains subdirectories for each configuration
    """
    DEFAULT = ('', '', 1000000000.0)
    min_time = DEFAULT
    min_energy = DEFAULT
    min_power = DEFAULT
    for config_dir in os.listdir(log_dir):
        for trial_dir in os.listdir(path.join(log_dir, config_dir)):
            with open(path.join(log_dir, config_dir, trial_dir, SUMMARY_OUTPUT), "r") as s:
                lines = s.readlines()
                # summary lines look like "label: value"
                time = float(lines[SUMMARY_TIME_IDX].split(':')[1])
                energy = int(lines[SUMMARY_ENERGY_IDX].split(':')[1])
                power = float(lines[SUMMARY_POWER_IDX].split(':')[1])
                if time < min_time[2]:
                    min_time = (config_dir, trial_dir, time)
                if energy < min_energy[2]:
                    min_energy = (config_dir, trial_dir, energy)
                # BUG FIX: previously compared the float `power` against the
                # whole (config, trial, value) tuple, which never tracked the
                # true minimum (always True under Py2's number-vs-tuple
                # ordering, TypeError under Py3); compare the stored value.
                if power < min_power[2]:
                    min_power = (config_dir, trial_dir, power)
    return (min_time, min_energy, min_power)
def main():
    """This script processes the log files from the "characterize.py" script and produces visualizations.
    """
    # NOTE: Python 2 script (print statements below).
    # Default log directory
    directory = 'heartbeat_logs'
    # Default output directory
    output_dir = 'plots'
    # Default android
    android = False
    # Parsing the input of the script
    parser = argparse.ArgumentParser(description="Process Heartbeat log files from characterization")
    parser.add_argument("-d", "--directory",
                        default=directory,
                        help="Heartbeat log directory \"-d heartbeat_logs\"")
    parser.add_argument("-o", "--output",
                        default=output_dir,
                        help="Specify the log output directory, for example \"-o plots\"")
    parser.add_argument("--android",
                        action="store_true",
                        dest="android",
                        default=False,
                        help="Specify if processing results from Android")
    args = parser.parse_args()
    if args.directory:
        directory = args.directory
    if args.output:
        output_dir = args.output
    if args.android:
        android = args.android
    if not os.path.exists(directory):
        print "Input directory does not exist: " + directory
        sys.exit(1)
    if os.path.exists(output_dir):
        print "Output directory already exists: " + output_dir
        sys.exit(1)
    res = process_logs(directory)
    if not android:
        # presumably Android runs lack the summary files needed for the
        # "best execution" search — confirm before changing this guard
        best = find_best_executions(directory)
        print 'Best time:', best[0]
        print 'Best energy:', best[1]
        print 'Best power:', best[2]
    os.makedirs(output_dir)
    plot_all_raw_totals(res, output_dir)
    plot_all_time_series(res, output_dir)

if __name__ == "__main__":
    main()
| mpl-2.0 |
eor/STARDUST | scripts/sd_plot/sd_plot.py | 1 | 6960 | #!/usr/bin/python
# -*- coding: utf-8 -*-
def multi_plot(inputFiles, outFile, xLimits, ylimits, logT=False, logFractions=False, legends=[], showLegend=True, ncolsLegend=4, doColor=True, colors=None, ls=None):
    """Render a 4x2 grid of radial profiles (5 ionization fractions plus
    kinetic/spin/brightness temperature panels) for a list of STARDUST
    profile files, and save the figure to outFile.

    WARNING: when logFractions is True, `ylimits` is modified in place
    (the first five limit pairs are replaced by their log10) — callers
    reusing the same list across calls will see compounded values.

    NOTE(review): `ls` is always truthy after the default assignment below,
    so the `if ls:` override later makes the k >= stylechange dash logic
    dead code unless ls is explicitly passed as an empty list — confirm
    whether that was intended.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    import math
    # enable these if you wannt nice looking plots
    #from matplotlib import rc
    #rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
    ## for Palatino and other serif fonts use:
    ##rc('font',**{'family':'serif','serif':['Palatino']})
    ##rc('text', usetex=True)
    rows = 4
    columns = 2
    # index at which the line style switches from solid to dashed
    stylechange = 5
    plotdistX = 0.1
    plotdistY = 0.1
    labelFontSize = 15
    if colors:
        color = colors
    else:
        color = ['black','red','blue','green','purple','orange','yellow']
    if not ls:
        ls = ['-' , '-', '--', '-', '--', '-', '--', '-', '--',]
    f, axarr = plt.subplots(rows, columns, sharex=True, sharey=False, figsize=(10,15))
    xlabel = '$r\, [\mathrm{kpc}]$'
    # labels for fractions
    if logFractions == True:
        ylabelFrac = [ r'$\log\left(x_\mathrm{H_{I}}\right)$',r'$\log\left(x_\mathrm{H_{II}}\right)$',r'$\log\left(x_\mathrm{He_{I}}\right)$',r'$\log\left(x_\mathrm{He_{II}}\right)$',r'$\log\left(x_\mathrm{He_{III}}\right)$']
    else:
        ylabelFrac = [ r'$x_\mathrm{H_{I}}$',r'$x_\mathrm{H_{II}}$',r'$x_\mathrm{He_{I}}$',r'$x_\mathrm{He_{II}}$',r'$x_\mathrm{He_{III}}$',]
    # labels for temperatures
    if logT == True:
        ylabelTemp = [ r'$\log\left(T_{\mathrm{kin}}/\mathrm{K}\right)$', r'$\log\left(T_{\mathrm{Spin}}/\mathrm{K}\right)$', r'$\log\left(\delta T_{\mathrm{B}}/\mathrm{K}\right)$']
    else:
        ylabelTemp = [ r'$T_{\mathrm{kin}}$',r'$T_{\mathrm{Spin}}$',r'$\delta T_{\mathrm{B}}\, [K]$']
    #ylabel = ['x_H','x_H_II','x_He','x_He_II','x_He_III','T_e','T_Spin','T_Brig']
    ylabel = ylabelFrac + ylabelTemp
    if logFractions == True:
        # in-place mutation of the caller's ylimits (see WARNING above)
        for i in range(0,5):
            ylimits[i][0] = math.log10(ylimits[i][0])
            ylimits[i][1] = math.log10(ylimits[i][1])
    # loop over all input
    for k in range(0,len(inputFiles)):
        fileName = inputFiles[k].split('/')[-1]
        data = np.transpose(np.genfromtxt(inputFiles[k],dtype='float'))
        if( len(legends) == len(inputFiles) ):
            tmpLegend = legends[k]
        else:
            tmpLegend = fileName
        if k>= stylechange:
            if doColor:
                lineColor = color[k-stylechange]
            else:
                lineColor = 'black'
            linestyle = '--'
        else:
            if doColor:
                lineColor = color[k]
            else:
                lineColor = 'black'
            linestyle = '-'
        # if color or ls were set by user, override the above:
        if ls:
            linestyle = ls[k]
        if colors:
            lineColor = color[k]
        # back-fill rows whose brightness temperature (column 9) is NaN with
        # the following radial bin's values
        for i in range(len(data[0])-1,-1,-1):
            if np.isnan(data[9,i]):
                data[2:10,i] = data[2:10,i+1]
        #print len(data[0])
        #for i in range(len(data[0])-1,-1,-1):
        #if np.any(data[2:10,i]<0):
        ##print i
        #data = np.delete(data, [i], axis=1)
        ##np.delete(data[:,i])
        #print len(data[0])
        if logT == True:
            for i in range(0,len(data[0])):
                data[7,i] = math.log10(data[7,i])
                data[8,i] = math.log10(data[8,i])
                #data[9,i] = math.log10(data[9,i])
        if logFractions == True:
            #axarr[0,0].set_yscale("log")
            #axarr[0,1].set_yscale("log")
            #axarr[1,0].set_yscale("log")
            #axarr[1,1].set_yscale("log")
            #axarr[2,0].set_yscale("log")
            for ii in range(2,7):
                for i in range(0,len(data[0])):
                    value = math.fabs(data[ii,i])
                    if value == 0.0:
                        # floor zeros so log10 is defined
                        value = 1.0e-15
                    data[ii,i] = math.log10(value) # some of Rajat's old profiles have negative fractions
        # plot stuff
        for i in range(0,rows):
            for j in range(0,columns):
                if linestyle == '-':
                    axarr[i,j].plot( data[1], data[(i+1)*columns+j], linestyle, lw=1.8, color=lineColor, label=r'$\mathrm{%s}$'%(tmpLegend) )
                if linestyle=='--':
                    axarr[i,j].plot( data[1], data[(i+1)*columns+j], linestyle, lw=1.8, dashes=(9, 2), color=lineColor, label=r'$\mathrm{%s}$'%(tmpLegend) )
                if linestyle=='-.':
                    axarr[i,j].plot( data[1], data[(i+1)*columns+j], linestyle, lw=1.8, dashes=(1, 2, 8, 2), color=lineColor, label=r'$\mathrm{%s}$'%(tmpLegend) )
    # per-panel axis limits and labels; None in ylimits means "autoscale"
    for i in range(0,rows):
        for j in range(0,columns):
            #print i*columns+j
            axarr[i, j].set_xlim(xLimits[0],xLimits[1])
            if ylimits[i*columns+j][0] == None:
                ymin = axarr[i, j].get_ylim()[0]
            else:
                ymin = ylimits[i*columns+j][0]
            if ylimits[i*columns+j][1] == None:
                ymax = axarr[i, j].get_ylim()[1]
            else:
                ymax = ylimits[i*columns+j][1]
            axarr[i, j].set_ylim(ymin,ymax)
            if i == rows-1:
                axarr[i, j].set_xlabel(xlabel)#,fontsize=20)
                axarr[i, j].xaxis.label.set_size(labelFontSize)
            if j == columns-1:
                # right-hand column: ticks and labels on the right side
                axarr[i, j].yaxis.tick_right()
                axarr[i, j].yaxis.set_ticks_position('both')
                axarr[i, j].yaxis.set_label_position("right")
                axarr[i, j].set_ylabel(ylabel[i*columns+j])
                axarr[i, j].yaxis.label.set_size(labelFontSize)
            else:
                axarr[i, j].yaxis.tick_left()
                axarr[i, j].yaxis.set_ticks_position('both')
                axarr[i, j].yaxis.set_label_position("left")
                axarr[i, j].set_ylabel(ylabel[i*columns+j])
                axarr[i, j].yaxis.label.set_size(labelFontSize)
    f.subplots_adjust(hspace=plotdistY)
    f.subplots_adjust(wspace=plotdistX)
    if showLegend:
        lgd = plt.legend(loc='upper center', bbox_to_anchor=(-0.1, -0.20), ncol=ncolsLegend, fontsize=10)#, fancybox=True, shadow=True)
        #plt.minorticks_on()
        #plt.savefig('example_plot.pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
        plt.savefig(outFile, bbox_extra_artists=(lgd,), bbox_inches='tight')
    else:
        plt.savefig(outFile, bbox_inches='tight')
| gpl-3.0 |
lol/BCI-BO-old | plot_iii3b_old.py | 1 | 4787 | import numpy as np
import matplotlib.pyplot as plt
import math
from pylab import figure
from my_plotter import *
import os
import sys
sys.path.append('./BCI_Framework')
import Main
import Single_Job_runner as SJR
import os
import re
if __name__ == '__main__':
    # Build the BCI framework entry point for dataset BCICIII3b; only the
    # configuration object is used below (paths, subject names, counts).
    bciciv1 = Main.Main('BCI_Framework','BCICIII3b','RANDOM_FOREST', 'BP', 'ALL', -1, 'python')
    res_path = bciciv1.config.configuration['results_opt_path_str']
    # Fixed index maps: classifier name -> first axis, feature name -> second axis.
    classifiers_dict = {'Boosting':0, 'LogisticRegression':1, 'RANDOM_FOREST':2,'SVM':3, 'LDA':4, 'QDA':5 , 'MLP':6}
    features_dict = {'BP':0, 'logbp':1, 'wackerman':2, 'morlet':3, 'AR':4}
    # results[classifier, feature, subject] holds accuracy in percent;
    # discarded_periods holds the time-window prefix parsed from each file name.
    results = np.zeros((len(classifiers_dict),len(features_dict), bciciv1.config.configuration["number_of_subjects"]))
    discarded_periods = np.empty((len(classifiers_dict),len(features_dict), bciciv1.config.configuration["number_of_subjects"]), dtype='S10')
    subjects_dict = {}
    for ind, subj in enumerate(bciciv1.config.configuration["subject_names_str"]):
        subjects_dict.update({subj:ind})
    # Walk the results tree; every non-pickle file is a result file whose
    # (Windows-style) path encodes <classifier>\<feature>\<window>_<subject>.
    for dirname, dirnames, filenames in os.walk(res_path):
#        for subdirname in dirnames:
#            fold_name = os.path.join(dirname, subdirname)
#            print fold_name
        for filename in filenames:
#            slash_indices = re.search('0', filename)
            if filename[-4:] != '.pkl':
                file_name = os.path.join(dirname, filename)
                # NOTE(review): backslash splitting assumes Windows path
                # separators — this parsing breaks on POSIX paths.
                backslash_indices = [m.start() for m in re.finditer("\\\\", file_name)]
                underline_indices = [m.start() for m in re.finditer("_", file_name)]
                feature_ext_name = file_name[backslash_indices[-2]+1:backslash_indices[-1]]
                classifier_name = file_name[backslash_indices[-3]+1:backslash_indices[-2]]
                subj = file_name[underline_indices[-1]+1:-4]
#                print feature_ext_name, classifier_name, subj
                npzfile = np.load(file_name)
                error = npzfile['error']
                accuracy = 100 - error*100
                results[classifiers_dict[classifier_name], features_dict[feature_ext_name],subjects_dict[subj]] = accuracy
                discarded_periods[classifiers_dict[classifier_name], features_dict[feature_ext_name],subjects_dict[subj]] = file_name[backslash_indices[-1]+1:underline_indices[2]]
#                with open(file_name,'r') as my_file:
#
#                    error = float(my_file.readline())
#                    accuracy = 100 - error*100
#                    results[classifiers_dict[classifier_name], features_dict[feature_ext_name],subjects_dict[subj]] = accuracy
##                    print file_name[backslash_indices[-1]+1:underline_indices[1]]
#                    discarded_periods[classifiers_dict[classifier_name], features_dict[feature_ext_name],subjects_dict[subj]] = file_name[backslash_indices[-1]+1:underline_indices[2]]
#
#            print backslash_indices
    # One plot per feature-extraction method: per-subject accuracy tuples
    # (one entry per classifier) on a fixed 46-97% accuracy axis.
    for feature in features_dict.keys():
        f_ind = features_dict[feature]
        feature_ext_y = []
        labels = []
        for subject in subjects_dict.keys():
            subj_ind = subjects_dict[subject]
            feature_ext_y.append(tuple(results[:,f_ind,subj_ind]))
            labels.append(feature + '_' + subject)
#        plotter( feature_ext_y, math.floor(np.min(feature_ext_y) - 1), math.floor(np.max(feature_ext_y) + 1), feature, labels)
        plotter( feature_ext_y, 46, 97, feature, labels)
    for subject in subjects_dict.keys():
        for feature in features_dict.keys():
            print subject, feature, discarded_periods[:, features_dict[feature],subjects_dict[subject]]
    # Historic hard-coded result sets kept for reference:
#    BP_y = [(72.96,78.62,78.62,76.11,79.25,79.88), (64.45,65.38,65.75,65.00,67.04,66.67), (69.45,71.86,74.26,72.04,69.75,72.6)]
#    labels = ['BP_O3','BP_S4','BP_X11']
#    plotter( BP_y, 64, 81, 'BP', labels)
#    logBP_y = [(74.22,79.25,79.25,77.36,81.77,81.77), (62.23,66.49,66.30,65.38,66.86,66.86), (69.82,72.97,73.15,71.86,74.63,74.63)]
#    labels = ['LOGBP_O3','LOGBP_S4','LOGBP_X11']
#    plotter( logBP_y, 61, 84, 'logBP', labels)
#    wackermann_y = [(56.61,57.24,58.24,54.72,54.72,59.75), (57.97,57.6,59.82,55.75,57.97,58.71), (60,50,57.24,61.49,60.56,62.23)]
#    labels = ['wackerman_O3','wackerman_S4','wackerman_X11']
#    plotter( wackermann_y, 49, 65, 'wackerman', labels)
#    y_RF = [(77.98,76.72,76.72,79.87), (70.74,74.44,80.92,75.18),(75.92,73.51,77.03,78.33),(76.11,77.36,58.5, 54.72), (65,65.38,53.34,55.75), (72.04,71.86,60,61.49)]
#    labels = ['BO_RF_O3','BO_RF_S4','BO_RF_X11','RF_grid_search_O3','RF_grid_search_S4','RF_grid_search_X11']
#    BO_plotter( y_RF, 49, 83, 'BO_RF', labels)
plt.show() | gpl-3.0 |
ybroze/trading-with-python | cookbook/workingWithDatesAndTime.py | 77 | 1551 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 16 17:45:02 2011
@author: jev
"""
import time
import datetime as dt
from pandas import *
from pandas.core import datetools

# NOTE(review): Python 2 only (`print` statements, `<>`, `xrange`);
# pandas.core.datetools was removed in later pandas releases.

# basic functions
print 'Epoch start: %s' % time.asctime(time.gmtime(0))
print 'Seconds from epoch: %.2f' % time.time()
today = dt.date.today()
print type(today)
print 'Today is %s' % today.strftime('%Y.%m.%d')
# parse datetime
d = dt.datetime.strptime('20120803 21:59:59',"%Y%m%d %H:%M:%S")
# time deltas
someDate = dt.date(2011,8,1)
delta = today - someDate
print 'Delta :', delta
# calculate difference in dates
delta = dt.timedelta(days=20)
print 'Today-delta=', today-delta
t = dt.datetime(*time.strptime('3/30/2004',"%m/%d/%Y")[0:5])
# the '*' operator unpacks the tuple, producing the argument list.
print t
# print every 3d wednesday of the month
for month in xrange(1,13):
    t = dt.date(2013,month,1)+datetools.relativedelta(months=1)
    offset = datetools.Week(weekday=4)
    if t.weekday()<>4:
        t_new = t+3*offset
    else:
        t_new = t+2*offset
    # step back ~a month so the date lands in the original month
    t_new = t_new-datetools.relativedelta(days=30)
    print t_new.strftime("%B, %d %Y (%A)")
#rng = DateRange(t, t+datetools.YearEnd())
#print rng
# create a range of times
start = dt.datetime(2012,8,1)+datetools.relativedelta(hours=9,minutes=30)
end = dt.datetime(2012,8,1)+datetools.relativedelta(hours=22)
rng = date_range(start,end,freq='30min')
for r in rng: print r.strftime("%Y%m%d %H:%M:%S") | bsd-3-clause |
ngoix/OCRF | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 16 | 2249 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics

# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
    dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequence of 1 to 3
# characters instead of word tokens
# (character n-grams are robust language "fingerprints": no tokenizer needed)
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
                             use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should stored in a variable named clf
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
                                    target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
    u'This is a language detection test.',
    u'Ceci est un test de d\xe9tection de la langue.',
    u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
    print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
zhenv5/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses the only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error.  (The label used to say "Residual sum of squares",
# but np.mean of the squared residuals is the MSE, not the RSS.)
print("Mean squared error: %.2f"
      % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
         linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
HIPS/optofit | optofit/test/data_utilities.py | 1 | 4524 | import pickle
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import gamma
from optofit.models.hyperparameters import hypers
from optofit.models.model import point_parameter_model, DataSequence
from optofit.neuron.channels import *
from optofit.observation.observable import NewDirectCompartmentVoltage, LowPassCompartmentVoltage, IndependentObservations, LinearFluorescence
def pickle_model(model, filename):
    """Serialize *model* (flattened via ``model_to_dict``) to *filename*.

    The file is opened in binary mode and closed deterministically; the
    original version leaked the file handle and used text mode, which
    corrupts pickle streams on Windows.
    """
    with open(filename, 'wb') as fh:
        pickle.dump(model_to_dict(model), fh)
def model_to_dict(model):
    # Flatten a fitted model into plain dicts/arrays so it can be pickled
    # without dragging the full optofit object graph along.
    # NOTE(review): assumes a single neuron with a single compartment — confirm.
    return {
        'observable': observation_to_dict(model.observation),
        'data': [data_sequence_to_dict(ds) for ds in model.data_sequences],
        'neuron': compartment_to_dict(model.population.neurons[0].compartments[0])
    }
def observation_to_dict(observation):
    """Extract the noise level of the first observation as a plain dict."""
    first_observation = observation.observations[0]
    return {'sigma': first_observation.sigma.value}
def channel_to_dict(channel):
    """Return ``(name, {'g': ..., 'E': ...})`` for one ion channel."""
    parameters = {'g': channel.g.value, 'E': channel.E.value}
    return (channel.name, parameters)
def compartment_to_dict(compartment):
    """Map channel name -> parameter dict for every channel in *compartment*."""
    return dict(channel_to_dict(ch) for ch in compartment.channels)
def data_sequence_to_dict(ds):
    """Copy the picklable fields of a DataSequence into a plain dict."""
    field_names = ('latent', 'observations', 'input', 'states')
    return {name: getattr(ds, name) for name in field_names}
def conductances_from_name(samples, name):
    """Stack the sampled conductance ``g`` of channel *name* into an array."""
    conductances = [sample['neuron'][name]['g'] for sample in samples]
    return np.array(conductances)
def model_dict_to_model(model_dict):
    # Inverse of model_to_dict: rebuild a runnable model (with a low-pass
    # voltage observation) from the flattened, pickled representation.
    model, body = point_parameter_model(model_dict['neuron'])
    observation = IndependentObservations('observations', model)
    lp_body_voltage = LowPassCompartmentVoltage('lp body voltage', model, body, filterbins=20)
    lp_body_voltage.sigma.value = model_dict['observable']['sigma']
    observation.add_observation(lp_body_voltage)
    model.add_observation(observation)
    for ds in model_dict['data']:
        # Time axis reconstructed at a fixed 0.1 time-unit step — presumably
        # ms; TODO confirm against the recording code.
        t = .1 * np.array(range(len(ds['input'])))
        model.add_data_sequence(DataSequence(None, t, ds['input'], ds['observations'], ds['latent'], ds['input'], ds['states']))
    return model
def hist(true, samples, name, burn=20, ax = None):
    # Histogram of posterior conductance samples for channel *name*, with the
    # true value as a red vertical line.  The first *burn* samples are
    # discarded as burn-in.  When an axis is supplied, the Gamma prior density
    # is overlaid in green; otherwise a standalone figure is shown.
    if not ax:
        plt.hist(conductances_from_name(samples[burn:], name))
        plt.axvline(true['neuron'][name]['g'], color='r')
        plt.show()
    else:
        data = conductances_from_name(samples[burn:], name)
        # NOTE(review): `normed` was removed from modern matplotlib (use
        # `density=` instead) — confirm the pinned matplotlib version.
        ax.hist(data, 30, normed=1)
        ax.axvline(true['neuron'][name]['g'], color='r')
        x = np.linspace(np.min(data), np.max(data))
        # Prior: Gamma(a, rate b) from global hyperparameters; scipy's gamma
        # takes scale = 1 / rate.
        rv = gamma(hypers['a_g_' + name].value, scale = 1 / hypers['b_g_' + name].value)
        ax.plot(x, rv.pdf(x), color='g')
import os.path
def zip_files(filename):
    # Stitch together consecutive sampler checkpoints "<filename>_<i>.pk".
    # Each checkpoint repeats the previous file's final sample as its first
    # sample; that overlap is verified (by comparing latent state arrays)
    # before the duplicate is dropped and the rest concatenated.
    # NOTE(review): file handles from open() are never closed — consider
    # `with open(...)`.
    seed, true, samples = pickle.load(open(filename + "_1.pk", 'r'))
    last = samples[-1]
    i = 2
    while os.path.isfile(filename + "_" + str(i) + ".pk"):
        _, _, next_samples = pickle.load(open(filename + "_" + str(i) + ".pk", 'r'))
        if not np.allclose(next_samples[0]['data'][0]['states'].view(np.float64), last['data'][0]['states'].view(np.float64)):
            print "ERROR: first and last of the file aren't the same"
        samples = samples + next_samples[1:]
        last = next_samples[-1]
        i += 1
    return seed, true, samples
def path_trace(true, samples, name, ax = None):
    # MCMC trace plot of the sampled conductance for channel *name*, with the
    # true value drawn as a horizontal red line.  Draws into *ax* when given,
    # otherwise creates and shows a standalone figure.
    if not ax:
        plt.axhline(true['neuron'][name]['g'], color='r')
        data = conductances_from_name(samples, name)
        plt.plot(range(len(data)), data)
        plt.show()
    else:
        ax.axhline(true['neuron'][name]['g'], color='r')
        data = conductances_from_name(samples, name)
        ax.plot(range(len(data)), data)
def plot_all(plot_fun, true, samples, burn=20,
             names=('leak', 'ca3kdr', 'ca3ka', 'ca3na', 'ca3ca', 'ca3kahp',
                    'ca3kc', 'chr2')):
    """Stack one *plot_fun* panel per channel in *names* and show the figure.

    Parameters:
        plot_fun: callable(true, samples, name, ax) drawing one channel panel
                  (e.g. ``hist`` or ``path_trace``).
        true:     ground-truth model dict, forwarded to *plot_fun*.
        samples:  list of posterior samples; the first *burn* are discarded.
        burn:     number of burn-in samples to drop.
        names:    channel names, one subplot each.  The default is now a
                  tuple rather than a list (mutable default argument
                  anti-pattern); callers may still pass any sequence.
    """
    channels = names
    f, axes = plt.subplots(len(channels))
    for ax, name in zip(axes, channels):
        plot_fun(true, samples[burn:], name, ax)
        ax.set_ylabel(name)
    plt.show()
def percentile_plot(true, samples, name, ax):
    # Posterior credible band for the latent body-compartment trace *name*:
    # posterior mean in red, central 95% band (2.5-97.5 percentiles) shaded,
    # and the ground-truth trace in blue.
    #print samples[0]['data']
    #import pdb; pdb.set_trace()
    data = np.array([s['data'][0]['latent']['neuron']['body'][name] for s in samples])
    #print data
    #import pdb; pdb.set_trace()
    top = np.percentile(data, 97.5, axis=0)
    bottom = np.percentile(data, 2.5, axis=0)
    mean = np.mean(data, axis = 0)
    t = range(len(mean))
    ax.plot(t, mean, color = 'r')
    ax.fill_between(t, top, bottom, facecolor="teal")
    ax.plot(t, true['data'][0]['latent']['neuron']['body'][name], color='b')
| gpl-2.0 |
cpcloud/bokeh | bokeh/mpl_helpers.py | 3 | 5357 | "Helpers function for mpl module."
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENCE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import numpy as np
from itertools import cycle, islice
from scipy import interpolate, signal
from .objects import Glyph
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def convert_color(mplcolor):
    """Convert a matplotlib color spec to a Bokeh-friendly one.

    Handles the single-letter shorthands ('b' -> 'blue', ...), grayscale
    strings/floats in [0, 1] (returned as an (r, g, b) int tuple), and float
    RGB tuples (scaled to 0-255 ints).  Anything else (named colors, hex
    strings) is returned unchanged.
    """
    charmap = dict(b="blue", g="green", r="red", c="cyan", m="magenta",
                   y="yellow", k="black", w="white")
    if mplcolor in charmap:
        return charmap[mplcolor]
    try:
        colorfloat = float(mplcolor)
        if 0 <= colorfloat <= 1.0:
            # This is a grayscale value
            return tuple([int(255 * colorfloat)] * 3)
    except (TypeError, ValueError):
        # Not a number: a named color, hex string, or an RGB tuple.
        # (The original bare `except:` also swallowed KeyboardInterrupt etc.)
        pass
    if isinstance(mplcolor, tuple):
        # These will be floats in the range 0..1
        return int(255 * mplcolor[0]), int(255 * mplcolor[1]), int(255 * mplcolor[2])
    return mplcolor
def convert_dashes(dash):
    """Translate matplotlib's shorthand dash characters to named styles.

    bokeh.properties.DashPattern understands the spelled-out matplotlib dash
    styles but not the shorthand characters, so map those here; any other
    value passes through untouched.
    """
    named_styles = {
        "-": "solid",
        "--": "dashed",
        ":": "dotted",
        "-.": "dashdot",
    }
    return named_styles.get(dash, dash)
def delete_last_col(x):
    """Return a copy of 2-D array *x* with its final column removed."""
    return np.delete(x, -1, axis=1)
def get_props_cycled(col, prop, fx=lambda x: x):
    """Cycle the values of *prop* to yield one value per path in *col*.

    Matplotlib cycles per-path properties (colors, line widths, ...) over a
    collection; mirror that behavior by repeating the (optionally
    transformed) property list until the number of paths is reached.

    Parameters:
        col: matplotlib collection object
        prop: property values obtained from the matplotlib collection
        fx: optional transform applied to each property element
            (defaults to the identity function)
    """
    n_paths = len(col.get_paths())
    transformed = (fx(value) for value in prop)
    return list(islice(cycle(transformed), n_paths))
def is_ax_end(r):
    """Check whether *r* is a Glyph whose data source is named 'ax_end'."""
    if not isinstance(r, Glyph):
        return False
    try:
        if r.data_source.data["name"] == "ax_end":
            return True
    except KeyError:
        return False
def xkcd_line(x, y, xlim=None, ylim=None, mag=1.0, f1=30, f2=0.001, f3=5):
    """
    Mimic a hand-drawn line from (x, y) data
    Source: http://jakevdp.github.io/blog/2012/10/07/xkcd-style-plots-in-matplotlib/
    Parameters
    ----------
    x, y : array_like
        arrays to be modified
    xlim, ylim : data range
        the assumed plot range for the modification. If not specified,
        they will be guessed from the data
    mag : float
        magnitude of distortions
    f1, f2, f3 : int, float, int
        filtering parameters. f1 gives the size of the window, f2 gives
        the high-frequency cutoff, f3 gives the size of the filter
    Returns
    -------
    x, y : ndarrays
        The modified lines
    """
    x = np.asarray(x)
    y = np.asarray(y)
    # get limits for rescaling
    if xlim is None:
        xlim = (x.min(), x.max())
    if ylim is None:
        ylim = (y.min(), y.max())
    # degenerate (constant) ranges borrow the other axis' range
    if xlim[1] == xlim[0]:
        xlim = ylim
    if ylim[1] == ylim[0]:
        ylim = xlim
    # scale the data to the unit square so distortion is isotropic
    x_scaled = (x - xlim[0]) * 1. / (xlim[1] - xlim[0])
    y_scaled = (y - ylim[0]) * 1. / (ylim[1] - ylim[0])
    # compute the total distance along the path
    dx = x_scaled[1:] - x_scaled[:-1]
    dy = y_scaled[1:] - y_scaled[:-1]
    dist_tot = np.sum(np.sqrt(dx * dx + dy * dy))
    # number of interpolated points is proportional to the distance
    Nu = int(200 * dist_tot)
    u = np.arange(-1, Nu + 1) * 1. / (Nu - 1)
    # interpolate curve at sampled points (spline degree capped by point count)
    k = min(3, len(x) - 1)
    res = interpolate.splprep([x_scaled, y_scaled], s=0, k=k)
    x_int, y_int = interpolate.splev(u, res[0])
    # we'll perturb perpendicular to the drawn line
    dx = x_int[2:] - x_int[:-2]
    dy = y_int[2:] - y_int[:-2]
    dist = np.sqrt(dx * dx + dy * dy)
    # create a filtered perturbation
    # NOTE(review): uses the global numpy RNG unseeded — output is
    # intentionally non-deterministic.
    coeffs = mag * np.random.normal(0, 0.01, len(x_int) - 2)
    b = signal.firwin(f1, f2 * dist_tot, window=('kaiser', f3))
    response = signal.lfilter(b, 1, coeffs)
    x_int[1:-1] += response * dy / dist
    y_int[1:-1] += response * dx / dist
    # un-scale data
    x_int = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0]
    y_int = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0]
    return x_int, y_int
fengzhyuan/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
    """Check _hungarian finds the optimal assignment cost on known matrices."""
    # (cost matrix, expected optimal assignment cost) pairs; both square and
    # rectangular problems are covered, plus a degenerate 2x0 matrix.
    cases = [
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         850),
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         452),
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         18),
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         15),
        # n == 2, m == 0 matrix
        ([[], []],
         0),
    ]
    for cost_matrix, expected_total in cases:
        cost_matrix = np.array(cost_matrix)
        # The optimum must be found for the matrix and for its transpose.
        total_cost = sum(cost_matrix[r, c] for r, c in _hungarian(cost_matrix))
        assert expected_total == total_cost
        transposed_cost = sum(cost_matrix[r, c]
                              for c, r in _hungarian(cost_matrix.T))
        assert expected_total == transposed_cost
| bsd-3-clause |
WarrenWeckesser/scikits-image | skimage/viewer/viewers/core.py | 33 | 13265 | """
ImageViewer class for viewing and interacting with images.
"""
import numpy as np
from ... import io, img_as_float
from ...util.dtype import dtype_range
from ...exposure import rescale_intensity
from ..qt import QtWidgets, Qt, Signal
from ..widgets import Slider
from ..utils import (dialogs, init_qtapp, figimage, start_qtapp,
update_axes_image)
from ..utils.canvas import BlitManager, EventManager
from ..plugins.base import Plugin
__all__ = ['ImageViewer', 'CollectionViewer']
def mpl_image_to_rgba(mpl_image):
    """Return RGB image from the given matplotlib image object.
    Each image in a matplotlib figure has its own colormap and normalization
    function. Return RGBA (RGB + alpha channel) image with float dtype.
    Parameters
    ----------
    mpl_image : matplotlib.image.AxesImage object
        The image being converted.
    Returns
    -------
    img : array of float, shape (M, N, 4)
        An image of float values in [0, 1].
    """
    image = mpl_image.get_array()
    if image.ndim == 2:
        # Grayscale: push the data through the image's norm and colormap to
        # obtain an (M, N, 4) RGBA array.
        input_range = (mpl_image.norm.vmin, mpl_image.norm.vmax)
        image = rescale_intensity(image, in_range=input_range)
        # cmap complains on bool arrays
        image = mpl_image.cmap(img_as_float(image))
    elif image.ndim == 3 and image.shape[2] == 3:
        # RGB without alpha: append a single fully-opaque alpha channel.
        # The previous np.dstack((image, np.ones_like(image))) appended THREE
        # channels (yielding an invalid (M, N, 6) array), and an integer ones
        # plane would become a near-zero alpha after float conversion.
        image = img_as_float(image)
        alpha = np.ones(image.shape[:2] + (1,), dtype=image.dtype)
        image = np.concatenate((image, alpha), axis=2)
    return img_as_float(image)
class ImageViewer(QtWidgets.QMainWindow):
    """Viewer for displaying images.
    This viewer is a simple container object that holds a Matplotlib axes
    for showing images. `ImageViewer` doesn't subclass the Matplotlib axes (or
    figure) because of the high probability of name collisions.
    Subclasses and plugins will likely extend the `update_image` method to add
    custom overlays or filter the displayed image.
    Parameters
    ----------
    image : array
        Image being viewed.
    Attributes
    ----------
    canvas, fig, ax : Matplotlib canvas, figure, and axes
        Matplotlib canvas, figure, and axes used to display image.
    image : array
        Image being viewed. Setting this value will update the displayed frame.
    original_image : array
        Plugins typically operate on (but don't change) the *original* image.
    plugins : list
        List of attached plugins.
    Examples
    --------
    >>> from skimage import data
    >>> image = data.coins()
    >>> viewer = ImageViewer(image) # doctest: +SKIP
    >>> viewer.show() # doctest: +SKIP
    """
    # Map of the dock names accepted by plugins to the Qt dock area flags.
    dock_areas = {'top': Qt.TopDockWidgetArea,
                  'bottom': Qt.BottomDockWidgetArea,
                  'left': Qt.LeftDockWidgetArea,
                  'right': Qt.RightDockWidgetArea}
    # Signal that the original image has been changed
    original_image_changed = Signal(np.ndarray)

    def __init__(self, image, useblit=True):
        # Build the Qt window (menu, central matplotlib canvas, status bar)
        # and display the initial image.  *image* may also be a Plugin, in
        # which case its filtered output is shown and tracked.
        # Start main loop
        init_qtapp()
        super(ImageViewer, self).__init__()
        #TODO: Add ImageViewer to skimage.io window manager
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.setWindowTitle("Image Viewer")
        self.file_menu = QtWidgets.QMenu('&File', self)
        self.file_menu.addAction('Open file', self.open_file,
                                 Qt.CTRL + Qt.Key_O)
        self.file_menu.addAction('Save to file', self.save_to_file,
                                 Qt.CTRL + Qt.Key_S)
        self.file_menu.addAction('Quit', self.close,
                                 Qt.CTRL + Qt.Key_Q)
        self.menuBar().addMenu(self.file_menu)
        self.main_widget = QtWidgets.QWidget()
        self.setCentralWidget(self.main_widget)
        if isinstance(image, Plugin):
            plugin = image
            image = plugin.filtered_image
            plugin.image_changed.connect(self._update_original_image)
            # When plugin is started, start
            plugin._started.connect(self._show)
        self.fig, self.ax = figimage(image)
        self.canvas = self.fig.canvas
        self.canvas.setParent(self)
        self.ax.autoscale(enable=False)
        self._tools = []
        self.useblit = useblit
        if useblit:
            # Blitting redraws only changed artists for fast interaction.
            self._blit_manager = BlitManager(self.ax)
        self._event_manager = EventManager(self.ax)
        self._image_plot = self.ax.images[0]
        self._update_original_image(image)
        self.plugins = []
        self.layout = QtWidgets.QVBoxLayout(self.main_widget)
        self.layout.addWidget(self.canvas)
        status_bar = self.statusBar()
        self.status_message = status_bar.showMessage
        sb_size = status_bar.sizeHint()
        cs_size = self.canvas.sizeHint()
        self.resize(cs_size.width(), cs_size.height() + sb_size.height())
        self.connect_event('motion_notify_event', self._update_status_bar)

    def __add__(self, plugin):
        """Add plugin to ImageViewer"""
        plugin.attach(self)
        self.original_image_changed.connect(plugin._update_original_image)
        if plugin.dock:
            location = self.dock_areas[plugin.dock]
            dock_location = Qt.DockWidgetArea(location)
            dock = QtWidgets.QDockWidget()
            dock.setWidget(plugin)
            dock.setWindowTitle(plugin.name)
            self.addDockWidget(dock_location, dock)
            # Grow the window along the axis the dock occupies.
            horiz = (self.dock_areas['left'], self.dock_areas['right'])
            dimension = 'width' if location in horiz else 'height'
            self._add_widget_size(plugin, dimension=dimension)
        return self

    def _add_widget_size(self, widget, dimension='width'):
        # Enlarge the viewer window by the widget's preferred size along
        # *dimension* ('width' or 'height').
        widget_size = widget.sizeHint()
        viewer_size = self.frameGeometry()
        dx = dy = 0
        if dimension == 'width':
            dx = widget_size.width()
        elif dimension == 'height':
            dy = widget_size.height()
        w = viewer_size.width()
        h = viewer_size.height()
        self.resize(w + dx, h + dy)

    def open_file(self, filename=None):
        """Open image file and display in viewer."""
        if filename is None:
            filename = dialogs.open_file_dialog()
        if filename is None:
            return
        image = io.imread(filename)
        self._update_original_image(image)

    def update_image(self, image):
        """Update displayed image.
        This method can be overridden or extended in subclasses and plugins to
        react to image changes.
        """
        self._update_original_image(image)

    def _update_original_image(self, image):
        # Keep an untouched copy for plugins and notify listeners.
        self.original_image = image     # update saved image
        self.image = image.copy()       # update displayed image
        self.original_image_changed.emit(image)

    def save_to_file(self, filename=None):
        """Save current image to file.
        The current behavior is not ideal: It saves the image displayed on
        screen, so all images will be converted to RGB, and the image size is
        not preserved (resizing the viewer window will alter the size of the
        saved image).
        """
        if filename is None:
            filename = dialogs.save_file_dialog()
        if filename is None:
            return
        if len(self.ax.images) == 1:
            io.imsave(filename, self.image)
        else:
            # Two image artists: alpha-composite the overlay onto the base.
            underlay = mpl_image_to_rgba(self.ax.images[0])
            overlay = mpl_image_to_rgba(self.ax.images[1])
            alpha = overlay[:, :, 3]
            # alpha can be set by channel of array or by a scalar value.
            # Prefer the alpha channel, but fall back to scalar value.
            if np.all(alpha == 1):
                alpha = np.ones_like(alpha) * self.ax.images[1].get_alpha()
            alpha = alpha[:, :, np.newaxis]
            composite = (overlay[:, :, :3] * alpha +
                         underlay[:, :, :3] * (1 - alpha))
            io.imsave(filename, composite)

    def closeEvent(self, event):
        # Qt close hook: delegate to close() so plugins are torn down too.
        self.close()

    def _show(self, x=0):
        # Show the viewer window (optionally offset to *x*) plus all plugins.
        self.move(x, 0)
        for p in self.plugins:
            p.show()
        super(ImageViewer, self).show()
        self.activateWindow()
        self.raise_()

    def show(self, main_window=True):
        """Show ImageViewer and attached plugins.
        This behaves much like `matplotlib.pyplot.show` and `QWidget.show`.
        """
        self._show()
        if main_window:
            start_qtapp()
        # Return each plugin's output once the Qt event loop exits.
        return [p.output() for p in self.plugins]

    def redraw(self):
        # Fast blit-based redraw when enabled, full canvas redraw otherwise.
        if self.useblit:
            self._blit_manager.redraw()
        else:
            self.canvas.draw_idle()

    @property
    def image(self):
        # Currently displayed image array.
        return self._img

    @image.setter
    def image(self, image):
        # Replace the displayed image, refit axes limits and color range.
        self._img = image
        update_axes_image(self._image_plot, image)
        # update display (otherwise image doesn't fill the canvas)
        h, w = image.shape[:2]
        self.ax.set_xlim(0, w)
        self.ax.set_ylim(h, 0)
        # update color range
        clim = dtype_range[image.dtype.type]
        if clim[0] < 0 and image.min() >= 0:
            clim = (0, clim[1])
        self._image_plot.set_clim(clim)
        if self.useblit:
            # Invalidate the cached background so the next blit is rebuilt.
            self._blit_manager.background = None
        self.redraw()

    def reset_image(self):
        # Discard any displayed modifications, back to the original image.
        self.image = self.original_image.copy()

    def connect_event(self, event, callback):
        """Connect callback function to matplotlib event and return id."""
        cid = self.canvas.mpl_connect(event, callback)
        return cid

    def disconnect_event(self, callback_id):
        """Disconnect callback by its id (returned by `connect_event`)."""
        self.canvas.mpl_disconnect(callback_id)

    def _update_status_bar(self, event):
        # Show "value @ [x, y]" while the cursor is over the image axes.
        if event.inaxes and event.inaxes.get_navigate():
            self.status_message(self._format_coord(event.xdata, event.ydata))
        else:
            self.status_message('')

    def add_tool(self, tool):
        # Register an interactive canvas tool (and its artists for blitting).
        if self.useblit:
            self._blit_manager.add_artists(tool.artists)
        self._tools.append(tool)
        self._event_manager.attach(tool)

    def remove_tool(self, tool):
        # Detach a previously added tool; a no-op for unknown tools.
        if tool not in self._tools:
            return
        if self.useblit:
            self._blit_manager.remove_artists(tool.artists)
        self._tools.remove(tool)
        self._event_manager.detach(tool)

    def _format_coord(self, x, y):
        # callback function to format coordinate display in status bar
        x = int(x + 0.5)
        y = int(y + 0.5)
        try:
            return "%4s @ [%4s, %4s]" % (self.image[y, x], x, y)
        except IndexError:
            return ""
class CollectionViewer(ImageViewer):
    """Viewer for displaying image collections.
    Select the displayed frame of the image collection using the slider or
    with the following keyboard shortcuts:
    left/right arrows
        Previous/next image in collection.
    number keys, 0--9
        0% to 90% of collection. For example, "5" goes to the image in the
        middle (i.e. 50%) of the collection.
    home/end keys
        First/last image in collection.
    Parameters
    ----------
    image_collection : list of images
        List of images to be displayed.
    update_on : {'move' | 'release'}
        Control whether image is updated on slide or release of the image
        slider. Using 'on_release' will give smoother behavior when displaying
        large images or when writing a plugin/subclass that requires heavy
        computation.
    """
    def __init__(self, image_collection, update_on='move', **kwargs):
        # Show the first frame and attach a frame-index slider below it.
        self.image_collection = image_collection
        self.index = 0
        self.num_images = len(self.image_collection)
        first_image = image_collection[0]
        super(CollectionViewer, self).__init__(first_image)
        slider_kws = dict(value=0, low=0, high=self.num_images - 1)
        slider_kws['update_on'] = update_on
        slider_kws['callback'] = self.update_index
        slider_kws['value_type'] = 'int'
        self.slider = Slider('frame', **slider_kws)
        self.layout.addWidget(self.slider)
        #TODO: Adjust height to accomodate slider; the following doesn't work
        # s_size = self.slider.sizeHint()
        # cs_size = self.canvas.sizeHint()
        # self.resize(cs_size.width(), cs_size.height() + s_size.height())

    def update_index(self, name, index):
        """Select image on display using index into image collection."""
        index = int(round(index))
        if index == self.index:
            return
        # clip index value to collection limits
        index = max(index, 0)
        index = min(index, self.num_images - 1)
        self.index = index
        self.slider.val = index
        self.update_image(self.image_collection[index])

    def keyPressEvent(self, event):
        # Keyboard navigation: number keys jump to deciles of the collection;
        # all other keys are passed through to the default handling.
        if type(event) == QtWidgets.QKeyEvent:
            key = event.key()
            # Number keys (code: 0 = key 48, 9 = key 57) move to deciles
            if 48 <= key < 58:
                index = 0.1 * int(key - 48) * self.num_images
                self.update_index('', index)
                event.accept()
            else:
                event.ignore()
        else:
            event.ignore()
| bsd-3-clause |
sinhrks/pyopendata | pyopendata/tests/test_worldbank.py | 1 | 5911 | # pylint: disable-msg=E1101,W0613,W0603
from pyopendata import WorldBankStore, WorldBankResource
import numpy as np
import pandas as pd
from pandas.compat import range
import pandas.util.testing as tm
class TestWorldBankTestSite(tm.TestCase):
def setUp(self):
self.store = WorldBankStore()
def test_isvalid(self):
self.assertTrue(self.store.is_valid())
def test_get_gdp_per_capita(self):
resource = self.store.get('NY.GDP.PCAP.CD')
self.assertTrue(isinstance(resource, WorldBankResource))
df = resource.read()
jp = np.array([478.99534016, 563.58675984, 633.64031517, 717.86691523,
835.65725248, 919.77668818, 1058.50356091, 1228.9092104,
1450.61965234, 1669.09819991, 2003.64704736, 2234.26166585,
2917.65897572, 3931.30162742, 4281.35992841, 4581.57438948,
5111.29514922, 6230.33568811, 8675.01399673, 8953.59152028,
9307.83929459, 10212.3781359, 9428.87465037, 10213.95827931,
10786.78618095, 11465.72578163, 16882.27395207, 20355.60522244,
24592.77200535, 24505.76729587, 25123.63178621, 28540.7714826,
31013.64714836, 35451.29751157, 38814.89437898, 42522.06659061,
37421.67385771, 34294.89897666, 30967.28808909, 34998.80997175,
37291.70615804, 32716.41867489, 31235.58818439, 33690.93772972,
36441.50449394, 35781.16626514, 34102.11477775, 34095.02343297,
37972.0557387, 39473.36750954, 43117.82967369, 46203.69803728,
46548.26963715, 38492.08889474])
us = np.array([2881.0997978, 2934.55277761, 3107.93741663, 3232.2080093,
3423.39628164, 3664.8018704, 3972.12308995, 4152.01983719,
4491.42430453, 4802.64248506, 5246.96174629, 5623.58844463,
6109.6924191, 6741.10113303, 7242.32420249, 7819.95897635,
8611.46146261, 9471.5286575, 10587.41604331, 11695.36335562,
12597.64550556, 13992.92269879, 14439.01512535, 15561.26813578,
17134.3157002, 18269.27926565, 19114.82386844, 20100.78872751,
21483.11445037, 22922.46545039, 23954.52342132, 24404.99484151,
25492.95555018, 26464.7832594, 27776.42650289, 28781.94969168,
30068.22720625, 31572.63521567, 32948.95125682, 34639.11983945,
36467.29542582, 37285.81592335, 38175.37638297, 39682.47224732,
41928.88613648, 44313.58524128, 46443.81019859, 48070.38468627,
48407.0769099, 46998.82041531, 48357.67356926, 49854.52266835,
51755.21484065, 53142.88963052])
index = pd.DatetimeIndex(map(str, range(1960, 2014)), name='date')
for label, values in [('Japan', jp), ('United States', us)]:
expected = pd.Series(values, index=index)
result = df['GDP per capita (current US$)'][label]['1960':'2013']
tm.assert_series_equal(result, expected)
raw_data = resource.read(raw=True)
self.assertTrue(len(raw_data) > 0)
    def test_get_co2_emit(self):
        # Regression test: per-capita CO2 emissions (indicator EN.ATM.CO2E.PC)
        # for Japan and the United States, 1960-2010, must match these
        # previously-observed values. NOTE(review): depends on the remote
        # World Bank data staying stable for this historical range.
        resource = self.store.get('EN.ATM.CO2E.PC')
        self.assertTrue(isinstance(resource, WorldBankResource))
        df = resource.read()
        # Expected 'metric tons per capita' values, one per year 1960..2010.
        jp = np.array([2.51653752, 2.98197939, 3.05973635, 3.35932078,
                       3.67303507, 3.91290553, 4.20626471, 4.86355785,
                       5.56659316, 6.33852317, 7.36808874, 7.54556103,
                       7.96146247, 8.47295875, 8.31387944, 7.77267069,
                       8.05971943, 8.21349644, 7.86685725, 8.24734789,
                       8.11401701, 7.90159205, 7.59990231, 7.41108575,
                       7.83324828, 7.5806754, 7.5340533, 7.41845835,
                       8.06669419, 8.3299481, 8.86239902, 8.88086258,
                       9.04436538, 8.90155004, 9.39514505, 9.43841991,
                       9.58652198, 9.52987839, 9.16909548, 9.45947022,
                       9.61290451, 9.45557013, 9.54726303, 9.68876079,
                       9.85946288, 9.69047361, 9.63791597, 9.79204079,
                       9.4508838, 8.62862707, 9.18565087])
        us = np.array([15.99977916, 15.68125552, 16.0139375, 16.48276215,
                       16.96811858, 17.45172525, 18.12107301, 18.59831788,
                       19.08938916, 19.85794566, 21.11125227, 20.98020348,
                       21.74864198, 22.51058213, 21.50293038, 20.40222407,
                       21.15761537, 21.53248401, 21.96514631, 21.77411499,
                       20.77751491, 19.7492974, 18.56395007, 18.54180517,
                       18.95611586, 18.85669719, 18.70287126, 19.33406449,
                       19.9946205, 20.0595756, 19.10135589, 19.07931196,
                       19.1887997, 19.35128717, 19.46428538, 19.36385544,
                       19.62199049, 19.87640503, 19.77891432, 19.82400911,
                       20.24918916, 19.65619321, 19.6469218, 19.58465737,
                       19.7768452, 19.7159606, 19.22922866, 19.34957722,
                       18.60227269, 17.31529716, 17.56415999])
        # One index entry per year; the parsed frame is indexed by date.
        index = pd.DatetimeIndex(map(str, range(1960, 2011)), name='date')
        for label, values in [('Japan', jp), ('United States', us)]:
            expected = pd.Series(values, index=index)
            result = df['CO2 emissions (metric tons per capita)'][label]['1960':'2010']
            tm.assert_series_equal(result, expected)
        # raw=True should yield the unparsed payload; just sanity-check it.
        raw_data = resource.read(raw=True)
        self.assertTrue(len(raw_data) > 0)
if __name__ == '__main__':
    # Run this module's tests under nose; stop at first failure and drop
    # into pdb on errors/failures for interactive debugging.
    import nose
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
| bsd-2-clause |
masfaraud/volmdlr | scripts/primitives/sweep.py | 1 | 1656 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 11 15:16:33 2018
@author: steven
"""
import volmdlr as vm
import volmdlr.primitives3d as primitives3d
import volmdlr.wires as wires
import numpy as npy
import random
import matplotlib.pyplot as plt
# Fixed starting points of the sweep path.
p1 = vm.Point3D(0, 0, 0)
p2 = vm.Point3D(-0.150, 0, 0)
p3 = vm.Point3D(-0.150, 0.215, 0)
p4 = vm.Point3D(-0.150, 0.215, -0.058)
p5 = vm.Point3D(-0.150, 0.215, -0.058)
p5 = vm.Point3D(-0.175, 0.186, -0.042)
points = [p1, p2, p3, p4, p5]
radius = {1: 0.015, 2: 0.020, 3: 0.005}
current_point = p5
#points = [p1, p2]
#radius = {1: 0.010}
# Extend the path with 6 random waypoints and random fillet radii
# (keys continue the 1-based radius indexing started above).
for i in range(6):
    current_point += vm.Point3D.random(-0.1, 0.3, -0.1, 0.3, -0.1, 0.3)
    points.append(current_point)
    radius[4+i] = 0.01 + 0.03 * random.random()
#print(radius)
# c = vm.Circle3D(p1, 0.008, p2-p1)
# Circular cross-section (8 mm radius) to be swept along the path.
contour = wires.Circle2D(vm.O2D, 0.008)
# adapt_radius=True lets the primitive shrink fillet radii that do not fit.
rl = primitives3d.OpenRoundedLineSegments3D(points, radius, adapt_radius=True, name='wire')
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for prim in rl.primitives :
    prim.plot(ax=ax)
# Round-trip through dict serialization as a sanity check.
r1 = rl.to_dict()
r2 = primitives3d.OpenRoundedLineSegments3D.dict_to_object(r1)
c1 = contour.to_dict()
c2 = vm.wires.Circle2D.dict_to_object(c1)
# c1 = contour.to_dict()
# c2 = vm.Contour2D.dict_to_object(c1)
sweep = primitives3d.Sweep(contour, rl, name = 'Random pipe')
# sweepy = sweep.copy()
# v1 = vm.Vector3D((1,1,1))
# v1.Normalize()
# v2 = v1.deterministic_unit_normal_vector()
# v3 = v1.Cross(v2)
# frame0 = vm.Frame3D(vm.Point3D((0,0,0)), v1, v2, v3)
# frame_mapping = sweepy.frame_mapping(frame0, 'new', True)
# Display in the browser and export to STEP.
model = vm.core.VolumeModel([sweep])
model.babylonjs()
model.to_step('sweep.step')
| gpl-3.0 |
cloudera/ibis | ibis/backends/tests/base.py | 1 | 11462 | import abc
import inspect
from pathlib import Path
from typing import Any, Callable, Mapping, Optional
import numpy as np
import pandas as pd
import pandas.testing as tm
import pytest
import ibis
import ibis.backends.base_sqlalchemy.compiler as comp
import ibis.expr.types as ir
# TODO: Merge into BackendTest, #2564
class RoundingConvention:
    """Interface describing a backend's rounding semantics.

    NOTE(review): `abc.abstractmethod` is not enforced here because the
    class does not inherit from `abc.ABC`; subclasses are nevertheless
    expected to override `round`.
    """

    @staticmethod
    @abc.abstractmethod
    def round(series: pd.Series, decimals: int = 0) -> pd.Series:
        """Round a series to `decimals` number of decimal values."""
# TODO: Merge into BackendTest, #2564
class RoundAwayFromZero(RoundingConvention):
    """Rounding convention where halves are rounded away from zero."""

    @staticmethod
    def round(series: pd.Series, decimals: int = 0) -> pd.Series:
        """Round `series` to `decimals` places; at 0 decimals, ties go
        away from zero and the result is cast to int64."""
        if decimals:
            return series.round(decimals=decimals)
        # ceil(-|x| - 0.5) rounds |x| + 0.5 downward, which is exactly
        # "half away from zero" on the magnitude; re-apply the sign after.
        rounded = -np.sign(series) * np.ceil(-series.abs() - 0.5)
        return rounded.astype(np.int64)
# TODO: Merge into BackendTest, #2564
class RoundHalfToEven(RoundingConvention):
    """Rounding convention where halves go to the nearest even digit
    (banker's rounding) -- pandas' native behavior."""

    @staticmethod
    def round(series: pd.Series, decimals: int = 0) -> pd.Series:
        """Round `series`; with 0 decimals the result is cast to int64."""
        rounded = series.round(decimals=decimals)
        if decimals:
            return rounded
        return rounded.astype(np.int64)
# TODO: Merge into BackendTest, #2564
class UnorderedComparator:
    """Mixin that makes frame/series comparisons order-insensitive.

    Must appear before a class that provides `assert_series_equal` /
    `assert_frame_equal` in the MRO (e.g. `BackendTest`), since both
    methods delegate to `super()`.
    """

    @classmethod
    def assert_series_equal(
        cls, left: pd.Series, right: pd.Series, *args: Any, **kwargs: Any
    ) -> None:
        # Sort values and discard the index so only the multiset of
        # values is compared.
        left = left.sort_values().reset_index(drop=True)
        right = right.sort_values().reset_index(drop=True)
        return super().assert_series_equal(left, right, *args, **kwargs)

    @classmethod
    def assert_frame_equal(
        cls, left: pd.DataFrame, right: pd.DataFrame, *args: Any, **kwargs: Any
    ) -> None:
        # Sort both frames by the columns they have in common before
        # delegating to the ordered comparison.
        columns = list(set(left.columns) & set(right.columns))
        left = left.sort_values(by=columns)
        right = right.sort_values(by=columns)
        return super().assert_frame_equal(left, right, *args, **kwargs)
class BackendTest(abc.ABC):
    """Base harness for per-backend test suites.

    Concrete subclasses implement `connect` and override the capability
    flags below; the shared tests consult the flags to skip or adapt
    individual cases for each backend.
    """

    # comparison options forwarded to pandas.testing
    check_dtype = True
    check_names = True
    # backend capability flags
    supports_arrays = True
    supports_arrays_outside_of_select = supports_arrays
    supports_window_operations = True
    additional_skipped_operations = frozenset()
    supports_divide_by_zero = False
    returned_timestamp_unit = 'us'
    supported_to_timestamp_units = {'s', 'ms', 'us'}
    supports_floating_modulus = True

    def __init__(self, data_directory: Path) -> None:
        self.api  # accessing the property skips if we can't reach the backend
        self.connection = self.connect(data_directory)

    def __str__(self):
        return f'<BackendTest {self.name()}>'

    @classmethod
    def name(cls) -> str:
        """Backend name, derived from the package directory that holds the
        concrete test module (…/<backend>/tests/…)."""
        backend_tests_path = inspect.getmodule(cls).__file__
        return Path(backend_tests_path).resolve().parent.parent.name

    @staticmethod
    @abc.abstractmethod
    def connect(data_directory: Path) -> ibis.client.Client:
        """Return a connection with data loaded from `data_directory`."""

    @classmethod
    def assert_series_equal(
        cls, left: pd.Series, right: pd.Series, *args: Any, **kwargs: Any
    ) -> None:
        # honor the per-backend dtype/name strictness flags unless the
        # caller overrides them explicitly
        kwargs.setdefault('check_dtype', cls.check_dtype)
        kwargs.setdefault('check_names', cls.check_names)
        tm.assert_series_equal(left, right, *args, **kwargs)

    @classmethod
    def assert_frame_equal(
        cls, left: pd.DataFrame, right: pd.DataFrame, *args: Any, **kwargs: Any
    ) -> None:
        # compare positionally; row labels are not meaningful here
        left = left.reset_index(drop=True)
        right = right.reset_index(drop=True)
        tm.assert_frame_equal(left, right, *args, **kwargs)

    @staticmethod
    def default_series_rename(
        series: pd.Series, name: str = 'tmp'
    ) -> pd.Series:
        """Give an unnamed result series a predictable name for comparison."""
        return series.rename(name)

    @staticmethod
    def greatest(
        f: Callable[..., ir.ValueExpr], *args: ir.ValueExpr
    ) -> ir.ValueExpr:
        # default: the backend supports variadic greatest() directly
        return f(*args)

    @staticmethod
    def least(
        f: Callable[..., ir.ValueExpr], *args: ir.ValueExpr
    ) -> ir.ValueExpr:
        # default: the backend supports variadic least() directly
        return f(*args)

    @property
    def db(self) -> ibis.client.Database:
        return self.connection.database()

    # convenience accessors for the standard test tables

    @property
    def functional_alltypes(self) -> ir.TableExpr:
        return self.db.functional_alltypes

    @property
    def batting(self) -> ir.TableExpr:
        return self.db.batting

    @property
    def awards_players(self) -> ir.TableExpr:
        return self.db.awards_players

    @property
    def geo(self) -> Optional[ir.TableExpr]:
        # backends with geospatial support override this
        return None

    @property
    def api(self):
        """The ibis backend module (e.g. `ibis.sqlite`); raises/skips when
        the backend is not installed."""
        return getattr(ibis, self.name())

    def make_context(
        self, params: Optional[Mapping[ir.ValueExpr, Any]] = None
    ) -> comp.QueryContext:
        return self.api.dialect.make_context(params=params)
# TODO move to the spark/pyspark backends, #2565
_spark_testing_client = None
_pyspark_testing_client = None
# TODO move to the spark/pyspark backends, #2565
def get_spark_testing_client(data_directory):
    """Return the process-wide Spark testing client, creating (and caching
    in a module-level global) it on first use."""
    global _spark_testing_client
    if _spark_testing_client is None:
        _spark_testing_client = get_common_spark_testing_client(
            data_directory, lambda session: ibis.spark.connect(session)
        )
    return _spark_testing_client
# TODO move to the spark/pyspark backends, #2565
def get_pyspark_testing_client(data_directory):
    """Return the process-wide PySpark testing client, creating (and
    caching in a module-level global) it on first use."""
    global _pyspark_testing_client
    if _pyspark_testing_client is None:
        _pyspark_testing_client = get_common_spark_testing_client(
            data_directory, lambda session: ibis.pyspark.connect(session)
        )
    return _pyspark_testing_client
# TODO move to the spark/pyspark backends, #2565
def get_common_spark_testing_client(data_directory, connect):
pytest.importorskip('pyspark')
import pyspark.sql.types as pt
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
_spark_testing_client = connect(spark)
s = _spark_testing_client._session
df_functional_alltypes = s.read.csv(
path=str(data_directory / 'functional_alltypes.csv'),
schema=pt.StructType(
[
pt.StructField('index', pt.IntegerType(), True),
pt.StructField('Unnamed: 0', pt.IntegerType(), True),
pt.StructField('id', pt.IntegerType(), True),
# cast below, Spark can't read 0/1 as bool
pt.StructField('bool_col', pt.ByteType(), True),
pt.StructField('tinyint_col', pt.ByteType(), True),
pt.StructField('smallint_col', pt.ShortType(), True),
pt.StructField('int_col', pt.IntegerType(), True),
pt.StructField('bigint_col', pt.LongType(), True),
pt.StructField('float_col', pt.FloatType(), True),
pt.StructField('double_col', pt.DoubleType(), True),
pt.StructField('date_string_col', pt.StringType(), True),
pt.StructField('string_col', pt.StringType(), True),
pt.StructField('timestamp_col', pt.TimestampType(), True),
pt.StructField('year', pt.IntegerType(), True),
pt.StructField('month', pt.IntegerType(), True),
]
),
mode='FAILFAST',
header=True,
)
df_functional_alltypes = df_functional_alltypes.withColumn(
"bool_col", df_functional_alltypes["bool_col"].cast("boolean")
)
df_functional_alltypes.createOrReplaceTempView('functional_alltypes')
df_batting = s.read.csv(
path=str(data_directory / 'batting.csv'),
schema=pt.StructType(
[
pt.StructField('playerID', pt.StringType(), True),
pt.StructField('yearID', pt.IntegerType(), True),
pt.StructField('stint', pt.IntegerType(), True),
pt.StructField('teamID', pt.StringType(), True),
pt.StructField('lgID', pt.StringType(), True),
pt.StructField('G', pt.IntegerType(), True),
pt.StructField('AB', pt.DoubleType(), True),
pt.StructField('R', pt.DoubleType(), True),
pt.StructField('H', pt.DoubleType(), True),
pt.StructField('X2B', pt.DoubleType(), True),
pt.StructField('X3B', pt.DoubleType(), True),
pt.StructField('HR', pt.DoubleType(), True),
pt.StructField('RBI', pt.DoubleType(), True),
pt.StructField('SB', pt.DoubleType(), True),
pt.StructField('CS', pt.DoubleType(), True),
pt.StructField('BB', pt.DoubleType(), True),
pt.StructField('SO', pt.DoubleType(), True),
pt.StructField('IBB', pt.DoubleType(), True),
pt.StructField('HBP', pt.DoubleType(), True),
pt.StructField('SH', pt.DoubleType(), True),
pt.StructField('SF', pt.DoubleType(), True),
pt.StructField('GIDP', pt.DoubleType(), True),
]
),
header=True,
)
df_batting.createOrReplaceTempView('batting')
df_awards_players = s.read.csv(
path=str(data_directory / 'awards_players.csv'),
schema=pt.StructType(
[
pt.StructField('playerID', pt.StringType(), True),
pt.StructField('awardID', pt.StringType(), True),
pt.StructField('yearID', pt.IntegerType(), True),
pt.StructField('lgID', pt.StringType(), True),
pt.StructField('tie', pt.StringType(), True),
pt.StructField('notes', pt.StringType(), True),
]
),
header=True,
)
df_awards_players.createOrReplaceTempView('awards_players')
df_simple = s.createDataFrame([(1, 'a')], ['foo', 'bar'])
df_simple.createOrReplaceTempView('simple')
df_struct = s.createDataFrame([((1, 2, 'a'),)], ['struct_col'])
df_struct.createOrReplaceTempView('struct')
df_nested_types = s.createDataFrame(
[([1, 2], [[3, 4], [5, 6]], {'a': [[2, 4], [3, 5]]})],
[
'list_of_ints',
'list_of_list_of_ints',
'map_string_list_of_list_of_ints',
],
)
df_nested_types.createOrReplaceTempView('nested_types')
df_complicated = s.createDataFrame(
[({(1, 3): [[2, 4], [3, 5]]},)], ['map_tuple_list_of_list_of_ints']
)
df_complicated.createOrReplaceTempView('complicated')
df_udf = s.createDataFrame(
[('a', 1, 4.0, 'a'), ('b', 2, 5.0, 'a'), ('c', 3, 6.0, 'b')],
['a', 'b', 'c', 'key'],
)
df_udf.createOrReplaceTempView('udf')
df_udf_nan = s.createDataFrame(
pd.DataFrame(
{
'a': np.arange(10, dtype=float),
'b': [3.0, np.NaN] * 5,
'key': list('ddeefffggh'),
}
)
)
df_udf_nan.createOrReplaceTempView('udf_nan')
df_udf_null = s.createDataFrame(
[
(float(i), None if i % 2 else 3.0, 'ddeefffggh'[i])
for i in range(10)
],
['a', 'b', 'key'],
)
df_udf_null.createOrReplaceTempView('udf_null')
df_udf_random = s.createDataFrame(
pd.DataFrame(
{
'a': np.arange(4, dtype=float).tolist()
+ np.random.rand(3).tolist(),
'b': np.arange(4, dtype=float).tolist()
+ np.random.rand(3).tolist(),
'key': list('ddeefff'),
}
)
)
df_udf_random.createOrReplaceTempView('udf_random')
return _spark_testing_client
| apache-2.0 |
abgoswam/data-science-from-scratch | code-python3/nearest_neighbors.py | 4 | 7323 | from collections import Counter
from linear_algebra import distance
from statistics import mean
import math, random
import matplotlib.pyplot as plt
def raw_majority_vote(labels):
    """Return the single most common label among `labels`."""
    # most_common(1) yields a one-element list of (label, count) pairs
    top = Counter(labels).most_common(1)
    return top[0][0]
def majority_vote(labels):
    """assumes that labels are ordered from nearest to farthest"""
    counts = Counter(labels)
    top_label, top_count = counts.most_common(1)[0]
    # how many distinct labels are tied for the top count?
    winners = sum(1 for c in counts.values() if c == top_count)
    if winners > 1:
        # break the tie by dropping the farthest label and re-voting
        return majority_vote(labels[:-1])
    return top_label
def knn_classify(k, labeled_points, new_point):
    """each labeled point should be a pair (point, label)"""
    # rank all labeled points by distance to the query point
    ranked = sorted(labeled_points,
                    key=lambda point_label: distance(point_label[0], new_point))
    # keep only the labels of the k nearest neighbors
    nearest_labels = [label for _, label in ranked[:k]]
    # and let them vote
    return majority_vote(nearest_labels)
cities = [(-86.75,33.5666666666667,'Python'),(-88.25,30.6833333333333,'Python'),(-112.016666666667,33.4333333333333,'Java'),(-110.933333333333,32.1166666666667,'Java'),(-92.2333333333333,34.7333333333333,'R'),(-121.95,37.7,'R'),(-118.15,33.8166666666667,'Python'),(-118.233333333333,34.05,'Java'),(-122.316666666667,37.8166666666667,'R'),(-117.6,34.05,'Python'),(-116.533333333333,33.8166666666667,'Python'),(-121.5,38.5166666666667,'R'),(-117.166666666667,32.7333333333333,'R'),(-122.383333333333,37.6166666666667,'R'),(-121.933333333333,37.3666666666667,'R'),(-122.016666666667,36.9833333333333,'Python'),(-104.716666666667,38.8166666666667,'Python'),(-104.866666666667,39.75,'Python'),(-72.65,41.7333333333333,'R'),(-75.6,39.6666666666667,'Python'),(-77.0333333333333,38.85,'Python'),(-80.2666666666667,25.8,'Java'),(-81.3833333333333,28.55,'Java'),(-82.5333333333333,27.9666666666667,'Java'),(-84.4333333333333,33.65,'Python'),(-116.216666666667,43.5666666666667,'Python'),(-87.75,41.7833333333333,'Java'),(-86.2833333333333,39.7333333333333,'Java'),(-93.65,41.5333333333333,'Java'),(-97.4166666666667,37.65,'Java'),(-85.7333333333333,38.1833333333333,'Python'),(-90.25,29.9833333333333,'Java'),(-70.3166666666667,43.65,'R'),(-76.6666666666667,39.1833333333333,'R'),(-71.0333333333333,42.3666666666667,'R'),(-72.5333333333333,42.2,'R'),(-83.0166666666667,42.4166666666667,'Python'),(-84.6,42.7833333333333,'Python'),(-93.2166666666667,44.8833333333333,'Python'),(-90.0833333333333,32.3166666666667,'Java'),(-94.5833333333333,39.1166666666667,'Java'),(-90.3833333333333,38.75,'Python'),(-108.533333333333,45.8,'Python'),(-95.9,41.3,'Python'),(-115.166666666667,36.0833333333333,'Java'),(-71.4333333333333,42.9333333333333,'R'),(-74.1666666666667,40.7,'R'),(-106.616666666667,35.05,'Python'),(-78.7333333333333,42.9333333333333,'R'),(-73.9666666666667,40.7833333333333,'R'),(-80.9333333333333,35.2166666666667,'Python'),(-78.7833333333333,35.8666666666667,'Python'),(-100.75,46.7666666666667,'Java'
),(-84.5166666666667,39.15,'Java'),(-81.85,41.4,'Java'),(-82.8833333333333,40,'Java'),(-97.6,35.4,'Python'),(-122.666666666667,45.5333333333333,'Python'),(-75.25,39.8833333333333,'Python'),(-80.2166666666667,40.5,'Python'),(-71.4333333333333,41.7333333333333,'R'),(-81.1166666666667,33.95,'R'),(-96.7333333333333,43.5666666666667,'Python'),(-90,35.05,'R'),(-86.6833333333333,36.1166666666667,'R'),(-97.7,30.3,'Python'),(-96.85,32.85,'Java'),(-95.35,29.9666666666667,'Java'),(-98.4666666666667,29.5333333333333,'Java'),(-111.966666666667,40.7666666666667,'Python'),(-73.15,44.4666666666667,'R'),(-77.3333333333333,37.5,'Python'),(-122.3,47.5333333333333,'Python'),(-89.3333333333333,43.1333333333333,'R'),(-104.816666666667,41.15,'Java')]
cities = [([longitude, latitude], language) for longitude, latitude, language in cities]
def plot_state_borders(plt, color='0.8'):
    """Intentional no-op placeholder for drawing US state borders on `plt`."""
    pass
def plot_cities():
    """Scatter-plot every city, styled by its favorite language, and show it."""
    # key is language, value is pair (longitudes, latitudes)
    plots = { "Java" : ([], []), "Python" : ([], []), "R" : ([], []) }
    # we want each language to have a different marker and color
    markers = { "Java" : "o", "Python" : "s", "R" : "^" }
    colors = { "Java" : "r", "Python" : "b", "R" : "g" }
    # bucket each city's coordinates under its language
    for (longitude, latitude), language in cities:
        plots[language][0].append(longitude)
        plots[language][1].append(latitude)
    # create a scatter series for each language
    for language, (x, y) in plots.items():
        plt.scatter(x, y, color=colors[language], marker=markers[language],
                    label=language, zorder=10)
    plot_state_borders(plt) # assume we have a function that does this
    plt.legend(loc=0) # let matplotlib choose the location
    plt.axis([-130,-60,20,55]) # set the axes
    plt.title("Favorite Programming Languages")
    plt.show()
def classify_and_plot_grid(k=1):
    """Classify every integer (longitude, latitude) over the map with
    k-nearest-neighbors on `cities`, then plot the decision regions."""
    plots = { "Java" : ([], []), "Python" : ([], []), "R" : ([], []) }
    markers = { "Java" : "o", "Python" : "s", "R" : "^" }
    colors = { "Java" : "r", "Python" : "b", "R" : "g" }
    # predict a language at each grid point and bucket the coordinates
    for longitude in range(-130, -60):
        for latitude in range(20, 55):
            predicted_language = knn_classify(k, cities, [longitude, latitude])
            plots[predicted_language][0].append(longitude)
            plots[predicted_language][1].append(latitude)
    # create a scatter series for each language
    for language, (x, y) in plots.items():
        plt.scatter(x, y, color=colors[language], marker=markers[language],
                    label=language, zorder=0)
    plot_state_borders(plt, color='black') # assume we have a function that does this
    plt.legend(loc=0) # let matplotlib choose the location
    plt.axis([-130,-60,20,55]) # set the axes
    plt.title(str(k) + "-Nearest Neighbor Programming Languages")
    plt.show()
#
# the curse of dimensionality
#
def random_point(dim):
    """Return a point with `dim` coordinates drawn uniformly from [0, 1)."""
    coords = []
    for _ in range(dim):
        coords.append(random.random())
    return coords
def random_distances(dim, num_pairs):
    """Distances between `num_pairs` freshly drawn pairs of `dim`-dimensional
    random points."""
    dists = []
    for _ in range(num_pairs):
        dists.append(distance(random_point(dim), random_point(dim)))
    return dists
if __name__ == "__main__":
# try several different values for k
for k in [1, 3, 5, 7]:
num_correct = 0
for location, actual_language in cities:
other_cities = [other_city
for other_city in cities
if other_city != (location, actual_language)]
predicted_language = knn_classify(k, other_cities, location)
if predicted_language == actual_language:
num_correct += 1
print(k, "neighbor[s]:", num_correct, "correct out of", len(cities))
dimensions = range(1, 101, 5)
avg_distances = []
min_distances = []
random.seed(0)
for dim in dimensions:
distances = random_distances(dim, 10000) # 10,000 random pairs
avg_distances.append(mean(distances)) # track the average
min_distances.append(min(distances)) # track the minimum
print(dim, min(distances), mean(distances), min(distances) / mean(distances))
| unlicense |
CKehl/pylearn2 | pylearn2/cross_validation/tests/test_cross_validation.py | 49 | 6767 | """
Tests for cross-validation module.
"""
import os
import tempfile
from pylearn2.config import yaml_parse
from pylearn2.testing.skip import skip_if_no_sklearn
def test_train_cv():
    """Test TrainCV class.

    Trains a three-layer stack of autoencoders (exercising TrainCV,
    TransformerDatasetCV, and StackedBlocksCV in turn), then a supervised
    MLP built on the pretrained layers (exercising PretrainedLayerCV).
    """
    skip_if_no_sklearn()
    # mkstemp returns an *open* OS-level file descriptor; close each one
    # right away since only the filename is used below. (The original code
    # overwrote `handle` and leaked all three descriptors.)
    handle, layer0_filename = tempfile.mkstemp()
    os.close(handle)
    handle, layer1_filename = tempfile.mkstemp()
    os.close(handle)
    handle, layer2_filename = tempfile.mkstemp()
    os.close(handle)

    # train the first hidden layer (unsupervised)
    # (test for TrainCV)
    trainer = yaml_parse.load(test_yaml_layer0 %
                              {'layer0_filename': layer0_filename})
    trainer.main_loop()

    # train the second hidden layer (unsupervised)
    # (test for TransformerDatasetCV)
    trainer = yaml_parse.load(test_yaml_layer1 %
                              {'layer0_filename': layer0_filename,
                               'layer1_filename': layer1_filename})
    trainer.main_loop()

    # train the third hidden layer (unsupervised)
    # (test for StackedBlocksCV)
    trainer = yaml_parse.load(test_yaml_layer2 %
                              {'layer0_filename': layer0_filename,
                               'layer1_filename': layer1_filename,
                               'layer2_filename': layer2_filename})
    trainer.main_loop()

    # train the full model (supervised)
    # (test for PretrainedLayerCV)
    trainer = yaml_parse.load(test_yaml_layer3 %
                              {'layer0_filename': layer0_filename,
                               'layer1_filename': layer1_filename,
                               'layer2_filename': layer2_filename})
    trainer.main_loop()

    # clean up
    os.remove(layer0_filename)
    os.remove(layer1_filename)
    # bug fix: layer2's temp file was previously never removed
    os.remove(layer2_filename)
test_yaml_layer0 = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 2,
},
},
model: !obj:pylearn2.models.autoencoder.Autoencoder {
nvis: 10,
nhid: 8,
act_enc: 'sigmoid',
act_dec: 'linear'
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
cost: !obj:pylearn2.costs.autoencoder.MeanSquaredReconstructionError {
},
},
save_path: %(layer0_filename)s,
}
"""
test_yaml_layer1 = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.TransformerDatasetCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 2,
},
},
transformers: !pkl: %(layer0_filename)s,
},
model: !obj:pylearn2.models.autoencoder.Autoencoder {
nvis: 8,
nhid: 6,
act_enc: 'sigmoid',
act_dec: 'linear'
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
cost: !obj:pylearn2.costs.autoencoder.MeanSquaredReconstructionError {
},
},
save_path: %(layer1_filename)s,
}
"""
test_yaml_layer2 = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.TransformerDatasetCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 2,
},
},
transformers: !obj:pylearn2.cross_validation.blocks.StackedBlocksCV {
layers: [
!pkl: %(layer0_filename)s,
!pkl: %(layer1_filename)s,
],
},
},
model: !obj:pylearn2.models.autoencoder.Autoencoder {
nvis: 6,
nhid: 4,
act_enc: 'sigmoid',
act_dec: 'linear'
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
cost: !obj:pylearn2.costs.autoencoder.MeanSquaredReconstructionError {
},
},
save_path: %(layer2_filename)s,
}
"""
test_yaml_layer3 = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
&train !obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 2,
},
},
model: !obj:pylearn2.models.mlp.MLP {
nvis: 10,
layers: [
!obj:pylearn2.cross_validation.mlp.PretrainedLayerCV {
layer_name: 'h0',
layer_content: !pkl: %(layer0_filename)s,
},
!obj:pylearn2.cross_validation.mlp.PretrainedLayerCV {
layer_name: 'h1',
layer_content: !pkl: %(layer1_filename)s,
},
!obj:pylearn2.cross_validation.mlp.PretrainedLayerCV {
layer_name: 'h2',
layer_content: !pkl: %(layer2_filename)s,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: 'y',
n_classes: 2,
irange: 0.,
},
],
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
},
}
"""
| bsd-3-clause |
manashmndl/Data-Science-45min-Intros | support-vector-machines-101/svm-example.py | 26 | 2219 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="Josh Montague"
__license__="MIT License"
import sys
import pandas as pd
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.svm import SVC
import matplotlib.pyplot as plt
try:
import seaborn as sns
except ImportError as e:
sys.stderr.write("seaborn not installed. Using default matplotlib templates.")
# cobbled together from refs:
# http://scikit-learn.org/stable/auto_examples/svm/plot_iris.html
# http://scikit-learn.org/stable/auto_examples/svm/plot_separating_hyperplane.html
# optional CLI arg: number of samples (larger runs use a wider cluster spread)
if len(sys.argv) > 1:
    samples = int( sys.argv[1] )
    c_std=2.0
else:
    samples = 10
    c_std=1.0
# two synthetic Gaussian blobs to separate
X, y = make_blobs(n_samples=samples, cluster_std=c_std, centers=2)
# make a plotting grid
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
# svm
clf = SVC(kernel='linear').fit(X, y)
# predict all points in grid
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# separating plane and margins
# hyperplane w0*x + w1*y + b = 0 rewritten as y = a*x - b/w1
w = clf.coef_[0]
a = -w[0] / w[1]
xxx = np.linspace(x_min, x_max)
yyy = a * xxx - (clf.intercept_[0]) / w[1]
# calculate the large margin boundaries defined by the support vectors
b = clf.support_vectors_[0]
yyy_down = a * xxx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yyy_up = a * xxx + (b[1] - a * b[0])
# plot margins
plt.figure(figsize=(8,6))
plt.plot(xxx, yyy, 'k-', linewidth=1)
plt.plot(xxx, yyy_down, 'k--', linewidth=1)
plt.plot(xxx, yyy_up, 'k--', linewidth=1)
# plot decision contours
Z = Z.reshape(xx.shape)
#plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
plt.contourf(xx, yy, Z, alpha=0.25)
# plot data
plt.scatter(X[:, 0], X[:, 1],
            s=100,
            c=y,
            alpha=0.8,
            cmap=plt.cm.Paired
            )
# plot support vectors
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
            s=300,
            facecolors='none'
            )
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xlabel('x')
plt.ylabel('y')
# SHOW ALL THE THINGS
plt.show()
| unlicense |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.