Dataset columns (each record below lists its fields in this order):

  repo_name : string, 6 to 67 characters
  path      : string, 5 to 185 characters
  copies    : string, 1 to 3 characters
  size      : string, 4 to 6 characters
  content   : string, 1.02k to 962k characters
  license   : string, 15 distinct values
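The records that follow can be handled programmatically. Below is a minimal sketch of loading and filtering such a dump with pandas; the file name "code_dump.parquet" is a placeholder and not part of the original source, and the assumption is simply that the six columns above are present.

# Minimal sketch (assumption: the dump has been exported to a Parquet file
# named "code_dump.parquet" containing the six columns listed above; the
# file name is a placeholder, not part of the original source).
import pandas as pd

df = pd.read_parquet("code_dump.parquet")

# "size" and "copies" are stored as strings in this dump, so cast before filtering.
df["size"] = df["size"].astype(int)

# Example: all BSD-licensed files smaller than 5 kB.
small_bsd = df[(df["license"] == "bsd-3-clause") & (df["size"] < 5000)]
print(small_bsd[["repo_name", "path", "size"]])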
douggeiger/gnuradio
gr-digital/examples/example_fll.py
49
5715
#!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys

try:
    import scipy
except ImportError:
    print "Error: could not import scipy (http://www.scipy.org/)"
    sys.exit(1)

try:
    import pylab
except ImportError:
    print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
    sys.exit(1)


class example_fll(gr.top_block):
    def __init__(self, N, sps, rolloff, ntaps, bw, noise,
                 foffset, toffset, poffset):
        gr.top_block.__init__(self)

        rrc_taps = filter.firdes.root_raised_cosine(
            sps, sps, 1.0, rolloff, ntaps)

        data = 2.0*scipy.random.randint(0, 2, N) - 1.0
        data = scipy.exp(1j*poffset) * data

        self.src = blocks.vector_source_c(data.tolist(), False)
        self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps)
        self.chn = channels.channel_model(noise, foffset, toffset)
        self.fll = digital.fll_band_edge_cc(sps, rolloff, ntaps, bw)

        self.vsnk_src = blocks.vector_sink_c()
        self.vsnk_fll = blocks.vector_sink_c()
        self.vsnk_frq = blocks.vector_sink_f()
        self.vsnk_phs = blocks.vector_sink_f()
        self.vsnk_err = blocks.vector_sink_f()

        self.connect(self.src, self.rrc, self.chn, self.fll, self.vsnk_fll)
        self.connect(self.rrc, self.vsnk_src)
        self.connect((self.fll, 1), self.vsnk_frq)
        self.connect((self.fll, 2), self.vsnk_phs)
        self.connect((self.fll, 3), self.vsnk_err)


def main():
    parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
    parser.add_option("-N", "--nsamples", type="int", default=2000,
                      help="Set the number of samples to process [default=%default]")
    parser.add_option("-S", "--sps", type="int", default=4,
                      help="Set the samples per symbol [default=%default]")
    parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
                      help="Set the rolloff factor [default=%default]")
    parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
                      help="Set the loop bandwidth [default=%default]")
    parser.add_option("-n", "--ntaps", type="int", default=45,
                      help="Set the number of taps in the filters [default=%default]")
    parser.add_option("", "--noise", type="eng_float", default=0.0,
                      help="Set the simulation noise voltage [default=%default]")
    parser.add_option("-f", "--foffset", type="eng_float", default=0.2,
                      help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
    parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
                      help="Set the simulation's timing offset [default=%default]")
    parser.add_option("-p", "--poffset", type="eng_float", default=0.0,
                      help="Set the simulation's phase offset [default=%default]")
    (options, args) = parser.parse_args()

    # Adjust N for the interpolation by sps
    options.nsamples = options.nsamples // options.sps

    # Set up the program-under-test
    put = example_fll(options.nsamples, options.sps,
                      options.rolloff, options.ntaps, options.bandwidth,
                      options.noise, options.foffset, options.toffset,
                      options.poffset)
    put.run()

    data_src = scipy.array(put.vsnk_src.data())
    data_err = scipy.array(put.vsnk_err.data())

    # Convert the FLL's LO frequency from rads/sec to Hz
    data_frq = scipy.array(put.vsnk_frq.data()) / (2.0*scipy.pi)

    # adjust this to align with the data. There are 2 filters of
    # ntaps long and the channel introduces another 4 sample delay.
    data_fll = scipy.array(put.vsnk_fll.data()[2*options.ntaps-4:])

    # Plot the FLL's LO frequency
    f1 = pylab.figure(1, figsize=(12, 10))
    s1 = f1.add_subplot(2, 2, 1)
    s1.plot(data_frq)
    s1.set_title("FLL LO")
    s1.set_xlabel("Samples")
    s1.set_ylabel("Frequency (normalized Hz)")

    # Plot the FLL's error
    s2 = f1.add_subplot(2, 2, 2)
    s2.plot(data_err)
    s2.set_title("FLL Error")
    s2.set_xlabel("Samples")
    s2.set_ylabel("FLL Loop error")

    # Plot the IQ symbols
    s3 = f1.add_subplot(2, 2, 3)
    s3.plot(data_src.real, data_src.imag, "o")
    s3.plot(data_fll.real, data_fll.imag, "rx")
    s3.set_title("IQ")
    s3.set_xlabel("Real part")
    s3.set_ylabel("Imag part")

    # Plot the symbols in time
    s4 = f1.add_subplot(2, 2, 4)
    s4.plot(data_src.real, "o-")
    s4.plot(data_fll.real, "rx-")
    s4.set_title("Symbols")
    s4.set_xlabel("Samples")
    s4.set_ylabel("Real Part of Signals")

    pylab.show()


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        pass
gpl-3.0
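The example above builds its BPSK test vector with scipy.random and scipy.exp, aliases that newer SciPy releases no longer expose. The following is a minimal sketch of the equivalent symbol generation using plain NumPy; the values N=2000 and poffset=0.0 mirror the script's defaults and are otherwise arbitrary.

# Equivalent BPSK test-vector generation with NumPy instead of the old
# scipy.random / scipy.exp aliases used in the example above.
# N and poffset mirror the script's defaults; they are not fixed by any API.
import numpy as np

N = 2000        # number of symbols (script default)
poffset = 0.0   # static phase offset in radians (script default)

data = 2.0 * np.random.randint(0, 2, N) - 1.0   # antipodal +/-1 symbols
data = np.exp(1j * poffset) * data              # apply the phase offset
# data.tolist() can then be fed to blocks.vector_source_c() as in the example.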
nguyentu1602/statsmodels
statsmodels/tools/tests/test_pca.py
25
13934
from __future__ import print_function, division

from unittest import TestCase
import warnings

import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_raises
from numpy.testing.decorators import skipif
import pandas as pd

try:
    import matplotlib.pyplot as plt
    missing_matplotlib = False
except ImportError:
    missing_matplotlib = True

from statsmodels.tools.pca import PCA
from statsmodels.tools.tests.results.datamlw import data, princomp1, princomp2
from statsmodels.compat.numpy import nanmean

DECIMAL_5 = .00001


class TestPCA(TestCase):
    @classmethod
    def setUpClass(cls):
        rs = np.random.RandomState()
        rs.seed(1234)
        k = 3
        n = 100
        t = 200
        lam = 2

        norm_rng = rs.standard_normal
        e = norm_rng((t, n))
        f = norm_rng((t, k))
        b = rs.standard_gamma(lam, size=(k, n)) / lam
        cls.x = f.dot(b) + e
        cls.x_copy = cls.x + 0.0
        cls.rs = rs

        k = 3
        n = 300
        t = 200
        lam = 2

        norm_rng = rs.standard_normal
        e = norm_rng((t, n))
        f = norm_rng((t, k))
        b = rs.standard_gamma(lam, size=(k, n)) / lam
        cls.x_wide = f.dot(b) + e

    @skipif(missing_matplotlib)
    def test_smoke_plot_and_repr(self):
        pc = PCA(self.x)
        fig = pc.plot_scree()
        fig = pc.plot_scree(ncomp=10)
        fig = pc.plot_scree(log_scale=False)
        fig = pc.plot_scree(cumulative=True)
        fig = pc.plot_rsquare()
        fig = pc.plot_rsquare(ncomp=5)
        # Additional smoke test
        pc.__repr__()
        pc = PCA(self.x, standardize=False)
        pc.__repr__()
        pc = PCA(self.x, standardize=False, demean=False)
        pc.__repr__()
        # Check data for no changes
        assert_equal(self.x, pc.data)

    def test_eig_svd_equiv(self):
        """
        Test leading components since the tail end can differ
        """
        pc_eig = PCA(self.x)
        pc_svd = PCA(self.x, method='svd')

        assert_allclose(pc_eig.projection, pc_svd.projection)
        assert_allclose(np.abs(pc_eig.factors[:, :2]),
                        np.abs(pc_svd.factors[:, :2]))
        assert_allclose(np.abs(pc_eig.coeff[:2, :]),
                        np.abs(pc_svd.coeff[:2, :]))
        assert_allclose(pc_eig.eigenvals, pc_svd.eigenvals)
        assert_allclose(np.abs(pc_eig.eigenvecs[:, :2]),
                        np.abs(pc_svd.eigenvecs[:, :2]))

        pc_svd = PCA(self.x, method='svd', ncomp=2)
        pc_nipals = PCA(self.x, method='nipals', ncomp=2)
        assert_allclose(np.abs(pc_nipals.factors),
                        np.abs(pc_svd.factors),
                        atol=DECIMAL_5)
        assert_allclose(np.abs(pc_nipals.coeff),
                        np.abs(pc_svd.coeff),
                        atol=DECIMAL_5)
        assert_allclose(pc_nipals.eigenvals,
                        pc_svd.eigenvals,
                        atol=DECIMAL_5)
        assert_allclose(np.abs(pc_nipals.eigenvecs),
                        np.abs(pc_svd.eigenvecs),
                        atol=DECIMAL_5)
        # Check data for no changes
        assert_equal(self.x, pc_svd.data)
        # Check data for no changes
        assert_equal(self.x, pc_eig.data)
        # Check data for no changes
        assert_equal(self.x, pc_nipals.data)

    def test_options(self):
        pc = PCA(self.x)
        pc_no_norm = PCA(self.x, normalize=False)
        assert_allclose(pc.factors.dot(pc.coeff),
                        pc_no_norm.factors.dot(pc_no_norm.coeff))
        princomp = pc.factors
        assert_allclose(princomp.T.dot(princomp), np.eye(100), atol=1e-5)
        weights = pc_no_norm.coeff
        assert_allclose(weights.T.dot(weights), np.eye(100), atol=1e-5)

        pc_10 = PCA(self.x, ncomp=10)
        assert_allclose(pc.factors[:, :10], pc_10.factors)
        assert_allclose(pc.coeff[:10, :], pc_10.coeff)
        assert_allclose(pc.rsquare[:(10 + 1)], pc_10.rsquare)
        assert_allclose(pc.eigenvals[:10], pc_10.eigenvals)
        assert_allclose(pc.eigenvecs[:, :10], pc_10.eigenvecs)

        pc = PCA(self.x, standardize=False, normalize=False)
        mu = self.x.mean(0)
        xdm = self.x - mu
        xpx = xdm.T.dot(xdm)
        val, vec = np.linalg.eigh(xpx)
        ind = np.argsort(val)
        ind = ind[::-1]
        val = val[ind]
        vec = vec[:, ind]
        assert_allclose(xdm, pc.transformed_data)
        assert_allclose(val, pc.eigenvals)
        assert_allclose(np.abs(vec), np.abs(pc.eigenvecs))
        assert_allclose(np.abs(pc.factors), np.abs(xdm.dot(vec)))
        assert_allclose(pc.projection, xdm + mu)

        pc = PCA(self.x, standardize=False, demean=False, normalize=False)
        x = self.x
        xpx = x.T.dot(x)
        val, vec = np.linalg.eigh(xpx)
        ind = np.argsort(val)
        ind = ind[::-1]
        val = val[ind]
        vec = vec[:, ind]
        assert_allclose(x, pc.transformed_data)
        assert_allclose(val, pc.eigenvals)
        assert_allclose(np.abs(vec), np.abs(pc.eigenvecs))
        assert_allclose(np.abs(pc.factors), np.abs(x.dot(vec)))

    def test_against_reference(self):
        """
        Test against MATLAB, which by default demeans but does not standardize
        """
        x = data.xo / 1000.0
        pc = PCA(x, normalize=False, standardize=False)

        ref = princomp1
        assert_allclose(np.abs(pc.factors), np.abs(ref.factors))
        assert_allclose(pc.factors.dot(pc.coeff) + x.mean(0), x)
        assert_allclose(np.abs(pc.coeff), np.abs(ref.coef.T))
        assert_allclose(pc.factors.dot(pc.coeff),
                        ref.factors.dot(ref.coef.T))

        pc = PCA(x[:20], normalize=False, standardize=False)
        mu = x[:20].mean(0)
        ref = princomp2
        assert_allclose(np.abs(pc.factors), np.abs(ref.factors))
        assert_allclose(pc.factors.dot(pc.coeff) + mu, x[:20])
        assert_allclose(np.abs(pc.coeff), np.abs(ref.coef.T))
        assert_allclose(pc.factors.dot(pc.coeff),
                        ref.factors.dot(ref.coef.T))

    def test_warnings_and_errors(self):
        with warnings.catch_warnings(record=True) as w:
            pc = PCA(self.x, ncomp=300)
            assert_equal(len(w), 1)

        with warnings.catch_warnings(record=True) as w:
            rs = self.rs
            x = rs.standard_normal((200, 1)) * np.ones(200)
            pc = PCA(x, method='eig')
            assert_equal(len(w), 1)

        assert_raises(ValueError, PCA, self.x, method='unknown')
        assert_raises(ValueError, PCA, self.x, missing='unknown')
        assert_raises(ValueError, PCA, self.x, tol=2.0)
        assert_raises(ValueError, PCA, np.nan * np.ones((200, 100)), tol=2.0)

    @skipif(missing_matplotlib)
    def test_pandas(self):
        pc = PCA(pd.DataFrame(self.x))
        pc1 = PCA(self.x)
        assert_equal(pc.factors.values, pc1.factors)
        fig = pc.plot_scree()
        fig = pc.plot_scree(ncomp=10)
        fig = pc.plot_scree(log_scale=False)
        fig = pc.plot_rsquare()
        fig = pc.plot_rsquare(ncomp=5)
        proj = pc.project(2)
        PCA(pd.DataFrame(self.x), ncomp=4, gls=True)
        PCA(pd.DataFrame(self.x), ncomp=4, standardize=False)

    def test_gls_and_weights(self):
        assert_raises(ValueError, PCA, self.x, gls=True)
        assert_raises(ValueError, PCA, self.x, weights=np.array([1.0, 1.0]))

        # Pre-standardize to make comparison simple
        x = (self.x - self.x.mean(0))
        x = x / (x ** 2.0).mean(0)
        pc_gls = PCA(x, ncomp=1, standardize=False, demean=False, gls=True)
        pc = PCA(x, ncomp=1, standardize=False, demean=False)
        errors = x - pc.projection
        var = (errors ** 2.0).mean(0)
        weights = 1.0 / var
        weights = weights / np.sqrt((weights ** 2.0).mean())
        assert_allclose(weights, pc_gls.weights)
        assert_equal(x, pc_gls.data)
        assert_equal(x, pc.data)

        pc_weights = PCA(x, ncomp=1, standardize=False, demean=False, weights=weights)
        assert_allclose(weights, pc_weights.weights)
        assert_allclose(np.abs(pc_weights.factors), np.abs(pc_gls.factors))

    def test_wide(self):
        pc = PCA(self.x_wide)
        assert_equal(pc.factors.shape[1], self.x_wide.shape[0])
        assert_equal(pc.eigenvecs.shape[1], min(np.array(self.x_wide.shape)))

        pc = PCA(pd.DataFrame(self.x_wide))
        assert_equal(pc.factors.shape[1], self.x_wide.shape[0])
        assert_equal(pc.eigenvecs.shape[1], min(np.array(self.x_wide.shape)))

    def test_projection(self):
        pc = PCA(self.x, ncomp=5)
        mu = self.x.mean(0)
        demean_x = self.x - mu
        coef = np.linalg.pinv(pc.factors).dot(demean_x)
        direct = pc.factors.dot(coef)
        assert_allclose(pc.projection, direct + mu)

        pc = PCA(self.x, standardize=False, ncomp=5)
        coef = np.linalg.pinv(pc.factors).dot(demean_x)
        direct = pc.factors.dot(coef)
        assert_allclose(pc.projection, direct + mu)

        pc = PCA(self.x, standardize=False, demean=False, ncomp=5)
        coef = np.linalg.pinv(pc.factors).dot(self.x)
        direct = pc.factors.dot(coef)
        assert_allclose(pc.projection, direct)

        pc = PCA(self.x, ncomp=5, gls=True)
        mu = self.x.mean(0)
        demean_x = self.x - mu
        coef = np.linalg.pinv(pc.factors).dot(demean_x)
        direct = pc.factors.dot(coef)
        assert_allclose(pc.projection, direct + mu)

        pc = PCA(self.x, standardize=False, ncomp=5)
        coef = np.linalg.pinv(pc.factors).dot(demean_x)
        direct = pc.factors.dot(coef)
        assert_allclose(pc.projection, direct + mu)

        pc = PCA(self.x, standardize=False, demean=False, ncomp=5, gls=True)
        coef = np.linalg.pinv(pc.factors).dot(self.x)
        direct = pc.factors.dot(coef)
        assert_allclose(pc.projection, direct)

        # Test error for too many factors
        project = pc.project
        assert_raises(ValueError, project, 6)

    def test_replace_missing(self):
        x = self.x.copy()
        x[::5, ::7] = np.nan

        pc = PCA(x, missing='drop-row')
        x_dropped_row = x[np.logical_not(np.any(np.isnan(x), 1))]
        pc_dropped = PCA(x_dropped_row)
        assert_equal(pc.projection, pc_dropped.projection)
        assert_equal(x, pc.data)

        pc = PCA(x, missing='drop-col')
        x_dropped_col = x[:, np.logical_not(np.any(np.isnan(x), 0))]
        pc_dropped = PCA(x_dropped_col)
        assert_equal(pc.projection, pc_dropped.projection)
        assert_equal(x, pc.data)

        pc = PCA(x, missing='drop-min')
        if x_dropped_row.size > x_dropped_col.size:
            x_dropped_min = x_dropped_row
        else:
            x_dropped_min = x_dropped_col
        pc_dropped = PCA(x_dropped_min)
        assert_equal(pc.projection, pc_dropped.projection)
        assert_equal(x, pc.data)

        pc = PCA(x, ncomp=3, missing='fill-em')
        missing = np.isnan(x)
        mu = nanmean(x, axis=0)
        errors = x - mu
        sigma = np.sqrt(nanmean(errors ** 2, axis=0))
        x_std = errors / sigma
        x_std[missing] = 0.0
        last = x_std[missing]
        delta = 1.0
        count = 0
        while delta > 5e-8:
            pc_temp = PCA(x_std, ncomp=3, standardize=False, demean=False)
            x_std[missing] = pc_temp.projection[missing]
            current = x_std[missing]
            diff = current - last
            delta = np.sqrt(np.sum(diff ** 2)) / np.sqrt(np.sum(current ** 2))
            last = current
            count += 1
        x = self.x + 0.0
        projection = pc_temp.projection * sigma + mu
        x[missing] = projection[missing]
        assert_allclose(pc._adjusted_data, x)
        # Check data for no changes
        assert_equal(self.x, self.x_copy)

        x = self.x
        pc = PCA(x)
        pc_dropped = PCA(x, missing='drop-row')
        assert_allclose(pc.projection, pc_dropped.projection, atol=DECIMAL_5)

        pc_dropped = PCA(x, missing='drop-col')
        assert_allclose(pc.projection, pc_dropped.projection, atol=DECIMAL_5)

        pc_dropped = PCA(x, missing='drop-min')
        assert_allclose(pc.projection, pc_dropped.projection, atol=DECIMAL_5)

        pc = PCA(x, ncomp=3)
        pc_dropped = PCA(x, ncomp=3, missing='fill-em')
        assert_allclose(pc.projection, pc_dropped.projection, atol=DECIMAL_5)

        # Test too many missing for missing='fill-em'
        x = self.x.copy()
        x[:, :] = np.nan
        assert_raises(ValueError, PCA, x, missing='drop-row')
        assert_raises(ValueError, PCA, x, missing='drop-col')
        assert_raises(ValueError, PCA, x, missing='drop-min')
        assert_raises(ValueError, PCA, x, missing='fill-em')

    def test_rsquare(self):
        x = self.x + 0.0
        mu = x.mean(0)
        x_demean = x - mu
        std = np.std(x, 0)
        x_std = x_demean / std

        pc = PCA(self.x)
        nvar = x.shape[1]
        rsquare = np.zeros(nvar + 1)
        tss = np.sum(x_std ** 2)
        for i in range(nvar + 1):
            errors = x_std - pc.project(i, transform=False, unweight=False)
            rsquare[i] = 1.0 - np.sum(errors ** 2) / tss
        assert_allclose(rsquare, pc.rsquare)

        pc = PCA(self.x, standardize=False)
        tss = np.sum(x_demean ** 2)
        for i in range(nvar + 1):
            errors = x_demean - pc.project(i, transform=False, unweight=False)
            rsquare[i] = 1.0 - np.sum(errors ** 2) / tss
        assert_allclose(rsquare, pc.rsquare)

        pc = PCA(self.x, standardize=False, demean=False)
        tss = np.sum(x ** 2)
        for i in range(nvar + 1):
            errors = x - pc.project(i, transform=False, unweight=False)
            rsquare[i] = 1.0 - np.sum(errors ** 2) / tss
        assert_allclose(rsquare, pc.rsquare)
bsd-3-clause
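test_options above checks that, without standardization or normalization, the PCA factors equal the demeaned data projected onto the eigenvectors of X'X. The following NumPy-only sketch illustrates that identity on random data; it does not call the statsmodels PCA class, so nothing here depends on its API, and the array shapes simply mirror the test fixture.

# NumPy-only illustration of the identity verified in test_options:
# for demeaned data, the un-normalized principal factors are xdm @ eigvecs(X'X),
# with eigenvalues/eigenvectors sorted in descending order.
import numpy as np

rng = np.random.RandomState(1234)
x = rng.standard_normal((200, 100))     # shapes mirror the test fixture

mu = x.mean(0)
xdm = x - mu                            # demean, as PCA(standardize=False) does
val, vec = np.linalg.eigh(xdm.T.dot(xdm))
order = np.argsort(val)[::-1]           # eigh returns ascending order
val, vec = val[order], vec[:, order]

factors = xdm.dot(vec)                  # un-normalized principal factors
# Reconstructing from all components recovers the original data exactly.
assert np.allclose(factors.dot(vec.T) + mu, x)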
shansixiong/geosearch
geosearch/extract.py
2
3972
from bs4 import BeautifulSoup
import urllib.request
import re
import pandas as pd
import time


def timeit(method):
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        print('%r %2.2f sec' % (method.__name__, te - ts))
        return result
    return timed


def next_page(url):
    '''Given the url of a geonames.org page, find the next page.
    If the total exceed 5000, the function will stop,
    because geonames.org does not support search over 5000'''
    geonames = urllib.request.urlopen(url).read()
    soup = BeautifulSoup(geonames, "lxml")
    soup = soup.find("div", id="search")
    link = soup.find_all('a', href=True)
    next_or_pre = link[-1].getText()
    if "next >" != next_or_pre:
        return
    innerlink = link[-1]["href"]
    '''extract the startrow of the next page'''
    start_page = re.search(r"startRow=(.*?)$", innerlink)
    if start_page is None:
        print("This is the last page")
        return
    start_page = innerlink[start_page.start() + 9: start_page.end()]
    if int(start_page) > 5000:
        print("Searching Exceed 5000")
        return
    print(innerlink)
    link = "http://www.geonames.org" + innerlink + "&maxRows=500"
    return link


@timeit
def get_page(url):
    '''Given the url of a geonames.org page, parse the data from the webpage'''
    country = []
    geonames = urllib.request.urlopen(url).read()
    soup = BeautifulSoup(geonames, "lxml")
    soup = soup.find("div", id="search")
    countries = soup.find_all("table", class_="restable")
    table = countries[1].find_all("tr")
    for rows in table:
        try:
            cur_name = rows.find_all("td")[1]
            cur_feature = rows.find_all("td")[3]
        except IndexError:
            continue
        if cur_name is None or cur_feature is None:
            continue
        cur_name_ls = cur_name.find_all("a", href=True)
        cur_name = cur_name_ls[0]
        if len(cur_name_ls) == 1:
            cur_wiki_link = None
        else:
            cur_wiki_link = cur_name_ls[1]
        if cur_name is None:
            cur_name = "None"
        else:
            cur_name = cur_name.getText()
        if cur_wiki_link is None:
            cur_wiki_link = "None"
        else:
            cur_wiki_link = cur_wiki_link["href"]
        if "http://en.wikipedia.org/wiki/" not in cur_wiki_link:
            cur_wiki_link = "None"
        cur_feature = cur_feature.getText()
        cur_feature = re.split("population|elevation", cur_feature)[0]
        country.append([cur_name, cur_wiki_link, cur_feature])
    return country


def get_country(country_name):
    '''Get top 5000 place for each country'''
    country_ls = []
    if not isinstance(country_name, str):
        country_name = "NA"
    url = "http://www.geonames.org/advanced-search.html?q=&featureClass=A&startRow=0&maxRows=500&country=" + country_name
    nextpage = url
    while True:
        country_ls += get_page(nextpage)
        print(0)
        nextpage = next_page(nextpage)
        if nextpage is None:
            break
    region = []
    wiki_link = []
    feature = []
    for x in country_ls:
        region.append(x[0])
        wiki_link.append(x[1])
        feature.append(x[2])
    return [country_name, region, wiki_link, feature]


def get_all_countries():
    df_country = pd.DataFrame(pd.read_csv("all_countries.csv"))
    countries = df_country["abb"]
    total_ls = []
    for country in countries:
        print(country)
        if not isinstance(country, str):
            print(country)
        total_ls.append(get_country(country))
    df = pd.DataFrame(total_ls, columns=["country", "region", "wiki_link", "category"])
    df.to_csv("database.csv")
    df.to_json("database.json")


get_all_countries()


def get_index(ls, ele):
    for i in range(0, len(ls)):
        if ele == ls[i]:
            return i
    return
mit
automl/paramsklearn
ParamSklearn/components/regression/decision_tree.py
1
4321
import numpy as np

from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, \
    UniformIntegerHyperparameter, CategoricalHyperparameter, \
    UnParametrizedHyperparameter, Constant

from ParamSklearn.components.base import \
    ParamSklearnRegressionAlgorithm
from ParamSklearn.constants import *

# get our own forests to replace the sklearn ones
from sklearn.tree import DecisionTreeRegressor


class DecisionTree(ParamSklearnRegressionAlgorithm):
    def __init__(self, criterion, splitter, max_features, max_depth,
                 min_samples_split, min_samples_leaf, min_weight_fraction_leaf,
                 max_leaf_nodes, random_state=None):
        self.criterion = criterion
        self.splitter = splitter
        self.max_features = max_features
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.max_leaf_nodes = max_leaf_nodes
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.random_state = random_state
        self.estimator = None

    def fit(self, X, y, sample_weight=None):
        self.max_features = float(self.max_features)
        if self.max_depth == "None":
            # None means unlimited depth; the local max_depth must also be set
            # here, otherwise the DecisionTreeRegressor call below raises a
            # NameError on this branch.
            self.max_depth = max_depth = None
        else:
            num_features = X.shape[1]
            max_depth = max(1, int(np.round(self.max_depth * num_features, 0)))
        self.min_samples_split = int(self.min_samples_split)
        self.min_samples_leaf = int(self.min_samples_leaf)
        if self.max_leaf_nodes == "None":
            self.max_leaf_nodes = None
        else:
            self.max_leaf_nodes = int(self.max_leaf_nodes)
        self.min_weight_fraction_leaf = float(self.min_weight_fraction_leaf)

        self.estimator = DecisionTreeRegressor(
            criterion=self.criterion,
            max_depth=max_depth,
            min_samples_split=self.min_samples_split,
            min_samples_leaf=self.min_samples_leaf,
            max_leaf_nodes=self.max_leaf_nodes,
            random_state=self.random_state)
        self.estimator.fit(X, y, sample_weight=sample_weight)
        return self

    def predict(self, X):
        if self.estimator is None:
            raise NotImplementedError
        return self.estimator.predict(X)

    @staticmethod
    def get_properties(dataset_properties=None):
        return {'shortname': 'DT',
                'name': 'Decision Tree Classifier',
                'handles_missing_values': False,
                'handles_nominal_values': False,
                'handles_numerical_features': True,
                'prefers_data_scaled': False,
                # TODO find out if this is good because of sparcity...
                'prefers_data_normalized': False,
                'handles_regression': True,
                'handles_classification': False,
                'handles_multiclass': False,
                'handles_multilabel': False,
                'is_deterministic': False,
                'handles_sparse': True,
                'input': (DENSE, SPARSE, UNSIGNED_DATA),
                'output': (PREDICTIONS,),
                # TODO find out what is best used here!
                # But rather fortran or C-contiguous?
                'preferred_dtype': np.float32}

    @staticmethod
    def get_hyperparameter_search_space(dataset_properties=None):
        cs = ConfigurationSpace()
        criterion = cs.add_hyperparameter(Constant('criterion', 'mse'))
        splitter = cs.add_hyperparameter(Constant("splitter", "best"))
        max_features = cs.add_hyperparameter(Constant('max_features', 1.0))
        max_depth = cs.add_hyperparameter(UniformFloatHyperparameter(
            'max_depth', 0., 2., default=0.5))
        min_samples_split = cs.add_hyperparameter(UniformIntegerHyperparameter(
            "min_samples_split", 2, 20, default=2))
        min_samples_leaf = cs.add_hyperparameter(UniformIntegerHyperparameter(
            "min_samples_leaf", 1, 20, default=1))
        min_weight_fraction_leaf = cs.add_hyperparameter(
            Constant("min_weight_fraction_leaf", 0.0))
        max_leaf_nodes = cs.add_hyperparameter(
            UnParametrizedHyperparameter("max_leaf_nodes", "None"))
        return cs
bsd-3-clause
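The component above encodes max_depth as a float in [0, 2] and rescales it by the number of features at fit time. The short sketch below applies that mapping directly to scikit-learn's DecisionTreeRegressor; the toy data and the 0.5 value (the component's search-space default) are illustrative only, and the criterion argument is left at its sklearn default for portability across versions.

# Illustration of the max_depth mapping used in DecisionTree.fit() above:
# a float hyperparameter in [0, 2] is scaled by the number of features.
import numpy as np
from sklearn.tree import DecisionTreeRegressor

X = np.random.rand(100, 8)        # toy data: 100 samples, 8 features
y = np.random.rand(100)

max_depth_fraction = 0.5          # search-space default in the component above
max_depth = max(1, int(np.round(max_depth_fraction * X.shape[1], 0)))  # -> 4

reg = DecisionTreeRegressor(max_depth=max_depth, min_samples_split=2,
                            min_samples_leaf=1, random_state=1)
reg.fit(X, y)
print(reg.predict(X[:3]))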
seba-1511/stockMarket
Scripts/plot.py
2
1760
#-*- coding: utf-8 -*-

import matplotlib.pyplot as uniquePyPlot


def plot(x, y, title='', xlabel='', ylabel=''):
    uniquePyPlot.clf()
    figure = uniquePyPlot
    figure.scatter(x, y)
    figure.title(title)
    figure.xlabel(xlabel)
    figure.ylabel(ylabel)
    figure.autoscale(tight=True)
    figure.grid()
    figure.savefig(title + '.svg', format='svg')


# Plots multiple arrays. (Max. 5)
# plotArray[0][i]: X values for plot n°i
# plotArray[1][i]: Y values for plot n°i
def multiPlot(plotArray, title='', xlabel='', ylabel=''):
    uniquePyPlot.clf()
    figure = uniquePyPlot
    figure.title(title)
    figure.xlabel(xlabel)
    figure.ylabel(ylabel)
    markers = 'xo><v'
    colors = 'rbgyo'
    if len(plotArray[0]) > 5:
        print 'More than 5 plots given as parameters (plotArray)'
        return False
    for i in xrange(len(plotArray[0])):
        figure.scatter(
            plotArray[0][i],
            plotArray[1][i],
            marker=markers[i],
            c=colors[i]
        )
    figure.autoscale(tight=True)
    figure.grid()
    figure.savefig(title + '.svg', format='svg')


def plotLines(plotArray, title='', xlabel='', ylabel=''):
    uniquePyPlot.clf()
    figure = uniquePyPlot
    figure.title(title)
    figure.xlabel(xlabel)
    figure.ylabel(ylabel)
    markers = '+,.1234'
    colors = 'rbgyo'
    if len(plotArray[0]) > 5:
        print 'More than 5 plots given as parameters (plotArray)'
        return False
    for i in xrange(len(plotArray[0])):
        figure.plot(
            plotArray[0][i],
            plotArray[1][i],
            marker=markers[i],
            c=colors[i]
        )
    # figure.autoscale(tight=True)
    figure.grid()
    figure.savefig(title + '.svg', format='svg')
mit
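The comment block in the module above describes the plotArray layout: plotArray[0][i] holds the x values of the i-th series and plotArray[1][i] the matching y values. A small usage sketch of that structure follows, written for Python 2 to match the module; the series values are made up, and importing the module as "plot" assumes Scripts/plot.py is on the import path.

# Usage sketch for the plotArray convention documented above
# (Python 2, matching the module; the data values are made-up examples,
# and the "plot" import assumes Scripts/plot.py is importable).
from plot import multiPlot

xs = [[0, 1, 2, 3], [0, 1, 2, 3]]    # plotArray[0]: x values per series
ys = [[1, 2, 4, 8], [3, 1, 4, 1]]    # plotArray[1]: y values per series

# Produces "Two series.svg" with one scatter per series (max. 5 series).
multiPlot([xs, ys], title='Two series', xlabel='t', ylabel='price')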
vivekmishra1991/scikit-learn
examples/decomposition/plot_ica_blind_source_separation.py
349
2228
""" ===================================== Blind source separation using FastICA ===================================== An example of estimating sources from noisy data. :ref:`ICA` is used to estimate sources given noisy measurements. Imagine 3 instruments playing simultaneously and 3 microphones recording the mixed signals. ICA is used to recover the sources ie. what is played by each instrument. Importantly, PCA fails at recovering our `instruments` since the related signals reflect non-Gaussian processes. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from scipy import signal from sklearn.decomposition import FastICA, PCA ############################################################################### # Generate sample data np.random.seed(0) n_samples = 2000 time = np.linspace(0, 8, n_samples) s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal S = np.c_[s1, s2, s3] S += 0.2 * np.random.normal(size=S.shape) # Add noise S /= S.std(axis=0) # Standardize data # Mix data A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix X = np.dot(S, A.T) # Generate observations # Compute ICA ica = FastICA(n_components=3) S_ = ica.fit_transform(X) # Reconstruct signals A_ = ica.mixing_ # Get estimated mixing matrix # We can `prove` that the ICA model applies by reverting the unmixing. assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_) # For comparison, compute PCA pca = PCA(n_components=3) H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components ############################################################################### # Plot results plt.figure() models = [X, S, S_, H] names = ['Observations (mixed signal)', 'True Sources', 'ICA recovered signals', 'PCA recovered signals'] colors = ['red', 'steelblue', 'orange'] for ii, (model, name) in enumerate(zip(models, names), 1): plt.subplot(4, 1, ii) plt.title(name) for sig, color in zip(model.T, colors): plt.plot(sig, color=color) plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46) plt.show()
bsd-3-clause
jordancheah/zipline
tests/utils/test_factory.py
34
2175
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from unittest import TestCase

import pandas as pd
import pytz
import numpy as np

from zipline.utils.factory import (load_from_yahoo,
                                   load_bars_from_yahoo)


class TestFactory(TestCase):
    def test_load_from_yahoo(self):
        stocks = ['AAPL', 'GE']
        start = pd.datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc)
        end = pd.datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
        data = load_from_yahoo(stocks=stocks, start=start, end=end)

        assert data.index[0] == pd.Timestamp('1993-01-04 00:00:00+0000')
        assert data.index[-1] == pd.Timestamp('2001-12-31 00:00:00+0000')
        for stock in stocks:
            assert stock in data.columns

        np.testing.assert_raises(
            AssertionError, load_from_yahoo, stocks=stocks,
            start=end, end=start
        )

    def test_load_bars_from_yahoo(self):
        stocks = ['AAPL', 'GE']
        start = pd.datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc)
        end = pd.datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
        data = load_bars_from_yahoo(stocks=stocks, start=start, end=end)

        assert data.major_axis[0] == pd.Timestamp('1993-01-04 00:00:00+0000')
        assert data.major_axis[-1] == pd.Timestamp('2001-12-31 00:00:00+0000')
        for stock in stocks:
            assert stock in data.items
        for ohlc in ['open', 'high', 'low', 'close', 'volume', 'price']:
            assert ohlc in data.minor_axis

        np.testing.assert_raises(
            AssertionError, load_bars_from_yahoo, stocks=stocks,
            start=end, end=start
        )
apache-2.0
tejasckulkarni/hydrology
ch_591/ch_591_ver_2_ daily.py
2
39683
__author__ = 'kiruba' import numpy as np import matplotlib.pyplot as plt import pandas as pd import itertools from spread import spread from scipy.optimize import curve_fit import math from matplotlib import rc import email.utils as eutils import time import datetime from datetime import timedelta import scipy as sp import meteolib as met import evaplib from bisect import bisect_left import matplotlib as mpl import Pysolar as ps # latex parameters rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']}) rc('text', usetex=True) plt.rc('text', usetex=True) plt.rc('font', family='serif', size=18) # Weather file weather_file = '/media/kiruba/New Volume/ACCUWA_Data/weather_station/smgollahalli/corrected_weather.csv' # Rain file rain_file = '/media/kiruba/New Volume/ACCUWA_Data/weather_station/smgollahalli/corrected_rain.csv' # convert to pandas dataframe weather_df = pd.read_csv(weather_file, sep=',', header=0) # set index date_format = '%Y-%m-%d %H:%M:%S' daily_format = '%Y-%m-%d' # print weather_df.columns.values[0] # weather_df.columns.values[0] = 'Date_Time' # print weather_df.head() weather_df['Date_Time'] = pd.to_datetime(weather_df['Date_Time'], format=date_format) weather_df.set_index(weather_df['Date_Time'], inplace=True) # sort based on index weather_df.sort_index(inplace=True) # drop date time column weather_df = weather_df.drop('Date_Time', 1) # print weather_df.head() # print weather_df['2014-06-30'] # print weather_df.head() # Rain data frame rain_df = pd.read_csv(rain_file, sep=',', header=0) # set index rain_df['Date_Time'] = pd.to_datetime(rain_df['Date_Time'], format=date_format) rain_df.set_index(rain_df['Date_Time'], inplace=True) # sort based on index rain_df.sort_index(inplace=True) # drop date time column rain_df = rain_df.drop('Date_Time', 1) # print rain_df.head() """ Check dam calibration """ # Polynomial fitting function def polyfit(x, y, degree): results = {} coeffs = np.polyfit(x, y, degree) results['polynomial'] = coeffs.tolist() #r squared p = np.poly1d(coeffs) yhat = p(x) ybar = np.sum(y)/len(y) ssreg = np.sum((yhat-ybar)**2) sstot = np.sum((y-ybar)**2) results['determination'] = ssreg/sstot return results #check dam calibration values y_cal = [100, 400, 1000, 1600, 2250, 2750, 3000] x_cal = [2036, 2458, 3025, 4078, 5156, 5874, 6198] a_stage = polyfit(x_cal, y_cal, 1) # coefficients of polynomial are stored in following list coeff_cal = a_stage['polynomial'] def myround(a, decimals=1): return np.around(a-10**(-(decimals+5)), decimals=decimals) resolution_ody = 0.0008 def read_correct_ch_dam_data(csv_file): """ Function to read, calibrate and convert time format (day1 24:00:00 to day 2 00:00:00) in check dam data :param csv_file: :return: calibrated and time corrected data """ water_level = pd.read_csv(csv_file, skiprows=9, sep=',', header=0, names=['scan no', 'date', 'time', 'raw value', 'calibrated value']) water_level['calibrated value'] = (water_level['raw value'] *coeff_cal[0]) + coeff_cal[1] #in cm water_level['calibrated value'] = np.round(water_level['calibrated value']/resolution_ody)*resolution_ody water_level['calibrated value'] /= 1000.0 water_level['calibrated value'] = myround(a=water_level['calibrated value'], decimals=2) # #change the column name water_level.columns.values[4] = 'stage(m)' # create date time index format = '%d/%m/%Y %H:%M:%S' c_str = ' 24:00:00' for index, row in water_level.iterrows(): x_str = row['time'] if x_str == c_str: # convert string to datetime object r_date = pd.to_datetime(row['date'], format='%d/%m/%Y ') # add 
1 day c_date = r_date + timedelta(days=1) # convert datetime to string c_date = c_date.strftime('%d/%m/%Y ') c_time = ' 00:00:00' water_level['date'][index] = c_date water_level['time'][index] = c_time water_level['date_time'] = pd.to_datetime(water_level['date'] + water_level['time'], format=format) water_level.set_index(water_level['date_time'], inplace=True) # # drop unneccessary columns before datetime aggregation water_level.drop(['scan no', 'date', 'time', 'raw value', 'date_time'], inplace=True, axis=1) return water_level ## Read check dam data block_1 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/2525_008_001.CSV' water_level_1 = read_correct_ch_dam_data(block_1) block_2 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/2525_008_002.CSV' water_level_2 = read_correct_ch_dam_data(block_2) block_3 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/2525_008_003.CSV' water_level_3 = read_correct_ch_dam_data(block_3) block_4 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/2525_008_004.CSV' water_level_4 = read_correct_ch_dam_data(block_4) block_5 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/2525_008_005.CSV' water_level_5 = read_correct_ch_dam_data(block_5) block_6 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/2525_008_006.CSV' water_level_6 = read_correct_ch_dam_data(block_6) water_level = pd.concat([water_level_1, water_level_2, water_level_3, water_level_4, water_level_5, water_level_6], axis=0) water_level = water_level.sort() # water_level = water_level['2014-05-14 18:30:00':'2014-09-10 23:30:00'] """ Fill in missing values interpolate """ start_time = min(water_level.index) end_time = max(water_level.index) new_index = pd.date_range(start=start_time, end=end_time, freq='30min') water_level = water_level.reindex(new_index, method=None) water_level = water_level.interpolate(method='time') new_index = pd.date_range(start=start_time.strftime('%Y-%m-%d %H:%M'), end=end_time.strftime('%Y-%m-%d %H:%M'), freq='30Min') water_level = water_level.set_index(new_index) water_level.index.name = 'Date' # raise SystemExit(0) """ Join weather and rain data """ weather_df = weather_df.join(rain_df, how='right') weather_df = weather_df[min(water_level.index).strftime(daily_format): max(water_level.index).strftime(daily_format)] # print weather_df['2014-06-30'] # weather_df = weather_df[min(water_level.index): max(water_level.index)] weather_df = weather_df.join(water_level, how='right') # print weather_df['2014-06-30'] # print weather_df.head(20) # weather_df.to_csv() """ Remove Duplicates """ # check for duplicates # df2 = dry_weather.groupby(level=0).filter(lambda x: len(x) > 1) # print(df2) weather_df['index'] = weather_df.index weather_df.drop_duplicates(subset='index', take_last=True, inplace=True) del weather_df['index'] weather_df = weather_df.sort() """ Evaporation from open water Equation according to J.D. Valiantzas (2006). Simplified versions for the Penman evaporation equation using routine weather data. J. Hydrology 331: 690-702. Following Penman (1948,1956). Albedo set at 0.06 for open water. 
Input (measured at 2 m height): - airtemp: (array of) average air temperatures [Celsius] - rh: (array of) average relative humidity [%] - airpress: (array of) average air pressure data [Pa] - Rs: (array of) incoming solar radiation [J/m2/day] - N: (array of) maximum daily sunshine hours [h] - Rext: (array of) daily extraterrestrial radiation [J/m2/day] - u: (array of) daily average wind speed at 2 m [m/s] - Z: (array of) site elevation [m a.s.l.], default is zero... Output: - E0: (array of) Penman open water evaporation values [mm/day] """ """ air pressure (Pa) = 101325(1-2.25577 10^-5 h)^5.25588 h = altitude above sea level (m) http://www.engineeringtoolbox.com/air-altitude-pressure-d_462.html mean elevation over watershed = 803.441589 m Elevation at the check dam = 799 m """ z = 799 p = (1-(2.25577*(10**-5)*z)) air_p_pa = 101325*(p**5.25588) # give air pressure value weather_df['AirPr(Pa)'] = air_p_pa """ Half hourly Extraterrestrial Radiation Calculation(J/m2/30min) """ SC_default = 1367.0 # Solar constant in W/m^2 is 1367.0. def extraterrestrial_irrad(local_datetime, latitude_deg, longitude_deg): """ Calculates extraterrestrial radiation in MJ/m2/timeperiod :param local_datetime: datetime object :param latitude_deg: in decimal degree :param longitude_deg: in decimal degree :return: Extra terrestrial radiation in MJ/m2/timeperiod """ S = 0.0820 # MJ m-2 min-1 lat_rad = latitude_deg*(math.pi/180) day = ps.solar.GetDayOfYear(local_datetime) hour = float(local_datetime.hour) minute = float(local_datetime.minute) b = ((2*math.pi)*(day-81))/364 sc = 0.1645*(math.sin(2*b)) - 0.1255*(math.cos(b)) - 0.025*(math.sin(b)) # seasonal correction in hour lz = 270 # for India longitude of local time zone in degrees west of greenwich lm = (180+(180-longitude_deg)) # longitude of measurement site t = (hour + (minute/60)) - 0.25 t1 = 0.5 # 0.5 for 30 minute 1 for hourly period w = (math.pi/12)*((t + (0.0667*(lz-lm))+ sc) - 12) w1 = w - ((math.pi*t1)/24) # solar time angle at beginning of period [rad] w2 = w + ((math.pi*t1)/24) # solar time angle at end of period [rad] dr = 1 + (0.033*math.cos((2*math.pi*day)/365)) # inverse relative distance Earth-Sun dt = 0.409*math.sin(((2*math.pi*day)/365) - 1.39) # solar declination in radian ws = math.acos(-math.tan(lat_rad)*math.tan(dt)) if (w > ws) or (w < -ws): Rext = 0.0 else: Rext = ((12*60)/math.pi)*S*dr*(((w2-w1)*math.sin(lat_rad)*math.sin(dt))+(math.cos(lat_rad)*math.cos(dt)*(math.sin(w2) - math.sin(w1)))) # MJm-2(30min)-1 return Rext ch_591_lat = 13.260196 ch_591_long = 77.512085 weather_df['Rext (MJ/m2/30min)'] = 0.000 for i in weather_df.index: weather_df['Rext (MJ/m2/30min)'][i.strftime('%Y-%m-%d %H:%M:%S')] = (extraterrestrial_irrad(local_datetime=i, latitude_deg=ch_591_lat, longitude_deg=ch_591_long)) # weather_df['Rext (MJ/m2/30min)'] = # weather_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/weather.csv') """ wind speed from km/h to m/s 1 kmph = 0.277778 m/s """ weather_df['Wind Speed (mps)'] = weather_df['Wind Speed (kmph)'] * 0.277778 """ Radiation unit conversion """ # the radiation units are actually in W/m2 and # not in W/mm2 as given by weather station, # so multiply with 30*60 seconds # to convert to MJ divide by 10^6 weather_df['Solar Radiation (MJ/m2/30min)'] = (weather_df['Solar Radiation (W/m2)'] * 1800)/(10**6) """ Average Temperature Calculation """ weather_df['Average Temp (C)'] = 0.5*(weather_df['Min Air Temperature (C)'] + weather_df['Max Air Temperature (C)']) weather_df.to_csv('/media/kiruba/New 
Volume/ACCUWA_Data/Checkdam_water_balance/591/weather.csv') """ Open water evaporation function for half hour Modified from evaplib.py http://python.hydrology-amsterdam.nl/moduledoc/index.html#module-evaplib """ def delta_calc(airtemp): """ Calculates slope of saturation vapour pressure curve at air temperature [kPa/Celsius] http://www.fao.org/docrep/x0490e/x0490e07.htm :param airtemp: Temperature in Celsius :return: slope of saturation vapour pressure curve [kPa/Celsius] """ l = sp.size(airtemp) if l < 2: temp = airtemp + 237.3 b = 0.6108*(math.exp((17.27*airtemp)/temp)) delta = (4098*b)/(temp**2) else: delta = sp.zeros(l) for i in range(0, l): temp = airtemp[i] + 237.3 b = 0.6108*(math.exp(17.27*airtemp[i])/temp) delta[i] = (4098*b)/(temp**2) return delta def half_hour_E0(airtemp = sp.array([]), rh = sp.array([]), airpress = sp.array([]), Rs = sp.array([]), Rext = sp.array([]), u =sp.array([]), Z=0.0): """ Function to calculate daily Penman open water evaporation (in mm/30min). Equation according to Shuttleworth, W. J. 2007. "Putting the 'Vap' into Evaporation." Hydrology and Earth System Sciences 11 (1): 210-44. doi:10.5194/hess-11-210-2007. :param airtemp: average air temperature [Celsius] :param rh: relative humidity[%] :param airpress: average air pressure[Pa] :param Rs: Incoming solar radiation [MJ/m2/30min] :param Rext: Extraterrestrial radiation [MJ/m2/30min] :param u: average wind speed at 2 m from ground [m/s] :param Z: site elevation, default is zero [metre] :return: Penman open water evaporation values [mm/30min] """ # Set constants albedo = 0.06 # open water albedo # Stefan boltzmann constant = 5.670373*10-8 J/m2/k4/s # http://en.wikipedia.org/wiki/Stefan-Boltzmann_constant # sigma = 5.670373*(10**-8) # J/m2/K4/s sigma = (1.02066714*(10**-10)) #Stefan Boltzmann constant MJ/m2/K4/30min # Calculate Delta, gamma and lambda DELTA = delta_calc(airtemp) # [Kpa/C] # Lambda = met.L_calc(airtemp)/(10**6) # [MJ/Kg] # gamma = met.gamma_calc(airtemp, rh, airpress)/1000 # Lambda = 2.501 -(0.002361*airtemp) # [MJ/kg] # gamma = (0.0016286 *(airpress/1000))/Lambda # Calculate saturated and actual water vapour pressure es = met.es_calc(airtemp) # [Pa] ea = met.ea_calc(airtemp,rh) # [Pa] #Determine length of array l = sp.size(airtemp) #Check if we have a single value or an array if l < 2: Lambda = 2.501 -(0.002361*airtemp) # [MJ/kg] gamma = (0.0016286 *(airpress/1000))/Lambda Rns = (1.0 - albedo)* Rs # shortwave component [MJ/m2/30min] #calculate clear sky radiation Rs0 Rs0 = (0.75+(2E-5*Z))*Rext f = (1.35*(Rs/Rs0))-0.35 epsilom = 0.34-(-0.14*sp.sqrt(ea/1000)) Rnl = f*epsilom*sigma*(airtemp+273.16)**4 # Longwave component [MJ/m2/30min] Rnet = Rns - Rnl Ea = (1 + (0.536*u))*((es/1000)-(ea/1000)) E0 = ((DELTA*Rnet) + gamma*(6.43*Ea))/(Lambda*(DELTA+gamma)) else: # Inititate output array E0 = sp.zeros(l) Rns = sp.zeros(l) Rs0 = sp.zeros(l) f = sp.zeros(l) epsilom = sp.zeros(l) Rnl = sp.zeros(l) Rnet = sp.zeros(l) Ea = sp.zeros(l) Lambda = sp.zeros(l) gamma = sp.zeros(l) for i in range(0,l): Lambda[i] = 2.501 -(0.002361*airtemp[i]) gamma[i] = (0.0016286 *(airpress[i]/1000))/Lambda[i] # calculate longwave radiation (MJ/m2/30min) Rns[i] = (1.0 - albedo) * Rs[i] # calculate clear sky radiation Rs0 Rs0[i] = (0.75 + (2E-5*Z)) f[i] = (1.35*(Rs[i]/Rs0[i]))-0.35 epsilom[i] = 0.34-(-0.14*sp.sqrt(ea[i]/1000)) Rnl[i] = f[i]*epsilom[i]*sigma*(airtemp[i]+273.16)**4 # Longwave component [MJ/m2/30min] Rnet[i] = Rns[i] - Rnl[i] Ea[i] = (1 + (0.536*u[i]))*((es[i]/1000)-(ea[i]/1000)) E0[i] = ((DELTA[i]*Rnet[i]) + 
gamma[i]*(6.43*Ea[i]))/(Lambda[i]*(DELTA[i]+gamma[i])) return E0 """ Half hourly Evaporation calculation """ airtemp = weather_df['Average Temp (C)'] hum = weather_df['Humidity (%)'] airpress = weather_df['AirPr(Pa)'] rs = weather_df['Solar Radiation (MJ/m2/30min)'] rext = weather_df['Rext (MJ/m2/30min)'] wind_speed = weather_df['Wind Speed (mps)'] weather_df['Evaporation (mm/30min)'] = half_hour_E0(airtemp=airtemp, rh=hum, airpress=airpress, Rs=rs, Rext=rext, u=wind_speed, Z=z) """ Plot Evaporation """ fig = plt.figure(figsize=(11.69, 8.27)) plt.plot_date(weather_df.index, weather_df['Evaporation (mm/30min)'], '-g', label='Evaporation (mm/30min)') plt.ylabel(r'\textbf{Evaporation ($mm/30min$)}') fig.autofmt_xdate(rotation=90) plt.title(r"Daily Evaporation for Check Dam - 591", fontsize=20) plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/evaporation_591_30min') # bar plots weather_sel_df = weather_df['2014-05-20':'2014-05-22'] fig = plt.figure(figsize=(11.69, 8.27)) plt.plot_date(weather_sel_df.index, weather_sel_df['Evaporation (mm/30min)'], '-g') fig.autofmt_xdate(rotation=90) plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/new_evaporation_591_may20_22') # plt.show() # """ # Remove Duplicates # """ # # check for duplicates # # df2 = dry_weather.groupby(level=0).filter(lambda x: len(x) > 1) # # print(df2) # weather_df['index'] = weather_df.index # weather_df.drop_duplicates(subset='index', take_last=True, inplace=True) # del weather_df['index'] # weather_df = weather_df.sort() """ Stage Volume relation estimation from survey data """ # neccessary functions def pairwise(iterable): """s -> (s0,s1), (s1,s2), (s2,s3), ...""" a, b = itertools.tee(iterable) next(b, None) return itertools.izip(a, b) """ Select data where stage is available, Remove Overflowing days """ weather_stage_avl_df = weather_df[min(water_level.index):max(water_level.index)] """ Convert observed stage to volume by linear interpolation """ # set stage as index stage_vol_df = pd.read_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/stage_vol_new.csv', sep=',', header=0, names=['sno', 'stage_m', 'total_vol_cu_m']) # print stage_vol_df stage_vol_df.drop('sno', inplace=True, axis=1) stage_vol_df.set_index(stage_vol_df['stage_m'], inplace=True) # function to find containing intervals def find_range(array, ab): if ab < max(array): start = bisect_left(array, ab) return array[start-1], array[start] else: return min(array), max(array) # print weather_stage_avl_df.head() water_balance_df = weather_stage_avl_df[['Rain Collection (mm)', 'Evaporation (mm/30min)', 'stage(m)']] # print find_range(stage_vol_df['stage_m'].tolist(), max(water_balance_df['stage(m)'])) water_balance_df['volume (cu.m)'] = 0.000 stage_cutoff = 0.1 for index, row in water_balance_df.iterrows(): # print index obs_stage = row['stage(m)'] # observed stage if obs_stage >= stage_cutoff: x1, x2 = find_range(stage_vol_df['stage_m'].tolist(), obs_stage) x_diff = x2-x1 y1 = stage_vol_df['total_vol_cu_m'][x1] y2 = stage_vol_df['total_vol_cu_m'][x2] y_diff = y2 - y1 slope = y_diff/x_diff y_intercept = y2 - (slope*x2) water_balance_df['volume (cu.m)'][index.strftime('%Y-%m-%d %H:%M:%S')] = (slope*obs_stage) + y_intercept # fig = plt.figure(figsize=(11.69, 8.27)) # plt.plot_date(water_balance_df.index, water_balance_df['volume (cu.m)'], '-g') # plt.hlines(stage_vol_df['total_vol_cu_m'][1.9], min(water_balance_df.index), max(water_balance_df.index)) # plt.title('before overflow correction') 
water_balance_df['pumping status'] = 0.00 # water_balance_df['pumping (cu.m)'] = 0.00 """ Pumping """ for index, row in water_balance_df.iterrows(): if index > min(water_balance_df.index): previous_time = index - timedelta(seconds=1800) obs_stage = row['stage(m)'] v1 = water_balance_df['volume (cu.m)'][previous_time.strftime('%Y-%m-%d %H:%M:%S')] v2 = water_balance_df['volume (cu.m)'][index.strftime('%Y-%m-%d %H:%M:%S')] if ((v1 -v2) > 14) and obs_stage < 1.9: water_balance_df['pumping status'][index.strftime(date_format)] = 1 # water_balance_df['pumping (cu.m)'][index.strftime(date_format)] = 14 """ Overflow """ full_vol = stage_vol_df['total_vol_cu_m'][1.9] full_stage = 1.9 length_check_dam = 17 # b width_check_dam = 0.5 # L # discharge_coeff = 1.704 # http://pubs.usgs.gov/wsp/0200/report.pdf page 9 no_of_contractions = 0 water_balance_df['overflow(cu.m)'] = 0.000 for index in water_balance_df.index: obs_stage = water_balance_df['stage(m)'][index.strftime(date_format)] previous_time = index - timedelta(seconds=1800) if (np.around(obs_stage, 2)) > full_stage: effective_head = obs_stage - full_stage # eff_head_width_ratio = effective_head/width_check_dam # if eff_head_width_ratio > 0.27: # discharge_coeff = (0.309*eff_head_width_ratio) + 0.796 # else: # discharge_coeff = eff_head_width_ratio + 0.612 # print effective_head if np.around(water_balance_df['stage(m)'][previous_time.strftime(date_format)], 2) > full_stage: water_balance_df['overflow(cu.m)'][index.strftime(date_format)] = 1800*1.84*width_check_dam*(effective_head**1.5) else: water_balance_df['overflow(cu.m)'][index.strftime(date_format)] = 900*1.84*width_check_dam*(effective_head**1.5) # for index, row in water_balance_df.iterrows(): # # obs_stage = row['volu'] # obs_vol = row['volume (cu.m)'] # if obs_vol > full_vol: # # print obs_vol # water_balance_df['overflow(cu.m)'][index.strftime('%Y-%m-%d %H:%M:%S')] = obs_vol - full_vol # water_balance_df['volume (cu.m)'][index.strftime('%Y-%m-%d %H:%M:%S')] = full_vol # water_balance_df['stage(m)'][index.strftime('%Y-%m-%d %H:%M:%S')] = 1.9 # start from May 15 water_balance_df = water_balance_df["2014-05-15":] # print water_balance_df.head() water_balance_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/30_min_check.csv') # print ch_storage_df.head() # fig = plt.figure(figsize=(11.69, 8.27)) # plt.plot_date(water_balance_df.index, water_balance_df['volume (cu.m)'], '-g') # plt.hlines(stage_vol_df['total_vol_cu_m'][1.9], min(water_balance_df.index), max(water_balance_df.index)) # plt.title('after overflow correction') """ Stage vs area linear relationship """ stage_area_df = pd.read_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/cont_area.csv', sep=',', header=0, names=['sno', 'stage_m', 'total_area_sq_m']) stage_area_df.drop('sno', inplace=True, axis=1) # set stage as index stage_area_df.set_index(stage_area_df['stage_m'], inplace=True) # print max(water_balance_df['stage(m)']) # print find_range(stage_area_df['stage_m'].tolist(), max(water_balance_df['stage(m)'])) #create empty column water_balance_df['ws_area(sq.m)'] = 0.000 for index, row in water_balance_df.iterrows(): obs_stage = row['stage(m)'] # observed stage if obs_stage >= stage_cutoff: x1, x2 = find_range(stage_area_df['stage_m'].tolist(), obs_stage) x_diff = x2-x1 y1 = stage_area_df['total_area_sq_m'][x1] y2 = stage_area_df['total_area_sq_m'][x2] y_diff = y2 - y1 slope = y_diff/x_diff y_intercept = y2 - (slope*x2) water_balance_df['ws_area(sq.m)'][index.strftime('%Y-%m-%d 
%H:%M:%S')] = (slope*obs_stage) + y_intercept """ Evaporation Volume estimation """ water_balance_df['Evaporation (cu.m)'] = (water_balance_df['Evaporation (mm/30min)'] * 0.001) * water_balance_df['ws_area(sq.m)'] # start from May 15 """ Daily Totals of Rain, Evaporation, Overflow """ sum_df = water_balance_df[['Rain Collection (mm)', 'Evaporation (cu.m)', 'Evaporation (mm/30min)', 'overflow(cu.m)', 'pumping status']] sum_df = sum_df.resample('D', how=np.sum) # print sum_df.head(10) """ Daily average of Stage """ stage_df = water_balance_df[['stage(m)']] stage_df = stage_df.resample('D', how=np.mean) # print stage_df.head() water_balance_daily_df = sum_df.join(stage_df, how='left') water_balance_daily_df['ws_area(sq.m)'] = 0.000 for index, row in water_balance_daily_df.iterrows(): obs_stage = row['stage(m)'] # observed stage x1, x2 = find_range(stage_area_df['stage_m'].tolist(), obs_stage) x_diff = x2-x1 y1 = stage_area_df['total_area_sq_m'][x1] y2 = stage_area_df['total_area_sq_m'][x2] y_diff = y2 - y1 slope = y_diff/x_diff y_intercept = y2 - (slope*x2) water_balance_daily_df['ws_area(sq.m)'][index.strftime('%Y-%m-%d %H:%M:%S')] = (slope*obs_stage) + y_intercept # print water_balance_daily_df.head() """ Change in storage """ # separate out 23:30 readings hour = water_balance_df.index.hour minute = water_balance_df.index.minute ch_storage_df = water_balance_df[['volume (cu.m)']][((hour == 23) & (minute == 30))] ch_storage_df = ch_storage_df.resample('D', how=np.mean) water_balance_daily_df['change_storage(cu.m)'] = 0.000 # print water_balance_daily_df.head() check_df = ch_storage_df['2014-05-29': ] #average print check_df.head() for index in ch_storage_df.index: if index > min(ch_storage_df.index): previous_date = index - timedelta(days=1) d1_storage = ch_storage_df['volume (cu.m)'][previous_date.strftime(daily_format)] d2_storage = ch_storage_df['volume (cu.m)'][index.strftime(daily_format)] water_balance_daily_df['change_storage(cu.m)'][index.strftime(daily_format)] = d2_storage - d1_storage # for d1, d2 in pairwise(ch_storage_df.index): # if d2 > d1: # diff = (d2-d1).days # if diff == 1: # d1_storage = ch_storage_df['volume (cu.m)'][d1.strftime('%Y-%m-%d')] # d2_storage = ch_storage_df['volume (cu.m)'][d2.strftime('%Y-%m-%d')] # water_balance_daily_df['change_storage(cu.m)'][d2.strftime('%Y-%m-%d')] = d2_storage - d1_storage # # print water_balance_daily_df.head() new_df = water_balance_daily_df.join(ch_storage_df, how='right') new_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/proof.csv') # new_index = pd.date_range(start='2014-05-15', end='2014-09-10', freq='D' ) # print new_index # print ch_storage_df.head() """ Separate out no inflow/ non rainy days two continuous days of no rain """ water_balance_daily_df['status'] = "Y" no_rain_df = water_balance_daily_df[water_balance_daily_df['Rain Collection (mm)'] == 0] # no_rain_df['status'] = "Y" for index in water_balance_daily_df.index: initial_time_stamp = min(water_balance_daily_df.index) + timedelta(days=1) if index > initial_time_stamp: start_date = index - timedelta(days=1) two_days_rain_df = water_balance_daily_df['Rain Collection (mm)'][start_date.strftime('%Y-%m-%d'):index.strftime('%Y-%m-%d')] sum_df = two_days_rain_df.sum(axis=0) if (sum_df == 0) and (water_balance_daily_df['change_storage(cu.m)'][index.strftime('%Y-%m-%d')] < 0) and (water_balance_daily_df['overflow(cu.m)'][index.strftime(daily_format)] == 0) and (water_balance_daily_df['pumping status'][index.strftime(daily_format)] == 0): 
water_balance_daily_df['status'][index.strftime('%Y-%m-%d')] = "N" # print no_rain_df.head() water_balance_daily_df.to_csv("/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/water_bal.csv") no_rain_df.to_csv("/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/no_rain_df.csv") dry_water_balance_df = water_balance_daily_df[water_balance_daily_df['status'] == "N"] rain_water_balance_df = water_balance_daily_df[water_balance_daily_df['status'] == "Y"] # dry_water_balance_df = dry_water_balance_df[dry_water_balance_df['stage(m)'] > 0.1] # rain_water_balance_df = rain_water_balance_df[rain_water_balance_df['stage(m)'] > 0.1] # print dry_water_balance_df.head() # print rain_water_balance_df.head() # # b = plot_date(dry_water_balance_df, 'change_storage(cu.m)') """ # Calculate infiltration # """ # # calculate infiltration # dry_water_balance_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/dry_wb_check.CSV') # # print dry_water_balance_df.head() dry_water_balance_df['infiltration(cu.m)'] = 0.000 delta_s = water_balance_daily_df['change_storage(cu.m)'] evap = water_balance_daily_df['Evaporation (cu.m)'] outflow = water_balance_daily_df['overflow(cu.m)'] # for t1, t2 in pairwise(dry_water_balance_df.index): # diff = abs((t2-t1).seconds) # if diff == 1800: # # print t1, t2 # dry_water_balance_df['infiltration(cu.m)'][t1.strftime('%Y-%m-%d %H:%M:%S')] = -1*(delta_s[t2.strftime('%Y-%m-%d %H:%M:%S')] + evap[t2.strftime('%Y-%m-%d %H:%M:%S')] + outflow[t2.strftime('%Y-%m-%d %H:%M:%S')]) for index, row in dry_water_balance_df.iterrows(): if index > min(water_balance_daily_df.index): t_1 = index - timedelta(days=1) if t_1 < max(water_balance_daily_df.index): dry_water_balance_df['infiltration(cu.m)'][index.strftime(daily_format)] = -1.0*(delta_s[index.strftime(daily_format)] + evap[index.strftime(daily_format)]) # print row dry_water_balance_df['infiltration rate (m/day)'] = 0.0 for i in dry_water_balance_df.index: i_1 = i - timedelta(days=1) dry_water_balance_df['infiltration rate (m/day)'][i.strftime(daily_format)] = dry_water_balance_df['infiltration(cu.m)'][i.strftime(daily_format)] / water_balance_daily_df['ws_area(sq.m)'][i.strftime(daily_format)] # dry_water_balance_df['infiltration(cu.m)'] = -1.0*(evap + outflow + delta_s) # # print dry_water_balance_df.head() # # fig = plt.figure(figsize=(11.69, 8.27)) # # plt.plot(dry_water_balance_df['average_stage_m'], dry_water_balance_df['infiltration(cu.m)'], 'bo') # # plt.show() # print dry_water_balance_df """ Dry infiltration vs rainfall """ fig, ax1 = plt.subplots(nrows=1, ncols=1, figsize=(11.69, 8.27)) # fig.subplots_adjust(right=0.8) line1 = ax1.bar(water_balance_daily_df.index, water_balance_daily_df['Rain Collection (mm)'], 0.35, label=r'Rainfall(mm)') plt.gca().invert_yaxis() ax1.xaxis.tick_bottom() ax1.yaxis.tick_left() for t1 in ax1.get_yticklabels(): t1.set_color('b') # plt.legend(loc='upper left') ax2 = ax1.twinx() cmap, norm = mpl.colors.from_levels_and_colors([0, 0.05, 1, 1.5, 2.0], ['red', 'yellow', 'green', 'blue']) line2 = ax2.scatter(dry_water_balance_df.index, dry_water_balance_df['infiltration(cu.m)'], label='Infiltration (cu.m)', c=dry_water_balance_df['stage(m)'], cmap=cmap, norm=norm) plt.hlines(0, min(dry_water_balance_df.index), max(dry_water_balance_df.index)) ax2.xaxis.tick_bottom() ax2.yaxis.tick_right() for t1 in ax2.get_yticklabels(): t1.set_color('r') # # plt.legend(loc='upper right') # # fig.autofmt_xdate(rotation=90) # # fig.subplots_adjust(right=0.8) # ax3 = ax2.twiny() # 
line3 = ax3.line(water_balance_daily_df.index, water_balance_daily_df['Evaporation (cu.m)'], 0.35, 'g', alpha=0.5, label='Evaporation (cu.m)' ) # ax3.tick_params(axis='x', # which='both', # top='off', # bottom='off', # labeltop='off') # # ax3.xaxis.tick_bottom() # ax3.yaxis.tick_right() fig.autofmt_xdate(rotation=90) # lns = line1+line3 # labs = [l.get_label() for l in lns] # ax3.legend(lns, labs, loc='upper center', fancybox=True, ncol=3, bbox_to_anchor=(0.5, 1.15)) # ax3.set_xlim([min(dry_water_balance_df.index), max(dry_water_balance_df.index)]) fig.subplots_adjust(right=0.8) cbar_ax = fig.add_axes([0.85, 0.50, 0.05, 0.3]) #first one distance from plot, second height # cax, kw = mpl.colorbar.make_axes([ax for ax in ax1.flat()]) cbar = fig.colorbar(line2, cax=cbar_ax) cbar.ax.set_ylabel('Stage (m)') plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/new_dry_rain_infiltration_stage_591_30min') # plt.show() """ Fitting exponential function """ stage_cal = dry_water_balance_df['stage(m)'] # stage_cal = dry_water_balance_df['average_stage_m'] inf_cal = dry_water_balance_df['infiltration rate (m/day)'] # print dry_water_balance_df.shape fig = plt.figure(figsize=(11.69, 8.27)) plt.plot(dry_water_balance_df['stage(m)'], dry_water_balance_df['infiltration(cu.m)'], 'bo') fig.autofmt_xdate(rotation=90) # plt.show() # plt.plot(stage_cal, inf_cal, 'bo', label=r'Observation') # plt.vlines(1.9, 0, max(inf_cal), 'g') # plt.hlines(0, min(stage_cal), max(stage_cal), 'y') # plt.legend(loc='upper left') # plt.xlabel(r'\textbf{Stage} (m)') # plt.ylabel(r'\textbf{Infiltration} ($m^3/day$)') # plt.title(r"Stage - Infiltration relationship during no inflow for 591 check dam") # plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/infiltration_stage_591_30min') # plt.show() def func(h, alpha, beta): return alpha*(h**beta) popt, pcov = curve_fit(func, stage_cal, inf_cal, maxfev=6000) # print popt # print pcov # # print np.diag(pcov) # print np.sqrt(np.diag(pcov)) # plot stage_cal_new = np.linspace(min(stage_cal), max(stage_cal), 50) inf_cal_new = func(stage_cal_new, *popt) fig = plt.figure(figsize=(11.69, 8.27)) plt.plot(stage_cal, inf_cal, 'bo', label=r'Observation') plt.plot(stage_cal_new, inf_cal_new, 'r-', label='Prediction') plt.vlines(1.9, 0, max(inf_cal), 'g') plt.hlines(0, min(stage_cal), max(stage_cal), 'y') plt.legend(loc='upper right') plt.xlabel(r'\textbf{Stage} (m)') plt.ylabel(r'\textbf{Infiltration Rate} ($m/day$)') plt.title(r"No inflow day's stage - infiltration relationship for 591 check dam") plt.text(x=0.4, y=.003, fontsize=15, s=r'$Infiltration = {0:.2f}{{h_{{av}}}}^{{{1:.2f}}}$'.format(popt[0], popt[1])) plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/new_stage_inf_exp_dry_591_30min') # plt.show() # print dry_water_balance_df # print dry_water_balance_df[dry_water_balance_df['infiltration(cu.m)'] < 0] # plot rainfall vs stage # # fig, ax1 = plt.subplots(figsize=(11.69, 8.27)) # ax1.bar(water_balance_daily_df.index, water_balance_daily_df['Rain Collection (mm)'], 0.35, color='b', label=r'Rainfall(mm)') # plt.gca().invert_yaxis() # for t1 in ax1.get_yticklabels(): # t1.set_color('b') # ax1.set_ylabel('Rainfall(mm)') # plt.legend(loc='upper left') # ax2 = ax1.twinx() # ax2.plot_date(water_balance_daily_df.index, water_balance_daily_df['stage(m)'], 'r', label='stage (m)') # for t1 in ax2.get_yticklabels(): # t1.set_color('r') # plt.legend(loc='upper right') # fig.autofmt_xdate(rotation=90) # plt.show() """ Rainy day 
infiltration """ rain_water_balance_df['infiltration(cu.m)'] = 0.0 rain_water_balance_df['infiltration rate (m/day)'] = popt[0]*(rain_water_balance_df['stage(m)']**popt[1]) for i in rain_water_balance_df.index: if i > min(water_balance_daily_df.index): i_1 = i - timedelta(days=1) rain_water_balance_df['infiltration(cu.m)'][i.strftime(daily_format)] = rain_water_balance_df['infiltration rate (m/day)'][i.strftime(daily_format)]*water_balance_daily_df['ws_area(sq.m)'][i.strftime(daily_format)] fig = plt.figure(figsize=(11.69, 8.27)) plt.plot(rain_water_balance_df['stage(m)'], rain_water_balance_df['infiltration(cu.m)'], 'bo', label='Predicted Infiltration' ) # # plt.vlines(1.9, 0, 100, 'g') # # plt.xlim([-1, 2.0]) # # plt.legend(loc='upper left') plt.xlabel(r'\textbf{Stage} (m)') plt.ylabel(r'\textbf{Infiltration} ($m^3/day$)') plt.title(r"Inflow day's stage - infiltration relationship for 591 check dam") plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/new_rain_inf_591_30min') """ Pumping calculation """ merged_water_balance = pd.concat([dry_water_balance_df, rain_water_balance_df]) pump_df = merged_water_balance[merged_water_balance['pumping status'] > 0] print pump_df merged_water_balance['pumping (cu.m)'] = 0.000 for index in pump_df.index: merged_water_balance['pumping (cu.m)'][index.strftime(daily_format)] = -1.0*(merged_water_balance['change_storage(cu.m)'][index.strftime(daily_format)] + merged_water_balance['infiltration(cu.m)'][index.strftime(daily_format)] + merged_water_balance['Evaporation (cu.m)'][index.strftime(daily_format)] + merged_water_balance['overflow(cu.m)'][index.strftime(daily_format)]) # raise SystemExit(0) """ Inflow calculation """ # merged_water_balance = pd.concat([dry_water_balance_df, rain_water_balance_df]) merged_water_balance['Inflow (cu.m)'] = 0.000 delta_s_rain = water_balance_daily_df['change_storage(cu.m)'] inf_rain = merged_water_balance['infiltration(cu.m)'] evap_rain = water_balance_daily_df['Evaporation (cu.m)'] outflow_rain = water_balance_daily_df['overflow(cu.m)'] pump_rain = merged_water_balance['pumping (cu.m)'] for i, row in merged_water_balance.iterrows(): if i > min(merged_water_balance.index): string1 = intern(row['status']) string2 = intern('N') if string1 != string2: # i_1 = i - timedelta(days=1) merged_water_balance['Inflow (cu.m)'][i.strftime(daily_format)] = (delta_s_rain[i.strftime(daily_format)] + inf_rain[i.strftime(daily_format)] + evap_rain[i.strftime(daily_format)] + outflow_rain[i.strftime(daily_format)] + pump_rain[i.strftime(daily_format)]) # merged_water_balance = pd.concat([dry_water_balance_df, rain_water_balance_df]) # pd.PeriodIndex(ch_storage_df.index, freq='D') # merged_water_balance = merged_water_balance.join(ch_storage_df, how='left') merged_water_balance.sort_index(inplace=True) dry_water_balance_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/new_dry_wb_30min.CSV') rain_water_balance_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/new_rain_wb_30min.CSV') merged_water_balance.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/new_wb_30min.CSV') # print merged_water_balance.head() daily_df = check_df.join(merged_water_balance, how='left') # print daily_df.head() daily_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/daily_3day_check.CSV') # check_volume_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/30min_3day_check.CSV') """ Evaporation vs infiltration """ fig, 
ax1 = plt.subplots(figsize=(11.69, 8.27)) line1 = ax1.bar(merged_water_balance.index, merged_water_balance['Evaporation (cu.m)'], 0.45, color='r', label=r"\textbf{Evaporation ($m^3/day$)}") # plt.title("Evaporation vs Infiltration for Check dam 591") for t1 in ax1.get_yticklabels(): t1.set_color('r') ax2 = ax1.twiny() line2 = ax2.bar(merged_water_balance.index, merged_water_balance['infiltration(cu.m)'], 0.45, color='g', alpha=0.5, label=r"\textbf{Infiltration ($m^3/day$}") for t1 in ax2.get_yticklabels(): t1.set_color('g') lns = [line1, line2] lab = [r"\textbf{Evaporation ($m^3/day$)}", r"\textbf{Infiltration ($m^3/day$}" ] # ax2.legend(lns, lab, loc='upper center', fancybox=True, ncol=2, bbox_to_anchor=(0.5, 1.15)) fig.autofmt_xdate(rotation=90) plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/evap_infilt_591_30min_n') # plt.show() # print merged_water_balance.head() print merged_water_balance['Evaporation (cu.m)'].sum() print merged_water_balance['infiltration(cu.m)'].sum() print merged_water_balance['overflow(cu.m)'].sum() print merged_water_balance['Inflow (cu.m)'].sum() print merged_water_balance['pumping (cu.m)'].sum() wb = (merged_water_balance['Evaporation (cu.m)'].sum()+ merged_water_balance['infiltration(cu.m)'].sum() + merged_water_balance['overflow(cu.m)'].sum()+merged_water_balance['pumping (cu.m)'].sum()) - merged_water_balance['Inflow (cu.m)'].sum() print wb merged_water_balance.index.name = 'Date' print merged_water_balance.head() merged_water_balance.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/et_infilt_591_w_of.csv')
gpl-3.0
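The check-dam record above fits a power-law relationship between stage and infiltration rate with scipy.optimize.curve_fit and then converts the fitted rate into a daily volume via the water-spread area. A minimal, self-contained sketch of that fitting step follows; the observation arrays and the water-spread area are hypothetical stand-ins for the dry_water_balance_df and water_balance_daily_df columns used in the original script.

# Sketch of the stage -> infiltration power-law fit used in the record above.
# The arrays below are invented for illustration; the script reads them from
# dry_water_balance_df['stage(m)'] and ['infiltration rate (m/day)'].
import numpy as np
from scipy.optimize import curve_fit

def func(h, alpha, beta):
    # Infiltration rate modelled as alpha * h**beta (m/day)
    return alpha * (h ** beta)

stage_cal = np.array([0.2, 0.4, 0.6, 0.9, 1.2, 1.5])      # stage (m), hypothetical
inf_cal = np.array([0.01, 0.03, 0.05, 0.09, 0.13, 0.18])  # rate (m/day), hypothetical

popt, pcov = curve_fit(func, stage_cal, inf_cal, maxfev=6000)
print('Infiltration = {0:.3f} * h^{1:.3f}'.format(popt[0], popt[1]))
print('parameter std errors: {0}'.format(np.sqrt(np.diag(pcov))))

# As in the script, a rainy-day infiltration volume (cu.m) is the fitted
# rate (m/day) times the water-spread area (sq.m); both values are hypothetical here.
ws_area = 120.0
stage = 0.8
print('infiltration volume (cu.m): {0:.2f}'.format(func(stage, *popt) * ws_area))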
awacha/cct
cct/qtgui/core/plotimage/plotimage.py
1
16195
import datetime import gc import matplotlib.cm import matplotlib.colors import numpy as np import sastool.io.credo_cct import scipy.misc from PyQt5 import QtWidgets, QtGui from matplotlib.axes import Axes from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT from matplotlib.figure import Figure from sastool.classes2 import Exposure from .plotimage_ui import Ui_Form def get_colormaps(): return sorted( [cm for cm in dir(matplotlib.cm) if isinstance(getattr(matplotlib.cm, cm), matplotlib.colors.Colormap)], key=lambda x: x.lower()) class PlotImage(QtWidgets.QWidget, Ui_Form): lastinstances = [] _exposure: Exposure=None def __init__(self, parent=None, register_instance=True): QtWidgets.QWidget.__init__(self, parent) self._exposure = None self.previous_extent = None self.previous_axestype = None self.setupUi(self) if register_instance: type(self).lastinstances.append(self) @classmethod def get_lastinstance(cls): if not cls.lastinstances: return cls() else: obj = cls.lastinstances[-1] try: assert isinstance(obj, cls) obj.windowTitle() return obj except RuntimeError: # wrapped C/C++ object of type PlotImage has been deleted cls.lastinstances.remove(obj) # try again return cls.get_lastinstance() def closeEvent(self, event: QtGui.QCloseEvent): try: type(self).lastinstances.remove(self) except ValueError: pass event.accept() def _testimage(self): header = sastool.io.credo_cct.Header( {'accounting': {'operator': 'Anonymous', 'projectid': 'Dummy project', }, 'sample': {'title': 'Test image', 'transmission.val': 0.5, 'transmission.err': 0.01, 'thickness.val': 0.1, 'thickness.err': 0.001, 'distminus.val': 0, 'distminus.err': 0, 'positionx.val': 0, 'positiony.val': 0, 'positionx.err': 0, 'positiony.err': 0, }, 'motors': {'dummy_motor': 0, }, 'exposure': {'fsn': 0, 'exptime': 100, 'startdate': str(datetime.datetime.now()), }, 'geometry': {'wavelength': 0.15418, 'wavelength.err': 0.001, 'truedistance': 100, 'truedistance.err': 0.05, 'beamposy': 200, 'beamposy.err': 0.5, 'beamposx': 100, 'beamposx.err': 0.5, 'pixelsize': 0.172, 'mask': 'mask.mat', }, 'environment': {'temperature': 20, 'vacuum_pressure': 1e-5, }, 'datareduction': {'flux': 10, 'flux.err': 0.1, 'emptybeamFSN': 0, 'absintrefFSN': 0, 'absintfactor': 1, 'absintfactor.err': 0, } }) m = scipy.misc.face(True) self._exposure = sastool.io.credo_cct.Exposure(m * 1.0, m * 0.1, header, m - m.min() > 0.2 * (m.max() - m.min())) self.replot() def setPixelMode(self, pixelmode:bool): if pixelmode: self.axesComboBox.setCurrentIndex(self.axesComboBox.findText('abs. pixel')) self.axesComboBox.setVisible(not pixelmode) self.axesLabel.setVisible(not pixelmode) self.replot() def setupUi(self, Form): Ui_Form.setupUi(self, Form) #self.colourScaleComboBox.addItems(['linear', 'logarithmic', 'square', 'square root']) self.colourScaleComboBox.setCurrentIndex(self.colourScaleComboBox.findText('logarithmic')) self.paletteComboBox.addItems(get_colormaps()) self.paletteComboBox.setCurrentIndex(self.paletteComboBox.findText(matplotlib.rcParams['image.cmap'])) #self.axesComboBox.addItems(['abs. pixel', 'rel. 
pixel', 'detector radius', 'twotheta', 'q']) self.axesComboBox.setCurrentIndex(self.axesComboBox.findText('q')) layout = QtWidgets.QVBoxLayout(self.figureContainer) self.figure = Figure() self.canvas = FigureCanvasQTAgg(self.figure) self.axes = self.figure.add_subplot(1, 1, 1) assert isinstance(self.axes, Axes) self.axes.set_facecolor('black') layout.addWidget(self.canvas) self.figtoolbar = NavigationToolbar2QT(self.canvas, self.figureContainer) layout.addWidget(self.figtoolbar) assert isinstance(self.figtoolbar, QtWidgets.QToolBar) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(":/icons/plotimage_config.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.showToolbarButton = QtWidgets.QToolButton(self.figtoolbar) self.showToolbarButton.setIcon(icon) self.showToolbarButton.setText('Plot setup') self.showToolbarButton.setCheckable(True) self.showToolbarButton.setChecked(False) self.toolbar.setVisible(False) self.showToolbarButton.toggled.connect(self.toolbarVisibility) self.figtoolbar.insertWidget(self.figtoolbar.actions()[-1], self.showToolbarButton).setVisible(True) self.colourScaleComboBox.currentIndexChanged.connect(self.colourScaleChanged) self.axesComboBox.currentIndexChanged.connect(self.axesTypeChanged) self.paletteComboBox.currentIndexChanged.connect(self.paletteChanged) self.showColourBarToolButton.toggled.connect(self.showColourBarChanged) self.showMaskToolButton.toggled.connect(self.showMaskChanged) self.showBeamToolButton.toggled.connect(self.showBeamChanged) self.equalAspectToolButton.toggled.connect(self.replot) self._testimage() self.figtoolbar.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum) self.canvas.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding) self.canvas.mpl_connect('resize_event', self.onCanvasResize) def onCanvasResize(self, event): self.figure.tight_layout() self.canvas.draw() def toolbarVisibility(self, state): self.toolbar.setVisible(state) def showColourBarChanged(self, checked): self.replot_colourbar() self.canvas.draw() def showMaskChanged(self, checked): self.replot_mask() self.canvas.draw() def showBeamChanged(self, checked): self.replot_crosshair() self.canvas.draw() def colourScaleChanged(self): self.replot() def paletteChanged(self): self.replot() def axesTypeChanged(self): self.replot() def replot_colourbar(self): if hasattr(self, '_colorbar'): self._colorbar.remove() del self._colorbar if self.showColourBarToolButton.isChecked(): self._colorbar = self.figure.colorbar(self._image, ax=self.axes, use_gridspec=True) def replot_mask(self): if hasattr(self, '_mask'): self._mask.remove() del self._mask if self.showMaskToolButton.isChecked(): ex = self.exposure() assert isinstance(ex, Exposure) mf = np.ones(ex.shape, np.float) mf[ex.mask != 0] = np.nan aspect = ['auto','equal'][self.equalAspectToolButton.isChecked()] self._mask = self.axes.imshow(mf, cmap='gray_r', interpolation='nearest', aspect=aspect, alpha=0.7, origin='upper', extent=self._image.get_extent(), zorder=2) gc.collect() def replot_crosshair(self): if hasattr(self, '_crosshair'): for c in self._crosshair: c.remove() del self._crosshair if self.showBeamToolButton.isChecked(): ex = self.exposure() assert isinstance(ex, Exposure) axestype = self.axesComboBox.currentText() if axestype == 'abs. 
pixel': matrix = self.exposure().intensity beampos = (ex.header.beamcenterx.val, ex.header.beamcentery.val) assert isinstance(self.axes, Axes) self._crosshair = self.axes.plot([0, matrix.shape[1]], [beampos[1], beampos[1]], 'w-', [beampos[0], beampos[0]], [0, matrix.shape[0]], 'w-', scalex=False, scaley=False) else: extent = self._image.get_extent() self._crosshair = self.axes.plot(extent[0:2], [0, 0], 'w-', [0, 0], extent[2:4], 'w-', scalex=False, scaley=False) gc.collect() def replot(self): ex = self.exposure() assert isinstance(ex, Exposure) assert isinstance(self.axes, Axes) if self.colourScaleComboBox.currentText() == 'linear': norm = matplotlib.colors.Normalize() elif self.colourScaleComboBox.currentText() == 'logarithmic': norm = matplotlib.colors.LogNorm() elif self.colourScaleComboBox.currentText() == 'square': norm = matplotlib.colors.PowerNorm(2) elif self.colourScaleComboBox.currentText() == 'square root': norm = matplotlib.colors.PowerNorm(0.5) else: assert False matrix = ex.intensity if self.colourScaleComboBox.currentText() in ['logarithmic', 'square', 'square root']: matrix[matrix <= 0] = np.nan beampos = (ex.header.beamcenterx.val, ex.header.beamcentery.val) distance = ex.header.distance.val wavelength = ex.header.wavelength.val pixelsize = ex.header.pixelsizex.val, ex.header.pixelsizey.val axesscale = self.axesComboBox.currentText() if axesscale != self.previous_axestype: self.previous_axestype = axesscale self.clear() return self.replot() if axesscale == 'abs. pixel': extent = (0, matrix.shape[1] - 1, matrix.shape[0] - 1, 0) # left, right, bottom, top elif axesscale == 'rel. pixel': extent = (0 - beampos[0], matrix.shape[1] - 1 - beampos[0], matrix.shape[0] - 1 - beampos[1], 0 - beampos[1],) elif axesscale == 'detector radius': extent = ( (0 - beampos[0]) * pixelsize[0], (matrix.shape[1] - 1 - beampos[0]) * pixelsize[0], (matrix.shape[0] - 1 - beampos[1]) * pixelsize[1], (0 - beampos[1]) * pixelsize[1], ) elif axesscale == 'twotheta': extent = (np.arctan((0 - beampos[0]) * pixelsize[0] / distance) * 180 / np.pi, np.arctan((matrix.shape[1] - 1 - beampos[ 0]) * pixelsize[0] / distance) * 180 / np.pi, np.arctan((matrix.shape[0] - 1 - beampos[1]) * pixelsize[1] / distance) * 180 / np.pi, np.arctan((0 - beampos[1]) * pixelsize[1] / distance) * 180 / np.pi, ) elif axesscale == 'q': extent = (4 * np.pi * np.sin( 0.5 * np.arctan((0 - beampos[0]) * pixelsize[0] / distance)) / wavelength, 4 * np.pi * np.sin(0.5 * np.arctan((matrix.shape[1] - 1 - beampos[ 0]) * pixelsize[0] / distance)) / wavelength, 4 * np.pi * np.sin(0.5 * np.arctan((matrix.shape[0] - 1 - beampos[ 1]) * pixelsize[1] / distance)) / wavelength, 4 * np.pi * np.sin( 0.5 * np.arctan( (0 - beampos[1]) * pixelsize[1] / distance)) / wavelength, ) else: raise ValueError(axesscale) if extent != self.previous_extent: self.previous_extent = extent self.clear() return self.replot() if hasattr(self, '_image'): if hasattr(self, '_colorbar'): self._colorbar.remove() del self._colorbar if self._image.get_extent() != extent: self.axes.axis( extent) self._image.remove() del self._image firstplot = False else: firstplot = True aspect = ['auto','equal'][self.equalAspectToolButton.isChecked()] self._image = self.axes.imshow(matrix, cmap=self.paletteComboBox.currentText(), norm=norm, aspect=aspect, interpolation='nearest', origin='upper', zorder=1, extent=extent) if firstplot: self.figtoolbar.update() if np.isfinite(matrix).sum() > 0: self.replot_colourbar() self.replot_crosshair() self.replot_mask() try: title = ex.header.title except 
KeyError: title = 'Untitled' self._title = self.axes.set_title('#{:d}: {} ({:.2f} mm)'.format(ex.header.fsn, title, ex.header.distance)) if axesscale == 'abs. pixel': self.axes.xaxis.set_label_text('Absolute column coordinate (pixel)') self.axes.yaxis.set_label_text('Absolute row coordinate (pixel)') elif axesscale == 'rel. pixel': self.axes.xaxis.set_label_text('Relative column coordinate (pixel)') self.axes.yaxis.set_label_text('Relative row coordinate (pixel)') elif axesscale == 'detector radius': self.axes.xaxis.set_label_text('Horizontal distance from the beam center (mm)') self.axes.yaxis.set_label_text('Vertical distance from the beam center (mm)') elif axesscale == 'twotheta': self.axes.xaxis.set_label_text('$2\\theta_x$ ($^\circ$)') self.axes.yaxis.set_label_text('$2\\theta_y$ ($^\circ$)') elif axesscale == 'q': self.axes.xaxis.set_label_text('$q_x$ (nm$^{-1}$)') self.axes.yaxis.set_label_text('$q_y$ (nm$^{-1}$)') else: assert False self.canvas.draw() gc.collect() def setExposure(self, exposure: Exposure): self._exposure = exposure del exposure self.replot() def exposure(self) -> Exposure: return self._exposure def setOnlyAbsPixel(self, status=True): if status: self.axesComboBox.setCurrentIndex(0) self.axesComboBox.setEnabled(False) else: self.axesComboBox.setEnabled(True) def setMaskMatrix(self, mask:np.ndarray): if self._exposure.mask.shape==mask.shape: self._exposure.mask = mask self.replot_mask() self.canvas.draw() else: raise ValueError('Mismatched mask shape ({0[0]:d}, {0[1]:d}) for image of shape ({1[0]:d}, {1[1]:d})'.format(mask.shape, self._exposure.shape)) def maskMatrix(self)-> np.ndarray: return self._exposure.mask def clear(self): try: self._colorbar.remove() except (AttributeError, KeyError): pass for attr in ['_crosshair', '_image', '_mask', '_colorbar']: try: delattr(self, attr) except AttributeError: pass self.axes.clear() self.canvas.draw()
bsd-3-clause
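The PlotImage widget in the record above embeds a Matplotlib figure in a Qt widget by stacking a FigureCanvasQTAgg and a NavigationToolbar2QT inside a QVBoxLayout, then redrawing the canvas after each replot. A stripped-down sketch of that embedding pattern, independent of the cct UI classes, is below; the class name and the random test image are placeholders, not part of the original code.

# Minimal sketch of the canvas + toolbar embedding pattern used by PlotImage.setupUi().
import sys
import numpy as np
from PyQt5 import QtWidgets
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT

class ImageWidget(QtWidgets.QWidget):
    def __init__(self, parent=None):
        super().__init__(parent)
        layout = QtWidgets.QVBoxLayout(self)
        self.figure = Figure()
        self.canvas = FigureCanvasQTAgg(self.figure)
        self.axes = self.figure.add_subplot(1, 1, 1)
        layout.addWidget(self.canvas)
        layout.addWidget(NavigationToolbar2QT(self.canvas, self))

    def set_matrix(self, matrix):
        # Show the 2D array and redraw the canvas, as replot() does after imshow().
        self.axes.clear()
        self.axes.imshow(matrix, origin='upper', interpolation='nearest')
        self.canvas.draw()

if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    w = ImageWidget()
    w.set_matrix(np.random.rand(256, 256))  # placeholder test image
    w.show()
    sys.exit(app.exec_())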
redreamality/broca
examples/clustering.py
3
1520
""" This example demonstrates using broca with scikit-learn for clustering. A multi-pipeline is assembled to try four different bag-of-words vectorizers, each with a different tokenizer, and then compare their K-Means clustering results. """ from sklearn import metrics from sklearn.cluster import KMeans from broca.vectorize import BoW from broca.pipeline import Pipeline from broca.preprocess import Cleaner from broca.tokenize.keyword import Overkill, RAKE, POS from examples import load_data # A dataset of news article clusters data = load_data('10E.json') # Prep ground truth labelings docs = [] true = [] for i, e in enumerate(data): for a in e['articles']: docs.append(a['body']) true.append(i) # Prep Pipeline pipeline = Pipeline(Cleaner(), [ BoW(), BoW(tokenizer=Overkill), BoW(tokenizer=RAKE), BoW(tokenizer=POS) ]) # Run the pipeline and get the results for i, vecs in enumerate(pipeline(docs)): print('\n----------\n') print('Pipeline: ', pipeline.pipelines[i]) # Feed the resulting vectors into KMeans model = KMeans(n_clusters=10) pred = model.fit_predict(vecs) print('Completeness', metrics.completeness_score(true, pred)) print('Homogeneity', metrics.homogeneity_score(true, pred)) print('Adjusted Mutual Info', metrics.adjusted_mutual_info_score(true, pred)) print('Adjusted Rand', metrics.adjusted_rand_score(true, pred))
mit
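The clustering example above scores each pipeline's K-Means labels against the ground-truth article clusters with four scikit-learn metrics. A small self-contained sketch of just that evaluation step is below; the label vectors are invented, whereas in the example they come from KMeans.fit_predict applied to the BoW vectors.

# Sketch of the evaluation step from the example above: compare predicted
# cluster labels against ground-truth labels. Labels here are made up.
from sklearn import metrics

true = [0, 0, 0, 1, 1, 1, 2, 2, 2]
pred = [0, 0, 1, 1, 1, 1, 2, 2, 0]

print('Completeness', metrics.completeness_score(true, pred))
print('Homogeneity', metrics.homogeneity_score(true, pred))
print('Adjusted Mutual Info', metrics.adjusted_mutual_info_score(true, pred))
print('Adjusted Rand', metrics.adjusted_rand_score(true, pred))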
Akshay0724/scikit-learn
sklearn/linear_model/coordinate_descent.py
3
81531
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Fabian Pedregosa <fabian.pedregosa@inria.fr> # Olivier Grisel <olivier.grisel@ensta.org> # Gael Varoquaux <gael.varoquaux@inria.fr> # # License: BSD 3 clause import sys import warnings from abc import ABCMeta, abstractmethod import numpy as np from scipy import sparse from .base import LinearModel, _pre_fit from ..base import RegressorMixin from .base import _preprocess_data from ..utils import check_array, check_X_y from ..utils.validation import check_random_state from ..model_selection import check_cv from ..externals.joblib import Parallel, delayed from ..externals import six from ..externals.six.moves import xrange from ..utils.extmath import safe_sparse_dot from ..utils.validation import check_is_fitted from ..utils.validation import column_or_1d from ..exceptions import ConvergenceWarning from . import cd_fast ############################################################################### # Paths functions def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True, eps=1e-3, n_alphas=100, normalize=False, copy_X=True): """ Compute the grid of alpha values for elastic net parameter search Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Pass directly as Fortran-contiguous data to avoid unnecessary memory duplication y : ndarray, shape (n_samples,) Target values Xy : array-like, optional Xy = np.dot(X.T, y) that can be precomputed. l1_ratio : float The elastic net mixing parameter, with ``0 < l1_ratio <= 1``. For ``l1_ratio = 0`` the penalty is an L2 penalty. (currently not supported) ``For l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <1``, the penalty is a combination of L1 and L2. eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3`` n_alphas : int, optional Number of alphas along the regularization path fit_intercept : boolean, default True Whether to fit an intercept or not normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. This parameter is ignored when ``fit_intercept`` is set to ``False``. When the regressors are normalized, note that this makes the hyperparameters learnt more robust and almost independent of the number of samples. The same property is not valid for standardized data. However, if you wish to standardize, please use :class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. """ if l1_ratio == 0: raise ValueError("Automatic alpha grid generation is not supported for" " l1_ratio=0. Please supply a grid by providing " "your estimator with the appropriate `alphas=` " "argument.") n_samples = len(y) sparse_center = False if Xy is None: X_sparse = sparse.isspmatrix(X) sparse_center = X_sparse and (fit_intercept or normalize) X = check_array(X, 'csc', copy=(copy_X and fit_intercept and not X_sparse)) if not X_sparse: # X can be touched inplace thanks to the above line X, y, _, _, _ = _preprocess_data(X, y, fit_intercept, normalize, copy=False) Xy = safe_sparse_dot(X.T, y, dense_output=True) if sparse_center: # Workaround to find alpha_max for sparse matrices. # since we should not destroy the sparsity of such matrices. 
_, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept, normalize, return_mean=True) mean_dot = X_offset * np.sum(y) if Xy.ndim == 1: Xy = Xy[:, np.newaxis] if sparse_center: if fit_intercept: Xy -= mean_dot[:, np.newaxis] if normalize: Xy /= X_scale[:, np.newaxis] alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() / (n_samples * l1_ratio)) if alpha_max <= np.finfo(float).resolution: alphas = np.empty(n_alphas) alphas.fill(np.finfo(float).resolution) return alphas return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max), num=n_alphas)[::-1] def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None, precompute='auto', Xy=None, copy_X=True, coef_init=None, verbose=False, return_n_iter=False, positive=False, **params): """Compute Lasso path with coordinate descent The Lasso optimization function varies for mono and multi-outputs. For mono-output tasks it is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 For multi-output tasks it is:: (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. Read more in the :ref:`User Guide <lasso>`. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Pass directly as Fortran-contiguous data to avoid unnecessary memory duplication. If ``y`` is mono-output then ``X`` can be sparse. y : ndarray, shape (n_samples,), or (n_samples, n_outputs) Target values eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3`` n_alphas : int, optional Number of alphas along the regularization path alphas : ndarray, optional List of alphas where to compute the models. If ``None`` alphas are set automatically precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. Xy : array-like, optional Xy = np.dot(X.T, y) that can be precomputed. It is useful only when the Gram matrix is precomputed. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. coef_init : array, shape (n_features, ) | None The initial values of the coefficients. verbose : bool or integer Amount of verbosity. params : kwargs keyword arguments passed to the coordinate descent solver. positive : bool, default False If set to True, forces coefficients to be positive. return_n_iter : bool whether to return the number of iterations or not. Returns ------- alphas : array, shape (n_alphas,) The alphas along the path where models are computed. coefs : array, shape (n_features, n_alphas) or \ (n_outputs, n_features, n_alphas) Coefficients along the path. dual_gaps : array, shape (n_alphas,) The dual gaps at the end of the optimization for each alpha. n_iters : array-like, shape (n_alphas,) The number of iterations taken by the coordinate descent optimizer to reach the specified tolerance for each alpha. Notes ----- See examples/linear_model/plot_lasso_coordinate_descent_path.py for an example. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. Note that in certain cases, the Lars solver may be significantly faster to implement this functionality. 
In particular, linear interpolation can be used to retrieve model coefficients between the values output by lars_path Examples --------- Comparing lasso_path and lars_path with interpolation: >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T >>> y = np.array([1, 2, 3.1]) >>> # Use lasso_path to compute a coefficient path >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5]) >>> print(coef_path) [[ 0. 0. 0.46874778] [ 0.2159048 0.4425765 0.23689075]] >>> # Now use lars_path and 1D linear interpolation to compute the >>> # same path >>> from sklearn.linear_model import lars_path >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso') >>> from scipy import interpolate >>> coef_path_continuous = interpolate.interp1d(alphas[::-1], ... coef_path_lars[:, ::-1]) >>> print(coef_path_continuous([5., 1., .5])) [[ 0. 0. 0.46915237] [ 0.2159048 0.4425765 0.23668876]] See also -------- lars_path Lasso LassoLars LassoCV LassoLarsCV sklearn.decomposition.sparse_encode """ return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas, alphas=alphas, precompute=precompute, Xy=Xy, copy_X=copy_X, coef_init=coef_init, verbose=verbose, positive=positive, return_n_iter=return_n_iter, **params) def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None, precompute='auto', Xy=None, copy_X=True, coef_init=None, verbose=False, return_n_iter=False, positive=False, check_input=True, **params): """Compute elastic net path with coordinate descent The elastic net optimization function varies for mono and multi-outputs. For mono-output tasks it is:: 1 / (2 * n_samples) * ||y - Xw||^2_2 + alpha * l1_ratio * ||w||_1 + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 For multi-output tasks it is:: (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * l1_ratio * ||W||_21 + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. Read more in the :ref:`User Guide <elastic_net>`. Parameters ---------- X : {array-like}, shape (n_samples, n_features) Training data. Pass directly as Fortran-contiguous data to avoid unnecessary memory duplication. If ``y`` is mono-output then ``X`` can be sparse. y : ndarray, shape (n_samples,) or (n_samples, n_outputs) Target values l1_ratio : float, optional float between 0 and 1 passed to elastic net (scaling between l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso eps : float Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3`` n_alphas : int, optional Number of alphas along the regularization path alphas : ndarray, optional List of alphas where to compute the models. If None alphas are set automatically precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. Xy : array-like, optional Xy = np.dot(X.T, y) that can be precomputed. It is useful only when the Gram matrix is precomputed. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. coef_init : array, shape (n_features, ) | None The initial values of the coefficients. verbose : bool or integer Amount of verbosity. params : kwargs keyword arguments passed to the coordinate descent solver. return_n_iter : bool whether to return the number of iterations or not. positive : bool, default False If set to True, forces coefficients to be positive. 
check_input : bool, default True Skip input validation checks, including the Gram matrix when provided assuming there are handled by the caller when check_input=False. Returns ------- alphas : array, shape (n_alphas,) The alphas along the path where models are computed. coefs : array, shape (n_features, n_alphas) or \ (n_outputs, n_features, n_alphas) Coefficients along the path. dual_gaps : array, shape (n_alphas,) The dual gaps at the end of the optimization for each alpha. n_iters : array-like, shape (n_alphas,) The number of iterations taken by the coordinate descent optimizer to reach the specified tolerance for each alpha. (Is returned when ``return_n_iter`` is set to True). Notes ----- See examples/linear_model/plot_lasso_coordinate_descent_path.py for an example. See also -------- MultiTaskElasticNet MultiTaskElasticNetCV ElasticNet ElasticNetCV """ # We expect X and y to be already Fortran ordered when bypassing # checks if check_input: X = check_array(X, 'csc', dtype=[np.float64, np.float32], order='F', copy=copy_X) y = check_array(y, 'csc', dtype=X.dtype.type, order='F', copy=False, ensure_2d=False) if Xy is not None: # Xy should be a 1d contiguous array or a 2D C ordered array Xy = check_array(Xy, dtype=X.dtype.type, order='C', copy=False, ensure_2d=False) n_samples, n_features = X.shape multi_output = False if y.ndim != 1: multi_output = True _, n_outputs = y.shape # MultiTaskElasticNet does not support sparse matrices if not multi_output and sparse.isspmatrix(X): if 'X_offset' in params: # As sparse matrices are not actually centered we need this # to be passed to the CD solver. X_sparse_scaling = params['X_offset'] / params['X_scale'] X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype) else: X_sparse_scaling = np.zeros(n_features, dtype=X.dtype) # X should be normalized and fit already if function is called # from ElasticNet.fit if check_input: X, y, X_offset, y_offset, X_scale, precompute, Xy = \ _pre_fit(X, y, Xy, precompute, normalize=False, fit_intercept=False, copy=False) if alphas is None: # No need to normalize of fit_intercept: it has been done # above alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio, fit_intercept=False, eps=eps, n_alphas=n_alphas, normalize=False, copy_X=False) else: alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered n_alphas = len(alphas) tol = params.get('tol', 1e-4) max_iter = params.get('max_iter', 1000) dual_gaps = np.empty(n_alphas) n_iters = [] rng = check_random_state(params.get('random_state', None)) selection = params.get('selection', 'cyclic') if selection not in ['random', 'cyclic']: raise ValueError("selection should be either random or cyclic.") random = (selection == 'random') if not multi_output: coefs = np.empty((n_features, n_alphas), dtype=X.dtype) else: coefs = np.empty((n_outputs, n_features, n_alphas), dtype=X.dtype) if coef_init is None: coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1], dtype=X.dtype)) else: coef_ = np.asfortranarray(coef_init, dtype=X.dtype) for i, alpha in enumerate(alphas): l1_reg = alpha * l1_ratio * n_samples l2_reg = alpha * (1.0 - l1_ratio) * n_samples if not multi_output and sparse.isspmatrix(X): model = cd_fast.sparse_enet_coordinate_descent( coef_, l1_reg, l2_reg, X.data, X.indices, X.indptr, y, X_sparse_scaling, max_iter, tol, rng, random, positive) elif multi_output: model = cd_fast.enet_coordinate_descent_multi_task( coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random) elif isinstance(precompute, np.ndarray): # We expect precompute to be already Fortran 
ordered when bypassing # checks if check_input: precompute = check_array(precompute, dtype=X.dtype.type, order='C') model = cd_fast.enet_coordinate_descent_gram( coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter, tol, rng, random, positive) elif precompute is False: model = cd_fast.enet_coordinate_descent( coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random, positive) else: raise ValueError("Precompute should be one of True, False, " "'auto' or array-like. Got %r" % precompute) coef_, dual_gap_, eps_, n_iter_ = model coefs[..., i] = coef_ dual_gaps[i] = dual_gap_ n_iters.append(n_iter_) if dual_gap_ > eps_: warnings.warn('Objective did not converge.' + ' You might want' + ' to increase the number of iterations.' + ' Fitting data with very small alpha' + ' may cause precision problems.', ConvergenceWarning) if verbose: if verbose > 2: print(model) elif verbose > 1: print('Path: %03i out of %03i' % (i, n_alphas)) else: sys.stderr.write('.') if return_n_iter: return alphas, coefs, dual_gaps, n_iters return alphas, coefs, dual_gaps ############################################################################### # ElasticNet model class ElasticNet(LinearModel, RegressorMixin): """Linear regression with combined L1 and L2 priors as regularizer. Minimizes the objective function:: 1 / (2 * n_samples) * ||y - Xw||^2_2 + alpha * l1_ratio * ||w||_1 + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 If you are interested in controlling the L1 and L2 penalty separately, keep in mind that this is equivalent to:: a * L1 + b * L2 where:: alpha = a + b and l1_ratio = a / (a + b) The parameter l1_ratio corresponds to alpha in the glmnet R package while alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable, unless you supply your own sequence of alpha. Read more in the :ref:`User Guide <elastic_net>`. Parameters ---------- alpha : float, optional Constant that multiplies the penalty terms. Defaults to 1.0. See the notes for the exact mathematical meaning of this parameter.``alpha = 0`` is equivalent to an ordinary least square, solved by the :class:`LinearRegression` object. For numerical reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised. Given this, you should use the :class:`LinearRegression` object. l1_ratio : float The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2. fit_intercept : bool Whether the intercept should be estimated or not. If ``False``, the data is assumed to be already centered. normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. This parameter is ignored when ``fit_intercept`` is set to ``False``. When the regressors are normalized, note that this makes the hyperparameters learnt more robust and almost independent of the number of samples. The same property is not valid for standardized data. However, if you wish to standardize, please use :class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. precompute : True | False | array-like Whether to use a precomputed Gram matrix to speed up calculations. The Gram matrix can also be passed as argument. For sparse input this option is always ``True`` to preserve sparsity. 
max_iter : int, optional The maximum number of iterations copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. warm_start : bool, optional When set to ``True``, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. positive : bool, optional When set to ``True``, forces the coefficients to be positive. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- coef_ : array, shape (n_features,) | (n_targets, n_features) parameter vector (w in the cost function formula) sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \ (n_targets, n_features) ``sparse_coef_`` is a readonly property derived from ``coef_`` intercept_ : float | array, shape (n_targets,) independent term in decision function. n_iter_ : array-like, shape (n_targets,) number of iterations run by the coordinate descent solver to reach the specified tolerance. Notes ----- To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. See also -------- SGDRegressor: implements elastic net regression with incremental training. SGDClassifier: implements logistic regression with elastic net penalty (``SGDClassifier(loss="log", penalty="elasticnet")``). """ path = staticmethod(enet_path) def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True, normalize=False, precompute=False, max_iter=1000, copy_X=True, tol=1e-4, warm_start=False, positive=False, random_state=None, selection='cyclic'): self.alpha = alpha self.l1_ratio = l1_ratio self.fit_intercept = fit_intercept self.normalize = normalize self.precompute = precompute self.max_iter = max_iter self.copy_X = copy_X self.tol = tol self.warm_start = warm_start self.positive = positive self.random_state = random_state self.selection = selection def fit(self, X, y, check_input=True): """Fit model with coordinate descent. Parameters ----------- X : ndarray or scipy.sparse matrix, (n_samples, n_features) Data y : ndarray, shape (n_samples,) or (n_samples, n_targets) Target check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. Notes ----- Coordinate descent is an algorithm that considers each column of data at a time hence it will automatically convert the X input as a Fortran-contiguous numpy array if necessary. To avoid memory re-allocation it is advised to allocate the initial data in memory directly using that format. """ if self.alpha == 0: warnings.warn("With alpha=0, this algorithm does not converge " "well. You are advised to use the LinearRegression " "estimator", stacklevel=2) if isinstance(self.precompute, six.string_types): raise ValueError('precompute should be one of True, False or' ' array-like. 
Got %r' % self.precompute) # We expect X and y to be float64 or float32 Fortran ordered arrays # when bypassing checks if check_input: X, y = check_X_y(X, y, accept_sparse='csc', order='F', dtype=[np.float64, np.float32], copy=self.copy_X and self.fit_intercept, multi_output=True, y_numeric=True) y = check_array(y, order='F', copy=False, dtype=X.dtype.type, ensure_2d=False) X, y, X_offset, y_offset, X_scale, precompute, Xy = \ _pre_fit(X, y, None, self.precompute, self.normalize, self.fit_intercept, copy=False) if y.ndim == 1: y = y[:, np.newaxis] if Xy is not None and Xy.ndim == 1: Xy = Xy[:, np.newaxis] n_samples, n_features = X.shape n_targets = y.shape[1] if self.selection not in ['cyclic', 'random']: raise ValueError("selection should be either random or cyclic.") if not self.warm_start or not hasattr(self, "coef_"): coef_ = np.zeros((n_targets, n_features), dtype=X.dtype, order='F') else: coef_ = self.coef_ if coef_.ndim == 1: coef_ = coef_[np.newaxis, :] dual_gaps_ = np.zeros(n_targets, dtype=X.dtype) self.n_iter_ = [] for k in xrange(n_targets): if Xy is not None: this_Xy = Xy[:, k] else: this_Xy = None _, this_coef, this_dual_gap, this_iter = \ self.path(X, y[:, k], l1_ratio=self.l1_ratio, eps=None, n_alphas=None, alphas=[self.alpha], precompute=precompute, Xy=this_Xy, fit_intercept=False, normalize=False, copy_X=True, verbose=False, tol=self.tol, positive=self.positive, X_offset=X_offset, X_scale=X_scale, return_n_iter=True, coef_init=coef_[k], max_iter=self.max_iter, random_state=self.random_state, selection=self.selection, check_input=False) coef_[k] = this_coef[:, 0] dual_gaps_[k] = this_dual_gap[0] self.n_iter_.append(this_iter[0]) if n_targets == 1: self.n_iter_ = self.n_iter_[0] self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_]) self._set_intercept(X_offset, y_offset, X_scale) # workaround since _set_intercept will cast self.coef_ into X.dtype self.coef_ = np.asarray(self.coef_, dtype=X.dtype) # return self for chaining fit and predict calls return self @property def sparse_coef_(self): """ sparse representation of the fitted ``coef_`` """ return sparse.csr_matrix(self.coef_) def _decision_function(self, X): """Decision function of the linear model Parameters ---------- X : numpy array or scipy.sparse matrix of shape (n_samples, n_features) Returns ------- T : array, shape (n_samples,) The predicted decision function """ check_is_fitted(self, 'n_iter_') if sparse.isspmatrix(X): return safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ else: return super(ElasticNet, self)._decision_function(X) ############################################################################### # Lasso model class Lasso(ElasticNet): """Linear Model trained with L1 prior as regularizer (aka the Lasso) The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 Technically the Lasso model is optimizing the same objective function as the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty). Read more in the :ref:`User Guide <lasso>`. Parameters ---------- alpha : float, optional Constant that multiplies the L1 term. Defaults to 1.0. ``alpha = 0`` is equivalent to an ordinary least square, solved by the :class:`LinearRegression` object. For numerical reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised. Given this, you should use the :class:`LinearRegression` object. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. 
data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. This parameter is ignored when ``fit_intercept`` is set to ``False``. When the regressors are normalized, note that this makes the hyperparameters learnt more robust and almost independent of the number of samples. The same property is not valid for standardized data. However, if you wish to standardize, please use :class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. precompute : True | False | array-like, default=False Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. For sparse input this option is always ``True`` to preserve sparsity. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. warm_start : bool, optional When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. positive : bool, optional When set to ``True``, forces the coefficients to be positive. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- coef_ : array, shape (n_features,) | (n_targets, n_features) parameter vector (w in the cost function formula) sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \ (n_targets, n_features) ``sparse_coef_`` is a readonly property derived from ``coef_`` intercept_ : float | array, shape (n_targets,) independent term in decision function. n_iter_ : int | array-like, shape (n_targets,) number of iterations run by the coordinate descent solver to reach the specified tolerance. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.Lasso(alpha=0.1) >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2]) Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000, normalize=False, positive=False, precompute=False, random_state=None, selection='cyclic', tol=0.0001, warm_start=False) >>> print(clf.coef_) [ 0.85 0. ] >>> print(clf.intercept_) 0.15 See also -------- lars_path lasso_path LassoLars LassoCV LassoLarsCV sklearn.decomposition.sparse_encode Notes ----- The algorithm used to fit the model is coordinate descent. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. 
""" path = staticmethod(enet_path) def __init__(self, alpha=1.0, fit_intercept=True, normalize=False, precompute=False, copy_X=True, max_iter=1000, tol=1e-4, warm_start=False, positive=False, random_state=None, selection='cyclic'): super(Lasso, self).__init__( alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept, normalize=normalize, precompute=precompute, copy_X=copy_X, max_iter=max_iter, tol=tol, warm_start=warm_start, positive=positive, random_state=random_state, selection=selection) ############################################################################### # Functions for CV with paths functions def _path_residuals(X, y, train, test, path, path_params, alphas=None, l1_ratio=1, X_order=None, dtype=None): """Returns the MSE for the models computed by 'path' Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values train : list of indices The indices of the train set test : list of indices The indices of the test set path : callable function returning a list of models on the path. See enet_path for an example of signature path_params : dictionary Parameters passed to the path function alphas : array-like, optional Array of float that is used for cross-validation. If not provided, computed using 'path' l1_ratio : float, optional float between 0 and 1 passed to ElasticNet (scaling between l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2 X_order : {'F', 'C', or None}, optional The order of the arrays expected by the path function to avoid memory copies dtype : a numpy dtype or None The dtype of the arrays expected by the path function to avoid memory copies """ X_train = X[train] y_train = y[train] X_test = X[test] y_test = y[test] fit_intercept = path_params['fit_intercept'] normalize = path_params['normalize'] if y.ndim == 1: precompute = path_params['precompute'] else: # No Gram variant of multi-task exists right now. # Fall back to default enet_multitask precompute = False X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = \ _pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept, copy=False) path_params = path_params.copy() path_params['Xy'] = Xy path_params['X_offset'] = X_offset path_params['X_scale'] = X_scale path_params['precompute'] = precompute path_params['copy_X'] = False path_params['alphas'] = alphas if 'l1_ratio' in path_params: path_params['l1_ratio'] = l1_ratio # Do the ordering and type casting here, as if it is done in the path, # X is copied and a reference is kept here X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order) alphas, coefs, _ = path(X_train, y_train, **path_params) del X_train, y_train if y.ndim == 1: # Doing this so that it becomes coherent with multioutput. coefs = coefs[np.newaxis, :, :] y_offset = np.atleast_1d(y_offset) y_test = y_test[:, np.newaxis] if normalize: nonzeros = np.flatnonzero(X_scale) coefs[:, nonzeros] /= X_scale[nonzeros][:, np.newaxis] intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs) if sparse.issparse(X_test): n_order, n_features, n_alphas = coefs.shape # Work around for sparse matrices since coefs is a 3-D numpy array. 
coefs_feature_major = np.rollaxis(coefs, 1) feature_2d = np.reshape(coefs_feature_major, (n_features, -1)) X_test_coefs = safe_sparse_dot(X_test, feature_2d) X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1) else: X_test_coefs = safe_sparse_dot(X_test, coefs) residues = X_test_coefs - y_test[:, :, np.newaxis] residues += intercepts this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0) return this_mses class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)): """Base class for iterative model fitting along a regularization path""" @abstractmethod def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, precompute='auto', max_iter=1000, tol=1e-4, copy_X=True, cv=None, verbose=False, n_jobs=1, positive=False, random_state=None, selection='cyclic'): self.eps = eps self.n_alphas = n_alphas self.alphas = alphas self.fit_intercept = fit_intercept self.normalize = normalize self.precompute = precompute self.max_iter = max_iter self.tol = tol self.copy_X = copy_X self.cv = cv self.verbose = verbose self.n_jobs = n_jobs self.positive = positive self.random_state = random_state self.selection = selection def fit(self, X, y): """Fit linear model with coordinate descent Fit is on grid of alphas and best alpha estimated by cross-validation. Parameters ---------- X : {array-like}, shape (n_samples, n_features) Training data. Pass directly as Fortran-contiguous data to avoid unnecessary memory duplication. If y is mono-output, X can be sparse. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values """ y = check_array(y, copy=False, dtype=[np.float64, np.float32], ensure_2d=False) if y.shape[0] == 0: raise ValueError("y has 0 samples: %r" % y) if hasattr(self, 'l1_ratio'): model_str = 'ElasticNet' else: model_str = 'Lasso' if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV): if model_str == 'ElasticNet': model = ElasticNet() else: model = Lasso() if y.ndim > 1 and y.shape[1] > 1: raise ValueError("For multi-task outputs, use " "MultiTask%sCV" % (model_str)) y = column_or_1d(y, warn=True) else: if sparse.isspmatrix(X): raise TypeError("X should be dense but a sparse matrix was" "passed") elif y.ndim == 1: raise ValueError("For mono-task outputs, use " "%sCV" % (model_str)) if model_str == 'ElasticNet': model = MultiTaskElasticNet() else: model = MultiTaskLasso() if self.selection not in ["random", "cyclic"]: raise ValueError("selection should be either random or cyclic.") # This makes sure that there is no duplication in memory. 
# Dealing right with copy_X is important in the following: # Multiple functions touch X and subsamples of X and can induce a # lot of duplication of memory copy_X = self.copy_X and self.fit_intercept if isinstance(X, np.ndarray) or sparse.isspmatrix(X): # Keep a reference to X reference_to_old_X = X # Let us not impose fortran ordering so far: it is # not useful for the cross-validation loop and will be done # by the model fitting itself X = check_array(X, 'csc', copy=False) if sparse.isspmatrix(X): if (hasattr(reference_to_old_X, "data") and not np.may_share_memory(reference_to_old_X.data, X.data)): # X is a sparse matrix and has been copied copy_X = False elif not np.may_share_memory(reference_to_old_X, X): # X has been copied copy_X = False del reference_to_old_X else: X = check_array(X, 'csc', dtype=[np.float64, np.float32], order='F', copy=copy_X) copy_X = False if X.shape[0] != y.shape[0]: raise ValueError("X and y have inconsistent dimensions (%d != %d)" % (X.shape[0], y.shape[0])) # All LinearModelCV parameters except 'cv' are acceptable path_params = self.get_params() if 'l1_ratio' in path_params: l1_ratios = np.atleast_1d(path_params['l1_ratio']) # For the first path, we need to set l1_ratio path_params['l1_ratio'] = l1_ratios[0] else: l1_ratios = [1, ] path_params.pop('cv', None) path_params.pop('n_jobs', None) alphas = self.alphas n_l1_ratio = len(l1_ratios) if alphas is None: alphas = [] for l1_ratio in l1_ratios: alphas.append(_alpha_grid( X, y, l1_ratio=l1_ratio, fit_intercept=self.fit_intercept, eps=self.eps, n_alphas=self.n_alphas, normalize=self.normalize, copy_X=self.copy_X)) else: # Making sure alphas is properly ordered. alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1)) # We want n_alphas to be the number of alphas used for each l1_ratio. n_alphas = len(alphas[0]) path_params.update({'n_alphas': n_alphas}) path_params['copy_X'] = copy_X # We are not computing in parallel, we can modify X # inplace in the folds if not (self.n_jobs == 1 or self.n_jobs is None): path_params['copy_X'] = False # init cross-validation generator cv = check_cv(self.cv) # Compute path for all folds and compute MSE to get the best alpha folds = list(cv.split(X)) best_mse = np.inf # We do a double for loop folded in one, in order to be able to # iterate in parallel on l1_ratio and folds jobs = (delayed(_path_residuals)(X, y, train, test, self.path, path_params, alphas=this_alphas, l1_ratio=this_l1_ratio, X_order='F', dtype=X.dtype.type) for this_l1_ratio, this_alphas in zip(l1_ratios, alphas) for train, test in folds) mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend="threading")(jobs) mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1)) mean_mse = np.mean(mse_paths, axis=1) self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1)) for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas, mean_mse): i_best_alpha = np.argmin(mse_alphas) this_best_mse = mse_alphas[i_best_alpha] if this_best_mse < best_mse: best_alpha = l1_alphas[i_best_alpha] best_l1_ratio = l1_ratio best_mse = this_best_mse self.l1_ratio_ = best_l1_ratio self.alpha_ = best_alpha if self.alphas is None: self.alphas_ = np.asarray(alphas) if n_l1_ratio == 1: self.alphas_ = self.alphas_[0] # Remove duplicate alphas in case alphas is provided. 
else: self.alphas_ = np.asarray(alphas[0]) # Refit the model with the parameters selected common_params = dict((name, value) for name, value in self.get_params().items() if name in model.get_params()) model.set_params(**common_params) model.alpha = best_alpha model.l1_ratio = best_l1_ratio model.copy_X = copy_X model.precompute = False model.fit(X, y) if not hasattr(self, 'l1_ratio'): del self.l1_ratio_ self.coef_ = model.coef_ self.intercept_ = model.intercept_ self.dual_gap_ = model.dual_gap_ self.n_iter_ = model.n_iter_ return self class LassoCV(LinearModelCV, RegressorMixin): """Lasso linear model with iterative fitting along a regularization path The best model is selected by cross-validation. The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 Read more in the :ref:`User Guide <lasso>`. Parameters ---------- eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. n_alphas : int, optional Number of alphas along the regularization path alphas : numpy array, optional List of alphas where to compute the models. If ``None`` alphas are set automatically precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. verbose : bool or integer Amount of verbosity. n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs. positive : bool, optional If positive, restrict regression coefficients to be positive selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. fit_intercept : boolean, default True whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. This parameter is ignored when ``fit_intercept`` is set to ``False``. When the regressors are normalized, note that this makes the hyperparameters learnt more robust and almost independent of the number of samples. The same property is not valid for standardized data. 
However, if you wish to standardize, please use :class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. Attributes ---------- alpha_ : float The amount of penalization chosen by cross validation coef_ : array, shape (n_features,) | (n_targets, n_features) parameter vector (w in the cost function formula) intercept_ : float | array, shape (n_targets,) independent term in decision function. mse_path_ : array, shape (n_alphas, n_folds) mean square error for the test set on each fold, varying alpha alphas_ : numpy array, shape (n_alphas,) The grid of alphas used for fitting dual_gap_ : ndarray, shape () The dual gap at the end of the optimization for the optimal alpha (``alpha_``). n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance for the optimal alpha. Notes ----- See examples/linear_model/plot_lasso_model_selection.py for an example. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. See also -------- lars_path lasso_path LassoLars Lasso LassoLarsCV """ path = staticmethod(lasso_path) def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, precompute='auto', max_iter=1000, tol=1e-4, copy_X=True, cv=None, verbose=False, n_jobs=1, positive=False, random_state=None, selection='cyclic'): super(LassoCV, self).__init__( eps=eps, n_alphas=n_alphas, alphas=alphas, fit_intercept=fit_intercept, normalize=normalize, precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X, cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive, random_state=random_state, selection=selection) class ElasticNetCV(LinearModelCV, RegressorMixin): """Elastic Net model with iterative fitting along a regularization path The best model is selected by cross-validation. Read more in the :ref:`User Guide <elastic_net>`. Parameters ---------- l1_ratio : float or array of floats, optional float between 0 and 1 passed to ElasticNet (scaling between l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2 This parameter can be a list, in which case the different values are tested by cross-validation and the one giving the best prediction score is used. Note that a good choice of list of values for l1_ratio is often to put more values close to 1 (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7, .9, .95, .99, 1]`` eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. n_alphas : int, optional Number of alphas along the regularization path, used for each l1_ratio. alphas : numpy array, optional List of alphas where to compute the models. If None alphas are set automatically precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. 
cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. verbose : bool or integer Amount of verbosity. n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs. positive : bool, optional When set to ``True``, forces the coefficients to be positive. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. This parameter is ignored when ``fit_intercept`` is set to ``False``. When the regressors are normalized, note that this makes the hyperparameters learnt more robust and almost independent of the number of samples. The same property is not valid for standardized data. However, if you wish to standardize, please use :class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. Attributes ---------- alpha_ : float The amount of penalization chosen by cross validation l1_ratio_ : float The compromise between l1 and l2 penalization chosen by cross validation coef_ : array, shape (n_features,) | (n_targets, n_features) Parameter vector (w in the cost function formula), intercept_ : float | array, shape (n_targets, n_features) Independent term in the decision function. mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds) Mean square error for the test set on each fold, varying l1_ratio and alpha. alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas) The grid of alphas used for fitting, for each l1_ratio. n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance for the optimal alpha. Notes ----- See examples/linear_model/plot_lasso_model_selection.py for an example. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. The parameter l1_ratio corresponds to alpha in the glmnet R package while alpha corresponds to the lambda parameter in glmnet. More specifically, the optimization objective is:: 1 / (2 * n_samples) * ||y - Xw||^2_2 + alpha * l1_ratio * ||w||_1 + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 If you are interested in controlling the L1 and L2 penalty separately, keep in mind that this is equivalent to:: a * L1 + b * L2 for:: alpha = a + b and l1_ratio = a / (a + b). 
See also -------- enet_path ElasticNet """ path = staticmethod(enet_path) def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, precompute='auto', max_iter=1000, tol=1e-4, cv=None, copy_X=True, verbose=0, n_jobs=1, positive=False, random_state=None, selection='cyclic'): self.l1_ratio = l1_ratio self.eps = eps self.n_alphas = n_alphas self.alphas = alphas self.fit_intercept = fit_intercept self.normalize = normalize self.precompute = precompute self.max_iter = max_iter self.tol = tol self.cv = cv self.copy_X = copy_X self.verbose = verbose self.n_jobs = n_jobs self.positive = positive self.random_state = random_state self.selection = selection ############################################################################### # Multi Task ElasticNet and Lasso models (with joint feature selection) class MultiTaskElasticNet(Lasso): """Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer The optimization objective for MultiTaskElasticNet is:: (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * l1_ratio * ||W||_21 + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. Read more in the :ref:`User Guide <multi_task_elastic_net>`. Parameters ---------- alpha : float, optional Constant that multiplies the L1/L2 term. Defaults to 1.0 l1_ratio : float The ElasticNet mixing parameter, with 0 < l1_ratio <= 1. For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it is an L2 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. This parameter is ignored when ``fit_intercept`` is set to ``False``. When the regressors are normalized, note that this makes the hyperparameters learnt more robust and almost independent of the number of samples. The same property is not valid for standardized data. However, if you wish to standardize, please use :class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. warm_start : bool, optional When set to ``True``, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- intercept_ : array, shape (n_tasks,) Independent term in decision function. coef_ : array, shape (n_tasks, n_features) Parameter vector (W in the cost function formula). 
If a 1D y is \ passed in at fit (non multi-task usage), ``coef_`` is then a 1D array n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1) >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]]) ... #doctest: +NORMALIZE_WHITESPACE MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True, l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None, selection='cyclic', tol=0.0001, warm_start=False) >>> print(clf.coef_) [[ 0.45663524 0.45612256] [ 0.45663524 0.45612256]] >>> print(clf.intercept_) [ 0.0872422 0.0872422] See also -------- ElasticNet, MultiTaskLasso Notes ----- The algorithm used to fit the model is coordinate descent. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. """ def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True, normalize=False, copy_X=True, max_iter=1000, tol=1e-4, warm_start=False, random_state=None, selection='cyclic'): self.l1_ratio = l1_ratio self.alpha = alpha self.fit_intercept = fit_intercept self.normalize = normalize self.max_iter = max_iter self.copy_X = copy_X self.tol = tol self.warm_start = warm_start self.random_state = random_state self.selection = selection def fit(self, X, y): """Fit MultiTaskLasso model with coordinate descent Parameters ----------- X : ndarray, shape (n_samples, n_features) Data y : ndarray, shape (n_samples, n_tasks) Target Notes ----- Coordinate descent is an algorithm that considers each column of data at a time hence it will automatically convert the X input as a Fortran-contiguous numpy array if necessary. To avoid memory re-allocation it is advised to allocate the initial data in memory directly using that format. 
""" X = check_array(X, dtype=[np.float64, np.float32], order='F', copy=self.copy_X and self.fit_intercept) y = check_array(y, dtype=X.dtype.type, ensure_2d=False) if hasattr(self, 'l1_ratio'): model_str = 'ElasticNet' else: model_str = 'Lasso' if y.ndim == 1: raise ValueError("For mono-task outputs, use %s" % model_str) n_samples, n_features = X.shape _, n_tasks = y.shape if n_samples != y.shape[0]: raise ValueError("X and y have inconsistent dimensions (%d != %d)" % (n_samples, y.shape[0])) X, y, X_offset, y_offset, X_scale = _preprocess_data( X, y, self.fit_intercept, self.normalize, copy=False) if not self.warm_start or self.coef_ is None: self.coef_ = np.zeros((n_tasks, n_features), dtype=X.dtype.type, order='F') l1_reg = self.alpha * self.l1_ratio * n_samples l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory if self.selection not in ['random', 'cyclic']: raise ValueError("selection should be either random or cyclic.") random = (self.selection == 'random') self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \ cd_fast.enet_coordinate_descent_multi_task( self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol, check_random_state(self.random_state), random) self._set_intercept(X_offset, y_offset, X_scale) if self.dual_gap_ > self.eps_: warnings.warn('Objective did not converge, you might want' ' to increase the number of iterations', ConvergenceWarning) # return self for chaining fit and predict calls return self class MultiTaskLasso(MultiTaskElasticNet): """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. Read more in the :ref:`User Guide <multi_task_lasso>`. Parameters ---------- alpha : float, optional Constant that multiplies the L1/L2 term. Defaults to 1.0 fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. This parameter is ignored when ``fit_intercept`` is set to ``False``. When the regressors are normalized, note that this makes the hyperparameters learnt more robust and almost independent of the number of samples. The same property is not valid for standardized data. However, if you wish to standardize, please use :class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. warm_start : bool, optional When set to ``True``, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. 
This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4 random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- coef_ : array, shape (n_tasks, n_features) parameter vector (W in the cost function formula) intercept_ : array, shape (n_tasks,) independent term in decision function. n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.MultiTaskLasso(alpha=0.1) >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]]) MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000, normalize=False, random_state=None, selection='cyclic', tol=0.0001, warm_start=False) >>> print(clf.coef_) [[ 0.89393398 0. ] [ 0.89393398 0. ]] >>> print(clf.intercept_) [ 0.10606602 0.10606602] See also -------- Lasso, MultiTaskElasticNet Notes ----- The algorithm used to fit the model is coordinate descent. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. """ def __init__(self, alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=1000, tol=1e-4, warm_start=False, random_state=None, selection='cyclic'): self.alpha = alpha self.fit_intercept = fit_intercept self.normalize = normalize self.max_iter = max_iter self.copy_X = copy_X self.tol = tol self.warm_start = warm_start self.l1_ratio = 1.0 self.random_state = random_state self.selection = selection class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin): """Multi-task L1/L2 ElasticNet with built-in cross-validation. The optimization objective for MultiTaskElasticNet is:: (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * l1_ratio * ||W||_21 + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. Read more in the :ref:`User Guide <multi_task_lasso>`. Parameters ---------- eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. alphas : array-like, optional List of alphas where to compute the models. If not provided, set automatically. n_alphas : int, optional Number of alphas along the regularization path l1_ratio : float or array of floats The ElasticNet mixing parameter, with 0 < l1_ratio <= 1. For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it is an L2 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2. This parameter can be a list, in which case the different values are tested by cross-validation and the one giving the best prediction score is used. Note that a good choice of list of values for l1_ratio is often to put more values close to 1 (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7, .9, .95, .99, 1]`` fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. This parameter is ignored when ``fit_intercept`` is set to ``False``. When the regressors are normalized, note that this makes the hyperparameters learnt more robust and almost independent of the number of samples. 
The same property is not valid for standardized data. However, if you wish to standardize, please use :class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. verbose : bool or integer Amount of verbosity. n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs. Note that this is used only if multiple values for l1_ratio are given. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- intercept_ : array, shape (n_tasks,) Independent term in decision function. coef_ : array, shape (n_tasks, n_features) Parameter vector (W in the cost function formula). alpha_ : float The amount of penalization chosen by cross validation mse_path_ : array, shape (n_alphas, n_folds) or \ (n_l1_ratio, n_alphas, n_folds) mean square error for the test set on each fold, varying alpha alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas) The grid of alphas used for fitting, for each l1_ratio l1_ratio_ : float best l1_ratio obtained by cross-validation. n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance for the optimal alpha. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.MultiTaskElasticNetCV() >>> clf.fit([[0,0], [1, 1], [2, 2]], ... [[0, 0], [1, 1], [2, 2]]) ... #doctest: +NORMALIZE_WHITESPACE MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001, fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100, n_jobs=1, normalize=False, random_state=None, selection='cyclic', tol=0.0001, verbose=0) >>> print(clf.coef_) [[ 0.52875032 0.46958558] [ 0.52875032 0.46958558]] >>> print(clf.intercept_) [ 0.00166409 0.00166409] See also -------- MultiTaskElasticNet ElasticNetCV MultiTaskLassoCV Notes ----- The algorithm used to fit the model is coordinate descent. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. 
""" path = staticmethod(enet_path) def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, max_iter=1000, tol=1e-4, cv=None, copy_X=True, verbose=0, n_jobs=1, random_state=None, selection='cyclic'): self.l1_ratio = l1_ratio self.eps = eps self.n_alphas = n_alphas self.alphas = alphas self.fit_intercept = fit_intercept self.normalize = normalize self.max_iter = max_iter self.tol = tol self.cv = cv self.copy_X = copy_X self.verbose = verbose self.n_jobs = n_jobs self.random_state = random_state self.selection = selection class MultiTaskLassoCV(LinearModelCV, RegressorMixin): """Multi-task L1/L2 Lasso with built-in cross-validation. The optimization objective for MultiTaskLasso is:: (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. Read more in the :ref:`User Guide <multi_task_lasso>`. Parameters ---------- eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. alphas : array-like, optional List of alphas where to compute the models. If not provided, set automatically. n_alphas : int, optional Number of alphas along the regularization path fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. This parameter is ignored when ``fit_intercept`` is set to ``False``. When the regressors are normalized, note that this makes the hyperparameters learnt more robust and almost independent of the number of samples. The same property is not valid for standardized data. However, if you wish to standardize, please use :class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. max_iter : int, optional The maximum number of iterations. tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. verbose : bool or integer Amount of verbosity. n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs. Note that this is used only if multiple values for l1_ratio are given. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. 
Attributes ---------- intercept_ : array, shape (n_tasks,) Independent term in decision function. coef_ : array, shape (n_tasks, n_features) Parameter vector (W in the cost function formula). alpha_ : float The amount of penalization chosen by cross validation mse_path_ : array, shape (n_alphas, n_folds) mean square error for the test set on each fold, varying alpha alphas_ : numpy array, shape (n_alphas,) The grid of alphas used for fitting. n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance for the optimal alpha. See also -------- MultiTaskElasticNet ElasticNetCV MultiTaskElasticNetCV Notes ----- The algorithm used to fit the model is coordinate descent. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. """ path = staticmethod(lasso_path) def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, max_iter=1000, tol=1e-4, copy_X=True, cv=None, verbose=False, n_jobs=1, random_state=None, selection='cyclic'): super(MultiTaskLassoCV, self).__init__( eps=eps, n_alphas=n_alphas, alphas=alphas, fit_intercept=fit_intercept, normalize=normalize, max_iter=max_iter, tol=tol, copy_X=copy_X, cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state, selection=selection)
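# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). It shows the typical way the cross-validated estimators defined
# above are called. The synthetic data, `make_regression`, and the parameter
# values below are assumptions chosen only for the example.
def _linear_model_cv_usage_sketch():
    import numpy as np
    from sklearn.datasets import make_regression
    from sklearn.linear_model import LassoCV, ElasticNetCV

    X_demo, y_demo = make_regression(n_samples=200, n_features=20,
                                     noise=1.0, random_state=0)
    # Passing X as Fortran-contiguous avoids the extra copy discussed in fit()
    X_demo = np.asfortranarray(X_demo)

    lasso = LassoCV(cv=3).fit(X_demo, y_demo)
    enet = ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1],
                        cv=3).fit(X_demo, y_demo)

    # alpha_ (and l1_ratio_ for ElasticNetCV) hold the values selected by CV
    return lasso.alpha_, enet.alpha_, enet.l1_ratio_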
bsd-3-clause
yanlend/scikit-learn
examples/ensemble/plot_forest_importances.py
168
1793
""" ========================================= Feature importances with forests of trees ========================================= This examples shows the use of forests of trees to evaluate the importance of features on an artificial classification task. The red bars are the feature importances of the forest, along with their inter-trees variability. As expected, the plot suggests that 3 features are informative, while the remaining are not. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import make_classification from sklearn.ensemble import ExtraTreesClassifier # Build a classification task using 3 informative features X, y = make_classification(n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, n_classes=2, random_state=0, shuffle=False) # Build a forest and compute the feature importances forest = ExtraTreesClassifier(n_estimators=250, random_state=0) forest.fit(X, y) importances = forest.feature_importances_ std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0) indices = np.argsort(importances)[::-1] # Print the feature ranking print("Feature ranking:") for f in range(X.shape[1]): print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]])) # Plot the feature importances of the forest plt.figure() plt.title("Feature importances") plt.bar(range(X.shape[1]), importances[indices], color="r", yerr=std[indices], align="center") plt.xticks(range(X.shape[1]), indices) plt.xlim([-1, X.shape[1]]) plt.show()
bsd-3-clause
joegomes/deepchem
contrib/atomicconv/feat/atomicnet_pdbbind_datasets.py
8
4738
""" PDBBind dataset loader. """ from __future__ import print_function from __future__ import division from __future__ import unicode_literals import os import numpy as np import pandas as pd from atomicnet_coordinates import ComplexNeighborListFragmentAtomicCoordinates def load_pdbbind_labels(labels_file): """Loads pdbbind labels as dataframe Parameters ---------- labels_file: str Location of PDBbind datafile. Returns ------- contents_df: pd.DataFrame Dataframe containing contents of PDBbind datafile. """ contents = [] with open(labels_file) as f: for line in f: if line.startswith("#"): continue else: splitline = line.split() if len(splitline) == 8: contents.append(splitline) else: print("Incorrect data format") print(splitline) contents_df = pd.DataFrame( contents, columns=("PDB code", "resolution", "release year", "-logKd/Ki", "Kd/Ki", "ignore-this-field", "reference", "ligand name")) return contents_df def compute_pdbbind_coordinate_features(complex_featurizer, pdb_subdir, pdb_code): """Compute features for a given complex Parameters ---------- complex_featurizer: dc.feat.ComplexFeaturizer Complex featurizer. pdb_subdir: str Location of complex PDB files. pdb_core: str Complex PDB code. Returns ------- feature: Tuple Complex features. """ protein_file = os.path.join(pdb_subdir, "%s_pocket.pdb" % pdb_code) ligand_file = os.path.join(pdb_subdir, "%s_ligand.pdb" % pdb_code) feature = complex_featurizer._featurize_complex( str(ligand_file), str(protein_file)) return feature def load_pdbbind_fragment_coordinates(frag1_num_atoms, frag2_num_atoms, complex_num_atoms, max_num_neighbors, neighbor_cutoff, pdbbind_dir, base_dir, datafile="INDEX_core_data.2013"): """Featurize PDBBind dataset. Parameters ---------- frag1_num_atoms: int Maximum number of atoms in fragment 1. frag2_num_atoms: int Maximum number of atoms in fragment 2. complex_num_atoms: int Maximum number of atoms in complex. max_num_neighbors: int Maximum number of neighbors per atom. neighbor_cutoff: float Interaction cutoff [Angstrom]. pdbbind_dir: str Location of PDBbind datafile. base_dir: str Location for storing featurized dataset. datafile: str Name of PDBbind datafile, optional (Default "INDEX_core_data.2013"). Returns ------- tasks: list PDBbind tasks. dataset: dc.data.DiskDataset PDBbind featurized dataset. transformers: list dc.trans.Transformer objects. """ # Create some directories for analysis # The base_dir holds the results of all analysis if not reload: if os.path.exists(base_dir): shutil.rmtree(base_dir) if not os.path.exists(base_dir): os.makedirs(base_dir) current_dir = os.path.dirname(os.path.realpath(__file__)) #Make directories to store the raw and featurized datasets. 
data_dir = os.path.join(base_dir, "dataset") # Load PDBBind dataset labels_file = os.path.join(pdbbind_dir, datafile) tasks = ["-logKd/Ki"] print("About to load contents.") contents_df = load_pdbbind_labels(labels_file) ids = contents_df["PDB code"].values y = np.array([float(val) for val in contents_df["-logKd/Ki"].values]) # Define featurizers featurizer = ComplexNeighborListFragmentAtomicCoordinates( frag1_num_atoms, frag2_num_atoms, complex_num_atoms, max_num_neighbors, neighbor_cutoff) w = np.ones_like(y) #Currently featurizes with shard_size=1 #Dataset can be reshard: dataset = dataset.reshard(48) for example def shard_generator(): for ind, pdb_code in enumerate(ids): print("Processing %s" % str(pdb_code)) pdb_subdir = os.path.join(pdbbind_dir, pdb_code) computed_feature = compute_pdbbind_coordinate_features( featurizer, pdb_subdir, pdb_code) if computed_feature[0] is None: print("Bad featurization") continue else: X_b = np.reshape(np.array(computed_feature), (1, 9)) y_b = y[ind] w_b = w[ind] y_b = np.reshape(y_b, (1, -1)) w_b = np.reshape(w_b, (1, -1)) yield (X_b, y_b, w_b, pdb_code) dataset = dc.data.DiskDataset.create_dataset( shard_generator(), data_dir=data_dir, tasks=tasks) transformers = [] return tasks, dataset, transformers
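# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# file). The directory paths and atom-count bounds below are placeholders,
# and `deepchem` must be importable as `dc` for the loader above to run
# (the loader also relies on `shutil` and a `reload` flag defined elsewhere
# in the original project).
def _pdbbind_loader_usage_sketch():
    frag1_num_atoms = 140                 # assumed ligand upper bound
    frag2_num_atoms = 821                 # assumed pocket upper bound
    complex_num_atoms = frag1_num_atoms + frag2_num_atoms

    tasks, dataset, transformers = load_pdbbind_fragment_coordinates(
        frag1_num_atoms, frag2_num_atoms, complex_num_atoms,
        max_num_neighbors=12, neighbor_cutoff=12.0,
        pdbbind_dir="/path/to/pdbbind/core_set",   # placeholder
        base_dir="/tmp/pdbbind_featurized")        # placeholder
    return tasks, dataset, transformers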
mit
magicknight/Observer
run.py
1
8980
#!/usr/bin/env python import numpy as np import os from os import walk from get_samples import get_hog_samples from get_classifier import get_classifier from get_location import get_location from output import output from os.path import join from shutil import rmtree from sklearn.externals import joblib as pickle import progressbar # from 5.29, 2014 __author__ = 'Zhihua Liang' __license__ = "GPL" __version__ = "1.0.1" __maintainer__ = "Zhihua Liang" __email__ = "liangzhihua@gmail.com" __status__ = "Development" #define the parameters dim_x = 760 dim_y = 195 dim_z = 240 orientations = 9 target_size = 48 pixels_per_cell = (4, 4) cells_per_block = (3, 3) # not ready to change this value weight_values = (1, 30) scan_window_size = (target_size, target_size) # on pixels out_path = 'result' # output directory training_path = '/home/zhihua/work/object_detector/image/25_50_75_0_1' test_path = '/home/zhihua/work/object_detector/image/25_50_75_2' classifier_name = 'sgd' # options are 'svm', 'sgd' for now classifier_file = 'classifier/sgd.pkl' re_train = False # only sgd get the retrain online_training = True # train on every single image when it is available. verbose = False # print debug message ######################################################### # training ######################################################### # get progress bar for display progress bar = progressbar.ProgressBar(maxval=100, widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()]) # get number of files in training directory number_of_total_files = sum([len(files) for r, d, files in os.walk(training_path)]) number_of_total_files_over_20 = number_of_total_files/100 + 1 file_count = 0 total_training_sample = [] total_training_label = [] if os.path.isfile(classifier_file): #load SVM if there exist trained SVM file. clf = pickle.load(classifier_file) # continue train the model with new data if re_train: print 'get re-training set' for root, dirs, files in walk(training_path): for file_name in files: training_sample, training_label, dummy = get_hog_samples(join(root, file_name), dim_x, dim_z, orientations, pixels_per_cell, cells_per_block, scan_window_size, training=True, verbose=verbose) if online_training: n_positive = np.count_nonzero(training_label) sample_weight = [weight_values[0]]*(len(training_label) - n_positive) + [weight_values[1]]*n_positive if file_count == 0: clf.partial_fit(training_sample, training_label, classes=np.unique(training_label)) # sample_weight=sample_weight) print 'training labels are', np.unique(training_label) else: clf.partial_fit(training_sample, training_label)#, sample_weight=sample_weight) else: total_training_sample = total_training_sample + training_sample total_training_label = total_training_label + training_label file_count += 1 #print 're-Training set contains', len(total_training_label), 'samples' if file_count/number_of_total_files_over_20 == float(file_count)/float(number_of_total_files_over_20): bar.update(file_count/number_of_total_files_over_20) if not online_training: clf.partial_fit(total_training_sample, total_training_label) # WARNING: Only SGD get the # online learning feature. pickle.dump(clf, classifier_file) # if no svm exist, create it and train else: #if no svm file exist, train it clf = get_classifier(classifier_name) #training samples and labels print 'Get training set on', training_path print 'Training on progress.... 
\n\n\n\n' for root, dirs, files in walk(training_path): for file_name in files: training_sample, training_label, dummy = get_hog_samples(join(root, file_name), dim_x, dim_z, orientations, pixels_per_cell, cells_per_block, scan_window_size, training=True, verbose=verbose) if online_training: n_positive = np.count_nonzero(training_label) sample_weight = [weight_values[0]]*(len(training_label) - n_positive) + [weight_values[1]]*n_positive if file_count == 0: clf.partial_fit(training_sample, training_label, classes=np.unique(training_label)) #sample_weight=sample_weight) print 'training labels are', np.unique(training_label) else: clf.partial_fit(training_sample, training_label)#, sample_weight=sample_weight) else: total_training_sample = total_training_sample + training_sample total_training_label = total_training_label + training_label file_count += 1 if file_count/number_of_total_files_over_20 == float(file_count)/float(number_of_total_files_over_20): bar.update(file_count/number_of_total_files_over_20) if not online_training: print '\n Training set contains', len(total_training_label), 'samples' print total_training_sample[0].shape clf.fit(total_training_sample, total_training_label) pickle.dump(clf, classifier_file) bar.finish() ######################################################### # test ######################################################### #remove the previous output if there exist any rmtree(out_path) os.makedirs(out_path) # get the samples from test folder. prediction_list = np.empty([]) for root, dirs, files in walk(test_path): for file_name in files: print '===========================================================================' print file_name test_sample, test_label, lesion_positions = get_hog_samples(join(root, file_name), dim_x, dim_z, orientations, pixels_per_cell, cells_per_block, scan_window_size, training=False, verbose=verbose) print 'Test set contains', len(test_label), 'samples' predict_label = clf.predict(test_sample) print 'Prediction-percentage-error is:', np.mean(predict_label != test_label) print np.where(np.array(test_label) == 1) print np.where(predict_label == 1) #go back to the original image axis label_x = (dim_x - scan_window_size[0])/pixels_per_cell[0]+1 label_y = (dim_z - scan_window_size[1])/pixels_per_cell[1]+1 #n_samples = len(lesion_positions) print 'label number is', label_x*label_y predict_label = predict_label.reshape([label_y, label_x]) y, x = np.where(predict_label[:, :] == 1) predict_lesion_position = np.dstack((x*pixels_per_cell[0]+target_size/2, y*pixels_per_cell[1]+target_size/2))[0] print 'candidate positions are:', predict_lesion_position # find the lesion location if predict_lesion_position.size != 0: position, confidence = get_location(predict_lesion_position, target_size) else: position = [-1, -1] confidence = 1 confidence = (confidence+1)/float(2) # get to the range of LROC analysis print 'predicted location is', position, 'with confidence', confidence lesion = int(file_name.split('lesion_')[-1].split('_')[0]) > 0 truth_x = int(file_name.split('x_')[-1].split('_')[0]) truth_y = int(file_name.split('z_')[-1].split('_')[0]) if lesion: print 'truth position is ', [truth_x, truth_y] else: print 'truth position is ', [-1, -1] # get the density value and projection number as output file name: projection_number = file_name.split('tvp_')[-1].split('_')[0] output_file_name = 'TP_' + projection_number + '.txt' #open out put file with open(join(out_path, output_file_name), 'a') as fid: output(file_name, position, confidence, fid)
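# ---------------------------------------------------------------------------
# Hedged sketch (added for illustration; not part of the original script).
# It isolates the incremental-training pattern used in the loops above:
# `classes` must be supplied on the first partial_fit call only. The feature
# dimension and labels are synthetic assumptions.
def _partial_fit_pattern_sketch():
    import numpy as np
    from sklearn.linear_model import SGDClassifier

    rng = np.random.RandomState(0)
    clf_demo = SGDClassifier(loss='hinge')
    classes = np.array([0, 1])

    for batch in range(5):            # one batch per training image in the real script
        X_batch = rng.randn(100, 20)
        y_batch = rng.randint(0, 2, 100)
        if batch == 0:
            clf_demo.partial_fit(X_batch, y_batch, classes=classes)
        else:
            clf_demo.partial_fit(X_batch, y_batch)
    return clf_demo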
gpl-2.0
jorge2703/scikit-learn
sklearn/utils/multiclass.py
83
12343
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi # # License: BSD 3 clause """ Multi-class / multi-label utility function ========================================== """ from __future__ import division from collections import Sequence from itertools import chain from scipy.sparse import issparse from scipy.sparse.base import spmatrix from scipy.sparse import dok_matrix from scipy.sparse import lil_matrix import numpy as np from ..externals.six import string_types from .validation import check_array from ..utils.fixes import bincount def _unique_multiclass(y): if hasattr(y, '__array__'): return np.unique(np.asarray(y)) else: return set(y) def _unique_indicator(y): return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1]) _FN_UNIQUE_LABELS = { 'binary': _unique_multiclass, 'multiclass': _unique_multiclass, 'multilabel-indicator': _unique_indicator, } def unique_labels(*ys): """Extract an ordered array of unique labels We don't allow: - mix of multilabel and multiclass (single label) targets - mix of label indicator matrix and anything else, because there are no explicit labels) - mix of label indicator matrices of different sizes - mix of string and integer labels At the moment, we also don't allow "multiclass-multioutput" input type. Parameters ---------- *ys : array-likes, Returns ------- out : numpy array of shape [n_unique_labels] An ordered array of unique labels. Examples -------- >>> from sklearn.utils.multiclass import unique_labels >>> unique_labels([3, 5, 5, 5, 7, 7]) array([3, 5, 7]) >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4]) array([1, 2, 3, 4]) >>> unique_labels([1, 2, 10], [5, 11]) array([ 1, 2, 5, 10, 11]) """ if not ys: raise ValueError('No argument has been passed.') # Check that we don't mix label format ys_types = set(type_of_target(x) for x in ys) if ys_types == set(["binary", "multiclass"]): ys_types = set(["multiclass"]) if len(ys_types) > 1: raise ValueError("Mix type of y not allowed, got types %s" % ys_types) label_type = ys_types.pop() # Check consistency for the indicator format if (label_type == "multilabel-indicator" and len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1] for y in ys)) > 1): raise ValueError("Multi-label binary indicator input with " "different numbers of labels") # Get the unique set of labels _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None) if not _unique_labels: raise ValueError("Unknown label type: %s" % repr(ys)) ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys)) # Check that we don't mix string type with number type if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1): raise ValueError("Mix of label input types (string and number)") return np.array(sorted(ys_labels)) def _is_integral_float(y): return y.dtype.kind == 'f' and np.all(y.astype(int) == y) def is_multilabel(y): """ Check if ``y`` is in a multilabel format. Parameters ---------- y : numpy array of shape [n_samples] Target values. Returns ------- out : bool, Return ``True``, if ``y`` is in a multilabel format, else ```False``. 
Examples -------- >>> import numpy as np >>> from sklearn.utils.multiclass import is_multilabel >>> is_multilabel([0, 1, 0, 1]) False >>> is_multilabel([[1], [0, 2], []]) False >>> is_multilabel(np.array([[1, 0], [0, 0]])) True >>> is_multilabel(np.array([[1], [0], [0]])) False >>> is_multilabel(np.array([[1, 0, 0]])) True """ if hasattr(y, '__array__'): y = np.asarray(y) if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1): return False if issparse(y): if isinstance(y, (dok_matrix, lil_matrix)): y = y.tocsr() return (len(y.data) == 0 or np.ptp(y.data) == 0 and (y.dtype.kind in 'biu' or # bool, int, uint _is_integral_float(np.unique(y.data)))) else: labels = np.unique(y) return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint _is_integral_float(labels)) def type_of_target(y): """Determine the type of data indicated by target `y` Parameters ---------- y : array-like Returns ------- target_type : string One of: * 'continuous': `y` is an array-like of floats that are not all integers, and is 1d or a column vector. * 'continuous-multioutput': `y` is a 2d array of floats that are not all integers, and both dimensions are of size > 1. * 'binary': `y` contains <= 2 discrete values and is 1d or a column vector. * 'multiclass': `y` contains more than two discrete values, is not a sequence of sequences, and is 1d or a column vector. * 'multiclass-multioutput': `y` is a 2d array that contains more than two discrete values, is not a sequence of sequences, and both dimensions are of size > 1. * 'multilabel-indicator': `y` is a label indicator matrix, an array of two dimensions with at least two columns, and at most 2 unique values. * 'unknown': `y` is array-like but none of the above, such as a 3d array, sequence of sequences, or an array of non-sequence objects. Examples -------- >>> import numpy as np >>> type_of_target([0.1, 0.6]) 'continuous' >>> type_of_target([1, -1, -1, 1]) 'binary' >>> type_of_target(['a', 'b', 'a']) 'binary' >>> type_of_target([1.0, 2.0]) 'binary' >>> type_of_target([1, 0, 2]) 'multiclass' >>> type_of_target([1.0, 0.0, 3.0]) 'multiclass' >>> type_of_target(['a', 'b', 'c']) 'multiclass' >>> type_of_target(np.array([[1, 2], [3, 1]])) 'multiclass-multioutput' >>> type_of_target([[1, 2]]) 'multiclass-multioutput' >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]])) 'continuous-multioutput' >>> type_of_target(np.array([[0, 1], [1, 1]])) 'multilabel-indicator' """ valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__')) and not isinstance(y, string_types)) if not valid: raise ValueError('Expected array-like (array or non-string sequence), ' 'got %r' % y) if is_multilabel(y): return 'multilabel-indicator' try: y = np.asarray(y) except ValueError: # Known to fail in numpy 1.3 for array of arrays return 'unknown' # The old sequence of sequences format try: if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence) and not isinstance(y[0], string_types)): raise ValueError('You appear to be using a legacy multi-label data' ' representation. 
Sequence of sequences are no' ' longer supported; use a binary array or sparse' ' matrix instead.') except IndexError: pass # Invalid inputs if y.ndim > 2 or (y.dtype == object and len(y) and not isinstance(y.flat[0], string_types)): return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"] if y.ndim == 2 and y.shape[1] == 0: return 'unknown' # [[]] if y.ndim == 2 and y.shape[1] > 1: suffix = "-multioutput" # [[1, 2], [1, 2]] else: suffix = "" # [1, 2, 3] or [[1], [2], [3]] # check float and contains non-integer float values if y.dtype.kind == 'f' and np.any(y != y.astype(int)): # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.] return 'continuous' + suffix if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1): return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]] else: return 'binary' # [1, 2] or [["a"], ["b"]] def _check_partial_fit_first_call(clf, classes=None): """Private helper function for factorizing common classes param logic Estimators that implement the ``partial_fit`` API need to be provided with the list of possible classes at the first call to partial_fit. Subsequent calls to partial_fit should check that ``classes`` is still consistent with a previous value of ``clf.classes_`` when provided. This function returns True if it detects that this was the first call to ``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also set on ``clf``. """ if getattr(clf, 'classes_', None) is None and classes is None: raise ValueError("classes must be passed on the first call " "to partial_fit.") elif classes is not None: if getattr(clf, 'classes_', None) is not None: if not np.all(clf.classes_ == unique_labels(classes)): raise ValueError( "`classes=%r` is not the same as on last call " "to partial_fit, was: %r" % (classes, clf.classes_)) else: # This is the first call to partial_fit clf.classes_ = unique_labels(classes) return True # classes is None and clf.classes_ has already previously been set: # nothing to do return False def class_distribution(y, sample_weight=None): """Compute class priors from multioutput-multiclass target data Parameters ---------- y : array like or sparse matrix of size (n_samples, n_outputs) The labels for each example. sample_weight : array-like of shape = (n_samples,), optional Sample weights. Returns ------- classes : list of size n_outputs of arrays of size (n_classes,) List of classes for each column. n_classes : list of integrs of size n_outputs Number of classes in each column class_prior : list of size n_outputs of arrays of size (n_classes,) Class distribution of each column. 
""" classes = [] n_classes = [] class_prior = [] n_samples, n_outputs = y.shape if issparse(y): y = y.tocsc() y_nnz = np.diff(y.indptr) for k in range(n_outputs): col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]] # separate sample weights for zero and non-zero elements if sample_weight is not None: nz_samp_weight = np.asarray(sample_weight)[col_nonzero] zeros_samp_weight_sum = (np.sum(sample_weight) - np.sum(nz_samp_weight)) else: nz_samp_weight = None zeros_samp_weight_sum = y.shape[0] - y_nnz[k] classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]], return_inverse=True) class_prior_k = bincount(y_k, weights=nz_samp_weight) # An explicit zero was found, combine its wieght with the wieght # of the implicit zeros if 0 in classes_k: class_prior_k[classes_k == 0] += zeros_samp_weight_sum # If an there is an implict zero and it is not in classes and # class_prior, make an entry for it if 0 not in classes_k and y_nnz[k] < y.shape[0]: classes_k = np.insert(classes_k, 0, 0) class_prior_k = np.insert(class_prior_k, 0, zeros_samp_weight_sum) classes.append(classes_k) n_classes.append(classes_k.shape[0]) class_prior.append(class_prior_k / class_prior_k.sum()) else: for k in range(n_outputs): classes_k, y_k = np.unique(y[:, k], return_inverse=True) classes.append(classes_k) n_classes.append(classes_k.shape[0]) class_prior_k = bincount(y_k, weights=sample_weight) class_prior.append(class_prior_k / class_prior_k.sum()) return (classes, n_classes, class_prior)
bsd-3-clause
Aasmi/scikit-learn
sklearn/linear_model/tests/test_passive_aggressive.py
121
6117
import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.base import ClassifierMixin from sklearn.utils import check_random_state from sklearn.datasets import load_iris from sklearn.linear_model import PassiveAggressiveClassifier from sklearn.linear_model import PassiveAggressiveRegressor iris = load_iris() random_state = check_random_state(12) indices = np.arange(iris.data.shape[0]) random_state.shuffle(indices) X = iris.data[indices] y = iris.target[indices] X_csr = sp.csr_matrix(X) class MyPassiveAggressive(ClassifierMixin): def __init__(self, C=1.0, epsilon=0.01, loss="hinge", fit_intercept=True, n_iter=1, random_state=None): self.C = C self.epsilon = epsilon self.loss = loss self.fit_intercept = fit_intercept self.n_iter = n_iter def fit(self, X, y): n_samples, n_features = X.shape self.w = np.zeros(n_features, dtype=np.float64) self.b = 0.0 for t in range(self.n_iter): for i in range(n_samples): p = self.project(X[i]) if self.loss in ("hinge", "squared_hinge"): loss = max(1 - y[i] * p, 0) else: loss = max(np.abs(p - y[i]) - self.epsilon, 0) sqnorm = np.dot(X[i], X[i]) if self.loss in ("hinge", "epsilon_insensitive"): step = min(self.C, loss / sqnorm) elif self.loss in ("squared_hinge", "squared_epsilon_insensitive"): step = loss / (sqnorm + 1.0 / (2 * self.C)) if self.loss in ("hinge", "squared_hinge"): step *= y[i] else: step *= np.sign(y[i] - p) self.w += step * X[i] if self.fit_intercept: self.b += step def project(self, X): return np.dot(X, self.w) + self.b def test_classifier_accuracy(): for data in (X, X_csr): for fit_intercept in (True, False): clf = PassiveAggressiveClassifier(C=1.0, n_iter=30, fit_intercept=fit_intercept, random_state=0) clf.fit(data, y) score = clf.score(data, y) assert_greater(score, 0.79) def test_classifier_partial_fit(): classes = np.unique(y) for data in (X, X_csr): clf = PassiveAggressiveClassifier(C=1.0, fit_intercept=True, random_state=0) for t in range(30): clf.partial_fit(data, y, classes) score = clf.score(data, y) assert_greater(score, 0.79) def test_classifier_refit(): # Classifier can be retrained on different labels and features. 
clf = PassiveAggressiveClassifier().fit(X, y) assert_array_equal(clf.classes_, np.unique(y)) clf.fit(X[:, :-1], iris.target_names[y]) assert_array_equal(clf.classes_, iris.target_names) def test_classifier_correctness(): y_bin = y.copy() y_bin[y != 1] = -1 for loss in ("hinge", "squared_hinge"): clf1 = MyPassiveAggressive(C=1.0, loss=loss, fit_intercept=True, n_iter=2) clf1.fit(X, y_bin) for data in (X, X_csr): clf2 = PassiveAggressiveClassifier(C=1.0, loss=loss, fit_intercept=True, n_iter=2, shuffle=False) clf2.fit(data, y_bin) assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2) def test_classifier_undefined_methods(): clf = PassiveAggressiveClassifier() for meth in ("predict_proba", "predict_log_proba", "transform"): assert_raises(AttributeError, lambda x: getattr(clf, x), meth) def test_regressor_mse(): y_bin = y.copy() y_bin[y != 1] = -1 for data in (X, X_csr): for fit_intercept in (True, False): reg = PassiveAggressiveRegressor(C=1.0, n_iter=50, fit_intercept=fit_intercept, random_state=0) reg.fit(data, y_bin) pred = reg.predict(data) assert_less(np.mean((pred - y_bin) ** 2), 1.7) def test_regressor_partial_fit(): y_bin = y.copy() y_bin[y != 1] = -1 for data in (X, X_csr): reg = PassiveAggressiveRegressor(C=1.0, fit_intercept=True, random_state=0) for t in range(50): reg.partial_fit(data, y_bin) pred = reg.predict(data) assert_less(np.mean((pred - y_bin) ** 2), 1.7) def test_regressor_correctness(): y_bin = y.copy() y_bin[y != 1] = -1 for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"): reg1 = MyPassiveAggressive(C=1.0, loss=loss, fit_intercept=True, n_iter=2) reg1.fit(X, y_bin) for data in (X, X_csr): reg2 = PassiveAggressiveRegressor(C=1.0, loss=loss, fit_intercept=True, n_iter=2, shuffle=False) reg2.fit(data, y_bin) assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2) def test_regressor_undefined_methods(): reg = PassiveAggressiveRegressor() for meth in ("transform",): assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
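# ---------------------------------------------------------------------------
# Hedged worked example (added for illustration; not part of the original
# tests): one hinge-loss passive-aggressive (PA-I) update, spelled out to
# mirror the step computed inside MyPassiveAggressive.fit above. The numbers
# are illustrative only.
def _single_pa_step_example():
    C = 1.0
    w = np.zeros(2)
    x_i, y_i = np.array([1.0, 2.0]), 1.0

    p = np.dot(w, x_i)                        # prediction: 0.0
    loss = max(1 - y_i * p, 0)                # hinge loss: 1.0
    step = min(C, loss / np.dot(x_i, x_i))    # PA-I step: min(1, 1/5) = 0.2
    w += step * y_i * x_i                     # w becomes [0.2, 0.4]
    return w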
bsd-3-clause
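The MyPassiveAggressive reference class above implements the passive-aggressive update rule that the sklearn estimators are tested against; the sketch below isolates a single PA-I hinge-loss step with toy numbers so the step-size formula tau = min(C, loss / ||x||^2) is easier to follow. The helper name pa_hinge_update and the example values are illustrative only, not part of the test suite.

import numpy as np

def pa_hinge_update(w, b, x, y, C=1.0):
    """One PA-I step for the hinge loss: tau = min(C, loss / ||x||^2)."""
    loss = max(1.0 - y * (np.dot(w, x) + b), 0.0)   # hinge loss on this single example
    tau = min(C, loss / np.dot(x, x))               # capped step size (PA-I)
    return w + tau * y * x, b + tau * y             # shift weights and intercept toward x

# One misclassified example pulls the zero-initialised weights toward it.
w, b = np.zeros(2), 0.0
w, b = pa_hinge_update(w, b, np.array([1.0, 2.0]), y=1.0)
print(w, b)   # [0.2 0.4] 0.2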
hugolouzada/ImpeachmentPrediction
Prediction/SplitTrainValid/getCompleteVoteData.py
1
2023
from random import random from DataGathering.getNameList import getNameList from DataGathering.getVoteData import getVoteData from DataTransform import getSpeechSize, getLexicalDiversity, getAverageWordSize, getProperNamePresence, addTopDifferentWordsPresence from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import Normalizer from sklearn.feature_extraction import DictVectorizer import pandas as pd def getCompleteVoteData(addEstadoPartidoOrdem = False,addCalculatedFeatures = False, addTopDifferentWords=False): votes = getVoteData() namesList = getNameList() if addCalculatedFeatures: votes['SpeechSize'] = getSpeechSize(votes) votes['LexicalDiversity'] = getLexicalDiversity(votes) votes['AverageWordSize'] = getAverageWordSize(votes) votes['NamesPresence'] = getProperNamePresence(votes, namesList) normalizer = Normalizer(copy=False) normalizer.fit_transform(votes['SpeechSize']) normalizer.fit_transform(votes['LexicalDiversity']) normalizer.fit_transform(votes['AverageWordSize']) if addTopDifferentWords: addTopDifferentWordsPresence(votes) if addEstadoPartidoOrdem: categoricalValues = votes[['Estado','Partido']] categoricalDict = categoricalValues.T.to_dict().values() vec = DictVectorizer() multiCategory = pd.DataFrame(vec.fit_transform(categoricalDict).toarray()) multiCategory.columns = vec.get_feature_names() votes = pd.concat([votes, multiCategory], axis=1) votes.drop('Discurso', axis=1, inplace=True) votes.drop('Estado', axis=1, inplace=True) votes.drop('Partido', axis=1, inplace=True) if not addEstadoPartidoOrdem: votes.drop('Ordem', axis=1, inplace=True) if (not addCalculatedFeatures) and (not addTopDifferentWords) and (not addEstadoPartidoOrdem): votes['RandomFeature'] = votes['Vote'].map(lambda x: random()) return votes # getCompleteVoteData(addEstadoPartidoOrdem=True)
gpl-3.0
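getCompleteVoteData above one-hot encodes the categorical 'Estado' and 'Partido' columns through DictVectorizer before dropping them; a minimal, self-contained sketch of that pattern follows. The tiny stand-in dataframe is made up, since the real vote data is not shown here, and it uses the same (older) get_feature_names call as the file above.

import pandas as pd
from sklearn.feature_extraction import DictVectorizer

# Hypothetical stand-in for the real vote dataframe.
votes = pd.DataFrame({"Estado": ["SP", "RJ", "SP"], "Partido": ["A", "B", "A"]})

# Same pattern as the function above: rows -> dicts -> one-hot matrix -> back to a dataframe.
records = votes[["Estado", "Partido"]].T.to_dict().values()
vec = DictVectorizer()
one_hot = pd.DataFrame(vec.fit_transform(records).toarray(), columns=vec.get_feature_names())
votes = pd.concat([votes, one_hot], axis=1).drop(["Estado", "Partido"], axis=1)
print(votes)   # columns like 'Estado=SP', 'Partido=A' holding 0/1 indicators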
tapomayukh/projects_in_python
clustering/hmm_taxel_based_foliage.py
1
13968
#!/usr/bin/env python # Online haptic_map implementation import pylab as pyl import numpy as np import matplotlib.pyplot as pp #from enthought.mayavi import mlab import scipy as scp import scipy.ndimage as ni import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3') import rospy import tf import os #import hrl_lib.mayavi2_util as mu import hrl_lib.viz as hv import hrl_lib.util as ut import hrl_lib.transforms as tr import hrl_lib.matplotlib_util as mpu import copy import pickle import optparse import unittest import ghmm import ghmmwrapper import random from hrl_haptic_manipulation_in_clutter_msgs.msg import SkinContact from hrl_haptic_manipulation_in_clutter_msgs.msg import TaxelArray as TaxelArray_Meka #from m3skin_ros.msg import TaxelArray as TaxelArray_Meka from visualization_msgs.msg import Marker from visualization_msgs.msg import MarkerArray class HMM_Model: def __init__(self, Fmat, Foliage_Trials, Trunk_Trials, num_features, num_states): self.F = ghmm.Float() # emission domain of HMM model self.Fmat = Fmat self.Foliage_Trials = Foliage_Trials self.Trunk_Trials = Trunk_Trials self.num_features = num_features self.number_states = num_states # Getting mean / covariance def mean_cov(self, start_Trials, end_Trials): # start_Trials = 0 for Foliage, Foliage_trials for Trunk # end_Trials = Foliage_Trials for Foliage, Foliage_Trials + Trunk_Trials for Trunk # Params mu = {} feature_final_data = {} state = {} Feature = {} for i in range(self.num_features): mu[i] = np.zeros((self.number_states,1)) feature_final_data[i] = [0.0]*self.number_states state[i] = [0.0] Feature[i] = [] i = start_Trials while (i < end_Trials): data_length = len(self.Fmat[i]) feature_length = data_length/self.num_features sample_length = feature_length/self.number_states for k in range(self.num_features): Feature[k] = self.Fmat[i][feature_length*k:feature_length*(k+1)] if i == start_Trials: for k in range(self.num_features): j = 0 while (j < self.number_states): feature_final_data[k][j] = Feature[k][sample_length*j:sample_length*(j+1)] j=j+1 else: for k in range(self.num_features): j = 0 while (j < self.number_states): state[k] = Feature[k][sample_length*j:sample_length*(j+1)] #print np.shape(state_1) #print np.shape(feature_1_final_data[j]) feature_final_data[k][j] = feature_final_data[k][j]+state[k] j=j+1 i = i+1 if self.num_features == 1: cov = np.zeros((self.number_states,1)) else: cov = np.zeros((self.number_states,self.num_features,self.num_features)) for k in range(self.num_features): j = 0 while (j < self.number_states): mu[k][j] = np.mean(feature_final_data[k][j]) j = j+1 if self.num_features == 1: j = 0 while (j < self.number_states): cov[j] = scp.std(feature_final_data[0][j]) j = j+1 elif self.num_features == 2: j = 0 while (j < self.number_states): cov[j] = np.cov(np.array([sum(feature_final_data[0][j],[]),sum(feature_final_data[1][j],[])])) j = j+1 elif self.num_features == 3: j = 0 while (j < self.number_states): cov[j] = np.cov(np.array([sum(feature_final_data[0][j],[]),sum(feature_final_data[1][j],[])]), sum(feature_final_data[2][j])) j = j+1 return mu, cov def calculate_A_B_pi(self, flag): # A - Transition Matrix if self.number_states == 3: A = [[0.2, 0.5, 0.3], [0.0, 0.5, 0.5], [0.0, 0.0, 1.0]] elif self.number_states == 5: A = [[0.2, 0.35, 0.2, 0.15, 0.1], [0.0, 0.2, 0.45, 0.25, 0.1], [0.0, 0.0, 0.2, 0.55, 0.25], [0.0, 0.0, 0.0, 0.2, 0.8], [0.0, 0.0, 0.0, 0.0, 1.0]] elif self.number_states == 10: A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05], [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 
0.05, 0.05, 0.05], [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05], [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05], [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05], [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]] elif self.number_states == 15: A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.15, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.05], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.10, 0.05], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.15, 0.10], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.20, 0.10], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.20], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.50, 0.30], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.40, 0.60], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 1.00]] elif self.number_states == 20: A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 
0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]] # B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma) B = [0.0]*self.number_states if flag == 'Foliage': mu, cov = self.mean_cov(0, self.Foliage_Trials) elif flag == 'Trunk': mu, cov = self.mean_cov(self.Foliage_Trials, (self.Foliage_Trials + self.Trunk_Trials)) if self.num_features == 1: for num_states in range(self.number_states): B[num_states] = [mu[0][num_states][0],cov[num_states][0]] elif self.num_features == 2: for num_states in range(self.number_states): B[num_states] = [[mu[0][num_states][0],mu[1][num_states][0]],[cov[num_states][0][0], cov[num_states][0][1], cov[num_states][1][0], cov[num_states][1][1]]] elif self.num_features == 3: for num_states in range(self.number_states): B[num_states] = [[mu[0][num_states][0],mu[1][num_states][0],mu[2][num_states][0]],[cov[num_states][0][0], cov[num_states][0][1], cov[num_states][0][2], cov[num_states][1][0], cov[num_states][1][1], cov[num_states][1][2], cov[num_states][2][0], cov[num_states][2][1], cov[num_states][2][2]]] # pi - initial probabilities per state if self.number_states == 3: pi = [1./3.] * 3 elif self.number_states == 5: pi = [0.2] * 5 elif self.number_states == 10: pi = [0.1] * 10 elif self.number_states == 15: pi = [1./15.] * 15 elif self.number_states == 20: pi = [0.05] * 20 #print B return A, B, pi def create_model(self, flag): A, B, pi = self.calculate_A_B_pi(flag) # generate models from parameters if self.num_features == 1: model = ghmm.HMMFromMatrices(self.F,ghmm.GaussianDistribution(self.F), A, B, pi) # Will be Trained else: model = ghmm.HMMFromMatrices(self.F,ghmm.MultivariateGaussianDistribution(self.F), A, B, pi) # Will be Trained #print A, B, pi print "Model Created for:", flag #raw_input('Press any key when ready') return model def train(self, model, flag): total_seq_old = [] total_seq = [] total_seq_old = copy.deepcopy(self.Fmat) for i in range((self.Foliage_Trials + self.Trunk_Trials)): total_seq_old[i][:] = sum(total_seq_old[i][:],[]) total_seq = copy.deepcopy(total_seq_old) for i in range((self.Foliage_Trials + self.Trunk_Trials)): seq_length = len(total_seq_old[i]) if self.num_features == 2: j = 0 while j < seq_length/2: total_seq[i][j] = total_seq_old[i][j] total_seq[i][j+1] = total_seq_old[i][j+seq_length/2] j=j+2 elif self.num_features == 3: j = 0 while j < seq_length/3: total_seq[i][j] = total_seq_old[i][j] total_seq[i][j+1] = total_seq_old[i][j+seq_length/3] total_seq[i][j+2] = total_seq_old[i][j+2*seq_length/3] j=j+3 if flag == 'Foliage': start_Trials = 0 end_Trials = self.Foliage_Trials elif flag == 'Trunk': start_Trials = self.Foliage_Trials end_Trials = self.Foliage_Trials + self.Trunk_Trials train_seq = total_seq[start_Trials:end_Trials] #print len(train_seq) #print "Training the HMM Model..." final_ts = ghmm.SequenceSet(self.F,train_seq) model.baumWelch(final_ts) #print "Model Trained: Ready to Collect Data !" 
print "Model Trained for:", flag return model def test(self, model, ts_obj): # Find Viterbi Path final_ts_obj = ghmm.EmissionSequence(self.F,ts_obj) path_obj = model.viterbi(final_ts_obj) return path_obj ######################################################################################################
mit
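HMM_Model above wraps the legacy ghmm bindings; the sketch below shows the same create/train/decode cycle for a two-state, one-feature Gaussian HMM. The transition matrix, (mean, standard deviation) emission pairs, prior, and training sequences are tiny made-up values; in the file above they are estimated from the taxel data instead.

import ghmm

F = ghmm.Float()                       # emission domain, as in HMM_Model.__init__
A = [[0.5, 0.5], [0.0, 1.0]]           # two-state left-to-right transition matrix
B = [[0.0, 1.0], [3.0, 1.0]]           # per-state (mean, standard deviation) pairs
pi = [1.0, 0.0]                        # always start in state 0

model = ghmm.HMMFromMatrices(F, ghmm.GaussianDistribution(F), A, B, pi)

# Train on a couple of toy observation sequences, then decode one of them.
train_seqs = ghmm.SequenceSet(F, [[0.1, 0.2, 2.9, 3.1], [0.0, 0.3, 2.8, 3.2]])
model.baumWelch(train_seqs)
state_path = model.viterbi(ghmm.EmissionSequence(F, [0.1, 0.2, 2.9, 3.1]))
print(state_path)   # (most likely state sequence, log-likelihood)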
B3AU/waveTree
examples/linear_model/plot_bayesian_ridge.py
8
2553
""" ========================= Bayesian Ridge Regression ========================= Computes a Bayesian Ridge Regression on a synthetic dataset. See :ref:`bayesian_ridge_regression` for more information on the regressor. Compared to the OLS (ordinary least squares) estimator, the coefficient weights are slightly shifted toward zeros, which stabilises them. As the prior on the weights is a Gaussian prior, the histogram of the estimated weights is Gaussian. The estimation of the model is done by iteratively maximizing the marginal log-likelihood of the observations. """ print(__doc__) import numpy as np import pylab as pl from scipy import stats from sklearn.linear_model import BayesianRidge, LinearRegression ############################################################################### # Generating simulated data with Gaussian weigthts np.random.seed(0) n_samples, n_features = 100, 100 X = np.random.randn(n_samples, n_features) # Create gaussian data # Create weigts with a precision lambda_ of 4. lambda_ = 4. w = np.zeros(n_features) # Only keep 10 weights of interest relevant_features = np.random.randint(0, n_features, 10) for i in relevant_features: w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_)) # Create noise with a precision alpha of 50. alpha_ = 50. noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples) # Create the target y = np.dot(X, w) + noise ############################################################################### # Fit the Bayesian Ridge Regression and an OLS for comparison clf = BayesianRidge(compute_score=True) clf.fit(X, y) ols = LinearRegression() ols.fit(X, y) ############################################################################### # Plot true weights, estimated weights and histogram of the weights pl.figure(figsize=(6, 5)) pl.title("Weights of the model") pl.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate") pl.plot(w, 'g-', label="Ground truth") pl.plot(ols.coef_, 'r--', label="OLS estimate") pl.xlabel("Features") pl.ylabel("Values of the weights") pl.legend(loc="best", prop=dict(size=12)) pl.figure(figsize=(6, 5)) pl.title("Histogram of the weights") pl.hist(clf.coef_, bins=n_features, log=True) pl.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)), 'ro', label="Relevant features") pl.ylabel("Features") pl.xlabel("Values of the weights") pl.legend(loc="lower left") pl.figure(figsize=(6, 5)) pl.title("Marginal log-likelihood") pl.plot(clf.scores_) pl.ylabel("Score") pl.xlabel("Iterations") pl.show()
bsd-3-clause
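Besides the weight plots in the example above, a fitted BayesianRidge also exposes its estimated noise and weight precisions as the alpha_ and lambda_ attributes, which is a quick way to inspect what the model inferred about the generated data. The sketch below re-creates the synthetic data in condensed form; it is a minimal illustration, not part of the original example.

import numpy as np
from scipy import stats
from sklearn.linear_model import BayesianRidge

# Condensed version of the data generation above: sparse weights plus Gaussian noise.
np.random.seed(0)
n_samples, n_features, lambda_, alpha_ = 100, 100, 4., 50.
X = np.random.randn(n_samples, n_features)
w = np.zeros(n_features)
w[np.random.randint(0, n_features, 10)] = stats.norm.rvs(0, 1. / np.sqrt(lambda_), size=10)
y = np.dot(X, w) + stats.norm.rvs(0, 1. / np.sqrt(alpha_), size=n_samples)

clf = BayesianRidge(compute_score=True).fit(X, y)
print(clf.alpha_, clf.lambda_)   # estimated noise and weight precisions
print(clf.scores_[-1])           # final marginal log-likelihood value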
scikit-hep/uproot
tests/test_issues.py
1
22552
#!/usr/bin/env python # BSD 3-Clause License; see https://github.com/scikit-hep/uproot3/blob/master/LICENSE import sys import pytest import numpy import uproot3 import awkward0 import uproot_methods.classes.TVector3 import uproot_methods.classes.TLorentzVector class Test(object): def test_issue21(self): t = uproot3.open("tests/samples/issue21.root")["nllscan"] ### Explicit recover removed # assert t.array("mH").tolist() == [] # t.recover() assert t["mH"].numbaskets == 1 assert t["mH"].basket_entrystart(0) == 0 assert t["mH"].basket_entrystop(0) == 61 assert t["mH"].basket_numentries(0) == 61 assert t.array("mH").tolist() == [ 124.0, 124.09089660644531, 124.18180084228516, 124.27269744873047, 124.36360168457031, 124.45449829101562, 124.54550170898438, 124.63639831542969, 124.72730255126953, 124.81819915771484, 124.87000274658203, 124.87550354003906, 124.88089752197266, 124.88639831542969, 124.89179992675781, 124.89730072021484, 124.90270233154297, 124.908203125, 124.90910339355469, 124.9135971069336, 124.91909790039062, 124.92449951171875, 124.93000030517578, 124.98739624023438, 124.9906997680664, 124.99349975585938, 124.99590301513672, 124.9977035522461, 124.9990005493164, 124.99970245361328, 125.0, 125.00029754638672, 125.0009994506836, 125.0022964477539, 125.00409698486328, 125.00650024414062, 125.0093002319336, 125.01260375976562, 125.06999969482422, 125.07550048828125, 125.08090209960938, 125.0864028930664, 125.09089660644531, 125.091796875, 125.09729766845703, 125.10269927978516, 125.10820007324219, 125.11360168457031, 125.11910247802734, 125.12449645996094, 125.12999725341797, 125.18180084228516, 125.27269744873047, 125.36360168457031, 125.45449829101562, 125.54550170898438, 125.63639831542969, 125.72730255126953, 125.81819915771484, 125.90910339355469, 126.0 ] def test_issue30(self): uproot3.open("tests/samples/issue30.root") def test_issue31(self): t = uproot3.open("tests/samples/issue31.root")["T"] assert t.array("name").tolist() == [ b"one", b"two", b"three", b"four", b"five" ] def test_issue33(self): h = uproot3.open("tests/samples/issue33.root")["cutflow"] assert h.xlabels == [ "Dijet", "MET", "MuonVeto", "IsoMuonTrackVeto", "ElectronVeto", "IsoElectronTrackVeto", "IsoPionTrackVeto" ] def test_issue38(self): before_hadd = uproot3.open( "tests/samples/issue38a.root")["ntupler/tree"] after_hadd = uproot3.open("tests/samples/issue38b.root")["ntupler/tree"] before = before_hadd.arrays() after = after_hadd.arrays() assert set(before.keys()) assert set(after.keys()) for key in before.keys(): assert before[key].tolist() * 3 == after[key].tolist() def test_issue46(self): t = uproot3.open("tests/samples/issue46.root")["tree"] t["evt"].array(uproot3.asdebug) def test_issue49(self): t = uproot3.open("tests/samples/issue49.root")["nllscan"] t.arrays() def test_issue54(self): h = uproot3.open("tests/samples/hepdata-example.root")["hpx"] assert h._fFunctions[0]._fParent is h def test_issue55(self): withoffsets = uproot3.open( "tests/samples/small-dy-withoffsets.root")["tree"] nooffsets = uproot3.open( "tests/samples/small-dy-nooffsets.root")["tree"] assert numpy.array_equal(withoffsets.array("nJet"), nooffsets.array("nJet")) assert numpy.array_equal(withoffsets.array("nMuon"), nooffsets.array("nMuon")) def equal(left, right): if len(left) != len(right): return False for x, y in zip(left, right): if not numpy.array_equal(x, y): return False return True assert equal(withoffsets.array("Jet_jetId"), nooffsets.array("Jet_jetId")) assert equal(withoffsets.array("Jet_pt"), 
nooffsets.array("Jet_pt")) assert equal(withoffsets.array("MET_pt"), nooffsets.array("MET_pt")) assert equal(withoffsets.array("Muon_charge"), nooffsets.array("Muon_charge")) assert equal(withoffsets.array("Muon_pt"), nooffsets.array("Muon_pt")) assert equal(withoffsets.array("event"), nooffsets.array("event")) def test_issue57(self): tree = uproot3.open("tests/samples/issue57.root")["outtree"] for x in tree["sel_lep"].array(): for y in x: assert isinstance( y, uproot_methods.classes.TLorentzVector. Methods) and isinstance( y._fP, uproot_methods.classes.TVector3.Methods) for x in tree["selJet"].array(): for y in x: assert isinstance( y, uproot_methods.classes.TLorentzVector. Methods) and isinstance( y._fP, uproot_methods.classes.TVector3.Methods) def test_issue60(self): t = uproot3.open("tests/samples/issue60.root")["nllscan"] assert t["status"].numbaskets == 2 assert t["mH"].numbaskets == 3 assert (t["mH"].basket_entrystart(0), t["mH"].basket_entrystart(1), t["mH"].basket_entrystart(2)) == (0, 3990, 7980) assert (t["mH"].basket_entrystop(0), t["mH"].basket_entrystop(1), t["mH"].basket_entrystop(2)) == (3990, 7980, 11535) assert (t["mH"].basket_numentries(0), t["mH"].basket_numentries(1), t["mH"].basket_numentries(2)) == (3990, 3990, 3555) assert t.array("mH")[:10].tolist() == [ 125.3575896071691, 124.75819175713684, 124.79865223661515, 125.13239376420276, 125.19612659731995, 125.33001837818416, 124.93261741760551, 125.02903289132837, 124.65206649938854, 125.50663519903532 ] assert t.array("mH")[-10:].tolist() == [ 125.5150930345707, 125.00248572708085, 124.55838505657864, 125.03766816520313, 125.27765299737514, 124.9976442776121, 124.8339210081154, 124.62415638855144, 125.33988981473144, 124.93384515492096 ] def test_issue63(self): t = uproot3.open("tests/samples/issue63.root")["WtLoop_meta"] assert t["initialState"].array().tolist() == [b"Wt"] assert t["generator"].array().tolist() == [b"PowhegPythia6"] assert t["sampleType"].array().tolist() == [b"Nominal"] assert t["campaign"].array().tolist() == [b"MC16a"] def test_issue64(self): t = uproot3.open("tests/samples/issue64.root")["events/events"] assert t["e_pri"].array().tolist() == [0.00698000006377697] * 500 def test_issue66(self): f = uproot3.open("tests/samples/issue66.root") h, = f.values() assert h.values.tolist() == [ 4814.0, 45.0, 45.0, 25.0, 15.0, 4.0, 0.0, 6.0, 7.0, 5.0, 3.0, 3.0, 6.0, 3.0, 7.0, 5.0, 7.0, 11.0, 9.0, 5.0, 4.0, 10.0, 12.0, 7.0, 10.0, 8.0, 12.0, 11.0, 12.0, 12.0, 14.0, 15.0, 13.0, 14.0, 14.0, 20.0, 20.0, 16.0, 21.0, 22.0, 22.0, 28.0, 25.0, 33.0, 26.0, 21.0, 42.0, 36.0, 43.0, 42.0, 43.0, 39.0, 42.0, 56.0, 67.0, 50.0, 67.0, 71.0, 59.0, 76.0, 73.0, 84.0, 63.0, 76.0, 84.0, 97.0, 91.0, 100.0, 108.0, 121.0, 129.0, 137.0, 127.0, 141.0, 152.0, 147.0, 166.0, 158.0, 166.0, 159.0, 146.0, 176.0, 189.0, 213.0, 212.0, 228.0, 193.0, 232.0, 225.0, 210.0, 211.0, 229.0, 226.0, 237.0, 246.0, 243.0, 265.0, 303.0, 248.0, 302.0, 326.0, 318.0, 340.0, 362.0, 313.0, 366.0, 379.0, 376.0, 423.0, 433.0, 486.0, 486.0, 482.0, 518.0, 548.0, 583.0, 628.0, 705.0, 735.0, 814.0, 852.0, 920.0, 1000.0, 1095.0, 1184.0, 1296.0, 1544.0, 1700.0, 2091.0, 2738.0, 3794.0, 5591.0, 8640.0, 13619.0, 20171.0, 11051.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ] def test_issue70(self): f = 
uproot3.open("tests/samples/issue70.root") assert f.keys() == [] def test_issue74(self): t = uproot3.open("tests/samples/issue74.root")["Events"] assert all( isinstance(x[0], uproot_methods.classes.TVector3.Methods) for x in t.array("bees.xyzPosition")) assert t.array("bees.xyzPosition" )[0][0] == uproot_methods.classes.TVector3.TVector3( 1.0, 2.0, -1.0) def test_issue76(self): t = uproot3.open("tests/samples/issue76.root")["Events"] assert list(t.array("rootStrings")[0]) == [b"2", b"4"] x, y = t.array("rootStrings")[0] assert isinstance(x, uproot3.rootio.TString) def test_issue79(self): t = uproot3.open("tests/samples/issue79.root")["taus"] assert t["pt"].numbaskets == 2 baskets = numpy.concatenate([t["pt"].basket(0), t["pt"].basket(1)]) assert baskets.shape == (t["pt"].numentries, ) assert numpy.array_equal(baskets, t["pt"].array()) def test_issue96(self): t = uproot3.open("tests/samples/issue96.root")["tree"] assert all( isinstance(x, uproot_methods.classes.TLorentzVector.Methods) for x in t.array("jet1P4")) def test_geant4(self): f = uproot3.open("tests/samples/from-geant4.root") arrays = f["Details"].arrays() assert arrays[b"numgood"][0] == 224 assert [len(x) for x in f["HitStrips"].arrays().values() ] == [4808, 4808, 4808] assert sum(f["edep_inner"].values) == 1547 assert sum(sum(x) for x in f["recon_orig"].values) == 141 ### file is too big to include # def test_issue168(self): # t = uproot3.open("tests/samples/issue168.root")["Events"] # a1 = t["MRawEvtData.fHiGainFadcSamples"].array(t["MRawEvtData.fHiGainFadcSamples"].interpretation.speedbump(False), entrystop=4) # assert a1[0]._fArray.shape == (108400,) # a2 = t["MRawEvtData.fHiGainPixId"].array(t["MRawEvtData.fHiGainPixId"].interpretation.speedbump(False)) # assert a2[0]._fArray.shape == (1084,) def test_issue187(self): t = uproot3.open("tests/samples/issue187.root")["fTreeV0"] assert (t.array("fMultiplicity") == -1).all() assert t.array("V0s.fEtaPos")[-3].tolist() == [-0.390625, 0.046875] def test_issue213(self): pytest.importorskip("xxhash") t = uproot3.open("tests/samples/issue213.root")["T"] assert t["fMCHits.fPosition"].array().x.tolist() == [ [], [], [], [], [], [], [], [42.17024612426758, 50.63192367553711], [], [], [], [43.292755126953125], [], [], [], [], [], [], [], [], [42.15415954589844], [41.60139083862305], [42.95103454589844], [], [41.55511474609375], [], [], [], [], [], [], [42.549156188964844], [], [], [], [42.80044174194336, 46.136253356933594], [], [], [], [], [41.58171081542969], [], [], [42.741485595703125], [41.228477478027344], [], [], [], [], [], [], [], [], [], [42.518882751464844], [43.34626388549805], [], [], [43.214759826660156], [], [], [], [], [], [], [42.78463363647461], [], [], [], [], [], [], [], [41.927093505859375], [42.65863037109375], [], [42.66266632080078], [], [], [], [], [], [], [], [], [], [], [41.91042709350586, 41.807674407958984], [], [42.79293441772461], [], [], [], [], [], [], [41.72440719604492], [], [], [41.609615325927734] ] def test_issue232(self): pytest.importorskip("pandas") t = uproot3.open("tests/samples/issue232.root")["fTreeV0"] t.pandas.df( ["V0Hyper.fNsigmaHe3Pos", "V0Hyper.fDcaPos2PrimaryVertex"], flatten=True) def test_issue240(self): pytest.importorskip("pyxrootd") t = uproot3.open( "root://eospublic.cern.ch//eos/root-eos/cms_opendata_2012_nanoaod/Run2012B_DoubleMuParked.root" )["Events"] assert (abs(t.array("nMuon", entrystop=100000)) < 50).all() def test_issue243(self): t = uproot3.open("tests/samples/issue243.root")["triggerList"] for x in t.array("triggerMap", 
entrystop=100): assert all(y == 1.0 for y in x.values()) def test_issue243_new(self): t = uproot3.open("tests/samples/issue243-new.root")["triggerList"] first = t["triggerMap.first"].array() second = t["triggerMap.second"].array() for i in range(t.numentries): x = dict(zip(first[i], second[i])) assert all(y == 1.0 for y in x.values()) def test_issue327(self): uproot3.open("tests/samples/issue327.root")["DstTree"] def test_issue371(self): t = uproot3.open("tests/samples/issue371.root")["Event"] obj = t["DRIFT_0."].array()[0] assert obj._samplerName == b'DRIFT_0' assert obj._n == 1 assert obj._energy[0] == numpy.array([2.3371024], dtype=numpy.float32)[0] def test_issue376_simple(self): f = uproot3.open("tests/samples/from-geant4.root") assert type(f).classname == 'TDirectory' assert f.classname == 'TDirectory' real_class_names = ['TTree'] * 4 + ['TH1D'] * 10 + ['TH2D'] * 5 assert [ classname_two_tuple[1] for classname_two_tuple in f.classnames() ] == real_class_names assert [ class_two_tuple[1].classname for class_two_tuple in f.classes() ] == real_class_names assert [value.classname for value in f.values()] == real_class_names def test_issue376_nested(self): f = uproot3.open("tests/samples/nesteddirs.root") top_level_class_names = ['TDirectory', 'TDirectory'] recursive_class_names = [ 'TDirectory', 'TDirectory', 'TTree', 'TTree', 'TDirectory', 'TTree' ] assert [ classname_two_tuple[1] for classname_two_tuple in f.classnames(recursive=False) ] == top_level_class_names assert [ classname_two_tuple[1] for classname_two_tuple in f.classnames(recursive=True) ] == recursive_class_names assert [ classname_two_tuple[1] for classname_two_tuple in f.allclassnames() ] == recursive_class_names def test_issue367(self): t = uproot3.open("tests/samples/issue367.root")["tree"] assert awkward0.fromiter( t.array("weights.second"))[0].counts.tolist() == [ 1000, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 100, 100, 100, 1 ] def test_issue390(self): pytest.importorskip("pandas") t = uproot3.open("tests/samples/issue390.root")["E"] t.pandas.df("hits.*") t.pandas.df("trks.*") def test_issue399(self): t = uproot3.open("tests/samples/issue399.root")["Event"] a = t["Histos.histograms1D"].array() for i in range(t.numentries): assert [x.title for x in a[i]] == [ b"Primary Hits", b"Primary Loss", b"Energy Loss", b"Primary Hits per Element", b"Primary Loss per Element", b"Energy Loss per Element" ] def test_issue404(self): t = uproot3.open("tests/samples/issue404.root")["Beam"] assert t["Beam.GMAD::BeamBase.beamParticleName"].array().tolist() == [ b"proton" ] def test_issue124_and_followup_issue419_with_pr420(self): f = uproot3.open("tests/samples/issue124.root") branch = f[b'KM3NET_TIMESLICE;1'][b'KM3NET_TIMESLICE'] assert branch.interpretation is None assert 0 == branch.compressedbytes() assert 0 == branch.uncompressedbytes() assert 0 == branch.numbaskets def test_issue429(self): if sys.version_info[0] >= 3: fix = lambda name: name.decode("utf-8") else: fix = lambda name: name file = uproot3.open("tests/samples/issue429.root") tree = file["data_tr"] branch = tree["data_ana_kk"] # FIXME: how can uproot3.interp.auto.interpret *infer* the 4 bytes of padding? 
dtype = [(fix(x._fName), "float32" if type(x).__name__ == "TLeafF" else "int32") for x in branch._fLeaves] array = branch.array(uproot3.asdtype(dtype + [("padding", "S4")])) assert (array["padding"] == b"\xff\xff\xff\xff").all() def test_issue431(self): file = uproot3.open("tests/samples/issue431.root") head = file["Head"] assert head._map_3c_string_2c_string_3e_ == {b'DAQ': b'394', b'PDF': b'4 58', b'XSecFile': b'', b'can': b'0 1027 888.4', b'can_user': b'0.00 1027.00 888.40', b'coord_origin': b'0 0 0', b'cut_in': b'0 0 0 0', b'cut_nu': b'100 1e+08 -1 1', b'cut_primary': b'0 0 0 0', b'cut_seamuon': b'0 0 0 0', b'decay': b'doesnt happen', b'detector': b'NOT', b'drawing': b'Volume', b'end_event': b'', b'genhencut': b'2000 0', b'genvol': b'0 1027 888.4 2.649e+09 100000', b'kcut': b'2', b'livetime': b'0 0', b'model': b'1 2 0 1 12', b'muon_desc_file': b'', b'ngen': b'0.1000E+06', b'norma': b'0 0', b'nuflux': b'0 3 0 0.500E+00 0.000E+00 0.100E+01 0.300E+01', b'physics': b'GENHEN 7.2-220514 181116 1138', b'seed': b'GENHEN 3 305765867 0 0', b'simul': b'JSirene 11012 11/17/18 07', b'sourcemode': b'diffuse', b'spectrum': b'-1.4', b'start_run': b'1', b'target': b'isoscalar', b'usedetfile': b'false', b'xlat_user': b'0.63297', b'xparam': b'OFF', b'zed_user': b'0.00 3450.00'} def test_issue434(self): f = uproot3.open("tests/samples/issue434.root") fromdtype = [("pmt", "u1"), ("tdc", "<u4"), ("tot", "u1")] todtype = [("pmt", "u1"), ("tdc", ">u4"), ("tot", "u1")] tree = f[b'KM3NET_TIMESLICE_L1'][b'KM3NETDAQ::JDAQTimeslice'] superframes = tree[b'vector<KM3NETDAQ::JDAQSuperFrame>'] hits_buffer = superframes[b'vector<KM3NETDAQ::JDAQSuperFrame>.buffer'] hits = hits_buffer.lazyarray( uproot3.asjagged( uproot3.astable( uproot3.asdtype(fromdtype, todtype)), skipbytes=6)) assert 486480 == hits['tdc'][0][0] def test_issue438_accessing_memory_mapped_objects_outside_of_context_raises(self): with uproot3.open("tests/samples/issue434.root") as f: a = f['KM3NET_EVENT']['KM3NET_EVENT']['KM3NETDAQ::JDAQPreamble'].array() b = f['KM3NET_EVENT']['KM3NET_EVENT']['KM3NETDAQ::JDAQPreamble'].lazyarray() assert 4 == len(a[0]) with pytest.raises(IOError): len(b[0]) def test_issue448(self): pytest.importorskip("pyxrootd") f = uproot3.open('root://eospublic.cern.ch//eos/opendata/cms/Run2010B/MuOnia/AOD/Apr21ReReco-v1/0000/02186E3C-D277-E011-8A05-00215E21D516.root') tree = f['Events'] assert len(tree.arrays(entrystop=0)) == 4179 assert len(tree.arrays('recoMuons_muons__RECO.*', entrystop=10)) == 93 @pytest.mark.parametrize("treename, branchtest", [ ('l1CaloTowerEmuTree/L1CaloTowerTree', b'L1CaloTowerTree/L1CaloCluster/phi'), ('l1CaloTowerTree/L1CaloTowerTree', b'L1CaloTowerTree/L1CaloTower/et'), ]) def test_issue447_tree_arrays_omitting_variables(self, treename, branchtest): with uproot3.open("tests/samples/issue447.root") as f: t1 = f[treename] arrays = t1.arrays(recursive=b'/') array_keys = arrays.keys() n_array_vars = len(array_keys) n_tree_vars = sum([len(t1[k].keys()) for k in t1.keys()]) assert n_tree_vars == n_array_vars assert branchtest in array_keys def test_issue447_recursive_provenance(self): expectedKeys = [ 'tree/b1', 'tree/b1/b2', 'tree/b1/b2/b3', 'tree/b1/b2/b3/b4', ] expectedKeys = sorted([k.encode(encoding='UTF-8') for k in expectedKeys]) with uproot3.open('tests/samples/issue447_recursive.root') as f: t1 = f['tree'] arrays = t1.arrays(recursive=b'/') assert sorted(list(arrays.keys())) == expectedKeys def test_issue444_subbranche_lookup_with_slash(self): # Uses same test file as issue #447 with 
uproot3.open("tests/samples/issue447.root") as f: # Access subbranches directly from file assert numpy.all(f['l1CaloTowerEmuTree/L1CaloTowerTree/CaloTP']['nECALTP'].array() == f['l1CaloTowerEmuTree/L1CaloTowerTree/CaloTP/nECALTP'].array()) # Access subbranches from TTree tree = f['l1CaloTowerEmuTree/L1CaloTowerTree'] assert numpy.all(tree['CaloTP']['nECALTP'].array() == tree['CaloTP/nECALTP'].array()) # Test different recursive schemes assert b'CaloTP/nECALTP' in tree.keys(recursive='/') assert b'CaloTP/nECALTP' not in tree.keys(recursive=True) assert b'CaloTP/nECALTP' not in tree.keys(recursive=False) assert b'nECALTP' not in tree.keys(recursive='/') assert b'nECALTP' in tree.keys(recursive=True) assert b'nECALTP' not in tree.keys(recursive=False) # Specify subbranches in iterate for arrays in uproot3.iterate(["tests/samples/issue447.root"], 'l1CaloTowerEmuTree/L1CaloTowerTree', ['CaloTP/nECALTP']): pass
bsd-3-clause
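The regression tests above all follow the same open/lookup/read pattern; the sketch below shows that pattern in isolation, pointing at one of the sample files referenced in the tests (any local ROOT file with a known tree name would work the same way).

import uproot3

# Open a ROOT file, grab a TTree by key, and read branches into arrays.
f = uproot3.open("tests/samples/issue21.root")
tree = f["nllscan"]
mh = tree.array("mH")                    # a single branch as an array
first_rows = tree.arrays(entrystop=100)  # dict of {branch name: array}, first 100 entries
print(tree.numentries, mh[:5])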
plotly/plotly.py
packages/python/chart-studio/chart_studio/tests/test_optional/test_matplotlylib/test_plot_mpl.py
2
1582
""" test_plot_mpl: ============== A module intended for use with Nose. """ from __future__ import absolute_import import _plotly_utils.exceptions from plotly import optional_imports from chart_studio.plotly import plotly as py from unittest import TestCase import pytest matplotlylib = optional_imports.get_module("plotly.matplotlylib") if matplotlylib: import matplotlib.pyplot as plt @pytest.mark.matplotlib class PlotMPLTest(TestCase): def setUp(self): py.sign_in("PlotlyImageTest", "786r5mecv0", plotly_domain="https://plotly.com") def test_update_type_error(self): fig, ax = plt.subplots() ax.plot([1, 2, 3]) update = [] with pytest.raises(_plotly_utils.exceptions.PlotlyGraphObjectError): py.plot_mpl(fig, update=update, filename="nosetests", auto_open=False) def test_update_validation_error(self): fig, ax = plt.subplots() ax.plot([1, 2, 3]) update = {"invalid": "anything"} with pytest.raises(KeyError): py.plot_mpl(fig, update=update, filename="nosetests", auto_open=False) def test_update(self): fig, ax = plt.subplots() ax.plot([1, 2, 3]) title = "new title" update = {"layout": {"title": title}} url = py.plot_mpl(fig, update=update, filename="nosetests", auto_open=False) un = url.replace("https://plotly.com/~", "").split("/")[0] fid = url.replace("https://plotly.com/~", "").split("/")[1] pfig = py.get_figure(un, fid) assert pfig["layout"]["title"]["text"] == title
mit
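The tests above exercise py.plot_mpl with an update dict that is merged into the converted figure before upload; the sketch below shows the same call outside the test harness. The credentials and filename are placeholders, and a valid Chart Studio account is needed for the upload to succeed.

import matplotlib.pyplot as plt
from chart_studio.plotly import plotly as py

py.sign_in("your_username", "your_api_key")   # placeholder credentials

fig, ax = plt.subplots()
ax.plot([1, 2, 3])

# The update dict overrides parts of the generated figure, here the layout title.
update = {"layout": {"title": "new title"}}
url = py.plot_mpl(fig, update=update, filename="mpl-example", auto_open=False)
print(url)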
natanielruiz/android-yolo
jni-build/jni/include/tensorflow/examples/skflow/iris_custom_model.py
5
2554
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Example of Estimator for Iris plant dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from sklearn import cross_validation from sklearn import datasets from sklearn import metrics import tensorflow as tf from tensorflow.contrib import layers from tensorflow.contrib import learn def my_model(features, target): """DNN with three hidden layers, and dropout of 0.1 probability.""" # Convert the target to a one-hot tensor of shape (length of features, 3) and # with a on-value of 1 for each one-hot vector of length 3. target = tf.one_hot(target, 3, 1, 0) # Create three fully connected layers respectively of size 10, 20, and 10 with # each layer having a dropout probability of 0.1. normalizer_fn = layers.dropout normalizer_params = {'keep_prob': 0.9} features = layers.stack(features, layers.fully_connected, [10, 20, 10], normalizer_fn=normalizer_fn, normalizer_params=normalizer_params) # Create two tensors respectively for prediction and loss. prediction, loss = ( tf.contrib.learn.models.logistic_regression(features, target) ) # Create a tensor for training op. train_op = tf.contrib.layers.optimize_loss( loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad', learning_rate=0.1) return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op def main(unused_argv): iris = datasets.load_iris() x_train, x_test, y_train, y_test = cross_validation.train_test_split( iris.data, iris.target, test_size=0.2, random_state=42) classifier = learn.Estimator(model_fn=my_model) classifier.fit(x_train, y_train, steps=1000) y_predicted = classifier.predict(x_test) score = metrics.accuracy_score(y_test, y_predicted['class']) print('Accuracy: {0:f}'.format(score)) if __name__ == '__main__': tf.app.run()
apache-2.0
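The comment in my_model about tf.one_hot(target, 3, 1, 0) is easier to see with concrete numbers; the small numpy stand-in below is purely illustrative and is not the TensorFlow call itself.

import numpy as np

def one_hot(target, depth=3, on_value=1, off_value=0):
    """Numpy stand-in for tf.one_hot(target, depth, on_value, off_value)."""
    out = np.full((len(target), depth), off_value)
    out[np.arange(len(target)), target] = on_value
    return out

print(one_hot([0, 2, 1]))
# [[1 0 0]
#  [0 0 1]
#  [0 1 0]]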
nerdless/lifelines
lifelines/utils/__init__.py
3
34875
# -*- coding: utf-8 -*- from __future__ import print_function, division import warnings from datetime import datetime import numpy as np from numpy.linalg import inv import pandas as pd from pandas import to_datetime class StatError(Exception): def __init__(self, msg): self.msg = msg def __str__(self): return repr(self.msg) def qth_survival_times(q, survival_functions): """ This can be done much better. Parameters: q: a float between 0 and 1. survival_functions: a (n,d) dataframe or numpy array. If dataframe, will return index values (actual times) If numpy array, will return indices. Returns: v: if d==1, returns a float, np.inf if infinity. if d > 1, an DataFrame containing the first times the value was crossed. """ q = pd.Series(q) assert (q <= 1).all() and (0 <= q).all(), 'q must be between 0 and 1' survival_functions = pd.DataFrame(survival_functions) if survival_functions.shape[1] == 1 and q.shape == (1,): return survival_functions.apply(lambda s: qth_survival_time(q[0], s)).ix[0] else: return pd.DataFrame({_q: survival_functions.apply(lambda s: qth_survival_time(_q, s)) for _q in q}) def qth_survival_time(q, survival_function): """ Expects a Pandas series, returns the time when the qth probability is reached. """ if survival_function.iloc[-1] > q: return np.inf v = (survival_function <= q).idxmax(0) return v def median_survival_times(survival_functions): return qth_survival_times(0.5, survival_functions) def group_survival_table_from_events(groups, durations, event_observed, birth_times=None, limit=-1): """ Joins multiple event series together into dataframes. A generalization of `survival_table_from_events` to data with groups. Previously called `group_event_series` pre 0.2.3. Parameters: groups: a (n,) array of individuals' group ids. durations: a (n,) array of durations of each individual event_observed: a (n,) array of event observations, 1 if observed, 0 else. birth_times: a (n,) array of numbers representing when the subject was first observed. A subject's death event is then at [birth times + duration observed]. Normally set to all zeros, but can be positive or negative. Output: - np.array of unique groups - dataframe of removal count data at event_times for each group, column names are 'removed:<group name>' - dataframe of observed count data at event_times for each group, column names are 'observed:<group name>' - dataframe of censored count data at event_times for each group, column names are 'censored:<group name>' Example: #input group_survival_table_from_events(waltonG, waltonT, np.ones_like(waltonT)) #data available in test_suite.py #output [ array(['control', 'miR-137'], dtype=object), removed:control removed:miR-137 event_at 6 0 1 7 2 0 9 0 3 13 0 3 15 0 2 , observed:control observed:miR-137 event_at 6 0 1 7 2 0 9 0 3 13 0 3 15 0 2 , censored:control censored:miR-137 event_at 6 0 0 7 0 0 9 0 0 , ] """ n = np.max(groups.shape) assert n == np.max(durations.shape) == np.max(event_observed.shape), "inputs must be of the same length." if birth_times is None: # Create some birth times birth_times = np.zeros(np.max(durations.shape)) birth_times[:] = np.min(durations) assert n == np.max(birth_times.shape), "inputs must be of the same length." 
groups, durations, event_observed, birth_times = [pd.Series(np.reshape(data, (n,))) for data in [groups, durations, event_observed, birth_times]] unique_groups = groups.unique() for i, group in enumerate(unique_groups): ix = groups == group T = durations[ix] C = event_observed[ix] B = birth_times[ix] group_name = str(group) columns = [event_name + ":" + group_name for event_name in ['removed', 'observed', 'censored', 'entrance', 'at_risk']] if i == 0: data = survival_table_from_events(T, C, B, columns=columns) else: data = data.join(survival_table_from_events(T, C, B, columns=columns), how='outer') data = data.fillna(0) # hmmm pandas its too bad I can't do data.ix[:limit] and leave out the if. if int(limit) != -1: data = data.ix[:limit] return unique_groups, data.filter(like='removed:'), data.filter(like='observed:'), data.filter(like='censored:') def survival_table_from_events(death_times, event_observed, birth_times=None, columns=["removed", "observed", "censored", "entrance", "at_risk"], weights=None): """ Parameters: death_times: (n,) array of event times event_observed: (n,) boolean array, 1 if observed event, 0 is censored event. birth_times: a (n,) array of numbers representing when the subject was first observed. A subject's death event is then at [birth times + duration observed]. If None (default), birth_times are set to be the first observation or 0, which ever is smaller. columns: a 3-length array to call the, in order, removed individuals, observed deaths and censorships. weights: Default None, otherwise (n,1) array. Optional argument to use weights for individuals. Returns: Pandas DataFrame with index as the unique times in event_times. The columns named 'removed' refers to the number of individuals who were removed from the population by the end of the period. The column 'observed' refers to the number of removed individuals who were observed to have died (i.e. not censored.) The column 'censored' is defined as 'removed' - 'observed' (the number of individuals who left the population due to event_observed) Example: removed observed censored entrance at_risk event_at 0 0 0 0 11 11 6 1 1 0 0 11 7 2 2 0 0 10 9 3 3 0 0 8 13 3 3 0 0 5 15 2 2 0 0 2 """ removed, observed, censored, entrance, at_risk = columns death_times = np.asarray(death_times) if birth_times is None: birth_times = min(0, death_times.min()) * np.ones(death_times.shape[0]) else: birth_times = np.asarray(birth_times) if np.any(birth_times > death_times): raise ValueError('birth time must be less than time of death.') # deal with deaths and censorships df = pd.DataFrame(death_times, columns=["event_at"]) df[removed] = 1 if weights is None else weights df[observed] = np.asarray(event_observed) death_table = df.groupby("event_at").sum() death_table[censored] = (death_table[removed] - death_table[observed]).astype(int) # deal with late births births = pd.DataFrame(birth_times, columns=['event_at']) births[entrance] = 1 births_table = births.groupby('event_at').sum() event_table = death_table.join(births_table, how='outer', sort=True).fillna(0) # http://wesmckinney.com/blog/?p=414 event_table[at_risk] = event_table[entrance].cumsum() - event_table[removed].cumsum().shift(1).fillna(0) return event_table.astype(float) def survival_events_from_table(event_table, observed_deaths_col="observed", censored_col="censored"): """ This is the inverse of the function ``survival_table_from_events``. Parameters event_table: a pandas DataFrame with index as the durations (!!) 
and columns "observed" and "censored", referring to the number of individuals that died and were censored at time t. Returns T: a np.array of durations of observation -- one element for each individual in the population. C: a np.array of event observations -- one element for each individual in the population. 1 if observed, 0 else. Ex: The survival table, as a pandas DataFrame: observed censored index 1 1 0 2 0 1 3 1 0 4 1 1 5 0 1 would return T = np.array([ 1., 2., 3., 4., 4., 5.]), C = np.array([ 1., 0., 1., 1., 0., 0.]) """ columns = [observed_deaths_col, censored_col] N = event_table[columns].sum().sum() T = np.empty(N) C = np.empty(N) i = 0 for event_time, row in event_table.iterrows(): n = row[columns].sum() T[i:i + n] = event_time C[i:i + n] = np.r_[np.ones(row[columns[0]]), np.zeros(row[columns[1]])] i += n return T, C def datetimes_to_durations(start_times, end_times, fill_date=datetime.today(), freq='D', dayfirst=False, na_values=None): """ This is a very flexible function for transforming arrays of start_times and end_times to the proper format for lifelines: duration and event observation arrays. Parameters: start_times: an array, series or dataframe of start times. These can be strings, or datetimes. end_times: an array, series or dataframe of end times. These can be strings, or datetimes. These values can be None, or an empty string, which corresponds to censorship. fill_date: the date to use if end_times is a None or empty string. This corresponds to last date of observation. Anything after this date is also censored. Default: datetime.today() freq: the units of time to use. See pandas 'freq'. Default 'D' for days. day_first: convert assuming European-style dates, i.e. day/month/year. na_values : list of values to recognize as NA/NaN. Ex: ['', 'NaT'] Returns: T: a array of floats representing the durations with time units given by freq. C: a boolean array of event observations: 1 if death observed, 0 else. """ fill_date = pd.to_datetime(fill_date) freq_string = 'timedelta64[%s]' % freq start_times = pd.Series(start_times).copy() end_times = pd.Series(end_times).copy() C = ~(pd.isnull(end_times).values | end_times.isin(na_values or [""])) end_times[~C] = fill_date start_times_ = to_datetime(start_times, dayfirst=dayfirst) end_times_ = to_datetime(end_times, dayfirst=dayfirst, coerce=True) deaths_after_cutoff = end_times_ > fill_date C[deaths_after_cutoff] = False T = (end_times_ - start_times_).map(lambda x: x.astype(freq_string).astype(float)) if (T < 0).sum(): warnings.warn("Warning: some values of start_times are after end_times") return T.values, C.values def l1_log_loss(event_times, predicted_event_times, event_observed=None): """ Calculates the l1 log-loss of predicted event times to true event times for *non-censored* individuals only. 1/N \sum_{i} |log(t_i) - log(q_i)| Parameters: event_times: a (n,) array of observed survival times. predicted_event_times: a (n,) array of predicted survival times. event_observed: a (n,) array of censorship flags, 1 if observed, 0 if not. Default None assumes all observed. Returns: l1-log-loss: a scalar """ if event_observed is None: event_observed = np.ones_like(event_times) ix = event_observed.astype(bool) return np.abs(np.log(event_times[ix]) - np.log(predicted_event_times[ix])).mean() def l2_log_loss(event_times, predicted_event_times, event_observed=None): """ Calculates the l2 log-loss of predicted event times to true event times for *non-censored* individuals only. 
1/N \sum_{i} (log(t_i) - log(q_i))**2 Parameters: event_times: a (n,) array of observed survival times. predicted_event_times: a (n,) array of predicted survival times. event_observed: a (n,) array of censorship flags, 1 if observed, 0 if not. Default None assumes all observed. Returns: l2-log-loss: a scalar """ if event_observed is None: event_observed = np.ones_like(event_times) ix = event_observed.astype(bool) return np.power(np.log(event_times[ix]) - np.log(predicted_event_times[ix]), 2).mean() def concordance_index(event_times, predicted_event_times, event_observed=None): """ Calculates the concordance index (C-index) between two series of event times. The first is the real survival times from the experimental data, and the other is the predicted survival times from a model of some kind. The concordance index is a value between 0 and 1 where, 0.5 is the expected result from random predictions, 1.0 is perfect concordance and, 0.0 is perfect anti-concordance (multiply predictions with -1 to get 1.0) Score is usually 0.6-0.7 for survival models. See: Harrell FE, Lee KL, Mark DB. Multivariable prognostic models: issues in developing models, evaluating assumptions and adequacy, and measuring and reducing errors. Statistics in Medicine 1996;15(4):361-87. Parameters: event_times: a (n,) array of observed survival times. predicted_event_times: a (n,) array of predicted survival times. event_observed: a (n,) array of censorship flags, 1 if observed, 0 if not. Default None assumes all observed. Returns: c-index: a value between 0 and 1. """ event_times = np.array(event_times, dtype=float) predicted_event_times = np.array(predicted_event_times, dtype=float) # Allow for (n, 1) or (1, n) arrays if event_times.ndim == 2 and (event_times.shape[0] == 1 or event_times.shape[1] == 1): # Flatten array event_times = event_times.ravel() # Allow for (n, 1) or (1, n) arrays if (predicted_event_times.ndim == 2 and (predicted_event_times.shape[0] == 1 or predicted_event_times.shape[1] == 1)): # Flatten array predicted_event_times = predicted_event_times.ravel() if event_times.shape != predicted_event_times.shape: raise ValueError("Event times and predictions must have the same shape") if event_times.ndim != 1: raise ValueError("Event times can only be 1-dimensional: (n,)") if event_observed is None: event_observed = np.ones(event_times.shape[0], dtype=float) else: if event_observed.shape != event_times.shape: raise ValueError("Observed events must be 1-dimensional of same length as event times") event_observed = np.array(event_observed, dtype=float).ravel() return _concordance_index(event_times, predicted_event_times, event_observed) def coalesce(*args): return next(s for s in args if s is not None) def inv_normal_cdf(p): def AandS_approximation(p): # Formula 26.2.23 from A&S and help from John Cook ;) # http://www.johndcook.com/normal_cdf_inverse.html c_0 = 2.515517 c_1 = 0.802853 c_2 = 0.010328 d_1 = 1.432788 d_2 = 0.189269 d_3 = 0.001308 t = np.sqrt(-2 * np.log(p)) return t - (c_0 + c_1 * t + c_2 * t ** 2) / (1 + d_1 * t + d_2 * t * t + d_3 * t ** 3) if p < 0.5: return -AandS_approximation(p) else: return AandS_approximation(1 - p) def k_fold_cross_validation(fitters, df, duration_col, event_col=None, k=5, evaluation_measure=concordance_index, predictor="predict_median", predictor_kwargs={}): """ Perform cross validation on a dataset. If multiple models are provided, all models will train on each of the k subsets. 
fitter(s): one or several objects which possess a method: fit(self, data, duration_col, event_col) Note that the last two arguments will be given as keyword arguments, and that event_col is optional. The objects must also have the "predictor" method defined below. df: a Pandas dataframe with necessary columns `duration_col` and `event_col`, plus other covariates. `duration_col` refers to the lifetimes of the subjects. `event_col` refers to whether the 'death' events was observed: 1 if observed, 0 else (censored). duration_col: the column in dataframe that contains the subjects lifetimes. event_col: the column in dataframe that contains the subject's death observation. If left as None, assumes all individuals are non-censored. k: the number of folds to perform. n/k data will be withheld for testing on. evaluation_measure: a function that accepts either (event_times, predicted_event_times), or (event_times, predicted_event_times, event_observed) and returns something (could be anything). Default: statistics.concordance_index: (C-index) between two series of event times predictor: a string that matches a prediction method on the fitter instances. For example, "predict_expectation" or "predict_percentile". Default is "predict_median" The interface for the method is: predict(self, data, **optional_kwargs) predictor_kwargs: keyword args to pass into predictor-method. Returns: (k,1) list of scores for each fold. The scores can be anything. """ # Make sure fitters is a list try: fitters = list(fitters) except TypeError: fitters = [fitters] # Each fitter has its own scores fitterscores = [[] for _ in fitters] n, d = df.shape df = df.copy() if event_col is None: event_col = 'E' df[event_col] = 1. df = df.reindex(np.random.permutation(df.index)).sort(event_col) assignments = np.array((n // k + 1) * list(range(1, k + 1))) assignments = assignments[:n] testing_columns = df.columns - [duration_col, event_col] for i in range(1, k + 1): ix = assignments == i training_data = df.ix[~ix] testing_data = df.ix[ix] T_actual = testing_data[duration_col].values E_actual = testing_data[event_col].values X_testing = testing_data[testing_columns] for fitter, scores in zip(fitters, fitterscores): # fit the fitter to the training data fitter.fit(training_data, duration_col=duration_col, event_col=event_col) T_pred = getattr(fitter, predictor)(X_testing, **predictor_kwargs).values try: scores.append(evaluation_measure(T_actual, T_pred, E_actual)) except TypeError: scores.append(evaluation_measure(T_actual, T_pred)) # If a single fitter was given as argument, return a single result if len(fitters) == 1: return fitterscores[0] else: return fitterscores def normalize(X, mean=None, std=None): ''' Normalize X. If mean OR std is None, normalizes X to have mean 0 and std 1. ''' if mean is None or std is None: mean = X.mean(0) std = X.std(0) return (X - mean) / std def unnormalize(X, mean, std): ''' Reverse a normalization. Requires the original mean and standard deviation of the data set. ''' return X * std + mean def epanechnikov_kernel(t, T, bandwidth=1.): M = 0.75 * (1 - ((t - T) / bandwidth) ** 2) M[abs((t - T)) >= bandwidth] = 0 return M def significance_code(p): if p < 0.001: return '***' elif p < 0.01: return '**' elif p < 0.05: return '*' elif p < 0.1: return '.' else: return ' ' def ridge_regression(X, Y, c1=0.0, c2=0.0, offset=None): """ Also known as Tikhonov regularization. 
This solves the minimization problem: min_{beta} ||(beta X - Y)||^2 + c1||beta||^2 + c2||beta - offset||^2 One can find more information here: http://en.wikipedia.org/wiki/Tikhonov_regularization Parameters: X: a (n,d) numpy array Y: a (n,) numpy array c1: a scalar c2: a scalar offset: a (d,) numpy array. Returns: beta_hat: the solution to the minimization problem. V = (X*X^T + (c1+c2)I)^{-1} X^T """ n, d = X.shape X = X.astype(float) penalizer_matrix = (c1 + c2) * np.eye(d) if offset is None: offset = np.zeros((d,)) V_1 = inv(np.dot(X.T, X) + penalizer_matrix) V_2 = (np.dot(X.T, Y) + c2 * offset) beta = np.dot(V_1, V_2) return beta, np.dot(V_1, X.T) def _smart_search(minimizing_function, n, *args): from scipy.optimize import fmin_powell x = np.ones(n) return fmin_powell(minimizing_function, x, args=args, disp=False) def _additive_estimate(events, timeline, _additive_f, _additive_var, reverse): """ Called to compute the Kaplan Meier and Nelson-Aalen estimates. """ if reverse: events = events.sort_index(ascending=False) at_risk = events['entrance'].sum() - events['removed'].cumsum().shift(1).fillna(0) deaths = events['observed'] estimate_ = np.cumsum(_additive_f(at_risk, deaths)).sort_index().shift(-1).fillna(0) var_ = np.cumsum(_additive_var(at_risk, deaths)).sort_index().shift(-1).fillna(0) else: deaths = events['observed'] at_risk = events['at_risk'] estimate_ = np.cumsum(_additive_f(at_risk, deaths)) var_ = np.cumsum(_additive_var(at_risk, deaths)) timeline = sorted(timeline) estimate_ = estimate_.reindex(timeline, method='pad').fillna(0) var_ = var_.reindex(timeline, method='pad') var_.index.name = 'timeline' estimate_.index.name = 'timeline' return estimate_, var_ def _preprocess_inputs(durations, event_observed, timeline, entry): """ Cleans and confirms input to what lifelines expects downstream """ n = len(durations) durations = np.asarray(durations).reshape((n,)) # set to all observed if event_observed is none if event_observed is None: event_observed = np.ones(n, dtype=int) else: event_observed = np.asarray(event_observed).reshape((n,)).copy().astype(int) if entry is not None: entry = np.asarray(entry).reshape((n,)) event_table = survival_table_from_events(durations, event_observed, entry) if timeline is None: timeline = event_table.index.values else: timeline = np.asarray(timeline) return durations, event_observed, timeline.astype(float), entry, event_table def _get_index(X): if isinstance(X, pd.DataFrame): index = list(X.index) else: # If it's not a dataframe, order is up to user index = list(range(X.shape[0])) return index class _BTree(object): """A simple balanced binary order statistic tree to help compute the concordance. When computing the concordance, we know all the values the tree will ever contain. That condition simplifies this tree a lot. It means that instead of crazy AVL/red-black shenanigans we can simply do the following: - Store the final tree in flattened form in an array (so node i's children are 2i+1, 2i+2) - Additionally, store the current size of each subtree in another array with the same indices - To insert a value, just find its index, increment the size of the subtree at that index and propagate - To get the rank of an element, you add up a bunch of subtree counts """ def __init__(self, values): """ Parameters: values: List of sorted (ascending), unique values that will be inserted. 
""" self._tree = self._treeify(values) self._counts = np.zeros_like(self._tree, dtype=int) @staticmethod def _treeify(values): """Convert the np.ndarray `values` into a complete balanced tree. Assumes `values` is sorted ascending. Returns a list `t` of the same length in which t[i] > t[2i+1] and t[i] < t[2i+2] for all i.""" if len(values) == 1: # this case causes problems later return values tree = np.empty_like(values) # Tree indices work as follows: # 0 is the root # 2n+1 is the left child of n # 2n+2 is the right child of n # So we now rearrange `values` into that format... # The first step is to remove the bottom row of leaves, which might not be exactly full last_full_row = int(np.log2(len(values) + 1) - 1) len_ragged_row = len(values) - (2 ** (last_full_row + 1) - 1) if len_ragged_row > 0: bottom_row_ix = np.s_[:2 * len_ragged_row:2] tree[-len_ragged_row:] = values[bottom_row_ix] values = np.delete(values, bottom_row_ix) # Now `values` is length 2**n - 1, so can be packed efficiently into a tree # Last row of nodes is indices 0, 2, ..., 2**n - 2 # Second-last row is indices 1, 5, ..., 2**n - 3 # nth-last row is indices (2**n - 1)::(2**(n+1)) values_start = 0 values_space = 2 values_len = 2 ** last_full_row while values_start < len(values): tree[values_len - 1:2 * values_len - 1] = values[values_start::values_space] values_start += int(values_space / 2) values_space *= 2 values_len = int(values_len / 2) return tree def insert(self, value): """Insert an occurrence of `value` into the btree.""" i = 0 n = len(self._tree) while i < n: cur = self._tree[i] self._counts[i] += 1 if value < cur: i = 2 * i + 1 elif value > cur: i = 2 * i + 2 else: return raise ValueError("Value %s not contained in tree." "Also, the counts are now messed up." % value) def __len__(self): return self._counts[0] def rank(self, value): """Returns the rank and count of the value in the btree.""" i = 0 n = len(self._tree) rank = 0 count = 0 while i < n: cur = self._tree[i] if value < cur: i = 2 * i + 1 continue elif value > cur: rank += self._counts[i] # subtract off the right tree if exists nexti = 2 * i + 2 if nexti < n: rank -= self._counts[nexti] i = nexti continue else: return (rank, count) else: # value == cur count = self._counts[i] lefti = 2 * i + 1 if lefti < n: nleft = self._counts[lefti] count -= nleft rank += nleft righti = lefti + 1 if righti < n: count -= self._counts[righti] return (rank, count) return (rank, count) def _concordance_index(event_times, predicted_event_times, event_observed): """Find the concordance index in n * log(n) time. Assumes the data has been verified by lifelines.utils.concordance_index first. """ # Here's how this works. # # It would be pretty easy to do if we had no censored data and no ties. There, the basic idea # would be to iterate over the cases in order of their true event time (from least to greatest), # while keeping track of a pool of *predicted* event times for all cases previously seen (= all # cases that we know should be ranked lower than the case we're looking at currently). # # If the pool has O(log n) insert and O(log n) RANK (i.e., "how many things in the pool have # value less than x"), then the following algorithm is n log n: # # Sort the times and predictions by time, increasing # n_pairs, n_correct := 0 # pool := {} # for each prediction p: # n_pairs += len(pool) # n_correct += rank(pool, p) # add p to pool # # There are three complications: tied ground truth values, tied predictions, and censored # observations. 
# # - To handle tied true event times, we modify the inner loop to work in *batches* of observations # p_1, ..., p_n whose true event times are tied, and then add them all to the pool # simultaneously at the end. # # - To handle tied predictions, which should each count for 0.5, we switch to # n_correct += min_rank(pool, p) # n_tied += count(pool, p) # # - To handle censored observations, we handle each batch of tied, censored observations just # after the batch of observations that died at the same time (since those censored observations # are comparable all the observations that died at the same time or previously). However, we do # NOT add them to the pool at the end, because they are NOT comparable with any observations # that leave the study afterward--whether or not those observations get censored. died_mask = event_observed.astype(bool) # TODO: is event_times already sorted? That would be nice... died_truth = event_times[died_mask] ix = np.argsort(died_truth) died_truth = died_truth[ix] died_pred = predicted_event_times[died_mask][ix] censored_truth = event_times[~died_mask] ix = np.argsort(censored_truth) censored_truth = censored_truth[ix] censored_pred = predicted_event_times[~died_mask][ix] censored_ix = 0 died_ix = 0 times_to_compare = _BTree(np.unique(died_pred)) num_pairs = 0 num_correct = 0 num_tied = 0 def handle_pairs(truth, pred, first_ix): """ Handle all pairs that exited at the same time as truth[first_ix]. Returns: (pairs, correct, tied, next_ix) new_pairs: The number of new comparisons performed new_correct: The number of comparisons correctly predicted next_ix: The next index that needs to be handled """ next_ix = first_ix while next_ix < len(truth) and truth[next_ix] == truth[first_ix]: next_ix += 1 pairs = len(times_to_compare) * (next_ix - first_ix) correct = 0 tied = 0 for i in range(first_ix, next_ix): rank, count = times_to_compare.rank(pred[i]) correct += rank tied += count return (pairs, correct, tied, next_ix) # we iterate through cases sorted by exit time: # - First, all cases that died at time t0. We add these to the sortedlist of died times. # - Then, all cases that were censored at time t0. We DON'T add these since they are NOT # comparable to subsequent elements. while True: has_more_censored = censored_ix < len(censored_truth) has_more_died = died_ix < len(died_truth) # Should we look at some censored indices next, or died indices? if has_more_censored and (not has_more_died or died_truth[died_ix] > censored_truth[censored_ix]): pairs, correct, tied, next_ix = handle_pairs(censored_truth, censored_pred, censored_ix) censored_ix = next_ix elif has_more_died and (not has_more_censored or died_truth[died_ix] <= censored_truth[censored_ix]): pairs, correct, tied, next_ix = handle_pairs(died_truth, died_pred, died_ix) for pred in died_pred[died_ix:next_ix]: times_to_compare.insert(pred) died_ix = next_ix else: assert not (has_more_died or has_more_censored) break num_pairs += pairs num_correct += correct num_tied += tied return (num_correct + num_tied / 2) / num_pairs def _naive_concordance_index(event_times, predicted_event_times, event_observed): """ Fallback, simpler method to compute concordance. Assumes the data has been verified by lifelines.utils.concordance_index first. 
""" def valid_comparison(time_a, time_b, event_a, event_b): """True if times can be compared.""" if time_a == time_b: # Ties are only informative if exactly one event happened return event_a != event_b elif event_a and event_b: return True elif event_a and time_a < time_b: return True elif event_b and time_b < time_a: return True else: return False def concordance_value(time_a, time_b, pred_a, pred_b): if pred_a == pred_b: # Same as random return 0.5 elif pred_a < pred_b: return (time_a < time_b) or (time_a == time_b and event_a and not event_b) else: # pred_a > pred_b return (time_a > time_b) or (time_a == time_b and not event_a and event_b) paircount = 0.0 csum = 0.0 for a in range(0, len(event_times)): time_a = event_times[a] pred_a = predicted_event_times[a] event_a = event_observed[a] # Don't want to double count for b in range(a + 1, len(event_times)): time_b = event_times[b] pred_b = predicted_event_times[b] event_b = event_observed[b] if valid_comparison(time_a, time_b, event_a, event_b): paircount += 1.0 csum += concordance_value(time_a, time_b, pred_a, pred_b) return csum / paircount
mit
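The ridge_regression helper above computes the closed-form solution beta = (X^T X + (c1 + c2) I)^{-1} (X^T Y + c2 * offset). A minimal, self-contained numpy sketch of that same closed form, independent of lifelines; the function name and test values below are illustrative, not part of the library:

import numpy as np

def ridge_closed_form(X, Y, c1=0.0, c2=0.0, offset=None):
    """Closed-form solution of min_beta ||X beta - Y||^2 + c1||beta||^2 + c2||beta - offset||^2."""
    n, d = X.shape
    if offset is None:
        offset = np.zeros(d)
    # (X^T X + (c1 + c2) I) beta = X^T Y + c2 * offset
    A = X.T.dot(X) + (c1 + c2) * np.eye(d)
    b = X.T.dot(Y) + c2 * offset
    return np.linalg.solve(A, b)

# tiny sanity check against a known coefficient vector
rng = np.random.RandomState(0)
X = rng.randn(50, 3)
beta_true = np.array([1.0, -2.0, 0.5])
Y = X.dot(beta_true) + 0.01 * rng.randn(50)
print(ridge_closed_form(X, Y))            # close to beta_true
print(ridge_closed_form(X, Y, c1=10.0))   # shrunk towards zero by the penalty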
moutai/scikit-learn
sklearn/feature_selection/variance_threshold.py
123
2572
# Author: Lars Buitinck # License: 3-clause BSD import numpy as np from ..base import BaseEstimator from .base import SelectorMixin from ..utils import check_array from ..utils.sparsefuncs import mean_variance_axis from ..utils.validation import check_is_fitted class VarianceThreshold(BaseEstimator, SelectorMixin): """Feature selector that removes all low-variance features. This feature selection algorithm looks only at the features (X), not the desired outputs (y), and can thus be used for unsupervised learning. Read more in the :ref:`User Guide <variance_threshold>`. Parameters ---------- threshold : float, optional Features with a training-set variance lower than this threshold will be removed. The default is to keep all features with non-zero variance, i.e. remove the features that have the same value in all samples. Attributes ---------- variances_ : array, shape (n_features,) Variances of individual features. Examples -------- The following dataset has integer features, two of which are the same in every sample. These are removed with the default setting for threshold:: >>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]] >>> selector = VarianceThreshold() >>> selector.fit_transform(X) array([[2, 0], [1, 4], [1, 1]]) """ def __init__(self, threshold=0.): self.threshold = threshold def fit(self, X, y=None): """Learn empirical variances from X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Sample vectors from which to compute variances. y : any Ignored. This parameter exists only for compatibility with sklearn.pipeline.Pipeline. Returns ------- self """ X = check_array(X, ('csr', 'csc'), dtype=np.float64) if hasattr(X, "toarray"): # sparse matrix _, self.variances_ = mean_variance_axis(X, axis=0) else: self.variances_ = np.var(X, axis=0) if np.all(self.variances_ <= self.threshold): msg = "No feature in X meets the variance threshold {0:.5f}" if X.shape[0] == 1: msg += " (X contains only one sample)" raise ValueError(msg.format(self.threshold)) return self def _get_support_mask(self): check_is_fitted(self, 'variances_') return self.variances_ > self.threshold
bsd-3-clause
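A short usage sketch to complement the docstring example above: for boolean (Bernoulli) features, Var[X] = p(1 - p), so a threshold of .8 * (1 - .8) removes features that take the same value in more than roughly 80% of samples. The toy matrix below is illustrative only:

from sklearn.feature_selection import VarianceThreshold

X = [[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 1, 1], [0, 1, 0], [0, 1, 1]]
selector = VarianceThreshold(threshold=(.8 * (1 - .8)))
print(selector.fit_transform(X))   # the first column (mostly zeros) is removed
print(selector.variances_)         # per-feature variances learned during fit()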
monsteredp/nemesys-qos
nemesys/netgraph.py
9
3506
#!/usr/bin/env python # printing_in_wx.py # from collections import deque from contabyte import Contabyte from pcapper import Pcapper from threading import Thread import math import matplotlib import numpy import socket import time import wx SECONDS = 60 POINTS_PER_SECONDS = 1 SAMPLE_INTERVAL = 0.8 matplotlib.use('WXAgg') from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas from matplotlib.figure import Figure class Updater(Thread): def __init__(self, window, ip, nap): Thread.__init__(self) self._window = window self._ip = ip self._nap = nap maxlen = int(math.ceil(SECONDS * POINTS_PER_SECONDS)) self._samples_down = deque(maxlen=maxlen) self._samples_up = deque(maxlen=maxlen) for i in range (0, maxlen): self._samples_down.append(0) self._samples_up.append(0) self._p = Pcapper(self._ip) self._p.start() def _get_sample(self): self._p.sniff(Contabyte(self._ip, self._nap)) time.sleep(SAMPLE_INTERVAL) self._p.stop_sniff() stats = self._p.get_stats() down = stats.byte_down_all_net * 8.0 / (SAMPLE_INTERVAL * 1000.0) up = -stats.byte_up_all_net * 8.0 / (SAMPLE_INTERVAL * 1000.0) return (down, up) def _update_samples(self): (down, up) = self._get_sample() self._samples_down.popleft() self._samples_down.append(down) self._samples_up.popleft() self._samples_up.append(up) def run(self): while(self._window): try: self._update_samples() wx.CallAfter(self._window.Plot_Data, list(self._samples_down), list(self._samples_up)) time.sleep(1.0 / POINTS_PER_SECONDS - SAMPLE_INTERVAL) except: break self._p.stop() self._p.join() def stop(self): self._window = None class Netgraph(wx.Frame): def __init__(self): wx.Frame.__init__ (self, None, id=wx.ID_ANY, title='Netgraph', size=wx.Size(400, 200), style=wx.DEFAULT_FRAME_STYLE & ~(wx.RESIZE_BORDER | wx.RESIZE_BOX)) self.SetBackgroundColour(wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW)) self.figure = Figure() self.axes = self.figure.add_subplot(111) self.axes.set_xticklabels([]) self.axes.set_ylabel("Kbps") self._min = 0 self._max = 0 t = numpy.arange(-SECONDS, 0, 1.0 / POINTS_PER_SECONDS) self.d, = self.axes.plot(t, numpy.zeros(60 * 1.0 / POINTS_PER_SECONDS), linewidth=2, color='red') self.u, = self.axes.plot(t, numpy.zeros(60 * 1.0 / POINTS_PER_SECONDS), linewidth=2, color='blue') self.canvas = FigCanvas(self, -1, self.figure) sizer = wx.BoxSizer(wx.VERTICAL) sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW) self.SetSizer(sizer) self.Fit() def onExit(self, event=None): self.Destroy() def _check_limits(self, u, d): min = numpy.min(u) max = numpy.max(d) if (min < self._min or max > self._max): self._min = min self._max = max self.axes.set_ylim(self._min - 50, self._max + 50) def Plot_Data(self, d, u): self._check_limits(u, d) self.d.set_ydata(d) self.u.set_ydata(u) self.canvas.draw() if __name__ == '__main__': app = wx.PySimpleApp() fig = Netgraph() fig.Show() s = socket.socket(socket.AF_INET) s.connect(('www.fub.it', 80)) ip = s.getsockname()[0] s.close() nap = '193.104.137.133' u = Updater(fig, ip, nap) u.start() app.MainLoop() u.stop() u.join()
gpl-3.0
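A side note on the AnalogData ring buffer above: collections.deque already discards elements automatically once maxlen is reached, so the manual pop()/appendleft() bookkeeping in addToBuf is only needed if you want newest-first ordering. A minimal sketch of the built-in behaviour:

from collections import deque

# Appending to a full deque(maxlen=...) silently drops the element at the
# opposite end, giving a fixed-size ring buffer without extra code.
samples = deque([0.0] * 5, maxlen=5)
for v in (1.0, 2.0, 3.0):
    samples.append(v)          # the oldest value falls off the left end
print(list(samples))           # [0.0, 0.0, 1.0, 2.0, 3.0]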
HolgerPeters/scikit-learn
benchmarks/bench_glmnet.py
111
3890
""" To run this, you'll need to have installed. * glmnet-python * scikit-learn (of course) Does two benchmarks First, we fix a training set and increase the number of samples. Then we plot the computation time as function of the number of samples. In the second benchmark, we increase the number of dimensions of the training set. Then we plot the computation time as function of the number of dimensions. In both cases, only 10% of the features are informative. """ import numpy as np import gc from time import time from sklearn.datasets.samples_generator import make_regression alpha = 0.1 # alpha = 0.01 def rmse(a, b): return np.sqrt(np.mean((a - b) ** 2)) def bench(factory, X, Y, X_test, Y_test, ref_coef): gc.collect() # start time tstart = time() clf = factory(alpha=alpha).fit(X, Y) delta = (time() - tstart) # stop time print("duration: %0.3fs" % delta) print("rmse: %f" % rmse(Y_test, clf.predict(X_test))) print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean()) return delta if __name__ == '__main__': from glmnet.elastic_net import Lasso as GlmnetLasso from sklearn.linear_model import Lasso as ScikitLasso # Delayed import of matplotlib.pyplot import matplotlib.pyplot as plt scikit_results = [] glmnet_results = [] n = 20 step = 500 n_features = 1000 n_informative = n_features / 10 n_test_samples = 1000 for i in range(1, n + 1): print('==================') print('Iteration %s of %s' % (i, n)) print('==================') X, Y, coef_ = make_regression( n_samples=(i * step) + n_test_samples, n_features=n_features, noise=0.1, n_informative=n_informative, coef=True) X_test = X[-n_test_samples:] Y_test = Y[-n_test_samples:] X = X[:(i * step)] Y = Y[:(i * step)] print("benchmarking scikit-learn: ") scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_)) print("benchmarking glmnet: ") glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_)) plt.clf() xx = range(0, n * step, step) plt.title('Lasso regression on sample dataset (%d features)' % n_features) plt.plot(xx, scikit_results, 'b-', label='scikit-learn') plt.plot(xx, glmnet_results, 'r-', label='glmnet') plt.legend() plt.xlabel('number of samples to classify') plt.ylabel('Time (s)') plt.show() # now do a benchmark where the number of points is fixed # and the variable is the number of features scikit_results = [] glmnet_results = [] n = 20 step = 100 n_samples = 500 for i in range(1, n + 1): print('==================') print('Iteration %02d of %02d' % (i, n)) print('==================') n_features = i * step n_informative = n_features / 10 X, Y, coef_ = make_regression( n_samples=(i * step) + n_test_samples, n_features=n_features, noise=0.1, n_informative=n_informative, coef=True) X_test = X[-n_test_samples:] Y_test = Y[-n_test_samples:] X = X[:n_samples] Y = Y[:n_samples] print("benchmarking scikit-learn: ") scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_)) print("benchmarking glmnet: ") glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_)) xx = np.arange(100, 100 + n * step, step) plt.figure('scikit-learn vs. glmnet benchmark results') plt.title('Regression in high dimensional spaces (%d samples)' % n_samples) plt.plot(xx, scikit_results, 'b-', label='scikit-learn') plt.plot(xx, glmnet_results, 'r-', label='glmnet') plt.legend() plt.xlabel('number of features') plt.ylabel('Time (s)') plt.axis('tight') plt.show()
bsd-3-clause
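The bench() function above calls gc.collect() before starting the clock so that garbage left over from earlier iterations does not inflate the measured fit time. A stripped-down sketch of that pattern; the helper name and keyword handling are mine, not part of the benchmark:

import gc
from time import time

def time_fit(factory, X, Y, **params):
    """Wall-clock a single estimator fit, mirroring bench() above."""
    gc.collect()                       # keep earlier garbage out of the timed interval
    tstart = time()
    model = factory(**params).fit(X, Y)
    return model, time() - tstart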
poojavade/Genomics_Docker
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/misopy/sashimi_plot/sashimi_plot.py
1
10725
# -*- mode: python; -*- ## ## sashimi_plot ## ## Utility for visualizing RNA-Seq densities along gene models and ## for plotting MISO output ## import os import sys import glob import matplotlib # Use PDF backend matplotlib.use("pdf") from scipy import * from numpy import * import pysam import shelve import misopy import misopy.gff_utils as gff_utils import misopy.pe_utils as pe_utils from misopy.parse_csv import csv2dictlist_raw from misopy.samples_utils import load_samples from misopy.sashimi_plot.Sashimi import Sashimi from misopy.sashimi_plot.plot_utils.samples_plotter import SamplesPlotter from misopy.sashimi_plot.plot_utils.plotting import * from misopy.sashimi_plot.plot_utils.plot_gene import plot_density_from_file import matplotlib.pyplot as plt from matplotlib import rc def plot_bf_dist(bf_filename, settings_filename, output_dir, max_bf=1e12): """ Plot a Bayes factor distribution from a .miso_bf file. """ if not bf_filename.endswith(".miso_bf"): print "WARNING: %s does not end in .miso_bf, are you sure it is the " \ "output of a MISO samples comparison?" %(bf_filename) # Load BF data data, h = csv2dictlist_raw(bf_filename) plot_name = os.path.basename(bf_filename) sashimi_obj = Sashimi(plot_name, output_dir, settings_filename=settings_filename) settings = sashimi_obj.settings # Setup the figure sashimi_obj.setup_figure() # Matrix of bayes factors and delta psi pairs bfs_and_deltas = [] for event in data: bf = event['bayes_factor'] delta_psi = event['diff'] if type(bf) == str and "," in bf: print "WARNING: %s is a multi-isoform event, skipping..." \ %(event) continue else: # Impose upper limit on Bayes factor bf = min(1e12, float(bf)) delta_psi = float(delta_psi) bfs_and_deltas.append([bf, delta_psi]) bfs_and_deltas = array(bfs_and_deltas) num_events = len(bfs_and_deltas) print "Loaded %d event comparisons." %(num_events) output_filename = sashimi_obj.output_filename print "Plotting Bayes factors distribution" print " - Output filename: %s" %(output_filename) bf_thresholds = settings["bf_thresholds"] bar_color = settings["bar_color"] min_bf_thresh = min(bf_thresholds) num_events_used = sum(bfs_and_deltas[:, 0] >= min_bf_thresh) for thresh in bf_thresholds: if type(thresh) != int: print "Error: BF thresholds must be integers." sys.exit(1) print "Using BF thresholds: " print bf_thresholds print "Using bar color: %s" %(bar_color) plot_cumulative_bars(bfs_and_deltas[:, 0], bf_thresholds, bar_color=bar_color, logged=True) plt.xticks(bf_thresholds) c = 1 plt.xlim([bf_thresholds[0] - c, bf_thresholds[-1] + c]) plt.title("Bayes factor distributions\n(using %d/%d events)" \ %(num_events_used, num_events)) plt.xlabel("Bayes factor thresh.") plt.ylabel("No. events") sashimi_obj.save_plot() def plot_event(event_name, pickle_dir, settings_filename, output_dir, no_posteriors=False, plot_title=None, plot_label=None): """ Visualize read densities across the exons and junctions of a given MISO alternative RNA processing event. Also plots MISO estimates and Psi values. """ if not os.path.isfile(settings_filename): print "Error: settings filename %s not found." %(settings_filename) sys.exit(1) if not os.path.isdir(pickle_dir): print "Error: event pickle directory %s not found." %(pickle_dir) sys.exit(1) # Retrieve the full pickle filename genes_filename = os.path.join(pickle_dir, "genes_to_filenames.shelve") # Check that file basename exists if len(glob.glob("%s*" %(genes_filename))) == 0: raise Exception, "Cannot find file %s. 
Are you sure the events " \ "were indexed with the latest version of index_gff.py?" \ %(genes_filename) event_to_filenames = shelve.open(genes_filename) if event_name not in event_to_filenames: raise Exception, "Event %s not found in pickled directory %s. " \ "Are you sure this is the right directory for the event?" \ %(event_name, pickle_dir) pickle_filename = event_to_filenames[event_name] if no_posteriors: print "Asked to not plot MISO posteriors." plot_density_from_file(settings_filename, pickle_filename, event_name, output_dir, no_posteriors=no_posteriors, plot_title=plot_title, plot_label=plot_label) def plot_insert_len(insert_len_filename, settings_filename, output_dir): """ Plot insert length distribution. """ if not os.path.isfile(settings_filename): print "Error: settings filename %s not found." %(settings_filename) sys.exit(1) plot_name = os.path.basename(insert_len_filename) sashimi_obj = Sashimi(plot_name, output_dir, settings_filename=settings_filename) settings = sashimi_obj.settings num_bins = settings["insert_len_bins"] output_filename = sashimi_obj.output_filename sashimi_obj.setup_figure() s = plt.subplot(1, 1, 1) print "Plotting insert length distribution..." print " - Distribution file: %s" %(insert_len_filename) print " - Output plot: %s" %(output_filename) insert_dist, params = pe_utils.load_insert_len(insert_len_filename) mean, sdev, dispersion, num_pairs \ = pe_utils.compute_insert_len_stats(insert_dist) print "min insert: %.1f" %(min(insert_dist)) print "max insert: %.1f" %(max(insert_dist)) plt.title("%s (%d read-pairs)" \ %(plot_name, num_pairs), fontsize=10) plt.hist(insert_dist, bins=num_bins, color='k', edgecolor="#ffffff", align='mid') axes_square(s) ymin, ymax = s.get_ylim() plt.text(0.05, 0.95, "$\mu$: %.1f\n$\sigma$: %.1f\n$d$: %.1f" \ %(round(mean, 2), round(sdev, 2), round(dispersion, 2)), horizontalalignment='left', verticalalignment='top', bbox=dict(edgecolor='k', facecolor="#ffffff", alpha=0.5), fontsize=10, transform=s.transAxes) plt.xlabel("Insert length (nt)") plt.ylabel("No. read pairs") sashimi_obj.save_plot() def greeting(): print "Sashimi plot: Visualize spliced RNA-Seq reads along gene models. " \ "Part of the MISO (Mixture of Isoforms model) framework." print "See --help for usage.\n" print "Manual available at: http://genes.mit.edu/burgelab/miso/docs/sashimi.html\n" def main(): from optparse import OptionParser parser = OptionParser() parser.add_option("--plot-insert-len", dest="plot_insert_len", nargs=2, default=None, help="Plot the insert length distribution from a given insert length (*.insert_len) " "filename. Second argument is a settings file name.") parser.add_option("--plot-bf-dist", dest="plot_bf_dist", nargs=2, default=None, help="Plot Bayes factor distributon. Takes the arguments: " "(1) Bayes factor filename (*.miso_bf) filename, " "(2) a settings filename.") parser.add_option("--plot-event", dest="plot_event", nargs=3, default=None, help="Plot read densities and MISO inferences for a given alternative event. " "Takes the arguments: (1) event name (i.e. 
the ID= of the event based on MISO gff3 " "annotation file, (2) directory where indexed GFF annotation is (output of " "index_gff.py), (3) path to plotting settings file.") parser.add_option("--no-posteriors", dest="no_posteriors", default=False, action="store_true", help="If given this argument, MISO posterior estimates are not plotted.") parser.add_option("--plot-title", dest="plot_title", default=None, nargs=1, help="Title of plot: a string that will be displayed at top of plot. Example: " \ "--plot-title \"My favorite gene\".") parser.add_option("--plot-label", dest="plot_label", default=None, nargs=1, help="Plot label. If given, plot will be saved in the output directory as " \ "the plot label ending in the relevant extension, e.g. <plot_label>.pdf. " \ "Example: --plot-label my_gene") parser.add_option("--output-dir", dest="output_dir", nargs=1, default=None, help="Output directory.") (options, args) = parser.parse_args() if options.plot_event is None: greeting() sys.exit(1) if options.output_dir == None: print "Error: need --output-dir" sys.exit(1) output_dir = os.path.abspath(os.path.expanduser(options.output_dir)) if not os.path.isdir(output_dir): os.makedirs(output_dir) no_posteriors = options.no_posteriors plot_title = options.plot_title plot_label = options.plot_label if options.plot_insert_len != None: insert_len_filename = os.path.abspath(os.path.expanduser(options.plot_insert_len[0])) settings_filename = os.path.abspath(os.path.expanduser(options.plot_insert_len[1])) plot_insert_len(insert_len_filename, settings_filename, output_dir) if options.plot_bf_dist != None: bf_filename = os.path.abspath(os.path.expanduser(options.plot_bf_dist[0])) settings_filename = os.path.abspath(os.path.expanduser(options.plot_bf_dist[1])) plot_bf_dist(bf_filename, settings_filename, output_dir) if options.plot_event != None: event_name = options.plot_event[0] pickle_dir = os.path.abspath(os.path.expanduser(options.plot_event[1])) settings_filename = os.path.abspath(os.path.expanduser(options.plot_event[2])) plot_event(event_name, pickle_dir, settings_filename, output_dir, no_posteriors=no_posteriors, plot_title=plot_title, plot_label=plot_label) if __name__ == '__main__': main()
apache-2.0
mhue/scikit-learn
sklearn/feature_extraction/dict_vectorizer.py
234
12267
# Authors: Lars Buitinck # Dan Blanchard <dblanchard@ets.org> # License: BSD 3 clause from array import array from collections import Mapping from operator import itemgetter import numpy as np import scipy.sparse as sp from ..base import BaseEstimator, TransformerMixin from ..externals import six from ..externals.six.moves import xrange from ..utils import check_array, tosequence from ..utils.fixes import frombuffer_empty def _tosequence(X): """Turn X into a sequence or ndarray, avoiding a copy if possible.""" if isinstance(X, Mapping): # single sample return [X] else: return tosequence(X) class DictVectorizer(BaseEstimator, TransformerMixin): """Transforms lists of feature-value mappings to vectors. This transformer turns lists of mappings (dict-like objects) of feature names to feature values into Numpy arrays or scipy.sparse matrices for use with scikit-learn estimators. When feature values are strings, this transformer will do a binary one-hot (aka one-of-K) coding: one boolean-valued feature is constructed for each of the possible string values that the feature can take on. For instance, a feature "f" that can take on the values "ham" and "spam" will become two features in the output, one signifying "f=ham", the other "f=spam". Features that do not occur in a sample (mapping) will have a zero value in the resulting array/matrix. Read more in the :ref:`User Guide <dict_feature_extraction>`. Parameters ---------- dtype : callable, optional The type of feature values. Passed to Numpy array/scipy.sparse matrix constructors as the dtype argument. separator: string, optional Separator string used when constructing new features for one-hot coding. sparse: boolean, optional. Whether transform should produce scipy.sparse matrices. True by default. sort: boolean, optional. Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting. True by default. Attributes ---------- vocabulary_ : dict A dictionary mapping feature names to feature indices. feature_names_ : list A list of length n_features containing the feature names (e.g., "f=ham" and "f=spam"). Examples -------- >>> from sklearn.feature_extraction import DictVectorizer >>> v = DictVectorizer(sparse=False) >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}] >>> X = v.fit_transform(D) >>> X array([[ 2., 0., 1.], [ 0., 1., 3.]]) >>> v.inverse_transform(X) == \ [{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}] True >>> v.transform({'foo': 4, 'unseen_feature': 3}) array([[ 0., 0., 4.]]) See also -------- FeatureHasher : performs vectorization using only a hash function. sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features encoded as columns of integers. """ def __init__(self, dtype=np.float64, separator="=", sparse=True, sort=True): self.dtype = dtype self.separator = separator self.sparse = sparse self.sort = sort def fit(self, X, y=None): """Learn a list of feature name -> indices mappings. Parameters ---------- X : Mapping or iterable over Mappings Dict(s) or Mapping(s) from feature names (arbitrary Python objects) to feature values (strings or convertible to dtype). 
y : (ignored) Returns ------- self """ feature_names = [] vocab = {} for x in X: for f, v in six.iteritems(x): if isinstance(v, six.string_types): f = "%s%s%s" % (f, self.separator, v) if f not in vocab: feature_names.append(f) vocab[f] = len(vocab) if self.sort: feature_names.sort() vocab = dict((f, i) for i, f in enumerate(feature_names)) self.feature_names_ = feature_names self.vocabulary_ = vocab return self def _transform(self, X, fitting): # Sanity check: Python's array has no way of explicitly requesting the # signed 32-bit integers that scipy.sparse needs, so we use the next # best thing: typecode "i" (int). However, if that gives larger or # smaller integers than 32-bit ones, np.frombuffer screws up. assert array("i").itemsize == 4, ( "sizeof(int) != 4 on your platform; please report this at" " https://github.com/scikit-learn/scikit-learn/issues and" " include the output from platform.platform() in your bug report") dtype = self.dtype if fitting: feature_names = [] vocab = {} else: feature_names = self.feature_names_ vocab = self.vocabulary_ # Process everything as sparse regardless of setting X = [X] if isinstance(X, Mapping) else X indices = array("i") indptr = array("i", [0]) # XXX we could change values to an array.array as well, but it # would require (heuristic) conversion of dtype to typecode... values = [] # collect all the possible feature names and build sparse matrix at # same time for x in X: for f, v in six.iteritems(x): if isinstance(v, six.string_types): f = "%s%s%s" % (f, self.separator, v) v = 1 if f in vocab: indices.append(vocab[f]) values.append(dtype(v)) else: if fitting: feature_names.append(f) vocab[f] = len(vocab) indices.append(vocab[f]) values.append(dtype(v)) indptr.append(len(indices)) if len(indptr) == 1: raise ValueError("Sample sequence X is empty.") indices = frombuffer_empty(indices, dtype=np.intc) indptr = np.frombuffer(indptr, dtype=np.intc) shape = (len(indptr) - 1, len(vocab)) result_matrix = sp.csr_matrix((values, indices, indptr), shape=shape, dtype=dtype) # Sort everything if asked if fitting and self.sort: feature_names.sort() map_index = np.empty(len(feature_names), dtype=np.int32) for new_val, f in enumerate(feature_names): map_index[new_val] = vocab[f] vocab[f] = new_val result_matrix = result_matrix[:, map_index] if self.sparse: result_matrix.sort_indices() else: result_matrix = result_matrix.toarray() if fitting: self.feature_names_ = feature_names self.vocabulary_ = vocab return result_matrix def fit_transform(self, X, y=None): """Learn a list of feature name -> indices mappings and transform X. Like fit(X) followed by transform(X), but does not require materializing X in memory. Parameters ---------- X : Mapping or iterable over Mappings Dict(s) or Mapping(s) from feature names (arbitrary Python objects) to feature values (strings or convertible to dtype). y : (ignored) Returns ------- Xa : {array, sparse matrix} Feature vectors; always 2-d. """ return self._transform(X, fitting=True) def inverse_transform(self, X, dict_type=dict): """Transform array or sparse matrix X back to feature mappings. X must have been produced by this DictVectorizer's transform or fit_transform method; it may only have passed through transformers that preserve the number of features and their order. In the case of one-hot/one-of-K coding, the constructed feature names and values are returned rather than the original ones. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Sample matrix. 
dict_type : callable, optional Constructor for feature mappings. Must conform to the collections.Mapping API. Returns ------- D : list of dict_type objects, length = n_samples Feature mappings for the samples in X. """ # COO matrix is not subscriptable X = check_array(X, accept_sparse=['csr', 'csc']) n_samples = X.shape[0] names = self.feature_names_ dicts = [dict_type() for _ in xrange(n_samples)] if sp.issparse(X): for i, j in zip(*X.nonzero()): dicts[i][names[j]] = X[i, j] else: for i, d in enumerate(dicts): for j, v in enumerate(X[i, :]): if v != 0: d[names[j]] = X[i, j] return dicts def transform(self, X, y=None): """Transform feature->value dicts to array or sparse matrix. Named features not encountered during fit or fit_transform will be silently ignored. Parameters ---------- X : Mapping or iterable over Mappings, length = n_samples Dict(s) or Mapping(s) from feature names (arbitrary Python objects) to feature values (strings or convertible to dtype). y : (ignored) Returns ------- Xa : {array, sparse matrix} Feature vectors; always 2-d. """ if self.sparse: return self._transform(X, fitting=False) else: dtype = self.dtype vocab = self.vocabulary_ X = _tosequence(X) Xa = np.zeros((len(X), len(vocab)), dtype=dtype) for i, x in enumerate(X): for f, v in six.iteritems(x): if isinstance(v, six.string_types): f = "%s%s%s" % (f, self.separator, v) v = 1 try: Xa[i, vocab[f]] = dtype(v) except KeyError: pass return Xa def get_feature_names(self): """Returns a list of feature names, ordered by their indices. If one-of-K coding is applied to categorical features, this will include the constructed feature names but not the original ones. """ return self.feature_names_ def restrict(self, support, indices=False): """Restrict the features to those in support using feature selection. This function modifies the estimator in-place. Parameters ---------- support : array-like Boolean mask or list of indices (as returned by the get_support member of feature selectors). indices : boolean, optional Whether support is a list of indices. Returns ------- self Examples -------- >>> from sklearn.feature_extraction import DictVectorizer >>> from sklearn.feature_selection import SelectKBest, chi2 >>> v = DictVectorizer() >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}] >>> X = v.fit_transform(D) >>> support = SelectKBest(chi2, k=2).fit(X, [0, 1]) >>> v.get_feature_names() ['bar', 'baz', 'foo'] >>> v.restrict(support.get_support()) # doctest: +ELLIPSIS DictVectorizer(dtype=..., separator='=', sort=True, sparse=True) >>> v.get_feature_names() ['bar', 'foo'] """ if not indices: support = np.where(support)[0] names = self.feature_names_ new_vocab = {} for i in support: new_vocab[names[i]] = len(new_vocab) self.vocabulary_ = new_vocab self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab), key=itemgetter(1))] return self
bsd-3-clause
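To complement the numeric example in the docstring above, a sketch of the one-of-K behaviour described there for string-valued features; the dictionaries are made up:

from sklearn.feature_extraction import DictVectorizer

# String values trigger one-hot coding: each (feature, value) pair becomes
# its own boolean column, while numeric values pass through unchanged.
v = DictVectorizer(sparse=False)
D = [{'f': 'ham', 'count': 2}, {'f': 'spam', 'count': 3}]
X = v.fit_transform(D)
print(v.get_feature_names())   # ['count', 'f=ham', 'f=spam']
print(X)                       # [[ 2.  1.  0.] [ 3.  0.  1.]]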
Tastalian/openravepypy
pymanoid/models.py
3
7028
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2015-2020 Stephane Caron <stephane.caron@normalesup.org> # # This file is part of pymanoid <https://github.com/stephane-caron/pymanoid>. # # pymanoid is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # # pymanoid is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # pymanoid. If not, see <http://www.gnu.org/licenses/>. from numpy import cosh, dot, sinh, sqrt from .body import Point from .gui import draw_line, draw_point from .misc import warn from .sim import Process, gravity class InvertedPendulum(Process): """ Inverted pendulum model. Parameters ---------- pos : (3,) array Initial position in the world frame. vel : (3,) array Initial velocity in the world frame. contact : pymanoid.Contact Contact surface specification. lambda_min : scalar Minimum virtual leg stiffness. lambda_max : scalar Maximum virtual leg stiffness. clamp : bool, optional Clamp inputs (e.g. CoP) if they exceed constraints (e.g. support area)? visible : bool, optional Draw the pendulum model in GUI? color : char, optional Color code in matplotlib convention ('r' for red, 'b' for blue, etc.). size : scalar, optional Half-length of a side of the CoM cube handle, in [m]. """ def __init__(self, pos, vel, contact, lambda_min=1e-5, lambda_max=None, clamp=True, visible=True, color='b', size=0.02): super(InvertedPendulum, self).__init__() com = Point(pos, vel, size=size, color=color) self.clamp = clamp self.color = color self.com = com self.contact = contact self.cop = contact.p self.handles = None self.is_visible = visible self.lambda_ = -gravity[2] / (com.z - contact.z) self.lambda_max = lambda_max self.lambda_min = lambda_min if visible: self.show() else: # not visible self.hide() def copy(self, visible=True): """ Copy constructor. Parameters ---------- visible : bool, optional Should the copy be visible? """ return InvertedPendulum( self.com.p, self.com.pd, self.contact, visible=visible) def draw(self): """Draw inverted pendulum.""" fulcrum = draw_point(self.cop, pointsize=0.01, color=self.color) leg = draw_line(self.com.p, self.cop, linewidth=4, color=self.color) self.handles = [fulcrum, leg] def hide(self): """Hide pendulum from the GUI.""" self.com.hide() if self.handles: for handle in self.handles: handle.Close() self.is_visible = False def show(self): """Show pendulum in the GUI.""" self.com.show() self.draw() self.is_visible = True def set_contact(self, contact): """ Update the contact the pendulum rests upon. Parameters ---------- contact : pymanoid.Contact New contact where CoPs can be realized. """ self.contact = contact def set_cop(self, cop, clamp=None): """ Update the CoP location on the contact surface. Parameters ---------- cop : (3,) array New CoP location in the world frame. clamp : bool, optional Clamp CoP within the contact area if it lies outside. Overrides ``self.clamp``. 
""" if (self.clamp if clamp is None else clamp): cop_local = dot(self.contact.R.T, cop - self.contact.p) if cop_local[0] >= self.contact.shape[0]: cop_local[0] = self.contact.shape[0] - 1e-5 elif cop_local[0] <= -self.contact.shape[0]: cop_local[0] = -self.contact.shape[0] + 1e-5 if cop_local[1] >= self.contact.shape[1]: cop_local[1] = self.contact.shape[1] - 1e-5 elif cop_local[1] <= -self.contact.shape[1]: cop_local[1] = -self.contact.shape[1] + 1e-5 cop = self.contact.p + dot(self.contact.R, cop_local) elif __debug__: cop_check = dot(self.contact.R.T, cop - self.contact.p) if abs(cop_check[0]) > 1.05 * self.contact.shape[0]: warn("CoP crosses contact area along sagittal axis") if abs(cop_check[1]) > 1.05 * self.contact.shape[1]: warn("CoP crosses contact area along lateral axis") if abs(cop_check[2]) > 0.01: warn("CoP does not lie on contact area") self.cop = cop def set_lambda(self, lambda_, clamp=None): """ Update the leg stiffness coefficient. Parameters ---------- lambda_ : scalar Leg stiffness coefficient (positive). clamp : bool, optional Clamp value if it exits the [lambda_min, lambda_max] interval. Overrides ``self.clamp``. """ if (self.clamp if clamp is None else clamp): if self.lambda_min is not None and lambda_ < self.lambda_min: lambda_ = self.lambda_min if self.lambda_max is not None and lambda_ > self.lambda_max: lambda_ = self.lambda_max elif __debug__: if self.lambda_min is not None and lambda_ < self.lambda_min: warn("Stiffness %f below %f" % (lambda_, self.lambda_min)) if self.lambda_max is not None and lambda_ > self.lambda_max: warn("Stiffness %f above %f" % (lambda_, self.lambda_max)) self.lambda_ = lambda_ def integrate(self, duration): """ Integrate dynamics forward for a given duration. Parameters ---------- duration : scalar Duration of forward integration. """ omega = sqrt(self.lambda_) p0 = self.com.p pd0 = self.com.pd ch, sh = cosh(omega * duration), sinh(omega * duration) vrp = self.cop - gravity / self.lambda_ p = p0 * ch + pd0 * sh / omega - vrp * (ch - 1.) pd = pd0 * ch + omega * (p0 - vrp) * sh self.com.set_pos(p) self.com.set_vel(pd) def on_tick(self, sim): """ Integrate dynamics for one simulation step. Parameters ---------- sim : pymanoid.Simulation Simulation instance. """ self.integrate(sim.dt) if self.is_visible: self.draw()
gpl-3.0
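InvertedPendulum.integrate() above applies the exact solution of the linear inverted pendulum, pdd = lambda * (p - cop) + gravity, over one time step. Below is a self-contained numpy check of that cosh/sinh update against brute-force Euler integration; all numerical values are illustrative and not taken from pymanoid:

import numpy as np

g = np.array([0., 0., -9.81])
lambda_ = 9.81 / 0.8                      # CoM 0.8 m above the contact
omega = np.sqrt(lambda_)
cop = np.zeros(3)
p, pd = np.array([0.05, 0., 0.8]), np.array([0.1, 0., 0.])

def analytic_step(p, pd, dt):
    # same closed form as integrate(): vrp is the "virtual repellent point"
    vrp = cop - g / lambda_
    ch, sh = np.cosh(omega * dt), np.sinh(omega * dt)
    return (p * ch + pd * sh / omega - vrp * (ch - 1.),
            pd * ch + omega * (p - vrp) * sh)

# brute-force Euler integration with a tiny step should agree closely
p_e, pd_e, dt = p.copy(), pd.copy(), 1e-5
for _ in range(int(round(0.1 / dt))):
    pdd = lambda_ * (p_e - cop) + g
    p_e, pd_e = p_e + dt * pd_e, pd_e + dt * pdd
print(np.abs(analytic_step(p, pd, 0.1)[0] - p_e).max())   # small (Euler error only)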
c3s-magic/adaguc-services-esmvaltool-wps
processes/pretty-picture.py
1
1942
""" Example processing returning some image """ import logging import os import shutil from pywps.Process.Process import WPSProcess class Process(WPSProcess): def __init__(self): # init process WPSProcess.__init__(self, identifier="pretty-picture", #the same as the file name version = "1.0", title="Some picture", storeSupported = "true", statusSupported = "true", abstract="Some picture.", grassLocation =False) #Input (c4i fails if no inputs are present) self.tag = self.addLiteralInput(identifier="tag",title = "Specify a custom title for this process",type="String",default="unspecified") self.picture=self.addComplexOutput(identifier="picture", title="Raster out", formats=[{"mimeType":"image/png"}]) self.picture2=self.addComplexOutput(identifier="picture2", title="Raster out", formats=[{"mimeType":"image/png"}]) def execute(self): outfile = "/miniconda/lib/python2.7/site-packages/matplotlib/backends/web_backend/jquery/css/themes/base/images/ui-icons_228ef1_256x240.png" # self.picture.format = {'mimeType':"image/png"} self.picture.setValue(outfile) self.picture2.setValue(outfile) # dir(self) # self.processTitle.setValue(self.tag.getValue()) # logging.debug('output path is', os.environ['POF_OUTPUT_PATH']) # logging.debug('output url is', os.environ['POF_OUTPUT_URL']) # shutil.copy(outfile, os.environ['POF_OUTPUT_PATH']) # filelink = 'https://upload.wikimedia.org/wikipedia/commons/d/df/LocationOceans.png' # filelink = os.environ['POF_OUTPUT_URL'] + '/' + 'ui-icons_228ef1_256x240.png' # self.morepicture.setValue(filelink) return
apache-2.0
ahwillia/PyNeuron-Toolbox
PyNeuronToolbox/channel_analysis.py
1
2536
def ivcurve(mechanism_name, i_type, vmin=-100, vmax=100, deltav=1, transient_time=50, test_time=50, rs=1, vinit=-665): """ Returns the (peak) current-voltage relationship for an ion channel. Args: mechanism_name = name of the mechanism (e.g. hh) i_type = which current to monitor (e.g. ik, ina) vmin = minimum voltage step to test vmax = maximum voltage step to test deltav = increment of voltage transient_time = how long to ignore for initial conditions to stabilize (ms) test_time = duration of the voltage clamp tests (ms) rs = resistance of voltage clamp in MOhm vinit = initialization voltage Returns: i = iterable of peak currents (in mA/cm^2) v = iterable of corresponding test voltages Note: The initialization potential (vinit) may affect the result. For example, consider the Hodgkin-Huxley sodium channel; a large fraction are inactivated at rest. Using a strongly hyperpolarizing vinit will uninactivate many channels, leading to more current. """ from neuron import h import numpy h.load_file('stdrun.hoc') sec = h.Section() sec.insert(mechanism_name) sec.L = 1 sec.diam = 1 seclamp = h.SEClamp(sec(0.5)) seclamp.amp1 = vinit seclamp.dur1 = transient_time seclamp.dur2 = test_time seclamp.rs = rs i_record = h.Vector() i_record.record(sec(0.5).__getattribute__('_ref_' + i_type)) result_i = [] result_v = numpy.arange(vmin, vmax, deltav) for test_v in result_v: seclamp.amp2 = test_v h.finitialize(vinit) h.continuerun(transient_time) num_transient_points = len(i_record) h.continuerun(test_time + transient_time) i_record2 = i_record.as_numpy()[num_transient_points:] baseline_i = i_record2[0] i_record_shift = i_record2 - baseline_i max_i = max(i_record_shift) min_i = min(i_record_shift) peak_i = max_i if abs(max_i) > abs(min_i) else min_i peak_i += baseline_i result_i.append(peak_i) return result_i, result_v if __name__ == '__main__': from matplotlib import pyplot import numpy from neuron import h h.CVode().active(1) ik, v = ivcurve('hh', 'ik') pyplot.plot(v, ik, label='ik') ina, v = ivcurve('hh', 'ina', vinit=-100) pyplot.plot(v, ina, label='ina') pyplot.xlabel('v (mV)') pyplot.ylabel('current (mA/cm^2)') pyplot.legend() pyplot.show()
mit
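The peak-detection step inside ivcurve() (subtract the current at the start of the test pulse, keep whichever excursion has the larger magnitude, then add the baseline back) can be exercised on its own. A small sketch with a synthetic current trace; the function name and numbers are illustrative:

import numpy as np

def signed_peak(trace):
    baseline = trace[0]
    shifted = trace - baseline
    peak = shifted.max() if abs(shifted.max()) > abs(shifted.min()) else shifted.min()
    return peak + baseline

t = np.linspace(0, 50, 500)
trace = 0.02 - 0.3 * np.exp(-t / 5.0) * (t / 5.0)   # inward (negative-going) current
print(signed_peak(trace))                            # ~ -0.09, the most negative point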
jcl5m1/quadrotor_control
gyroplot.py
1
3106
import numpy as np import matplotlib.pyplot as plt import sys, os, serial, threading import pygame from pygame.locals import * from collections import deque red = pygame.Color(255,0,0) green = pygame.Color(0,255,0) yellow = pygame.Color(255,255,0) cyan = pygame.Color(0,255,255) blue = pygame.Color(0,0,255) black = pygame.Color(0,0,0) white = pygame.Color(255,255,255) pygame.init() fpsClock = pygame.time.Clock() width = 800 hieght = 800 windowSurfaceObj = pygame.display.set_mode((width,hieght)) pygame.display.set_caption('Python Plot') def convertBytes(b1, b2): value = (b1<<8) + b2 if(value > 32768): return value - 65536 return value def parseData(): d = ord(ser.read()) if(d != 170): return; x = convertBytes(ord(ser.read()),ord(ser.read())) y = convertBytes(ord(ser.read()),ord(ser.read())) z = convertBytes(ord(ser.read()),ord(ser.read())) return x,y,z def convertScale(v): return hieght/2 - v/10 # class that holds analog data for N samples class AnalogData: # constr def __init__(self, maxLen): self.ax = deque([0.0]*maxLen) self.ay = deque([0.0]*maxLen) self.az = deque([0.0]*maxLen) self.maxLen = maxLen #compute statistics def computeStats(self,buf): mu = 0.0 sig = 0.0 for i in buf: mu += i mu /= len(buf) for i in buf: sig += (i-mu)*(i-mu) sig /= len(buf)-1 return mu,sig def computeAllStats(self): print "x: ",self.computeStats(self.ax) print "y: ",self.computeStats(self.ay) print "z: ",self.computeStats(self.az) # ring buffer def addToBuf(self, buf, val): if len(buf) < self.maxLen: buf.append(val) else: buf.pop() buf.appendleft(val) # add data def add(self, data): assert(len(data) == 3) self.addToBuf(self.ax, data[0]) self.addToBuf(self.ay, data[1]) self.addToBuf(self.az, data[2]) port = "/dev/ttyACM0" baud = 57600 print "Openning",port,"at",baud,"..." ser = serial.Serial(port, baud, timeout=1) xCursor = 0; px2 = (0,0) py2 = (0,0) pz2 = (0,0) observationCount = 1000 analogData = AnalogData(observationCount) dataCount = 0 while True: if ser.isOpen(): data = parseData(); print "Data",dataCount,":",data analogData.add(data) dataCount += 1 if(dataCount == observationCount): analogData.computeAllStats() pygame.quit() sys.exit() px1 = px2 py1 = py2 pz1 = pz2 px2 = (xCursor,convertScale(data[0])); py2 = (xCursor,convertScale(data[1])); pz2 = (xCursor,convertScale(data[2])); if(xCursor == 0): px1 = px2 py1 = py2 pz1 = pz2 pygame.draw.rect(windowSurfaceObj, black,(xCursor,0,10,hieght)) pygame.draw.line(windowSurfaceObj, white, (xCursor+1,0),(xCursor+1,hieght),1) pygame.draw.line(windowSurfaceObj, red, px1,px2,1) pygame.draw.line(windowSurfaceObj, green, py1,py2,1) pygame.draw.line(windowSurfaceObj, blue, pz1,pz2,1) xCursor += 1 if(xCursor >= width): xCursor = 0 for event in pygame.event.get(): if event.type == QUIT: pygame.quit() sys.exit() if event.type == KEYDOWN: if event.key == K_ESCAPE: pygame.event.post(pygame.event.Event(QUIT)) pygame.display.update() ser.close()
apache-2.0
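convertBytes() above reassembles a signed 16-bit sample from two bytes. The conventional two's-complement conversion treats every value with the sign bit set (>= 32768) as negative; the strict "> 32768" comparison in the code differs from it only for the single value 0x8000. A minimal sketch of the conventional form:

def to_int16(hi, lo):
    # combine two bytes, then wrap values with the sign bit set
    value = (hi << 8) + lo
    return value - 65536 if value >= 32768 else value

assert to_int16(0x00, 0x10) == 16
assert to_int16(0xFF, 0xF0) == -16
assert to_int16(0x80, 0x00) == -32768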
zhoushuaicode/machinelearning
classify.py
1
1255
import pandas as pd
import numpy as np
import operator


def classify(trainset, testset, k):
    """k-nearest-neighbour classification of `testset` against `trainset`.

    Both DataFrames must contain a 'class' column; every other column is
    treated as a numeric feature. For each test row the k nearest training
    rows (Euclidean distance) vote on the predicted class.
    """
    trainlabel = pd.DataFrame(trainset['class'])
    trainfeature = trainset.drop('class', axis=1)
    testfeature = testset.drop('class', axis=1)
    result = list()
    for idx in list(testfeature.index):
        # Euclidean distance from this test row to every training row
        diff = trainfeature - testfeature.ix[idx]
        sqdiff = diff ** 2
        sqdistance = sqdiff.sum(axis=1)
        distance = sqdistance ** 0.5
        sortofdis = distance.rank(method='first')
        indexlist = np.zeros(k)
        label = pd.DataFrame()
        count = {}
        # Take the k closest training rows one at a time and tally their classes
        for j in range(k):
            nearest = sortofdis.idxmin()
            indexlist[j] = nearest
            label = label.append(trainlabel.ix[nearest])
            cls = label.ix[nearest, 'class']
            count[cls] = count.get(cls, 0) + 1
            sortofdis = sortofdis.drop(nearest)
        # Majority vote: the most common class among the k neighbours
        sortedcount = sorted(count.iteritems(), key=operator.itemgetter(1),
                             reverse=True)
        result.append(sortedcount[0][0])
    return result
mit
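A small smoke test for the classify() function above; it assumes the file is importable as classify.py, and the data frame values are made up:

import pandas as pd
from classify import classify    # assumption: the module above is on the path

train = pd.DataFrame({'x': [0.0, 0.1, 1.0, 1.1],
                      'y': [0.0, 0.2, 1.0, 0.9],
                      'class': ['a', 'a', 'b', 'b']})
test = pd.DataFrame({'x': [0.05, 1.05], 'y': [0.1, 0.95]})
test['class'] = '?'              # classify() drops this column before computing distances
print(classify(train, test, 3))  # expected: ['a', 'b']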
SciTools/iris
lib/iris/tests/unit/quickplot/test_contour.py
5
1533
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the `iris.quickplot.contour` function."""

# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests

import numpy as np

from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords

if tests.MPL_AVAILABLE:
    import iris.quickplot as qplt


@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
    def test_yaxis_labels(self):
        qplt.contour(self.cube, coords=("bar", "str_coord"))
        self.assertPointsTickLabels("yaxis")

    def test_xaxis_labels(self):
        qplt.contour(self.cube, coords=("str_coord", "bar"))
        self.assertPointsTickLabels("xaxis")


@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
    def setUp(self):
        # We have a 2d cube with dimensionality (bar: 3; foo: 4)
        self.cube = simple_2d(with_bounds=False)
        self.foo = self.cube.coord("foo").points
        self.foo_index = np.arange(self.foo.size)
        self.bar = self.cube.coord("bar").points
        self.bar_index = np.arange(self.bar.size)
        self.data = self.cube.data
        self.dataT = self.data.T
        self.mpl_patch = self.patch("matplotlib.pyplot.contour")
        self.draw_func = qplt.contour


if __name__ == "__main__":
    tests.main()
lgpl-3.0
to266/hyperspy
hyperspy/drawing/_widgets/circle.py
3
7415
# -*- coding: utf-8 -*- # Copyright 2007-2016 The HyperSpy developers # # This file is part of HyperSpy. # # HyperSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # HyperSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with HyperSpy. If not, see <http://www.gnu.org/licenses/>. import numpy as np import matplotlib.pyplot as plt from hyperspy.drawing.widgets import Widget2DBase, ResizersMixin class CircleWidget(Widget2DBase, ResizersMixin): """CircleWidget is a symmetric, Cicle-patch based widget, which can be dragged, and resized by keystrokes/code. """ def __init__(self, axes_manager, **kwargs): super(CircleWidget, self).__init__(axes_manager, **kwargs) self.size_step = 1.0 self.size_snap_offset = (0.5 + 1e-8) def _set_axes(self, axes): super(CircleWidget, self)._set_axes(axes) if self.axes: self._size[0] = (0.5 + 1e-8) * self.axes[0].scale if len(self.axes) > 1: self._size[1] = 0 def _do_snap_size(self, value=None): # Snap to odd diameters = ?.5 radius value = np.array(value) if value is not None else self._size snap_offset = self.size_snap_offset * self.axes[0].scale snap_spacing = self.axes[0].scale * self.size_step for i in range(2): value[i] = max(0, (round((value[i] - snap_offset) / snap_spacing) * snap_spacing + snap_offset)) return value def _set_size(self, value): """Setter for the 'size' property. Calls _size_changed to handle size change, if the value has changed. """ # Override so that r_inner can be 0 value = np.minimum(value, [0.5 * ax.size * ax.scale for ax in self.axes]) # Changed from base: min_sizes = np.array(((0.5 + 1e-8) * self.axes[0].scale, 0)) value = np.maximum(value, min_sizes) if value[0] < value[1]: self._set_size(value[::-1]) else: if self.snap_size: value = self._do_snap_size(value) if np.any(self._size != value): self._size = value self._size_changed() def increase_size(self): """Increment all sizes by one step. Applied via 'size' property. """ s = np.array(self.size) if self.size[1] > 0: s += self.size_step * self.axes[0].scale else: s[0] += self.size_step * self.axes[0].scale self.size = s def decrease_size(self): """Decrement all sizes by one step. Applied via 'size' property. """ s = np.array(self.size) if self.size[1] > 0: s -= self.size_step * self.axes[0].scale else: s[0] -= self.size_step * self.axes[0].scale self.size = s def get_centre(self): return self.position def _get_patch_xy(self): """Returns the xy coordinates of the patch. In this implementation, the patch is centered on the position. """ return self.position def _set_patch(self): """Sets the patch to a matplotlib Circle with the correct geometry. The geometry is defined by _get_patch_xy, and size. """ super(CircleWidget, self)._set_patch() xy = self._get_patch_xy() ro, ri = self.size self.patch = [plt.Circle( xy, radius=ro, animated=self.blit, fill=False, lw=self.border_thickness, ec=self.color, picker=True,)] if ri > 0: self.patch.append( plt.Circle( xy, radius=ro, animated=self.blit, fill=False, lw=self.border_thickness, ec=self.color, picker=True,)) def _validate_pos(self, value): """Constrict the position within bounds. 
""" value = (min(value[0], self.axes[0].high_value - self._size[0] + (0.5 + 1e-8) * self.axes[0].scale), min(value[1], self.axes[1].high_value - self._size[0] + (0.5 + 1e-8) * self.axes[1].scale)) value = (max(value[0], self.axes[0].low_value + self._size[0] - (0.5 + 1e-8) * self.axes[0].scale), max(value[1], self.axes[1].low_value + self._size[0] - (0.5 + 1e-8) * self.axes[1].scale)) return super(CircleWidget, self)._validate_pos(value) def get_size_in_indices(self): return np.array(self._size / self.axes[0].scale) def _update_patch_position(self): if self.is_on() and self.patch: self.patch[0].center = self._get_patch_xy() if self.size[1] > 0: self.patch[1].center = self.patch[0].center self._update_resizers() self.draw_patch() def _update_patch_size(self): if self.is_on() and self.patch: ro, ri = self.size self.patch[0].radius = ro if ri > 0: self.patch[1].radius = ri self._update_resizers() self.draw_patch() def _update_patch_geometry(self): if self.is_on() and self.patch: ro, ri = self.size self.patch[0].center = self._get_patch_xy() self.patch[0].radius = ro if ri > 0: self.patch[1].center = self.patch[0].center self.patch[1].radius = ri self._update_resizers() self.draw_patch() def _onmousemove(self, event): 'on mouse motion move the patch if picked' if self.picked is True and event.inaxes: x = event.xdata y = event.ydata if self.resizer_picked is False: x -= self.pick_offset[0] y -= self.pick_offset[1] self.position = (x, y) else: rad_vect = np.array((x, y)) - self._pos radius = np.sqrt(np.sum(rad_vect**2)) s = list(self.size) if self.resizer_picked < 4: s[0] = radius else: s[1] = radius self.size = s def _get_resizer_pos(self): positions = [] indices = (0, 1) if self.size[1] > 0 else (0, ) for i in indices: r = self._size[i] rsize = self._get_resizer_size() / 2 rp = np.array(self._get_patch_xy()) p = rp - (r, 0) - rsize # Left positions.append(p) p = rp - (0, r) - rsize # Top positions.append(p) p = rp + (r, 0) - rsize # Right positions.append(p) p = rp + (0, r) - rsize # Bottom positions.append(p) return positions
gpl-3.0
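The radius snapping in CircleWidget._do_snap_size above ("snap to odd diameters = ?.5 radius") rounds each radius to a half-integer multiple of the axis scale. A tiny standalone illustration of that arithmetic, with made-up values:

# With an axis scale of 1.0 the radius snaps to 0.5, 1.5, 2.5, ... (up to
# the 1e-8 offset), i.e. to odd diameters in pixels.
scale = 1.0
snap_offset = (0.5 + 1e-8) * scale
snap_spacing = scale * 1.0            # size_step
for r in (0.2, 0.9, 1.6, 2.4):
    snapped = max(0, round((r - snap_offset) / snap_spacing) * snap_spacing + snap_offset)
    print('%.1f -> %s' % (r, snapped))
# 0.2 -> 0.5, 0.9 -> 0.5, 1.6 -> 1.5, 2.4 -> 2.5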
rubikloud/scikit-learn
examples/exercises/plot_iris_exercise.py
323
1602
""" ================================ SVM Exercise ================================ A tutorial exercise for using different SVM kernels. This exercise is used in the :ref:`using_kernels_tut` part of the :ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import datasets, svm iris = datasets.load_iris() X = iris.data y = iris.target X = X[y != 0, :2] y = y[y != 0] n_sample = len(X) np.random.seed(0) order = np.random.permutation(n_sample) X = X[order] y = y[order].astype(np.float) X_train = X[:.9 * n_sample] y_train = y[:.9 * n_sample] X_test = X[.9 * n_sample:] y_test = y[.9 * n_sample:] # fit the model for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')): clf = svm.SVC(kernel=kernel, gamma=10) clf.fit(X_train, y_train) plt.figure(fig_num) plt.clf() plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired) # Circle out the test data plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10) plt.axis('tight') x_min = X[:, 0].min() x_max = X[:, 0].max() y_min = X[:, 1].min() y_max = X[:, 1].max() XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j] Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()]) # Put the result into a color plot Z = Z.reshape(XX.shape) plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired) plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'], levels=[-.5, 0, .5]) plt.title(kernel) plt.show()
bsd-3-clause
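A minimal follow-up sketch, not part of the original exercise: once each kernel has been fitted on the 90% split above, the held-out 10% can be scored with the standard scikit-learn accuracy helper `clf.score`. Data preparation repeats the exercise so the snippet is self-contained.

import numpy as np
from sklearn import datasets, svm

iris = datasets.load_iris()
X, y = iris.data[iris.target != 0, :2], iris.target[iris.target != 0]
rng = np.random.RandomState(0)
order = rng.permutation(len(X))
X, y = X[order], y[order].astype(float)
split = int(0.9 * len(X))
for kernel in ('linear', 'rbf', 'poly'):
    clf = svm.SVC(kernel=kernel, gamma=10).fit(X[:split], y[:split])
    print(kernel, clf.score(X[split:], y[split:]))  # mean accuracy on the test split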
sumspr/scikit-learn
sklearn/neighbors/tests/test_ball_tree.py
129
10192
import pickle import numpy as np from numpy.testing import assert_array_almost_equal from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap, simultaneous_sort, kernel_norm, nodeheap_sort, DTYPE, ITYPE) from sklearn.neighbors.dist_metrics import DistanceMetric from sklearn.utils.testing import SkipTest, assert_allclose rng = np.random.RandomState(10) V = rng.rand(3, 3) V = np.dot(V, V.T) DIMENSION = 3 METRICS = {'euclidean': {}, 'manhattan': {}, 'minkowski': dict(p=3), 'chebyshev': {}, 'seuclidean': dict(V=np.random.random(DIMENSION)), 'wminkowski': dict(p=3, w=np.random.random(DIMENSION)), 'mahalanobis': dict(V=V)} DISCRETE_METRICS = ['hamming', 'canberra', 'braycurtis'] BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski', 'rogerstanimoto', 'russellrao', 'sokalmichener', 'sokalsneath'] def dist_func(x1, x2, p): return np.sum((x1 - x2) ** p) ** (1. / p) def brute_force_neighbors(X, Y, k, metric, **kwargs): D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X) ind = np.argsort(D, axis=1)[:, :k] dist = D[np.arange(Y.shape[0])[:, None], ind] return dist, ind def test_ball_tree_query(): np.random.seed(0) X = np.random.random((40, DIMENSION)) Y = np.random.random((10, DIMENSION)) def check_neighbors(dualtree, breadth_first, k, metric, kwargs): bt = BallTree(X, leaf_size=1, metric=metric, **kwargs) dist1, ind1 = bt.query(Y, k, dualtree=dualtree, breadth_first=breadth_first) dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs) # don't check indices here: if there are any duplicate distances, # the indices may not match. Distances should not have this problem. assert_array_almost_equal(dist1, dist2) for (metric, kwargs) in METRICS.items(): for k in (1, 3, 5): for dualtree in (True, False): for breadth_first in (True, False): yield (check_neighbors, dualtree, breadth_first, k, metric, kwargs) def test_ball_tree_query_boolean_metrics(): np.random.seed(0) X = np.random.random((40, 10)).round(0) Y = np.random.random((10, 10)).round(0) k = 5 def check_neighbors(metric): bt = BallTree(X, leaf_size=1, metric=metric) dist1, ind1 = bt.query(Y, k) dist2, ind2 = brute_force_neighbors(X, Y, k, metric) assert_array_almost_equal(dist1, dist2) for metric in BOOLEAN_METRICS: yield check_neighbors, metric def test_ball_tree_query_discrete_metrics(): np.random.seed(0) X = (4 * np.random.random((40, 10))).round(0) Y = (4 * np.random.random((10, 10))).round(0) k = 5 def check_neighbors(metric): bt = BallTree(X, leaf_size=1, metric=metric) dist1, ind1 = bt.query(Y, k) dist2, ind2 = brute_force_neighbors(X, Y, k, metric) assert_array_almost_equal(dist1, dist2) for metric in DISCRETE_METRICS: yield check_neighbors, metric def test_ball_tree_query_radius(n_samples=100, n_features=10): np.random.seed(0) X = 2 * np.random.random(size=(n_samples, n_features)) - 1 query_pt = np.zeros(n_features, dtype=float) eps = 1E-15 # roundoff error can cause test to fail bt = BallTree(X, leaf_size=5) rad = np.sqrt(((X - query_pt) ** 2).sum(1)) for r in np.linspace(rad[0], rad[-1], 100): ind = bt.query_radius(query_pt, r + eps)[0] i = np.where(rad <= r + eps)[0] ind.sort() i.sort() assert_array_almost_equal(i, ind) def test_ball_tree_query_radius_distance(n_samples=100, n_features=10): np.random.seed(0) X = 2 * np.random.random(size=(n_samples, n_features)) - 1 query_pt = np.zeros(n_features, dtype=float) eps = 1E-15 # roundoff error can cause test to fail bt = BallTree(X, leaf_size=5) rad = np.sqrt(((X - query_pt) ** 2).sum(1)) for r in np.linspace(rad[0], rad[-1], 100): ind, dist = 
bt.query_radius(query_pt, r + eps, return_distance=True) ind = ind[0] dist = dist[0] d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1)) assert_array_almost_equal(d, dist) def compute_kernel_slow(Y, X, kernel, h): d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1)) norm = kernel_norm(h, X.shape[1], kernel) if kernel == 'gaussian': return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1) elif kernel == 'tophat': return norm * (d < h).sum(-1) elif kernel == 'epanechnikov': return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1) elif kernel == 'exponential': return norm * (np.exp(-d / h)).sum(-1) elif kernel == 'linear': return norm * ((1 - d / h) * (d < h)).sum(-1) elif kernel == 'cosine': return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1) else: raise ValueError('kernel not recognized') def test_ball_tree_kde(n_samples=100, n_features=3): np.random.seed(0) X = np.random.random((n_samples, n_features)) Y = np.random.random((n_samples, n_features)) bt = BallTree(X, leaf_size=10) for kernel in ['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine']: for h in [0.01, 0.1, 1]: dens_true = compute_kernel_slow(Y, X, kernel, h) def check_results(kernel, h, atol, rtol, breadth_first): dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol, kernel=kernel, breadth_first=breadth_first) assert_allclose(dens, dens_true, atol=atol, rtol=max(rtol, 1e-7)) for rtol in [0, 1E-5]: for atol in [1E-6, 1E-2]: for breadth_first in (True, False): yield (check_results, kernel, h, atol, rtol, breadth_first) def test_gaussian_kde(n_samples=1000): # Compare gaussian KDE results to scipy.stats.gaussian_kde from scipy.stats import gaussian_kde np.random.seed(0) x_in = np.random.normal(0, 1, n_samples) x_out = np.linspace(-5, 5, 30) for h in [0.01, 0.1, 1]: bt = BallTree(x_in[:, None]) try: gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in)) except TypeError: raise SkipTest("Old version of scipy, doesn't accept " "explicit bandwidth.") dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples dens_gkde = gkde.evaluate(x_out) assert_array_almost_equal(dens_bt, dens_gkde, decimal=3) def test_ball_tree_two_point(n_samples=100, n_features=3): np.random.seed(0) X = np.random.random((n_samples, n_features)) Y = np.random.random((n_samples, n_features)) r = np.linspace(0, 1, 10) bt = BallTree(X, leaf_size=10) D = DistanceMetric.get_metric("euclidean").pairwise(Y, X) counts_true = [(D <= ri).sum() for ri in r] def check_two_point(r, dualtree): counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree) assert_array_almost_equal(counts, counts_true) for dualtree in (True, False): yield check_two_point, r, dualtree def test_ball_tree_pickle(): np.random.seed(0) X = np.random.random((10, 3)) bt1 = BallTree(X, leaf_size=1) # Test if BallTree with callable metric is picklable bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2) ind1, dist1 = bt1.query(X) ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X) def check_pickle_protocol(protocol): s = pickle.dumps(bt1, protocol=protocol) bt2 = pickle.loads(s) s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol) bt2_pyfunc = pickle.loads(s_pyfunc) ind2, dist2 = bt2.query(X) ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X) assert_array_almost_equal(ind1, ind2) assert_array_almost_equal(dist1, dist2) assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc) assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc) for protocol in (0, 1, 2): yield check_pickle_protocol, protocol def test_neighbors_heap(n_pts=5, n_nbrs=10): heap = NeighborsHeap(n_pts, n_nbrs) for row in 
range(n_pts): d_in = np.random.random(2 * n_nbrs).astype(DTYPE) i_in = np.arange(2 * n_nbrs, dtype=ITYPE) for d, i in zip(d_in, i_in): heap.push(row, d, i) ind = np.argsort(d_in) d_in = d_in[ind] i_in = i_in[ind] d_heap, i_heap = heap.get_arrays(sort=True) assert_array_almost_equal(d_in[:n_nbrs], d_heap[row]) assert_array_almost_equal(i_in[:n_nbrs], i_heap[row]) def test_node_heap(n_nodes=50): vals = np.random.random(n_nodes).astype(DTYPE) i1 = np.argsort(vals) vals2, i2 = nodeheap_sort(vals) assert_array_almost_equal(i1, i2) assert_array_almost_equal(vals[i1], vals2) def test_simultaneous_sort(n_rows=10, n_pts=201): dist = np.random.random((n_rows, n_pts)).astype(DTYPE) ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE) dist2 = dist.copy() ind2 = ind.copy() # simultaneous sort rows using function simultaneous_sort(dist, ind) # simultaneous sort rows using numpy i = np.argsort(dist2, axis=1) row_ind = np.arange(n_rows)[:, None] dist2 = dist2[row_ind, i] ind2 = ind2[row_ind, i] assert_array_almost_equal(dist, dist2) assert_array_almost_equal(ind, ind2) def test_query_haversine(): np.random.seed(0) X = 2 * np.pi * np.random.random((40, 2)) bt = BallTree(X, leaf_size=1, metric='haversine') dist1, ind1 = bt.query(X, k=5) dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine') assert_array_almost_equal(dist1, dist2) assert_array_almost_equal(ind1, ind2)
bsd-3-clause
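A short usage sketch of the public BallTree API exercised by the tests above; the random data and leaf_size are illustrative only.

import numpy as np
from sklearn.neighbors import BallTree

rng = np.random.RandomState(0)
X = rng.random_sample((40, 3))
tree = BallTree(X, leaf_size=5, metric='euclidean')
dist, ind = tree.query(X[:1], k=3)        # 3 nearest neighbours of the first point
ind_r = tree.query_radius(X[:1], r=0.3)   # all neighbours within radius 0.3
print(dist, ind, ind_r[0])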
astocko/statsmodels
statsmodels/sandbox/distributions/otherdist.py
33
10145
'''Parametric Mixture Distributions Created on Sat Jun 04 2011 Author: Josef Perktold Notes: Compound Poisson has mass point at zero http://en.wikipedia.org/wiki/Compound_Poisson_distribution and would need special treatment need a distribution that has discrete mass points and contiuous range, e.g. compound Poisson, Tweedie (for some parameter range), pdf of Tobit model (?) - truncation with clipping Question: Metaclasses and class factories for generating new distributions from existing distributions by transformation, mixing, compounding ''' from __future__ import print_function import numpy as np from scipy import stats class ParametricMixtureD(object): '''mixtures with a discrete distribution The mixing distribution is a discrete distribution like scipy.stats.poisson. All distribution in the mixture of the same type and parameterized by the outcome of the mixing distribution and have to be a continuous distribution (or have a pdf method). As an example, a mixture of normal distributed random variables with Poisson as the mixing distribution. assumes vectorized shape, loc and scale as in scipy.stats.distributions assume mixing_dist is frozen initialization looks fragile for all possible cases of lower and upper bounds of the distributions. ''' def __init__(self, mixing_dist, base_dist, bd_args_func, bd_kwds_func, cutoff=1e-3): '''create a mixture distribution Parameters ---------- mixing_dist : discrete frozen distribution mixing distribution base_dist : continuous distribution parameterized distributions in the mixture bd_args_func : callable function that builds the tuple of args for the base_dist. The function obtains as argument the values in the support of the mixing distribution and should return an empty tuple or a tuple of arrays. bd_kwds_func : callable function that builds the dictionary of kwds for the base_dist. The function obtains as argument the values in the support of the mixing distribution and should return an empty dictionary or a dictionary with arrays as values. cutoff : float If the mixing distribution has infinite support, then the distribution is truncated with approximately (subject to integer conversion) the cutoff probability in the missing tail. Random draws that are outside the truncated range are clipped, that is assigned to the highest or lowest value in the truncated support. ''' self.mixing_dist = mixing_dist self.base_dist = base_dist #self.bd_args = bd_args if not np.isneginf(mixing_dist.dist.a): lower = mixing_dist.dist.a else: lower = mixing_dist.ppf(1e-4) if not np.isposinf(mixing_dist.dist.b): upper = mixing_dist.dist.b else: upper = mixing_dist.isf(1e-4) self.ma = lower self.mb = upper mixing_support = np.arange(lower, upper+1) self.mixing_probs = mixing_dist.pmf(mixing_support) self.bd_args = bd_args_func(mixing_support) self.bd_kwds = bd_kwds_func(mixing_support) def rvs(self, size=1): mrvs = self.mixing_dist.rvs(size) #TODO: check strange cases ? this assumes continous integers mrvs_idx = (np.clip(mrvs, self.ma, self.mb) - self.ma).astype(int) bd_args = tuple(md[mrvs_idx] for md in self.bd_args) bd_kwds = dict((k, self.bd_kwds[k][mrvs_idx]) for k in self.bd_kwds) kwds = {'size':size} kwds.update(bd_kwds) rvs = self.base_dist.rvs(*self.bd_args, **kwds) return rvs, mrvs_idx def pdf(self, x): x = np.asarray(x) if np.size(x) > 1: x = x[...,None] #[None, ...] 
bd_probs = self.base_dist.pdf(x, *self.bd_args, **self.bd_kwds) prob = (bd_probs * self.mixing_probs).sum(-1) return prob, bd_probs def cdf(self, x): x = np.asarray(x) if np.size(x) > 1: x = x[...,None] #[None, ...] bd_probs = self.base_dist.cdf(x, *self.bd_args, **self.bd_kwds) prob = (bd_probs * self.mixing_probs).sum(-1) return prob, bd_probs #try: class ClippedContinuous(object): '''clipped continuous distribution with a masspoint at clip_lower Notes ----- first version, to try out possible designs insufficient checks for valid arguments and not clear whether it works for distributions that have compact support clip_lower is fixed and independent of the distribution parameters. The clip_lower point in the pdf has to be interpreted as a mass point, i.e. different treatment in integration and expect function, which means none of the generic methods for this can be used. maybe this will be better designed as a mixture between a degenerate or discrete and a continuous distribution Warning: uses equality to check for clip_lower values in function arguments, since these are floating points, the comparison might fail if clip_lower values are not exactly equal. We could add a check whether the values are in a small neighborhood, but it would be expensive (need to search and check all values). ''' def __init__(self, base_dist, clip_lower): self.base_dist = base_dist self.clip_lower = clip_lower def _get_clip_lower(self, kwds): '''helper method to get clip_lower from kwds or attribute ''' if not 'clip_lower' in kwds: clip_lower = self.clip_lower else: clip_lower = kwds.pop('clip_lower') return clip_lower, kwds def rvs(self, *args, **kwds): clip_lower, kwds = self._get_clip_lower(kwds) rvs_ = self.base_dist.rvs(*args, **kwds) #same as numpy.clip ? rvs_[rvs_ < clip_lower] = clip_lower return rvs_ def pdf(self, x, *args, **kwds): x = np.atleast_1d(x) if not 'clip_lower' in kwds: clip_lower = self.clip_lower else: #allow clip_lower to be a possible parameter clip_lower = kwds.pop('clip_lower') pdf_raw = np.atleast_1d(self.base_dist.pdf(x, *args, **kwds)) clip_mask = (x == self.clip_lower) if np.any(clip_mask): clip_prob = self.base_dist.cdf(clip_lower, *args, **kwds) pdf_raw[clip_mask] = clip_prob #the following will be handled by sub-classing rv_continuous pdf_raw[x < clip_lower] = 0 return pdf_raw def cdf(self, x, *args, **kwds): if not 'clip_lower' in kwds: clip_lower = self.clip_lower else: #allow clip_lower to be a possible parameter clip_lower = kwds.pop('clip_lower') cdf_raw = self.base_dist.cdf(x, *args, **kwds) #not needed if equality test is used ## clip_mask = (x == self.clip_lower) ## if np.any(clip_mask): ## clip_prob = self.base_dist.cdf(clip_lower, *args, **kwds) ## pdf_raw[clip_mask] = clip_prob #the following will be handled by sub-classing rv_continuous #if self.a is defined cdf_raw[x < clip_lower] = 0 return cdf_raw def sf(self, x, *args, **kwds): if not 'clip_lower' in kwds: clip_lower = self.clip_lower else: #allow clip_lower to be a possible parameter clip_lower = kwds.pop('clip_lower') sf_raw = self.base_dist.sf(x, *args, **kwds) sf_raw[x <= clip_lower] = 1 return sf_raw def ppf(self, x, *args, **kwds): raise NotImplementedError def plot(self, x, *args, **kwds): clip_lower, kwds = self._get_clip_lower(kwds) mass = self.pdf(clip_lower, *args, **kwds) xr = np.concatenate(([clip_lower+1e-6], x[x>clip_lower])) import matplotlib.pyplot as plt #x = np.linspace(-4, 4, 21) #plt.figure() plt.xlim(clip_lower-0.1, x.max()) #remove duplicate calculation xpdf = self.pdf(x, *args, **kwds) 
plt.ylim(0, max(mass, xpdf.max())*1.1) plt.plot(xr, self.pdf(xr, *args, **kwds)) #plt.vline(clip_lower, self.pdf(clip_lower, *args, **kwds)) plt.stem([clip_lower], [mass], linefmt='b-', markerfmt='bo', basefmt='r-') return if __name__ == '__main__': doplots = 1 #*********** Poisson-Normal Mixture mdist = stats.poisson(2.) bdist = stats.norm bd_args_fn = lambda x: () #bd_kwds_fn = lambda x: {'loc': np.atleast_2d(10./(1+x))} bd_kwds_fn = lambda x: {'loc': x, 'scale': 0.1*np.ones_like(x)} #10./(1+x)} pd = ParametricMixtureD(mdist, bdist, bd_args_fn, bd_kwds_fn) print(pd.pdf(1)) p, bp = pd.pdf(np.linspace(0,20,21)) pc, bpc = pd.cdf(np.linspace(0,20,21)) print(pd.rvs()) rvs, m = pd.rvs(size=1000) if doplots: import matplotlib.pyplot as plt plt.hist(rvs, bins = 100) plt.title('poisson mixture of normal distributions') #********** clipped normal distribution (Tobit) bdist = stats.norm clip_lower_ = 0. #-0.5 cnorm = ClippedContinuous(bdist, clip_lower_) x = np.linspace(1e-8, 4, 11) print(cnorm.pdf(x)) print(cnorm.cdf(x)) if doplots: #plt.figure() #cnorm.plot(x) plt.figure() cnorm.plot(x = np.linspace(-1, 4, 51), loc=0.5, scale=np.sqrt(2)) plt.title('clipped normal distribution') fig = plt.figure() for i, loc in enumerate([0., 0.5, 1.,2.]): fig.add_subplot(2,2,i+1) cnorm.plot(x = np.linspace(-1, 4, 51), loc=loc, scale=np.sqrt(2)) plt.title('clipped normal, loc = %3.2f' % loc) loc = 1.5 rvs = cnorm.rvs(loc=loc, size=2000) plt.figure() plt.hist(rvs, bins=50) plt.title('clipped normal rvs, loc = %3.2f' % loc) #plt.show()
bsd-3-clause
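Illustration only, not the module's API: the Poisson-Normal mixture pdf from the __main__ block above, written out directly with scipy.stats so the weighted sum over the (truncated) mixing support is explicit. Truncating the Poisson support at its 1e-4 upper tail mirrors what ParametricMixtureD does.

import numpy as np
from scipy import stats

mixing = stats.poisson(2.0)
support = np.arange(0, int(mixing.isf(1e-4)) + 1)
probs = mixing.pmf(support)

def mixture_pdf(x):
    # weighted sum of normal pdfs, one component per value of the mixing variable
    x = np.asarray(x, dtype=float)[..., None]
    return (stats.norm.pdf(x, loc=support, scale=0.1) * probs).sum(-1)

print(mixture_pdf(np.linspace(0, 5, 6)))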
elijah513/scikit-learn
examples/neighbors/plot_species_kde.py
282
4059
""" ================================================ Kernel Density Estimate of Species Distributions ================================================ This shows an example of a neighbors-based query (in particular a kernel density estimate) on geospatial data, using a Ball Tree built upon the Haversine distance metric -- i.e. distances over points in latitude/longitude. The dataset is provided by Phillips et. al. (2006). If available, the example uses `basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_ to plot the coast lines and national boundaries of South America. This example does not perform any learning over the data (see :ref:`example_applications_plot_species_distribution_modeling.py` for an example of classification based on the attributes in this dataset). It simply shows the kernel density estimate of observed data points in geospatial coordinates. The two species are: - `"Bradypus variegatus" <http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ , the Brown-throated Sloth. - `"Microryzomys minutus" <http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. References ---------- * `"Maximum entropy modeling of species geographic distributions" <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. """ # Author: Jake Vanderplas <jakevdp@cs.washington.edu> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import fetch_species_distributions from sklearn.datasets.species_distributions import construct_grids from sklearn.neighbors import KernelDensity # if basemap is available, we'll use it. # otherwise, we'll improvise later... try: from mpl_toolkits.basemap import Basemap basemap = True except ImportError: basemap = False # Get matrices/arrays of species IDs and locations data = fetch_species_distributions() species_names = ['Bradypus Variegatus', 'Microryzomys Minutus'] Xtrain = np.vstack([data['train']['dd lat'], data['train']['dd long']]).T ytrain = np.array([d.decode('ascii').startswith('micro') for d in data['train']['species']], dtype='int') Xtrain *= np.pi / 180. # Convert lat/long to radians # Set up the data grid for the contour plot xgrid, ygrid = construct_grids(data) X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1]) land_reference = data.coverages[6][::5, ::5] land_mask = (land_reference > -9999).ravel() xy = np.vstack([Y.ravel(), X.ravel()]).T xy = xy[land_mask] xy *= np.pi / 180. 
# Plot map of South America with distributions of each species fig = plt.figure() fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05) for i in range(2): plt.subplot(1, 2, i + 1) # construct a kernel density estimate of the distribution print(" - computing KDE in spherical coordinates") kde = KernelDensity(bandwidth=0.04, metric='haversine', kernel='gaussian', algorithm='ball_tree') kde.fit(Xtrain[ytrain == i]) # evaluate only on the land: -9999 indicates ocean Z = -9999 + np.zeros(land_mask.shape[0]) Z[land_mask] = np.exp(kde.score_samples(xy)) Z = Z.reshape(X.shape) # plot contours of the density levels = np.linspace(0, Z.max(), 25) plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds) if basemap: print(" - plot coastlines using basemap") m = Basemap(projection='cyl', llcrnrlat=Y.min(), urcrnrlat=Y.max(), llcrnrlon=X.min(), urcrnrlon=X.max(), resolution='c') m.drawcoastlines() m.drawcountries() else: print(" - plot coastlines from coverage") plt.contour(X, Y, land_reference, levels=[-9999], colors="k", linestyles="solid") plt.xticks([]) plt.yticks([]) plt.title(species_names[i]) plt.show()
bsd-3-clause
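A minimal sketch of the estimator used above, on made-up latitude/longitude points expressed in radians; bandwidth, kernel, metric and algorithm follow the example, but the data here are not the Phillips et al. species observations.

import numpy as np
from sklearn.neighbors import KernelDensity

rng = np.random.RandomState(0)
latlon = rng.uniform(low=(-0.6, -1.4), high=(0.2, -0.6), size=(100, 2))  # radians
kde = KernelDensity(bandwidth=0.04, metric='haversine',
                    kernel='gaussian', algorithm='ball_tree').fit(latlon)
log_density = kde.score_samples(latlon[:5])  # log of the estimated density
print(np.exp(log_density))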
untom/scikit-learn
examples/applications/face_recognition.py
15
5394
""" =================================================== Faces recognition example using eigenfaces and SVMs =================================================== The dataset used in this example is a preprocessed excerpt of the "Labeled Faces in the Wild", aka LFW_: http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB) .. _LFW: http://vis-www.cs.umass.edu/lfw/ Expected results for the top 5 most represented people in the dataset:: precision recall f1-score support Gerhard_Schroeder 0.91 0.75 0.82 28 Donald_Rumsfeld 0.84 0.82 0.83 33 Tony_Blair 0.65 0.82 0.73 34 Colin_Powell 0.78 0.88 0.83 58 George_W_Bush 0.93 0.86 0.90 129 avg / total 0.86 0.84 0.85 282 """ from __future__ import print_function from time import time import logging import matplotlib.pyplot as plt from sklearn.cross_validation import train_test_split from sklearn.datasets import fetch_lfw_people from sklearn.grid_search import GridSearchCV from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from sklearn.decomposition import RandomizedPCA from sklearn.svm import SVC print(__doc__) # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') ############################################################################### # Download the data, if not already on disk and load it as numpy arrays lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4) # introspect the images arrays to find the shapes (for plotting) n_samples, h, w = lfw_people.images.shape # for machine learning we use the 2 data directly (as relative pixel # positions info is ignored by this model) X = lfw_people.data n_features = X.shape[1] # the label to predict is the id of the person y = lfw_people.target target_names = lfw_people.target_names n_classes = target_names.shape[0] print("Total dataset size:") print("n_samples: %d" % n_samples) print("n_features: %d" % n_features) print("n_classes: %d" % n_classes) ############################################################################### # Split into a training set and a test set using a stratified k fold # split into a training and testing set X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25) ############################################################################### # Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled # dataset): unsupervised feature extraction / dimensionality reduction n_components = 150 print("Extracting the top %d eigenfaces from %d faces" % (n_components, X_train.shape[0])) t0 = time() pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train) print("done in %0.3fs" % (time() - t0)) eigenfaces = pca.components_.reshape((n_components, h, w)) print("Projecting the input data on the eigenfaces orthonormal basis") t0 = time() X_train_pca = pca.transform(X_train) X_test_pca = pca.transform(X_test) print("done in %0.3fs" % (time() - t0)) ############################################################################### # Train a SVM classification model print("Fitting the classifier to the training set") t0 = time() param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5], 'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], } clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid) clf = clf.fit(X_train_pca, y_train) print("done in %0.3fs" % (time() - t0)) print("Best estimator found by grid search:") print(clf.best_estimator_) ############################################################################### # Quantitative 
evaluation of the model quality on the test set print("Predicting people's names on the test set") t0 = time() y_pred = clf.predict(X_test_pca) print("done in %0.3fs" % (time() - t0)) print(classification_report(y_test, y_pred, target_names=target_names)) print(confusion_matrix(y_test, y_pred, labels=range(n_classes))) ############################################################################### # Qualitative evaluation of the predictions using matplotlib def plot_gallery(images, titles, h, w, n_row=3, n_col=4): """Helper function to plot a gallery of portraits""" plt.figure(figsize=(1.8 * n_col, 2.4 * n_row)) plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35) for i in range(n_row * n_col): plt.subplot(n_row, n_col, i + 1) plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray) plt.title(titles[i], size=12) plt.xticks(()) plt.yticks(()) # plot the result of the prediction on a portion of the test set def title(y_pred, y_test, target_names, i): pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1] true_name = target_names[y_test[i]].rsplit(' ', 1)[-1] return 'predicted: %s\ntrue: %s' % (pred_name, true_name) prediction_titles = [title(y_pred, y_test, target_names, i) for i in range(y_pred.shape[0])] plot_gallery(X_test, prediction_titles, h, w) # plot the gallery of the most significative eigenfaces eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])] plot_gallery(eigenfaces, eigenface_titles, h, w) plt.show()
bsd-3-clause
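A compact, hedged sketch of the same eigenfaces-then-SVM idea with the current scikit-learn API (RandomizedPCA was later replaced by PCA with svd_solver='randomized'); random arrays stand in for LFW so the snippet runs without the 233MB download.

import numpy as np
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC

rng = np.random.RandomState(0)
X = rng.random_sample((200, 1850))   # stand-in for flattened face images
y = rng.randint(0, 5, size=200)      # stand-in for person ids
model = make_pipeline(
    PCA(n_components=50, whiten=True, svd_solver='randomized'),
    SVC(kernel='rbf', class_weight='balanced', C=1e3, gamma=1e-3))
model.fit(X[:150], y[:150])
print(model.score(X[150:], y[150:]))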
pyannote/pyannote-audio
pyannote/audio/tasks/segmentation/segmentation.py
1
17573
# MIT License # # Copyright (c) 2020-2021 CNRS # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import math from collections import Counter from typing import Text, Tuple, Union import matplotlib.pyplot as plt import numpy as np import torch from torch_audiomentations.core.transforms_interface import BaseWaveformTransform from typing_extensions import Literal from pyannote.audio.core.task import Problem, Resolution, Specifications, Task from pyannote.audio.tasks.segmentation.mixins import SegmentationTaskMixin from pyannote.audio.utils.loss import binary_cross_entropy, mse_loss from pyannote.audio.utils.permutation import permutate from pyannote.core import SlidingWindow from pyannote.database import Protocol class Segmentation(SegmentationTaskMixin, Task): """Segmentation Note that data augmentation is used to increase the proportion of "overlap". This is achieved by generating chunks made out of the (weighted) sum of two random chunks. Parameters ---------- protocol : Protocol pyannote.database protocol duration : float, optional Chunks duration. Defaults to 2s. warm_up : float or (float, float), optional Use that many seconds on the left- and rightmost parts of each chunk to warm up the model. While the model does process those left- and right-most parts, only the remaining central part of each chunk is used for computing the loss during training, and for aggregating scores during inference. Defaults to 0. (i.e. no warm-up). balance: str, optional When provided, training samples are sampled uniformly with respect to that key. For instance, setting `balance` to "uri" will make sure that each file will be equally represented in the training samples. overlap: dict, optional Controls how artificial chunks with overlapping speech are generated: - "probability" key is the probability of artificial overlapping chunks. Setting "probability" to 0.6 means that, on average, 40% of training chunks are "real" chunks, while 60% are artifical chunks made out of the (weighted) sum of two chunks. Defaults to 0.5. - "snr_min" and "snr_max" keys control the minimum and maximum signal-to-noise ratio between summed chunks, in dB. Default to 0.0 and 10. weight: str, optional When provided, use this key to as frame-wise weight in loss function. batch_size : int, optional Number of training samples per batch. Defaults to 32. num_workers : int, optional Number of workers used for generating training samples. Defaults to multiprocessing.cpu_count() // 2. 
pin_memory : bool, optional If True, data loaders will copy tensors into CUDA pinned memory before returning them. See pytorch documentation for more details. Defaults to False. augmentation : BaseWaveformTransform, optional torch_audiomentations waveform transform, used by dataloader during training. vad_loss : {"bce", "mse"}, optional Add voice activity detection loss. """ ACRONYM = "seg" OVERLAP_DEFAULTS = {"probability": 0.5, "snr_min": 0.0, "snr_max": 10.0} def __init__( self, protocol: Protocol, duration: float = 2.0, warm_up: Union[float, Tuple[float, float]] = 0.0, overlap: dict = OVERLAP_DEFAULTS, balance: Text = None, weight: Text = None, batch_size: int = 32, num_workers: int = None, pin_memory: bool = False, augmentation: BaseWaveformTransform = None, loss: Literal["bce", "mse"] = "bce", vad_loss: Literal["bce", "mse"] = None, ): super().__init__( protocol, duration=duration, warm_up=warm_up, batch_size=batch_size, num_workers=num_workers, pin_memory=pin_memory, augmentation=augmentation, ) self.overlap = overlap self.balance = balance self.weight = weight if loss not in ["bce", "mse"]: raise ValueError("'loss' must be one of {'bce', 'mse'}.") self.loss = loss self.vad_loss = vad_loss def setup(self, stage=None): super().setup(stage=stage) if stage == "fit": # slide a window (with 1s step) over the whole training set # and keep track of the number of speakers in each location num_speakers = [] for file in self._train: start = file["annotated"][0].start end = file["annotated"][-1].end window = SlidingWindow( start=start, end=end, duration=self.duration, step=1.0, ) for chunk in window: num_speakers.append(len(file["annotation"].crop(chunk).labels())) # because there might a few outliers, estimate the upper bound for the # number of speakers as the 99th percentile num_speakers, counts = zip(*list(Counter(num_speakers).items())) num_speakers, counts = np.array(num_speakers), np.array(counts) sorting_indices = np.argsort(num_speakers) num_speakers = num_speakers[sorting_indices] counts = counts[sorting_indices] self.num_speakers = num_speakers[ np.where(np.cumsum(counts) / np.sum(counts) > 0.99)[0][0] ] # TODO: add a few more speakers to make sure we don't skip # too many artificial chunks (which might result in less # overlap that we think we have) # now that we know about the number of speakers upper bound # we can set task specifications self.specifications = Specifications( problem=Problem.MULTI_LABEL_CLASSIFICATION, resolution=Resolution.FRAME, duration=self.duration, warm_up=self.warm_up, classes=[f"speaker#{i+1}" for i in range(self.num_speakers)], permutation_invariant=True, ) def prepare_y(self, one_hot_y: np.ndarray): """Zero-pad segmentation targets Parameters ---------- one_hot_y : (num_frames, num_speakers) np.ndarray One-hot-encoding of current chunk speaker activity: * one_hot_y[t, k] = 1 if kth speaker is active at tth frame * one_hot_y[t, k] = 0 otherwise. Returns ------- padded_one_hot_y : (num_frames, self.num_speakers) np.ndarray One-hot-encoding of current chunk speaker activity: * one_hot_y[t, k] = 1 if kth speaker is active at tth frame * one_hot_y[t, k] = 0 otherwise. 
""" num_frames, num_speakers = one_hot_y.shape if num_speakers > self.num_speakers: raise ValueError() if num_speakers < self.num_speakers: one_hot_y = np.pad( one_hot_y, ((0, 0), (0, self.num_speakers - num_speakers)) ) return one_hot_y def val__getitem__(self, idx): f, chunk = self._validation[idx] sample = self.prepare_chunk(f, chunk, duration=self.duration, stage="val") y, labels = sample["y"], sample.pop("labels") # since number of speakers is estimated from the training set, # we might encounter validation chunks that have more speakers. # in that case, we arbitrarily remove last speakers if y.shape[1] > self.num_speakers: y = y[:, : self.num_speakers] labels = labels[: self.num_speakers] sample["y"] = self.prepare_y(y) return sample def segmentation_loss( self, permutated_prediction: torch.Tensor, target: torch.Tensor, weight: torch.Tensor = None, ) -> torch.Tensor: """Permutation-invariant segmentation loss Parameters ---------- permutated_prediction : (batch_size, num_frames, num_classes) torch.Tensor Permutated speaker activity predictions. target : (batch_size, num_frames, num_speakers) torch.Tensor Speaker activity. weight : (batch_size, num_frames, 1) torch.Tensor, optional Frames weight. Returns ------- seg_loss : torch.Tensor Permutation-invariant segmentation loss """ if self.loss == "bce": seg_loss = binary_cross_entropy( permutated_prediction, target.float(), weight=weight ) elif self.loss == "mse": seg_loss = mse_loss(permutated_prediction, target.float(), weight=weight) return seg_loss def voice_activity_detection_loss( self, permutated_prediction: torch.Tensor, target: torch.Tensor, weight: torch.Tensor = None, ) -> torch.Tensor: """Voice activity detection loss Parameters ---------- permutated_prediction : (batch_size, num_frames, num_classes) torch.Tensor Speaker activity predictions. target : (batch_size, num_frames, num_speakers) torch.Tensor Speaker activity. weight : (batch_size, num_frames, 1) torch.Tensor, optional Frames weight. Returns ------- vad_loss : torch.Tensor Voice activity detection loss. """ vad_prediction, _ = torch.max(permutated_prediction, dim=2, keepdim=True) # (batch_size, num_frames, 1) vad_target, _ = torch.max(target.float(), dim=2, keepdim=False) # (batch_size, num_frames) if self.vad_loss == "bce": loss = binary_cross_entropy(vad_prediction, vad_target, weight=weight) elif self.vad_loss == "mse": loss = mse_loss(vad_prediction, vad_target, weight=weight) return loss def training_step(self, batch, batch_idx: int): """Compute permutation-invariant binary cross-entropy Parameters ---------- batch : (usually) dict of torch.Tensor Current batch. batch_idx: int Batch index. 
Returns ------- loss : {str: torch.tensor} {"loss": loss} """ # forward pass prediction = self.model(batch["X"]) batch_size, num_frames, _ = prediction.shape # (batch_size, num_frames, num_classes) # target target = batch["y"] permutated_prediction, _ = permutate(target, prediction) # frames weight weight_key = getattr(self, "weight", None) weight = batch.get( weight_key, torch.ones(batch_size, num_frames, 1, device=self.model.device), ) # (batch_size, num_frames, 1) # warm-up warm_up_left = round(self.warm_up[0] / self.duration * num_frames) weight[:, :warm_up_left] = 0.0 warm_up_right = round(self.warm_up[1] / self.duration * num_frames) weight[:, num_frames - warm_up_right :] = 0.0 seg_loss = self.segmentation_loss(permutated_prediction, target, weight=weight) self.model.log( f"{self.ACRONYM}@train_seg_loss", seg_loss, on_step=False, on_epoch=True, prog_bar=False, logger=True, ) if self.vad_loss is None: vad_loss = 0.0 else: vad_loss = self.voice_activity_detection_loss( permutated_prediction, target, weight=weight ) self.model.log( f"{self.ACRONYM}@train_vad_loss", vad_loss, on_step=False, on_epoch=True, prog_bar=False, logger=True, ) loss = seg_loss + vad_loss self.model.log( f"{self.ACRONYM}@train_loss", loss, on_step=False, on_epoch=True, prog_bar=True, logger=True, ) return {"loss": loss} def validation_step(self, batch, batch_idx: int): """Compute validation F-score Parameters ---------- batch : dict of torch.Tensor Current batch. batch_idx: int Batch index. """ # move metric to model device self.val_fbeta.to(self.model.device) X, y = batch["X"], batch["y"] # X = (batch_size, num_channels, num_samples) # y = (batch_size, num_frames, num_classes) y_pred = self.model(X) _, num_frames, _ = y_pred.shape # y_pred = (batch_size, num_frames, num_classes) permutated_y_pred, _ = permutate(y, y_pred) warm_up_left = round(self.warm_up[0] / self.duration * num_frames) warm_up_right = round(self.warm_up[1] / self.duration * num_frames) val_fbeta = self.val_fbeta( permutated_y_pred[ :, warm_up_left : num_frames - warm_up_right : 10 ].squeeze(), y[:, warm_up_left : num_frames - warm_up_right : 10].squeeze(), ) self.model.log( f"{self.ACRONYM}@val_fbeta", val_fbeta, on_step=False, on_epoch=True, prog_bar=True, logger=True, ) # log first batch visualization every 2^n epochs. 
if ( self.model.current_epoch == 0 or math.log2(self.model.current_epoch) % 1 > 0 or batch_idx > 0 ): return # visualize first 9 validation samples of first batch in Tensorboard X = X.cpu().numpy() y = y.float().cpu().numpy() y_pred = y_pred.cpu().numpy() permutated_y_pred = permutated_y_pred.cpu().numpy() # prepare 3 x 3 grid (or smaller if batch size is smaller) num_samples = min(self.batch_size, 9) nrows = math.ceil(math.sqrt(num_samples)) ncols = math.ceil(num_samples / nrows) fig, axes = plt.subplots( nrows=4 * nrows, ncols=ncols, figsize=(15, 10), ) # reshape target so that there is one line per class when plottingit y[y == 0] = np.NaN y *= np.arange(y.shape[2]) # plot each sample for sample_idx in range(num_samples): # find where in the grid it should be plotted row_idx = sample_idx // nrows col_idx = sample_idx % ncols # plot waveform ax_wav = axes[row_idx * 4 + 0, col_idx] sample_X = np.mean(X[sample_idx], axis=0) ax_wav.plot(sample_X) ax_wav.set_xlim(0, len(sample_X)) ax_wav.get_xaxis().set_visible(False) ax_wav.get_yaxis().set_visible(False) # plot target ax_ref = axes[row_idx * 4 + 1, col_idx] sample_y = y[sample_idx] ax_ref.plot(sample_y) ax_ref.set_xlim(0, len(sample_y)) ax_ref.set_ylim(-1, sample_y.shape[1]) ax_ref.get_xaxis().set_visible(False) ax_ref.get_yaxis().set_visible(False) # plot prediction ax_hyp = axes[row_idx * 4 + 2, col_idx] sample_y_pred = y_pred[sample_idx] ax_hyp.axvspan(0, warm_up_left, color="k", alpha=0.5, lw=0) ax_hyp.axvspan( num_frames - warm_up_right, num_frames, color="k", alpha=0.5, lw=0 ) ax_hyp.plot(sample_y_pred) ax_hyp.set_ylim(-0.1, 1.1) ax_hyp.set_xlim(0, len(sample_y)) ax_hyp.get_xaxis().set_visible(False) # plot permutated prediction ax_map = axes[row_idx * 4 + 3, col_idx] sample_y_pred_map = permutated_y_pred[sample_idx] ax_map.axvspan(0, warm_up_left, color="k", alpha=0.5, lw=0) ax_map.axvspan( num_frames - warm_up_right, num_frames, color="k", alpha=0.5, lw=0 ) ax_map.plot(sample_y_pred_map) ax_map.set_ylim(-0.1, 1.1) ax_map.set_xlim(0, len(sample_y)) plt.tight_layout() self.model.logger.experiment.add_figure( f"{self.ACRONYM}@val_samples", fig, self.model.current_epoch ) plt.close(fig)
mit
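Illustration only, not pyannote's own implementation: a brute-force version of the permutation-invariant loss idea used in training_step above, searching over speaker permutations explicitly with plain PyTorch. pyannote's `permutate` helper serves the same purpose more efficiently.

from itertools import permutations
import torch
import torch.nn.functional as F

def pit_bce(prediction, target):
    # prediction, target: (batch_size, num_frames, num_speakers), prediction in [0, 1]
    num_speakers = target.shape[-1]
    losses = torch.stack([
        F.binary_cross_entropy(prediction[..., list(perm)], target,
                               reduction='none').mean(dim=(1, 2))
        for perm in permutations(range(num_speakers))
    ])  # (num_permutations, batch_size)
    # best permutation per sample, averaged over the batch
    return losses.min(dim=0).values.mean()

pred = torch.rand(2, 100, 3)
tgt = (torch.rand(2, 100, 3) > 0.5).float()
print(pit_bce(pred, tgt))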
MartinDelzant/scikit-learn
examples/linear_model/plot_ridge_path.py
254
1655
""" =========================================================== Plot Ridge coefficients as a function of the regularization =========================================================== Shows the effect of collinearity in the coefficients of an estimator. .. currentmodule:: sklearn.linear_model :class:`Ridge` Regression is the estimator used in this example. Each color represents a different feature of the coefficient vector, and this is displayed as a function of the regularization parameter. At the end of the path, as alpha tends toward zero and the solution tends towards the ordinary least squares, coefficients exhibit big oscillations. """ # Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr> # License: BSD 3 clause print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import linear_model # X is the 10x10 Hilbert matrix X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis]) y = np.ones(10) ############################################################################### # Compute paths n_alphas = 200 alphas = np.logspace(-10, -2, n_alphas) clf = linear_model.Ridge(fit_intercept=False) coefs = [] for a in alphas: clf.set_params(alpha=a) clf.fit(X, y) coefs.append(clf.coef_) ############################################################################### # Display results ax = plt.gca() ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm']) ax.plot(alphas, coefs) ax.set_xscale('log') ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis plt.xlabel('alpha') plt.ylabel('weights') plt.title('Ridge coefficients as a function of the regularization') plt.axis('tight') plt.show()
bsd-3-clause
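A hedged cross-check of what the path above traces: with fit_intercept=False, each point on the curve is the closed-form ridge solution (X^T X + alpha I)^-1 X^T y, so the scikit-learn estimator and the explicit formula should agree. alpha = 1e-2 is the right-hand end of the plotted range.

import numpy as np
from sklearn import linear_model

X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])  # Hilbert matrix
y = np.ones(10)
alpha = 1e-2
w_closed = np.linalg.solve(X.T @ X + alpha * np.eye(10), X.T @ y)
w_sklearn = linear_model.Ridge(alpha=alpha, fit_intercept=False).fit(X, y).coef_
print(np.allclose(w_closed, w_sklearn))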
jzt5132/scikit-learn
examples/ensemble/plot_bias_variance.py
357
7324
""" ============================================================ Single estimator versus bagging: bias-variance decomposition ============================================================ This example illustrates and compares the bias-variance decomposition of the expected mean squared error of a single estimator against a bagging ensemble. In regression, the expected mean squared error of an estimator can be decomposed in terms of bias, variance and noise. On average over datasets of the regression problem, the bias term measures the average amount by which the predictions of the estimator differ from the predictions of the best possible estimator for the problem (i.e., the Bayes model). The variance term measures the variability of the predictions of the estimator when fit over different instances LS of the problem. Finally, the noise measures the irreducible part of the error which is due the variability in the data. The upper left figure illustrates the predictions (in dark red) of a single decision tree trained over a random dataset LS (the blue dots) of a toy 1d regression problem. It also illustrates the predictions (in light red) of other single decision trees trained over other (and different) randomly drawn instances LS of the problem. Intuitively, the variance term here corresponds to the width of the beam of predictions (in light red) of the individual estimators. The larger the variance, the more sensitive are the predictions for `x` to small changes in the training set. The bias term corresponds to the difference between the average prediction of the estimator (in cyan) and the best possible model (in dark blue). On this problem, we can thus observe that the bias is quite low (both the cyan and the blue curves are close to each other) while the variance is large (the red beam is rather wide). The lower left figure plots the pointwise decomposition of the expected mean squared error of a single decision tree. It confirms that the bias term (in blue) is low while the variance is large (in green). It also illustrates the noise part of the error which, as expected, appears to be constant and around `0.01`. The right figures correspond to the same plots but using instead a bagging ensemble of decision trees. In both figures, we can observe that the bias term is larger than in the previous case. In the upper right figure, the difference between the average prediction (in cyan) and the best possible model is larger (e.g., notice the offset around `x=2`). In the lower right figure, the bias curve is also slightly higher than in the lower left figure. In terms of variance however, the beam of predictions is narrower, which suggests that the variance is lower. Indeed, as the lower right figure confirms, the variance term (in green) is lower than for single decision trees. Overall, the bias- variance decomposition is therefore no longer the same. The tradeoff is better for bagging: averaging several decision trees fit on bootstrap copies of the dataset slightly increases the bias term but allows for a larger reduction of the variance, which results in a lower overall mean squared error (compare the red curves int the lower figures). The script output also confirms this intuition. The total error of the bagging ensemble is lower than the total error of a single decision tree, and this difference indeed mainly stems from a reduced variance. For further details on bias-variance decomposition, see section 7.3 of [1]_. References ---------- .. [1] T. Hastie, R. Tibshirani and J. 
Friedman, "Elements of Statistical Learning", Springer, 2009. """ print(__doc__) # Author: Gilles Louppe <g.louppe@gmail.com> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.ensemble import BaggingRegressor from sklearn.tree import DecisionTreeRegressor # Settings n_repeat = 50 # Number of iterations for computing expectations n_train = 50 # Size of the training set n_test = 1000 # Size of the test set noise = 0.1 # Standard deviation of the noise np.random.seed(0) # Change this for exploring the bias-variance decomposition of other # estimators. This should work well for estimators with high variance (e.g., # decision trees or KNN), but poorly for estimators with low variance (e.g., # linear models). estimators = [("Tree", DecisionTreeRegressor()), ("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))] n_estimators = len(estimators) # Generate data def f(x): x = x.ravel() return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2) def generate(n_samples, noise, n_repeat=1): X = np.random.rand(n_samples) * 10 - 5 X = np.sort(X) if n_repeat == 1: y = f(X) + np.random.normal(0.0, noise, n_samples) else: y = np.zeros((n_samples, n_repeat)) for i in range(n_repeat): y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples) X = X.reshape((n_samples, 1)) return X, y X_train = [] y_train = [] for i in range(n_repeat): X, y = generate(n_samples=n_train, noise=noise) X_train.append(X) y_train.append(y) X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat) # Loop over estimators to compare for n, (name, estimator) in enumerate(estimators): # Compute predictions y_predict = np.zeros((n_test, n_repeat)) for i in range(n_repeat): estimator.fit(X_train[i], y_train[i]) y_predict[:, i] = estimator.predict(X_test) # Bias^2 + Variance + Noise decomposition of the mean squared error y_error = np.zeros(n_test) for i in range(n_repeat): for j in range(n_repeat): y_error += (y_test[:, j] - y_predict[:, i]) ** 2 y_error /= (n_repeat * n_repeat) y_noise = np.var(y_test, axis=1) y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2 y_var = np.var(y_predict, axis=1) print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) " " + {3:.4f} (var) + {4:.4f} (noise)".format(name, np.mean(y_error), np.mean(y_bias), np.mean(y_var), np.mean(y_noise))) # Plot figures plt.subplot(2, n_estimators, n + 1) plt.plot(X_test, f(X_test), "b", label="$f(x)$") plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$") for i in range(n_repeat): if i == 0: plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$") else: plt.plot(X_test, y_predict[:, i], "r", alpha=0.05) plt.plot(X_test, np.mean(y_predict, axis=1), "c", label="$\mathbb{E}_{LS} \^y(x)$") plt.xlim([-5, 5]) plt.title(name) if n == 0: plt.legend(loc="upper left", prop={"size": 11}) plt.subplot(2, n_estimators, n_estimators + n + 1) plt.plot(X_test, y_error, "r", label="$error(x)$") plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"), plt.plot(X_test, y_var, "g", label="$variance(x)$"), plt.plot(X_test, y_noise, "c", label="$noise(x)$") plt.xlim([-5, 5]) plt.ylim([0, 0.1]) if n == 0: plt.legend(loc="upper left", prop={"size": 11}) plt.show()
bsd-3-clause
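A numpy sketch (illustration, not taken from the file above) of the identity the example decomposes: for noisy targets y and predictions y_hat that vary over refitted models at a fixed x, the expected squared error splits into bias^2 + variance + noise.

import numpy as np

rng = np.random.RandomState(0)
f_x, noise_std, n_repeat = 1.3, 0.1, 100000
y = f_x + rng.normal(0.0, noise_std, n_repeat)     # noisy targets
y_hat = 1.2 + rng.normal(0.0, 0.05, n_repeat)      # predictions from refitted models
error = np.mean((y - y_hat) ** 2)
bias2 = (f_x - y_hat.mean()) ** 2
variance = y_hat.var()
noise = noise_std ** 2
print(error, bias2 + variance + noise)             # approximately equal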
Lightmatter/django-inlineformfield
.tox/py27/lib/python2.7/site-packages/IPython/lib/tests/test_latextools.py
10
4076
# encoding: utf-8 """Tests for IPython.utils.path.py""" #----------------------------------------------------------------------------- # Copyright (C) 2008-2011 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- import nose.tools as nt from IPython.lib import latextools from IPython.testing.decorators import onlyif_cmds_exist, skipif_not_matplotlib from IPython.testing.tools import monkeypatch from IPython.utils.process import FindCmdError def test_latex_to_png_dvipng_fails_when_no_cmd(): """ `latex_to_png_dvipng` should return None when there is no required command """ for command in ['latex', 'dvipng']: yield (check_latex_to_png_dvipng_fails_when_no_cmd, command) def check_latex_to_png_dvipng_fails_when_no_cmd(command): def mock_find_cmd(arg): if arg == command: raise FindCmdError with monkeypatch(latextools, "find_cmd", mock_find_cmd): nt.assert_equals(latextools.latex_to_png_dvipng("whatever", True), None) @onlyif_cmds_exist('latex', 'dvipng') def test_latex_to_png_dvipng_runs(): """ Test that latex_to_png_dvipng just runs without error. """ def mock_kpsewhich(filename): nt.assert_equals(filename, "breqn.sty") return None for (s, wrap) in [("$$x^2$$", False), ("x^2", True)]: yield (latextools.latex_to_png_dvipng, s, wrap) with monkeypatch(latextools, "kpsewhich", mock_kpsewhich): yield (latextools.latex_to_png_dvipng, s, wrap) @skipif_not_matplotlib def test_latex_to_png_mpl_runs(): """ Test that latex_to_png_mpl just runs without error. """ def mock_kpsewhich(filename): nt.assert_equals(filename, "breqn.sty") return None for (s, wrap) in [("$x^2$", False), ("x^2", True)]: yield (latextools.latex_to_png_mpl, s, wrap) with monkeypatch(latextools, "kpsewhich", mock_kpsewhich): yield (latextools.latex_to_png_mpl, s, wrap) @skipif_not_matplotlib def test_latex_to_html(): img = latextools.latex_to_html("$x^2$") nt.assert_in("data:image/png;base64,iVBOR", img) def test_genelatex_no_wrap(): """ Test genelatex with wrap=False. """ def mock_kpsewhich(filename): assert False, ("kpsewhich should not be called " "(called with {0})".format(filename)) with monkeypatch(latextools, "kpsewhich", mock_kpsewhich): nt.assert_equals( '\n'.join(latextools.genelatex("body text", False)), r'''\documentclass{article} \usepackage{amsmath} \usepackage{amsthm} \usepackage{amssymb} \usepackage{bm} \pagestyle{empty} \begin{document} body text \end{document}''') def test_genelatex_wrap_with_breqn(): """ Test genelatex with wrap=True for the case breqn.sty is installed. """ def mock_kpsewhich(filename): nt.assert_equals(filename, "breqn.sty") return "path/to/breqn.sty" with monkeypatch(latextools, "kpsewhich", mock_kpsewhich): nt.assert_equals( '\n'.join(latextools.genelatex("x^2", True)), r'''\documentclass{article} \usepackage{amsmath} \usepackage{amsthm} \usepackage{amssymb} \usepackage{bm} \usepackage{breqn} \pagestyle{empty} \begin{document} \begin{dmath*} x^2 \end{dmath*} \end{document}''') def test_genelatex_wrap_without_breqn(): """ Test genelatex with wrap=True for the case breqn.sty is not installed. 
""" def mock_kpsewhich(filename): nt.assert_equals(filename, "breqn.sty") return None with monkeypatch(latextools, "kpsewhich", mock_kpsewhich): nt.assert_equals( '\n'.join(latextools.genelatex("x^2", True)), r'''\documentclass{article} \usepackage{amsmath} \usepackage{amsthm} \usepackage{amssymb} \usepackage{bm} \pagestyle{empty} \begin{document} $$x^2$$ \end{document}''')
mit
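The tests above patch module-level helpers such as `find_cmd` and `kpsewhich`; a generic sketch of the same pattern using only the standard library (`unittest.mock`), independent of IPython's own `monkeypatch` context manager.

from unittest import mock
import os.path

def uses_helper(path):
    return "found" if os.path.exists(path) else "missing"

with mock.patch("os.path.exists", return_value=True):
    assert uses_helper("/no/such/file") == "found"   # patched helper is in effect
print(uses_helper("/no/such/file"))                  # back to the real implementation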
alvarofierroclavero/scikit-learn
examples/svm/plot_separating_hyperplane.py
294
1273
""" ========================================= SVM: Maximum margin separating hyperplane ========================================= Plot the maximum margin separating hyperplane within a two-class separable dataset using a Support Vector Machine classifier with linear kernel. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm # we create 40 separable points np.random.seed(0) X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]] Y = [0] * 20 + [1] * 20 # fit the model clf = svm.SVC(kernel='linear') clf.fit(X, Y) # get the separating hyperplane w = clf.coef_[0] a = -w[0] / w[1] xx = np.linspace(-5, 5) yy = a * xx - (clf.intercept_[0]) / w[1] # plot the parallels to the separating hyperplane that pass through the # support vectors b = clf.support_vectors_[0] yy_down = a * xx + (b[1] - a * b[0]) b = clf.support_vectors_[-1] yy_up = a * xx + (b[1] - a * b[0]) # plot the line, the points, and the nearest vectors to the plane plt.plot(xx, yy, 'k-') plt.plot(xx, yy_down, 'k--') plt.plot(xx, yy_up, 'k--') plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80, facecolors='none') plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired) plt.axis('tight') plt.show()
bsd-3-clause
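A short follow-up sketch: the geometric margin of the hyperplane fitted above is 2 / ||w||, which can be read directly from clf.coef_ once the model is trained. Data generation repeats the example so the snippet stands alone.

import numpy as np
from sklearn import svm

np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
clf = svm.SVC(kernel='linear').fit(X, Y)
w = clf.coef_[0]
print("margin width:", 2.0 / np.linalg.norm(w))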
ZenDevelopmentSystems/scikit-learn
sklearn/datasets/base.py
196
18554
""" Base IO code for all datasets """ # Copyright (c) 2007 David Cournapeau <cournape@gmail.com> # 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr> # 2010 Olivier Grisel <olivier.grisel@ensta.org> # License: BSD 3 clause import os import csv import shutil from os import environ from os.path import dirname from os.path import join from os.path import exists from os.path import expanduser from os.path import isdir from os import listdir from os import makedirs import numpy as np from ..utils import check_random_state class Bunch(dict): """Container object for datasets Dictionary-like object that exposes its keys as attributes. >>> b = Bunch(a=1, b=2) >>> b['b'] 2 >>> b.b 2 >>> b.a = 3 >>> b['a'] 3 >>> b.c = 6 >>> b['c'] 6 """ def __init__(self, **kwargs): dict.__init__(self, kwargs) def __setattr__(self, key, value): self[key] = value def __getattr__(self, key): try: return self[key] except KeyError: raise AttributeError(key) def __getstate__(self): return self.__dict__ def get_data_home(data_home=None): """Return the path of the scikit-learn data dir. This folder is used by some large dataset loaders to avoid downloading the data several times. By default the data dir is set to a folder named 'scikit_learn_data' in the user home folder. Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment variable or programmatically by giving an explicit folder path. The '~' symbol is expanded to the user home folder. If the folder does not already exist, it is automatically created. """ if data_home is None: data_home = environ.get('SCIKIT_LEARN_DATA', join('~', 'scikit_learn_data')) data_home = expanduser(data_home) if not exists(data_home): makedirs(data_home) return data_home def clear_data_home(data_home=None): """Delete all the content of the data home cache.""" data_home = get_data_home(data_home) shutil.rmtree(data_home) def load_files(container_path, description=None, categories=None, load_content=True, shuffle=True, encoding=None, decode_error='strict', random_state=0): """Load text files with categories as subfolder names. Individual samples are assumed to be files stored a two levels folder structure such as the following: container_folder/ category_1_folder/ file_1.txt file_2.txt ... file_42.txt category_2_folder/ file_43.txt file_44.txt ... The folder names are used as supervised signal label names. The individual file names are not important. This function does not try to extract features into a numpy array or scipy sparse matrix. In addition, if load_content is false it does not try to load the files in memory. To use text files in a scikit-learn classification or clustering algorithm, you will need to use the `sklearn.feature_extraction.text` module to build a feature extraction transformer that suits your problem. If you set load_content=True, you should also specify the encoding of the text using the 'encoding' parameter. For many modern text files, 'utf-8' will be the correct encoding. If you leave encoding equal to None, then the content will be made of bytes instead of Unicode, and you will not be able to use most functions in `sklearn.feature_extraction.text`. Similar feature extractors should be built for other kind of unstructured data input such as images, audio, video, ... Read more in the :ref:`User Guide <datasets>`. 
Parameters ---------- container_path : string or unicode Path to the main folder holding one subfolder per category description: string or unicode, optional (default=None) A paragraph describing the characteristic of the dataset: its source, reference, etc. categories : A collection of strings or None, optional (default=None) If None (default), load all the categories. If not None, list of category names to load (other categories ignored). load_content : boolean, optional (default=True) Whether to load or not the content of the different files. If true a 'data' attribute containing the text information is present in the data structure returned. If not, a filenames attribute gives the path to the files. encoding : string or None (default is None) If None, do not try to decode the content of the files (e.g. for images or other non-text content). If not None, encoding to use to decode text files to Unicode if load_content is True. decode_error: {'strict', 'ignore', 'replace'}, optional Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. Passed as keyword argument 'errors' to bytes.decode. shuffle : bool, optional (default=True) Whether or not to shuffle the data: might be important for models that make the assumption that the samples are independent and identically distributed (i.i.d.), such as stochastic gradient descent. random_state : int, RandomState instance or None, optional (default=0) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- data : Bunch Dictionary-like object, the interesting attributes are: either data, the raw text data to learn, or 'filenames', the files holding it, 'target', the classification labels (integer index), 'target_names', the meaning of the labels, and 'DESCR', the full description of the dataset. """ target = [] target_names = [] filenames = [] folders = [f for f in sorted(listdir(container_path)) if isdir(join(container_path, f))] if categories is not None: folders = [f for f in folders if f in categories] for label, folder in enumerate(folders): target_names.append(folder) folder_path = join(container_path, folder) documents = [join(folder_path, d) for d in sorted(listdir(folder_path))] target.extend(len(documents) * [label]) filenames.extend(documents) # convert to array for fancy indexing filenames = np.array(filenames) target = np.array(target) if shuffle: random_state = check_random_state(random_state) indices = np.arange(filenames.shape[0]) random_state.shuffle(indices) filenames = filenames[indices] target = target[indices] if load_content: data = [] for filename in filenames: with open(filename, 'rb') as f: data.append(f.read()) if encoding is not None: data = [d.decode(encoding, decode_error) for d in data] return Bunch(data=data, filenames=filenames, target_names=target_names, target=target, DESCR=description) return Bunch(filenames=filenames, target_names=target_names, target=target, DESCR=description) def load_iris(): """Load and return the iris dataset (classification). The iris dataset is a classic and very easy multi-class classification dataset. ================= ============== Classes 3 Samples per class 50 Samples total 150 Dimensionality 4 Features real, positive ================= ============== Read more in the :ref:`User Guide <datasets>`. 
Returns ------- data : Bunch Dictionary-like object, the interesting attributes are: 'data', the data to learn, 'target', the classification labels, 'target_names', the meaning of the labels, 'feature_names', the meaning of the features, and 'DESCR', the full description of the dataset. Examples -------- Let's say you are interested in the samples 10, 25, and 50, and want to know their class name. >>> from sklearn.datasets import load_iris >>> data = load_iris() >>> data.target[[10, 25, 50]] array([0, 0, 1]) >>> list(data.target_names) ['setosa', 'versicolor', 'virginica'] """ module_path = dirname(__file__) with open(join(module_path, 'data', 'iris.csv')) as csv_file: data_file = csv.reader(csv_file) temp = next(data_file) n_samples = int(temp[0]) n_features = int(temp[1]) target_names = np.array(temp[2:]) data = np.empty((n_samples, n_features)) target = np.empty((n_samples,), dtype=np.int) for i, ir in enumerate(data_file): data[i] = np.asarray(ir[:-1], dtype=np.float) target[i] = np.asarray(ir[-1], dtype=np.int) with open(join(module_path, 'descr', 'iris.rst')) as rst_file: fdescr = rst_file.read() return Bunch(data=data, target=target, target_names=target_names, DESCR=fdescr, feature_names=['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)']) def load_digits(n_class=10): """Load and return the digits dataset (classification). Each datapoint is a 8x8 image of a digit. ================= ============== Classes 10 Samples per class ~180 Samples total 1797 Dimensionality 64 Features integers 0-16 ================= ============== Read more in the :ref:`User Guide <datasets>`. Parameters ---------- n_class : integer, between 0 and 10, optional (default=10) The number of classes to return. Returns ------- data : Bunch Dictionary-like object, the interesting attributes are: 'data', the data to learn, 'images', the images corresponding to each sample, 'target', the classification labels for each sample, 'target_names', the meaning of the labels, and 'DESCR', the full description of the dataset. Examples -------- To load the data and visualize the images:: >>> from sklearn.datasets import load_digits >>> digits = load_digits() >>> print(digits.data.shape) (1797, 64) >>> import pylab as pl #doctest: +SKIP >>> pl.gray() #doctest: +SKIP >>> pl.matshow(digits.images[0]) #doctest: +SKIP >>> pl.show() #doctest: +SKIP """ module_path = dirname(__file__) data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'), delimiter=',') with open(join(module_path, 'descr', 'digits.rst')) as f: descr = f.read() target = data[:, -1] flat_data = data[:, :-1] images = flat_data.view() images.shape = (-1, 8, 8) if n_class < 10: idx = target < n_class flat_data, target = flat_data[idx], target[idx] images = images[idx] return Bunch(data=flat_data, target=target.astype(np.int), target_names=np.arange(10), images=images, DESCR=descr) def load_diabetes(): """Load and return the diabetes dataset (regression). ============== ================== Samples total 442 Dimensionality 10 Features real, -.2 < x < .2 Targets integer 25 - 346 ============== ================== Read more in the :ref:`User Guide <datasets>`. Returns ------- data : Bunch Dictionary-like object, the interesting attributes are: 'data', the data to learn and 'target', the regression target for each sample. 
""" base_dir = join(dirname(__file__), 'data') data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz')) target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz')) return Bunch(data=data, target=target) def load_linnerud(): """Load and return the linnerud dataset (multivariate regression). Samples total: 20 Dimensionality: 3 for both data and targets Features: integer Targets: integer Returns ------- data : Bunch Dictionary-like object, the interesting attributes are: 'data' and 'targets', the two multivariate datasets, with 'data' corresponding to the exercise and 'targets' corresponding to the physiological measurements, as well as 'feature_names' and 'target_names'. """ base_dir = join(dirname(__file__), 'data/') # Read data data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1) data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv', skiprows=1) # Read header with open(base_dir + 'linnerud_exercise.csv') as f: header_exercise = f.readline().split() with open(base_dir + 'linnerud_physiological.csv') as f: header_physiological = f.readline().split() with open(dirname(__file__) + '/descr/linnerud.rst') as f: descr = f.read() return Bunch(data=data_exercise, feature_names=header_exercise, target=data_physiological, target_names=header_physiological, DESCR=descr) def load_boston(): """Load and return the boston house-prices dataset (regression). ============== ============== Samples total 506 Dimensionality 13 Features real, positive Targets real 5. - 50. ============== ============== Returns ------- data : Bunch Dictionary-like object, the interesting attributes are: 'data', the data to learn, 'target', the regression targets, and 'DESCR', the full description of the dataset. Examples -------- >>> from sklearn.datasets import load_boston >>> boston = load_boston() >>> print(boston.data.shape) (506, 13) """ module_path = dirname(__file__) fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst') with open(fdescr_name) as f: descr_text = f.read() data_file_name = join(module_path, 'data', 'boston_house_prices.csv') with open(data_file_name) as f: data_file = csv.reader(f) temp = next(data_file) n_samples = int(temp[0]) n_features = int(temp[1]) data = np.empty((n_samples, n_features)) target = np.empty((n_samples,)) temp = next(data_file) # names of features feature_names = np.array(temp) for i, d in enumerate(data_file): data[i] = np.asarray(d[:-1], dtype=np.float) target[i] = np.asarray(d[-1], dtype=np.float) return Bunch(data=data, target=target, # last column is target value feature_names=feature_names[:-1], DESCR=descr_text) def load_sample_images(): """Load sample images for image manipulation. Loads both, ``china`` and ``flower``. Returns ------- data : Bunch Dictionary-like object with the following attributes : 'images', the two sample images, 'filenames', the file names for the images, and 'DESCR' the full description of the dataset. Examples -------- To load the data and visualize the images: >>> from sklearn.datasets import load_sample_images >>> dataset = load_sample_images() #doctest: +SKIP >>> len(dataset.images) #doctest: +SKIP 2 >>> first_img_data = dataset.images[0] #doctest: +SKIP >>> first_img_data.shape #doctest: +SKIP (427, 640, 3) >>> first_img_data.dtype #doctest: +SKIP dtype('uint8') """ # Try to import imread from scipy. We do this lazily here to prevent # this module from depending on PIL. 
try: try: from scipy.misc import imread except ImportError: from scipy.misc.pilutil import imread except ImportError: raise ImportError("The Python Imaging Library (PIL) " "is required to load data from jpeg files") module_path = join(dirname(__file__), "images") with open(join(module_path, 'README.txt')) as f: descr = f.read() filenames = [join(module_path, filename) for filename in os.listdir(module_path) if filename.endswith(".jpg")] # Load image data for each image in the source folder. images = [imread(filename) for filename in filenames] return Bunch(images=images, filenames=filenames, DESCR=descr) def load_sample_image(image_name): """Load the numpy array of a single sample image Parameters ----------- image_name: {`china.jpg`, `flower.jpg`} The name of the sample image loaded Returns ------- img: 3D array The image as a numpy array: height x width x color Examples --------- >>> from sklearn.datasets import load_sample_image >>> china = load_sample_image('china.jpg') # doctest: +SKIP >>> china.dtype # doctest: +SKIP dtype('uint8') >>> china.shape # doctest: +SKIP (427, 640, 3) >>> flower = load_sample_image('flower.jpg') # doctest: +SKIP >>> flower.dtype # doctest: +SKIP dtype('uint8') >>> flower.shape # doctest: +SKIP (427, 640, 3) """ images = load_sample_images() index = None for i, filename in enumerate(images.filenames): if filename.endswith(image_name): index = i break if index is None: raise AttributeError("Cannot find sample image: %s" % image_name) return images.images[index]
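# A brief usage sketch of the helpers defined above, assuming this module is
# importable in the usual way as sklearn.datasets: a Bunch exposes its keys
# both dict-style and attribute-style, and the load_* functions return one.
from sklearn.datasets import load_iris, get_data_home

iris = load_iris()
print(iris.data.shape)         # (150, 4) -- attribute access on the Bunch
print(iris['target_names'])    # ['setosa' 'versicolor' 'virginica'] -- dict access
print(get_data_home())         # cache directory used by the downloading loaders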
bsd-3-clause
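# A minimal sketch of load_files on the two-level folder layout its docstring
# describes; the temporary directory, category names, and file contents here
# are made up for illustration only.
import os
import tempfile
from sklearn.datasets import load_files

root = tempfile.mkdtemp()
for category, text in [('spam', b'buy now'), ('ham', b'see you tomorrow')]:
    os.makedirs(os.path.join(root, category))
    with open(os.path.join(root, category, 'doc1.txt'), 'wb') as f:
        f.write(text)

corpus = load_files(root, encoding='utf-8')
print(corpus.target_names)   # folder names become the label names
print(corpus.data)           # decoded file contents (an encoding was given)
print(corpus.target)         # integer label per file, shuffled by default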
jchodera/MSMs
shanson/mek-10488/msmbuilder-finding4/msmbuilder-finding4-mek.py
2
2134
import matplotlib
matplotlib.use('Agg')

from msmbuilder.dataset import dataset
from msmbuilder import msm, featurizer, utils, decomposition
import numpy as np
import mdtraj as md
import matplotlib.pyplot as plt
from glob import glob
import os

# Source directory for MEK simulations
source_directory = '/cbio/jclab/projects/fah/fah-data/munged/no-solvent/10488'

################################################################################
# Load trajectories
################################################################################

print('loading trajectories...')
filenames = glob(os.path.join(source_directory, '*0.h5'))
trajectories = [md.load(filename) for filename in filenames]
print("We are analyzing %s trajectories." % len(trajectories))

################################################################################
# Initialize dihedral and tICA features
################################################################################

print('initializing dihedral and tICA features...')
dihedrals = featurizer.DihedralFeaturizer(types=["phi", "psi", "chi1", "chi2"]).transform(trajectories)
tica = decomposition.tICA(n_components=4, lag_time=1600)
X = tica.fit_transform(dihedrals)

################################################################################
# Make eigenvalues plot
################################################################################

plt.clf()
eigenvalues = (tica.eigenvalues_) ** 2
sum_eigenvalues = np.sum(eigenvalues[0:2])
print("This is the sum of the first two eigenvalues: %s." % sum_eigenvalues)
plt.plot(eigenvalues)
plt.xlim(0, 4)
plt.ylim(0, 1.2)
plt.annotate('sum first two: %s.' % sum_eigenvalues, xy=(0.25, 0.1))
plt.savefig('msmb-eigenvalues.png')

################################################################################
# Plot first two tICs
################################################################################

plt.clf()
Xf = np.concatenate(X)
plt.hexbin(Xf[:, 0], Xf[:, 1], bins='log')
plt.title("Dihedral tICA Analysis")
plt.xlabel("tic 1")
plt.ylabel("tic 2")
plt.savefig("msmbuilder-finding4-mek.png", bbox_inches="tight")
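# A follow-on sketch, not part of the original analysis script: it assumes the
# per-trajectory tICA projections in `X` computed above and uses scikit-learn's
# MiniBatchKMeans (rather than msmbuilder's own clustering) to discretize the
# tICA space into microstates, as one typically would before building an MSM.
from sklearn.cluster import MiniBatchKMeans

Xf_all = np.concatenate(X)                     # pool all frames, shape (n_frames, 4)
kmeans = MiniBatchKMeans(n_clusters=100, random_state=0).fit(Xf_all)
labels = kmeans.predict(Xf_all)                # microstate index per frame
counts = np.bincount(labels, minlength=100)
print("largest microstate holds %.1f%% of frames"
      % (100.0 * counts.max() / len(labels)))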
gpl-2.0
alexeyum/scikit-learn
benchmarks/bench_isotonic.py
38
3047
""" Benchmarks of isotonic regression performance. We generate a synthetic dataset of size 10^n, for n in [min, max], and examine the time taken to run isotonic regression over the dataset. The timings are then output to stdout, or visualized on a log-log scale with matplotlib. This allows the scaling of the algorithm with the problem size to be visualized and understood. """ from __future__ import print_function import numpy as np import gc from datetime import datetime from sklearn.isotonic import isotonic_regression from sklearn.utils.bench import total_seconds import matplotlib.pyplot as plt import argparse def generate_perturbed_logarithm_dataset(size): return np.random.randint(-50, 50, size=n) \ + 50. * np.log(1 + np.arange(n)) def generate_logistic_dataset(size): X = np.sort(np.random.normal(size=size)) return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X)) DATASET_GENERATORS = { 'perturbed_logarithm': generate_perturbed_logarithm_dataset, 'logistic': generate_logistic_dataset } def bench_isotonic_regression(Y): """ Runs a single iteration of isotonic regression on the input data, and reports the total time taken (in seconds). """ gc.collect() tstart = datetime.now() isotonic_regression(Y) delta = datetime.now() - tstart return total_seconds(delta) if __name__ == '__main__': parser = argparse.ArgumentParser( description="Isotonic Regression benchmark tool") parser.add_argument('--iterations', type=int, required=True, help="Number of iterations to average timings over " "for each problem size") parser.add_argument('--log_min_problem_size', type=int, required=True, help="Base 10 logarithm of the minimum problem size") parser.add_argument('--log_max_problem_size', type=int, required=True, help="Base 10 logarithm of the maximum problem size") parser.add_argument('--show_plot', action='store_true', help="Plot timing output with matplotlib") parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(), required=True) args = parser.parse_args() timings = [] for exponent in range(args.log_min_problem_size, args.log_max_problem_size): n = 10 ** exponent Y = DATASET_GENERATORS[args.dataset](n) time_per_iteration = \ [bench_isotonic_regression(Y) for i in range(args.iterations)] timing = (n, np.mean(time_per_iteration)) timings.append(timing) # If we're not plotting, dump the timing to stdout if not args.show_plot: print(n, np.mean(time_per_iteration)) if args.show_plot: plt.plot(*zip(*timings)) plt.title("Average time taken running isotonic regression") plt.xlabel('Number of observations') plt.ylabel('Time (s)') plt.axis('tight') plt.loglog() plt.show()
bsd-3-clause
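# A tiny standalone illustration of what the benchmark above is timing:
# sklearn.isotonic.isotonic_regression takes a 1-D array and returns its
# closest (least-squares) non-decreasing sequence, here on made-up numbers.
import numpy as np
from sklearn.isotonic import isotonic_regression

y = np.array([3.0, 1.0, 2.0, 5.0, 4.0])
y_iso = isotonic_regression(y)
print(y_iso)                        # [2.  2.  2.  4.5 4.5] -- adjacent violators pooled
print(np.all(np.diff(y_iso) >= 0))  # True: the output is monotone non-decreasing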
Karel-van-de-Plassche/bokeh
bokeh/sampledata/tests/test_airports.py
2
1951
#----------------------------------------------------------------------------- # Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved. # # Powered by the Bokeh Development Team. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Boilerplate #----------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function, unicode_literals import pytest ; pytest #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Standard library imports # External imports import pandas as pd # Bokeh imports from bokeh.util.testing import verify_all # Module under test #import bokeh.sampledata.airports as bsa #----------------------------------------------------------------------------- # Setup #----------------------------------------------------------------------------- ALL = ( 'data', ) #----------------------------------------------------------------------------- # General API #----------------------------------------------------------------------------- Test___all__ = pytest.mark.sampledata(verify_all("bokeh.sampledata.airports", ALL)) @pytest.mark.sampledata def test_data(): import bokeh.sampledata.airports as bsa assert isinstance(bsa.data, pd.DataFrame) # don't check detail for external data #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #-----------------------------------------------------------------------------
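# A minimal sketch, separate from the test module above, of how the airports
# sample data is typically made available before that test can pass: the CSVs
# must first be downloaded with bokeh.sampledata.download(); after that the
# module under test exposes a pandas DataFrame named `data`.
import bokeh.sampledata
bokeh.sampledata.download()                  # fetches the sample CSVs locally

from bokeh.sampledata.airports import data  # the object the test asserts on
print(type(data))                            # <class 'pandas.core.frame.DataFrame'>
print(data.shape)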
bsd-3-clause
jakobworldpeace/scikit-learn
examples/neighbors/plot_kde_1d.py
60
5120
""" =================================== Simple 1D Kernel Density Estimation =================================== This example uses the :class:`sklearn.neighbors.KernelDensity` class to demonstrate the principles of Kernel Density Estimation in one dimension. The first plot shows one of the problems with using histograms to visualize the density of points in 1D. Intuitively, a histogram can be thought of as a scheme in which a unit "block" is stacked above each point on a regular grid. As the top two panels show, however, the choice of gridding for these blocks can lead to wildly divergent ideas about the underlying shape of the density distribution. If we instead center each block on the point it represents, we get the estimate shown in the bottom left panel. This is a kernel density estimation with a "top hat" kernel. This idea can be generalized to other kernel shapes: the bottom-right panel of the first figure shows a Gaussian kernel density estimate over the same distribution. Scikit-learn implements efficient kernel density estimation using either a Ball Tree or KD Tree structure, through the :class:`sklearn.neighbors.KernelDensity` estimator. The available kernels are shown in the second figure of this example. The third figure compares kernel density estimates for a distribution of 100 samples in 1 dimension. Though this example uses 1D distributions, kernel density estimation is easily and efficiently extensible to higher dimensions as well. """ # Author: Jake Vanderplas <jakevdp@cs.washington.edu> # import numpy as np import matplotlib.pyplot as plt from scipy.stats import norm from sklearn.neighbors import KernelDensity #---------------------------------------------------------------------- # Plot the progression of histograms to kernels np.random.seed(1) N = 20 X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)), np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis] X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis] bins = np.linspace(-5, 10, 10) fig, ax = plt.subplots(2, 2, sharex=True, sharey=True) fig.subplots_adjust(hspace=0.05, wspace=0.05) # histogram 1 ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True) ax[0, 0].text(-3.5, 0.31, "Histogram") # histogram 2 ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True) ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted") # tophat KDE kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X) log_dens = kde.score_samples(X_plot) ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF') ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density") # Gaussian KDE kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X) log_dens = kde.score_samples(X_plot) ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF') ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density") for axi in ax.ravel(): axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k') axi.set_xlim(-4, 9) axi.set_ylim(-0.02, 0.34) for axi in ax[:, 0]: axi.set_ylabel('Normalized Density') for axi in ax[1, :]: axi.set_xlabel('x') #---------------------------------------------------------------------- # Plot all available kernels X_plot = np.linspace(-6, 6, 1000)[:, None] X_src = np.zeros((1, 1)) fig, ax = plt.subplots(2, 3, sharex=True, sharey=True) fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05) def format_func(x, loc): if x == 0: return '0' elif x == 1: return 'h' elif x == -1: return '-h' else: return '%ih' % x for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine']): axi = ax.ravel()[i] 
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot) axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF') axi.text(-2.6, 0.95, kernel) axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func)) axi.xaxis.set_major_locator(plt.MultipleLocator(1)) axi.yaxis.set_major_locator(plt.NullLocator()) axi.set_ylim(0, 1.05) axi.set_xlim(-2.9, 2.9) ax[0, 1].set_title('Available Kernels') #---------------------------------------------------------------------- # Plot a 1D density example N = 100 np.random.seed(1) X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)), np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis] X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis] true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0]) + 0.7 * norm(5, 1).pdf(X_plot[:, 0])) fig, ax = plt.subplots() ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2, label='input distribution') for kernel in ['gaussian', 'tophat', 'epanechnikov']: kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X) log_dens = kde.score_samples(X_plot) ax.plot(X_plot[:, 0], np.exp(log_dens), '-', label="kernel = '{0}'".format(kernel)) ax.text(6, 0.38, "N={0} points".format(N)) ax.legend(loc='upper left') ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k') ax.set_xlim(-4, 9) ax.set_ylim(-0.02, 0.4) plt.show()
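# A small check, not part of the example above, relating what
# KernelDensity.score_samples returns to the kernel sum it is estimating; the
# hand-written comparison assumes the Gaussian kernel density is the average of
# one Gaussian bump per sample, which holds in one dimension.
import numpy as np
from scipy.stats import norm
from sklearn.neighbors import KernelDensity

rng = np.random.RandomState(0)
X_demo = rng.normal(0, 1, size=(50, 1))
grid = np.linspace(-3, 3, 5)[:, np.newaxis]

kde_demo = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X_demo)
dens = np.exp(kde_demo.score_samples(grid))               # estimated p(x) on the grid

manual = norm(loc=X_demo.ravel(), scale=0.5).pdf(grid).mean(axis=1)
print(np.allclose(dens, manual))                          # True up to numerical noise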
bsd-3-clause
lscheinkman/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/_mathtext_data.py
69
57988
""" font data tables for truetype and afm computer modern fonts """ # this dict maps symbol names to fontnames, glyphindex. To get the # glyph index from the character code, you have to use get_charmap """ from matplotlib.ft2font import FT2Font font = FT2Font('/usr/local/share/matplotlib/cmr10.ttf') items = font.get_charmap().items() items.sort() for charcode, glyphind in items: print charcode, glyphind """ latex_to_bakoma = { r'\oint' : ('cmex10', 45), r'\bigodot' : ('cmex10', 50), r'\bigoplus' : ('cmex10', 55), r'\bigotimes' : ('cmex10', 59), r'\sum' : ('cmex10', 51), r'\prod' : ('cmex10', 24), r'\int' : ('cmex10', 56), r'\bigcup' : ('cmex10', 28), r'\bigcap' : ('cmex10', 60), r'\biguplus' : ('cmex10', 32), r'\bigwedge' : ('cmex10', 4), r'\bigvee' : ('cmex10', 37), r'\coprod' : ('cmex10', 42), r'\__sqrt__' : ('cmex10', 48), r'\leftbrace' : ('cmex10', 92), r'{' : ('cmex10', 92), r'\{' : ('cmex10', 92), r'\rightbrace' : ('cmex10', 130), r'}' : ('cmex10', 130), r'\}' : ('cmex10', 130), r'\leftangle' : ('cmex10', 97), r'\rightangle' : ('cmex10', 64), r'\langle' : ('cmex10', 97), r'\rangle' : ('cmex10', 64), r'\widehat' : ('cmex10', 15), r'\widetilde' : ('cmex10', 52), r'\omega' : ('cmmi10', 29), r'\varepsilon' : ('cmmi10', 20), r'\vartheta' : ('cmmi10', 22), r'\varrho' : ('cmmi10', 61), r'\varsigma' : ('cmmi10', 41), r'\varphi' : ('cmmi10', 6), r'\leftharpoonup' : ('cmmi10', 108), r'\leftharpoondown' : ('cmmi10', 68), r'\rightharpoonup' : ('cmmi10', 117), r'\rightharpoondown' : ('cmmi10', 77), r'\triangleright' : ('cmmi10', 130), r'\triangleleft' : ('cmmi10', 89), r'.' : ('cmmi10', 51), r',' : ('cmmi10', 44), r'<' : ('cmmi10', 99), r'/' : ('cmmi10', 98), r'>' : ('cmmi10', 107), r'\flat' : ('cmmi10', 131), r'\natural' : ('cmmi10', 90), r'\sharp' : ('cmmi10', 50), r'\smile' : ('cmmi10', 97), r'\frown' : ('cmmi10', 58), r'\ell' : ('cmmi10', 102), r'\imath' : ('cmmi10', 8), r'\jmath' : ('cmmi10', 65), r'\wp' : ('cmmi10', 14), r'\alpha' : ('cmmi10', 13), r'\beta' : ('cmmi10', 35), r'\gamma' : ('cmmi10', 24), r'\delta' : ('cmmi10', 38), r'\epsilon' : ('cmmi10', 54), r'\zeta' : ('cmmi10', 10), r'\eta' : ('cmmi10', 5), r'\theta' : ('cmmi10', 18), r'\iota' : ('cmmi10', 28), r'\lambda' : ('cmmi10', 9), r'\mu' : ('cmmi10', 32), r'\nu' : ('cmmi10', 34), r'\xi' : ('cmmi10', 7), r'\pi' : ('cmmi10', 36), r'\kappa' : ('cmmi10', 30), r'\rho' : ('cmmi10', 39), r'\sigma' : ('cmmi10', 21), r'\tau' : ('cmmi10', 43), r'\upsilon' : ('cmmi10', 25), r'\phi' : ('cmmi10', 42), r'\chi' : ('cmmi10', 17), r'\psi' : ('cmmi10', 31), r'|' : ('cmsy10', 47), r'\|' : ('cmsy10', 47), r'(' : ('cmr10', 119), r'\leftparen' : ('cmr10', 119), r'\rightparen' : ('cmr10', 68), r')' : ('cmr10', 68), r'+' : ('cmr10', 76), r'0' : ('cmr10', 40), r'1' : ('cmr10', 100), r'2' : ('cmr10', 49), r'3' : ('cmr10', 110), r'4' : ('cmr10', 59), r'5' : ('cmr10', 120), r'6' : ('cmr10', 69), r'7' : ('cmr10', 127), r'8' : ('cmr10', 77), r'9' : ('cmr10', 22), r':' : ('cmr10', 85), r';' : ('cmr10', 31), r'=' : ('cmr10', 41), r'\leftbracket' : ('cmr10', 62), r'[' : ('cmr10', 62), r'\rightbracket' : ('cmr10', 72), r']' : ('cmr10', 72), r'\%' : ('cmr10', 48), r'%' : ('cmr10', 48), r'\$' : ('cmr10', 99), r'@' : ('cmr10', 111), r'\_' : ('cmtt10', 79), r'\Gamma' : ('cmr10', 19), r'\Delta' : ('cmr10', 6), r'\Theta' : ('cmr10', 7), r'\Lambda' : ('cmr10', 14), r'\Xi' : ('cmr10', 3), r'\Pi' : ('cmr10', 17), r'\Sigma' : ('cmr10', 10), r'\Upsilon' : ('cmr10', 11), r'\Phi' : ('cmr10', 9), r'\Psi' : ('cmr10', 15), r'\Omega' : ('cmr10', 12), # these are mathml names, I 
think. I'm just using them for the # tex methods noted r'\circumflexaccent' : ('cmr10', 124), # for \hat r'\combiningbreve' : ('cmr10', 81), # for \breve r'\combiningoverline' : ('cmr10', 131), # for \bar r'\combininggraveaccent' : ('cmr10', 114), # for \grave r'\combiningacuteaccent' : ('cmr10', 63), # for \accute r'\combiningdiaeresis' : ('cmr10', 91), # for \ddot r'\combiningtilde' : ('cmr10', 75), # for \tilde r'\combiningrightarrowabove' : ('cmmi10', 110), # for \vec r'\combiningdotabove' : ('cmr10', 26), # for \dot r'\leftarrow' : ('cmsy10', 10), r'\uparrow' : ('cmsy10', 25), r'\downarrow' : ('cmsy10', 28), r'\leftrightarrow' : ('cmsy10', 24), r'\nearrow' : ('cmsy10', 99), r'\searrow' : ('cmsy10', 57), r'\simeq' : ('cmsy10', 108), r'\Leftarrow' : ('cmsy10', 104), r'\Rightarrow' : ('cmsy10', 112), r'\Uparrow' : ('cmsy10', 60), r'\Downarrow' : ('cmsy10', 68), r'\Leftrightarrow' : ('cmsy10', 51), r'\nwarrow' : ('cmsy10', 65), r'\swarrow' : ('cmsy10', 116), r'\propto' : ('cmsy10', 15), r'\prime' : ('cmsy10', 73), r"'" : ('cmsy10', 73), r'\infty' : ('cmsy10', 32), r'\in' : ('cmsy10', 59), r'\ni' : ('cmsy10', 122), r'\bigtriangleup' : ('cmsy10', 80), r'\bigtriangledown' : ('cmsy10', 132), r'\slash' : ('cmsy10', 87), r'\forall' : ('cmsy10', 21), r'\exists' : ('cmsy10', 5), r'\neg' : ('cmsy10', 20), r'\emptyset' : ('cmsy10', 33), r'\Re' : ('cmsy10', 95), r'\Im' : ('cmsy10', 52), r'\top' : ('cmsy10', 100), r'\bot' : ('cmsy10', 11), r'\aleph' : ('cmsy10', 26), r'\cup' : ('cmsy10', 6), r'\cap' : ('cmsy10', 19), r'\uplus' : ('cmsy10', 58), r'\wedge' : ('cmsy10', 43), r'\vee' : ('cmsy10', 96), r'\vdash' : ('cmsy10', 109), r'\dashv' : ('cmsy10', 66), r'\lfloor' : ('cmsy10', 117), r'\rfloor' : ('cmsy10', 74), r'\lceil' : ('cmsy10', 123), r'\rceil' : ('cmsy10', 81), r'\lbrace' : ('cmsy10', 92), r'\rbrace' : ('cmsy10', 105), r'\mid' : ('cmsy10', 47), r'\vert' : ('cmsy10', 47), r'\Vert' : ('cmsy10', 44), r'\updownarrow' : ('cmsy10', 94), r'\Updownarrow' : ('cmsy10', 53), r'\backslash' : ('cmsy10', 126), r'\wr' : ('cmsy10', 101), r'\nabla' : ('cmsy10', 110), r'\sqcup' : ('cmsy10', 67), r'\sqcap' : ('cmsy10', 118), r'\sqsubseteq' : ('cmsy10', 75), r'\sqsupseteq' : ('cmsy10', 124), r'\S' : ('cmsy10', 129), r'\dag' : ('cmsy10', 71), r'\ddag' : ('cmsy10', 127), r'\P' : ('cmsy10', 130), r'\clubsuit' : ('cmsy10', 18), r'\diamondsuit' : ('cmsy10', 34), r'\heartsuit' : ('cmsy10', 22), r'-' : ('cmsy10', 17), r'\cdot' : ('cmsy10', 78), r'\times' : ('cmsy10', 13), r'*' : ('cmsy10', 9), r'\ast' : ('cmsy10', 9), r'\div' : ('cmsy10', 31), r'\diamond' : ('cmsy10', 48), r'\pm' : ('cmsy10', 8), r'\mp' : ('cmsy10', 98), r'\oplus' : ('cmsy10', 16), r'\ominus' : ('cmsy10', 56), r'\otimes' : ('cmsy10', 30), r'\oslash' : ('cmsy10', 107), r'\odot' : ('cmsy10', 64), r'\bigcirc' : ('cmsy10', 115), r'\circ' : ('cmsy10', 72), r'\bullet' : ('cmsy10', 84), r'\asymp' : ('cmsy10', 121), r'\equiv' : ('cmsy10', 35), r'\subseteq' : ('cmsy10', 103), r'\supseteq' : ('cmsy10', 42), r'\leq' : ('cmsy10', 14), r'\geq' : ('cmsy10', 29), r'\preceq' : ('cmsy10', 79), r'\succeq' : ('cmsy10', 131), r'\sim' : ('cmsy10', 27), r'\approx' : ('cmsy10', 23), r'\subset' : ('cmsy10', 50), r'\supset' : ('cmsy10', 86), r'\ll' : ('cmsy10', 85), r'\gg' : ('cmsy10', 40), r'\prec' : ('cmsy10', 93), r'\succ' : ('cmsy10', 49), r'\rightarrow' : ('cmsy10', 12), r'\to' : ('cmsy10', 12), r'\spadesuit' : ('cmsy10', 7), } latex_to_cmex = { r'\__sqrt__' : 112, r'\bigcap' : 92, r'\bigcup' : 91, r'\bigodot' : 75, r'\bigoplus' : 77, r'\bigotimes' : 79, r'\biguplus' : 93, 
r'\bigvee' : 95, r'\bigwedge' : 94, r'\coprod' : 97, r'\int' : 90, r'\leftangle' : 173, r'\leftbrace' : 169, r'\oint' : 73, r'\prod' : 89, r'\rightangle' : 174, r'\rightbrace' : 170, r'\sum' : 88, r'\widehat' : 98, r'\widetilde' : 101, } latex_to_standard = { r'\cong' : ('psyr', 64), r'\Delta' : ('psyr', 68), r'\Phi' : ('psyr', 70), r'\Gamma' : ('psyr', 89), r'\alpha' : ('psyr', 97), r'\beta' : ('psyr', 98), r'\chi' : ('psyr', 99), r'\delta' : ('psyr', 100), r'\varepsilon' : ('psyr', 101), r'\phi' : ('psyr', 102), r'\gamma' : ('psyr', 103), r'\eta' : ('psyr', 104), r'\iota' : ('psyr', 105), r'\varpsi' : ('psyr', 106), r'\kappa' : ('psyr', 108), r'\nu' : ('psyr', 110), r'\pi' : ('psyr', 112), r'\theta' : ('psyr', 113), r'\rho' : ('psyr', 114), r'\sigma' : ('psyr', 115), r'\tau' : ('psyr', 116), r'\upsilon' : ('psyr', 117), r'\varpi' : ('psyr', 118), r'\omega' : ('psyr', 119), r'\xi' : ('psyr', 120), r'\psi' : ('psyr', 121), r'\zeta' : ('psyr', 122), r'\sim' : ('psyr', 126), r'\leq' : ('psyr', 163), r'\infty' : ('psyr', 165), r'\clubsuit' : ('psyr', 167), r'\diamondsuit' : ('psyr', 168), r'\heartsuit' : ('psyr', 169), r'\spadesuit' : ('psyr', 170), r'\leftrightarrow' : ('psyr', 171), r'\leftarrow' : ('psyr', 172), r'\uparrow' : ('psyr', 173), r'\rightarrow' : ('psyr', 174), r'\downarrow' : ('psyr', 175), r'\pm' : ('psyr', 176), r'\geq' : ('psyr', 179), r'\times' : ('psyr', 180), r'\propto' : ('psyr', 181), r'\partial' : ('psyr', 182), r'\bullet' : ('psyr', 183), r'\div' : ('psyr', 184), r'\neq' : ('psyr', 185), r'\equiv' : ('psyr', 186), r'\approx' : ('psyr', 187), r'\ldots' : ('psyr', 188), r'\aleph' : ('psyr', 192), r'\Im' : ('psyr', 193), r'\Re' : ('psyr', 194), r'\wp' : ('psyr', 195), r'\otimes' : ('psyr', 196), r'\oplus' : ('psyr', 197), r'\oslash' : ('psyr', 198), r'\cap' : ('psyr', 199), r'\cup' : ('psyr', 200), r'\supset' : ('psyr', 201), r'\supseteq' : ('psyr', 202), r'\subset' : ('psyr', 204), r'\subseteq' : ('psyr', 205), r'\in' : ('psyr', 206), r'\notin' : ('psyr', 207), r'\angle' : ('psyr', 208), r'\nabla' : ('psyr', 209), r'\textregistered' : ('psyr', 210), r'\copyright' : ('psyr', 211), r'\texttrademark' : ('psyr', 212), r'\Pi' : ('psyr', 213), r'\prod' : ('psyr', 213), r'\surd' : ('psyr', 214), r'\__sqrt__' : ('psyr', 214), r'\cdot' : ('psyr', 215), r'\urcorner' : ('psyr', 216), r'\vee' : ('psyr', 217), r'\wedge' : ('psyr', 218), r'\Leftrightarrow' : ('psyr', 219), r'\Leftarrow' : ('psyr', 220), r'\Uparrow' : ('psyr', 221), r'\Rightarrow' : ('psyr', 222), r'\Downarrow' : ('psyr', 223), r'\Diamond' : ('psyr', 224), r'\langle' : ('psyr', 225), r'\Sigma' : ('psyr', 229), r'\sum' : ('psyr', 229), r'\forall' : ('psyr', 34), r'\exists' : ('psyr', 36), r'\lceil' : ('psyr', 233), r'\lbrace' : ('psyr', 123), r'\Psi' : ('psyr', 89), r'\bot' : ('psyr', 0136), r'\Omega' : ('psyr', 0127), r'\leftbracket' : ('psyr', 0133), r'\rightbracket' : ('psyr', 0135), r'\leftbrace' : ('psyr', 123), r'\leftparen' : ('psyr', 050), r'\prime' : ('psyr', 0242), r'\sharp' : ('psyr', 043), r'\slash' : ('psyr', 057), r'\Lamda' : ('psyr', 0114), r'\neg' : ('psyr', 0330), r'\Upsilon' : ('psyr', 0241), r'\rightbrace' : ('psyr', 0175), r'\rfloor' : ('psyr', 0373), r'\lambda' : ('psyr', 0154), r'\to' : ('psyr', 0256), r'\Xi' : ('psyr', 0130), r'\emptyset' : ('psyr', 0306), r'\lfloor' : ('psyr', 0353), r'\rightparen' : ('psyr', 051), r'\rceil' : ('psyr', 0371), r'\ni' : ('psyr', 047), r'\epsilon' : ('psyr', 0145), r'\Theta' : ('psyr', 0121), r'\langle' : ('psyr', 0341), r'\leftangle' : ('psyr', 0341), r'\rangle' : 
('psyr', 0361), r'\rightangle' : ('psyr', 0361), r'\rbrace' : ('psyr', 0175), r'\circ' : ('psyr', 0260), r'\diamond' : ('psyr', 0340), r'\mu' : ('psyr', 0155), r'\mid' : ('psyr', 0352), r'\imath' : ('pncri8a', 105), r'\%' : ('pncr8a', 37), r'\$' : ('pncr8a', 36), r'\{' : ('pncr8a', 123), r'\}' : ('pncr8a', 125), r'\backslash' : ('pncr8a', 92), r'\ast' : ('pncr8a', 42), r'\circumflexaccent' : ('pncri8a', 124), # for \hat r'\combiningbreve' : ('pncri8a', 81), # for \breve r'\combininggraveaccent' : ('pncri8a', 114), # for \grave r'\combiningacuteaccent' : ('pncri8a', 63), # for \accute r'\combiningdiaeresis' : ('pncri8a', 91), # for \ddot r'\combiningtilde' : ('pncri8a', 75), # for \tilde r'\combiningrightarrowabove' : ('pncri8a', 110), # for \vec r'\combiningdotabove' : ('pncri8a', 26), # for \dot } # Automatically generated. type12uni = {'uni24C8': 9416, 'aring': 229, 'uni22A0': 8864, 'uni2292': 8850, 'quotedblright': 8221, 'uni03D2': 978, 'uni2215': 8725, 'uni03D0': 976, 'V': 86, 'dollar': 36, 'uni301E': 12318, 'uni03D5': 981, 'four': 52, 'uni25A0': 9632, 'uni013C': 316, 'uni013B': 315, 'uni013E': 318, 'Yacute': 221, 'uni25DE': 9694, 'uni013F': 319, 'uni255A': 9562, 'uni2606': 9734, 'uni0180': 384, 'uni22B7': 8887, 'uni044F': 1103, 'uni22B5': 8885, 'uni22B4': 8884, 'uni22AE': 8878, 'uni22B2': 8882, 'uni22B1': 8881, 'uni22B0': 8880, 'uni25CD': 9677, 'uni03CE': 974, 'uni03CD': 973, 'uni03CC': 972, 'uni03CB': 971, 'uni03CA': 970, 'uni22B8': 8888, 'uni22C9': 8905, 'uni0449': 1097, 'uni20DD': 8413, 'uni20DC': 8412, 'uni20DB': 8411, 'uni2231': 8753, 'uni25CF': 9679, 'uni306E': 12398, 'uni03D1': 977, 'uni01A1': 417, 'uni20D7': 8407, 'uni03D6': 982, 'uni2233': 8755, 'uni20D2': 8402, 'uni20D1': 8401, 'uni20D0': 8400, 'P': 80, 'uni22BE': 8894, 'uni22BD': 8893, 'uni22BC': 8892, 'uni22BB': 8891, 'underscore': 95, 'uni03C8': 968, 'uni03C7': 967, 'uni0328': 808, 'uni03C5': 965, 'uni03C4': 964, 'uni03C3': 963, 'uni03C2': 962, 'uni03C1': 961, 'uni03C0': 960, 'uni2010': 8208, 'uni0130': 304, 'uni0133': 307, 'uni0132': 306, 'uni0135': 309, 'uni0134': 308, 'uni0137': 311, 'uni0136': 310, 'uni0139': 313, 'uni0138': 312, 'uni2244': 8772, 'uni229A': 8858, 'uni2571': 9585, 'uni0278': 632, 'uni2239': 8761, 'p': 112, 'uni3019': 12313, 'uni25CB': 9675, 'uni03DB': 987, 'uni03DC': 988, 'uni03DA': 986, 'uni03DF': 991, 'uni03DD': 989, 'uni013D': 317, 'uni220A': 8714, 'uni220C': 8716, 'uni220B': 8715, 'uni220E': 8718, 'uni220D': 8717, 'uni220F': 8719, 'uni22CC': 8908, 'Otilde': 213, 'uni25E5': 9701, 'uni2736': 10038, 'perthousand': 8240, 'zero': 48, 'uni279B': 10139, 'dotlessi': 305, 'uni2279': 8825, 'Scaron': 352, 'zcaron': 382, 'uni21D8': 8664, 'egrave': 232, 'uni0271': 625, 'uni01AA': 426, 'uni2332': 9010, 'section': 167, 'uni25E4': 9700, 'Icircumflex': 206, 'ntilde': 241, 'uni041E': 1054, 'ampersand': 38, 'uni041C': 1052, 'uni041A': 1050, 'uni22AB': 8875, 'uni21DB': 8667, 'dotaccent': 729, 'uni0416': 1046, 'uni0417': 1047, 'uni0414': 1044, 'uni0415': 1045, 'uni0412': 1042, 'uni0413': 1043, 'degree': 176, 'uni0411': 1041, 'K': 75, 'uni25EB': 9707, 'uni25EF': 9711, 'uni0418': 1048, 'uni0419': 1049, 'uni2263': 8803, 'uni226E': 8814, 'uni2251': 8785, 'uni02C8': 712, 'uni2262': 8802, 'acircumflex': 226, 'uni22B3': 8883, 'uni2261': 8801, 'uni2394': 9108, 'Aring': 197, 'uni2260': 8800, 'uni2254': 8788, 'uni0436': 1078, 'uni2267': 8807, 'k': 107, 'uni22C8': 8904, 'uni226A': 8810, 'uni231F': 8991, 'smalltilde': 732, 'uni2201': 8705, 'uni2200': 8704, 'uni2203': 8707, 'uni02BD': 701, 'uni2205': 8709, 'uni2204': 8708, 
'Agrave': 192, 'uni2206': 8710, 'uni2209': 8713, 'uni2208': 8712, 'uni226D': 8813, 'uni2264': 8804, 'uni263D': 9789, 'uni2258': 8792, 'uni02D3': 723, 'uni02D2': 722, 'uni02D1': 721, 'uni02D0': 720, 'uni25E1': 9697, 'divide': 247, 'uni02D5': 725, 'uni02D4': 724, 'ocircumflex': 244, 'uni2524': 9508, 'uni043A': 1082, 'uni24CC': 9420, 'asciitilde': 126, 'uni22B9': 8889, 'uni24D2': 9426, 'uni211E': 8478, 'uni211D': 8477, 'uni24DD': 9437, 'uni211A': 8474, 'uni211C': 8476, 'uni211B': 8475, 'uni25C6': 9670, 'uni017F': 383, 'uni017A': 378, 'uni017C': 380, 'uni017B': 379, 'uni0346': 838, 'uni22F1': 8945, 'uni22F0': 8944, 'two': 50, 'uni2298': 8856, 'uni24D1': 9425, 'E': 69, 'uni025D': 605, 'scaron': 353, 'uni2322': 8994, 'uni25E3': 9699, 'uni22BF': 8895, 'F': 70, 'uni0440': 1088, 'uni255E': 9566, 'uni22BA': 8890, 'uni0175': 373, 'uni0174': 372, 'uni0177': 375, 'uni0176': 374, 'bracketleft': 91, 'uni0170': 368, 'uni0173': 371, 'uni0172': 370, 'asciicircum': 94, 'uni0179': 377, 'uni2590': 9616, 'uni25E2': 9698, 'uni2119': 8473, 'uni2118': 8472, 'uni25CC': 9676, 'f': 102, 'ordmasculine': 186, 'uni229B': 8859, 'uni22A1': 8865, 'uni2111': 8465, 'uni2110': 8464, 'uni2113': 8467, 'uni2112': 8466, 'mu': 181, 'uni2281': 8833, 'paragraph': 182, 'nine': 57, 'uni25EC': 9708, 'v': 118, 'uni040C': 1036, 'uni0113': 275, 'uni22D0': 8912, 'uni21CC': 8652, 'uni21CB': 8651, 'uni21CA': 8650, 'uni22A5': 8869, 'uni21CF': 8655, 'uni21CE': 8654, 'uni21CD': 8653, 'guilsinglleft': 8249, 'backslash': 92, 'uni2284': 8836, 'uni224E': 8782, 'uni224D': 8781, 'uni224F': 8783, 'uni224A': 8778, 'uni2287': 8839, 'uni224C': 8780, 'uni224B': 8779, 'uni21BD': 8637, 'uni2286': 8838, 'uni030F': 783, 'uni030D': 781, 'uni030E': 782, 'uni030B': 779, 'uni030C': 780, 'uni030A': 778, 'uni026E': 622, 'uni026D': 621, 'six': 54, 'uni026A': 618, 'uni026C': 620, 'uni25C1': 9665, 'uni20D6': 8406, 'uni045B': 1115, 'uni045C': 1116, 'uni256B': 9579, 'uni045A': 1114, 'uni045F': 1119, 'uni045E': 1118, 'A': 65, 'uni2569': 9577, 'uni0458': 1112, 'uni0459': 1113, 'uni0452': 1106, 'uni0453': 1107, 'uni2562': 9570, 'uni0451': 1105, 'uni0456': 1110, 'uni0457': 1111, 'uni0454': 1108, 'uni0455': 1109, 'icircumflex': 238, 'uni0307': 775, 'uni0304': 772, 'uni0305': 773, 'uni0269': 617, 'uni0268': 616, 'uni0300': 768, 'uni0301': 769, 'uni0265': 613, 'uni0264': 612, 'uni0267': 615, 'uni0266': 614, 'uni0261': 609, 'uni0260': 608, 'uni0263': 611, 'uni0262': 610, 'a': 97, 'uni2207': 8711, 'uni2247': 8775, 'uni2246': 8774, 'uni2241': 8769, 'uni2240': 8768, 'uni2243': 8771, 'uni2242': 8770, 'uni2312': 8978, 'ogonek': 731, 'uni2249': 8777, 'uni2248': 8776, 'uni3030': 12336, 'q': 113, 'uni21C2': 8642, 'uni21C1': 8641, 'uni21C0': 8640, 'uni21C7': 8647, 'uni21C6': 8646, 'uni21C5': 8645, 'uni21C4': 8644, 'uni225F': 8799, 'uni212C': 8492, 'uni21C8': 8648, 'uni2467': 9319, 'oacute': 243, 'uni028F': 655, 'uni028E': 654, 'uni026F': 623, 'uni028C': 652, 'uni028B': 651, 'uni028A': 650, 'uni2510': 9488, 'ograve': 242, 'edieresis': 235, 'uni22CE': 8910, 'uni22CF': 8911, 'uni219F': 8607, 'comma': 44, 'uni22CA': 8906, 'uni0429': 1065, 'uni03C6': 966, 'uni0427': 1063, 'uni0426': 1062, 'uni0425': 1061, 'uni0424': 1060, 'uni0423': 1059, 'uni0422': 1058, 'uni0421': 1057, 'uni0420': 1056, 'uni2465': 9317, 'uni24D0': 9424, 'uni2464': 9316, 'uni0430': 1072, 'otilde': 245, 'uni2661': 9825, 'uni24D6': 9430, 'uni2466': 9318, 'uni24D5': 9429, 'uni219A': 8602, 'uni2518': 9496, 'uni22B6': 8886, 'uni2461': 9313, 'uni24D4': 9428, 'uni2460': 9312, 'uni24EA': 9450, 'guillemotright': 187, 'ecircumflex': 
234, 'greater': 62, 'uni2011': 8209, 'uacute': 250, 'uni2462': 9314, 'L': 76, 'bullet': 8226, 'uni02A4': 676, 'uni02A7': 679, 'cedilla': 184, 'uni02A2': 674, 'uni2015': 8213, 'uni22C4': 8900, 'uni22C5': 8901, 'uni22AD': 8877, 'uni22C7': 8903, 'uni22C0': 8896, 'uni2016': 8214, 'uni22C2': 8898, 'uni22C3': 8899, 'uni24CF': 9423, 'uni042F': 1071, 'uni042E': 1070, 'uni042D': 1069, 'ydieresis': 255, 'l': 108, 'logicalnot': 172, 'uni24CA': 9418, 'uni0287': 647, 'uni0286': 646, 'uni0285': 645, 'uni0284': 644, 'uni0283': 643, 'uni0282': 642, 'uni0281': 641, 'uni027C': 636, 'uni2664': 9828, 'exclamdown': 161, 'uni25C4': 9668, 'uni0289': 649, 'uni0288': 648, 'uni039A': 922, 'endash': 8211, 'uni2640': 9792, 'uni20E4': 8420, 'uni0473': 1139, 'uni20E1': 8417, 'uni2642': 9794, 'uni03B8': 952, 'uni03B9': 953, 'agrave': 224, 'uni03B4': 948, 'uni03B5': 949, 'uni03B6': 950, 'uni03B7': 951, 'uni03B0': 944, 'uni03B1': 945, 'uni03B2': 946, 'uni03B3': 947, 'uni2555': 9557, 'Adieresis': 196, 'germandbls': 223, 'Odieresis': 214, 'space': 32, 'uni0126': 294, 'uni0127': 295, 'uni0124': 292, 'uni0125': 293, 'uni0122': 290, 'uni0123': 291, 'uni0120': 288, 'uni0121': 289, 'quoteright': 8217, 'uni2560': 9568, 'uni2556': 9558, 'ucircumflex': 251, 'uni2561': 9569, 'uni2551': 9553, 'uni25B2': 9650, 'uni2550': 9552, 'uni2563': 9571, 'uni2553': 9555, 'G': 71, 'uni2564': 9572, 'uni2552': 9554, 'quoteleft': 8216, 'uni2565': 9573, 'uni2572': 9586, 'uni2568': 9576, 'uni2566': 9574, 'W': 87, 'uni214A': 8522, 'uni012F': 303, 'uni012D': 301, 'uni012E': 302, 'uni012B': 299, 'uni012C': 300, 'uni255C': 9564, 'uni012A': 298, 'uni2289': 8841, 'Q': 81, 'uni2320': 8992, 'uni2321': 8993, 'g': 103, 'uni03BD': 957, 'uni03BE': 958, 'uni03BF': 959, 'uni2282': 8834, 'uni2285': 8837, 'uni03BA': 954, 'uni03BB': 955, 'uni03BC': 956, 'uni2128': 8488, 'uni25B7': 9655, 'w': 119, 'uni0302': 770, 'uni03DE': 990, 'uni25DA': 9690, 'uni0303': 771, 'uni0463': 1123, 'uni0462': 1122, 'uni3018': 12312, 'uni2514': 9492, 'question': 63, 'uni25B3': 9651, 'uni24E1': 9441, 'one': 49, 'uni200A': 8202, 'uni2278': 8824, 'ring': 730, 'uni0195': 405, 'figuredash': 8210, 'uni22EC': 8940, 'uni0339': 825, 'uni0338': 824, 'uni0337': 823, 'uni0336': 822, 'uni0335': 821, 'uni0333': 819, 'uni0332': 818, 'uni0331': 817, 'uni0330': 816, 'uni01C1': 449, 'uni01C0': 448, 'uni01C3': 451, 'uni01C2': 450, 'uni2353': 9043, 'uni0308': 776, 'uni2218': 8728, 'uni2219': 8729, 'uni2216': 8726, 'uni2217': 8727, 'uni2214': 8724, 'uni0309': 777, 'uni2609': 9737, 'uni2213': 8723, 'uni2210': 8720, 'uni2211': 8721, 'uni2245': 8773, 'B': 66, 'uni25D6': 9686, 'iacute': 237, 'uni02E6': 742, 'uni02E7': 743, 'uni02E8': 744, 'uni02E9': 745, 'uni221D': 8733, 'uni221E': 8734, 'Ydieresis': 376, 'uni221C': 8732, 'uni22D7': 8919, 'uni221A': 8730, 'R': 82, 'uni24DC': 9436, 'uni033F': 831, 'uni033E': 830, 'uni033C': 828, 'uni033B': 827, 'uni033A': 826, 'b': 98, 'uni228A': 8842, 'uni22DB': 8923, 'uni2554': 9556, 'uni046B': 1131, 'uni046A': 1130, 'r': 114, 'uni24DB': 9435, 'Ccedilla': 199, 'minus': 8722, 'uni24DA': 9434, 'uni03F0': 1008, 'uni03F1': 1009, 'uni20AC': 8364, 'uni2276': 8822, 'uni24C0': 9408, 'uni0162': 354, 'uni0163': 355, 'uni011E': 286, 'uni011D': 285, 'uni011C': 284, 'uni011B': 283, 'uni0164': 356, 'uni0165': 357, 'Lslash': 321, 'uni0168': 360, 'uni0169': 361, 'uni25C9': 9673, 'uni02E5': 741, 'uni21C3': 8643, 'uni24C4': 9412, 'uni24E2': 9442, 'uni2277': 8823, 'uni013A': 314, 'uni2102': 8450, 'Uacute': 218, 'uni2317': 8983, 'uni2107': 8455, 'uni221F': 8735, 'yacute': 253, 'uni3012': 12306, 
'Ucircumflex': 219, 'uni015D': 349, 'quotedbl': 34, 'uni25D9': 9689, 'uni2280': 8832, 'uni22AF': 8879, 'onehalf': 189, 'uni221B': 8731, 'Thorn': 222, 'uni2226': 8742, 'M': 77, 'uni25BA': 9658, 'uni2463': 9315, 'uni2336': 9014, 'eight': 56, 'uni2236': 8758, 'multiply': 215, 'uni210C': 8460, 'uni210A': 8458, 'uni21C9': 8649, 'grave': 96, 'uni210E': 8462, 'uni0117': 279, 'uni016C': 364, 'uni0115': 277, 'uni016A': 362, 'uni016F': 367, 'uni0112': 274, 'uni016D': 365, 'uni016E': 366, 'Ocircumflex': 212, 'uni2305': 8965, 'm': 109, 'uni24DF': 9439, 'uni0119': 281, 'uni0118': 280, 'uni20A3': 8355, 'uni20A4': 8356, 'uni20A7': 8359, 'uni2288': 8840, 'uni24C3': 9411, 'uni251C': 9500, 'uni228D': 8845, 'uni222F': 8751, 'uni222E': 8750, 'uni222D': 8749, 'uni222C': 8748, 'uni222B': 8747, 'uni222A': 8746, 'uni255B': 9563, 'Ugrave': 217, 'uni24DE': 9438, 'guilsinglright': 8250, 'uni250A': 9482, 'Ntilde': 209, 'uni0279': 633, 'questiondown': 191, 'uni256C': 9580, 'Atilde': 195, 'uni0272': 626, 'uni0273': 627, 'uni0270': 624, 'ccedilla': 231, 'uni0276': 630, 'uni0277': 631, 'uni0274': 628, 'uni0275': 629, 'uni2252': 8786, 'uni041F': 1055, 'uni2250': 8784, 'Z': 90, 'uni2256': 8790, 'uni2257': 8791, 'copyright': 169, 'uni2255': 8789, 'uni043D': 1085, 'uni043E': 1086, 'uni043F': 1087, 'yen': 165, 'uni041D': 1053, 'uni043B': 1083, 'uni043C': 1084, 'uni21B0': 8624, 'uni21B1': 8625, 'uni21B2': 8626, 'uni21B3': 8627, 'uni21B4': 8628, 'uni21B5': 8629, 'uni21B6': 8630, 'uni21B7': 8631, 'uni21B8': 8632, 'Eacute': 201, 'uni2311': 8977, 'uni2310': 8976, 'uni228F': 8847, 'uni25DB': 9691, 'uni21BA': 8634, 'uni21BB': 8635, 'uni21BC': 8636, 'uni2017': 8215, 'uni21BE': 8638, 'uni21BF': 8639, 'uni231C': 8988, 'H': 72, 'uni0293': 659, 'uni2202': 8706, 'uni22A4': 8868, 'uni231E': 8990, 'uni2232': 8754, 'uni225B': 8795, 'uni225C': 8796, 'uni24D9': 9433, 'uni225A': 8794, 'uni0438': 1080, 'uni0439': 1081, 'uni225D': 8797, 'uni225E': 8798, 'uni0434': 1076, 'X': 88, 'uni007F': 127, 'uni0437': 1079, 'Idieresis': 207, 'uni0431': 1073, 'uni0432': 1074, 'uni0433': 1075, 'uni22AC': 8876, 'uni22CD': 8909, 'uni25A3': 9635, 'bar': 124, 'uni24BB': 9403, 'uni037E': 894, 'uni027B': 635, 'h': 104, 'uni027A': 634, 'uni027F': 639, 'uni027D': 637, 'uni027E': 638, 'uni2227': 8743, 'uni2004': 8196, 'uni2225': 8741, 'uni2224': 8740, 'uni2223': 8739, 'uni2222': 8738, 'uni2221': 8737, 'uni2220': 8736, 'x': 120, 'uni2323': 8995, 'uni2559': 9561, 'uni2558': 9560, 'uni2229': 8745, 'uni2228': 8744, 'udieresis': 252, 'uni029D': 669, 'ordfeminine': 170, 'uni22CB': 8907, 'uni233D': 9021, 'uni0428': 1064, 'uni24C6': 9414, 'uni22DD': 8925, 'uni24C7': 9415, 'uni015C': 348, 'uni015B': 347, 'uni015A': 346, 'uni22AA': 8874, 'uni015F': 351, 'uni015E': 350, 'braceleft': 123, 'uni24C5': 9413, 'uni0410': 1040, 'uni03AA': 938, 'uni24C2': 9410, 'uni03AC': 940, 'uni03AB': 939, 'macron': 175, 'uni03AD': 941, 'uni03AF': 943, 'uni0294': 660, 'uni0295': 661, 'uni0296': 662, 'uni0297': 663, 'uni0290': 656, 'uni0291': 657, 'uni0292': 658, 'atilde': 227, 'Acircumflex': 194, 'uni2370': 9072, 'uni24C1': 9409, 'uni0298': 664, 'uni0299': 665, 'Oslash': 216, 'uni029E': 670, 'C': 67, 'quotedblleft': 8220, 'uni029B': 667, 'uni029C': 668, 'uni03A9': 937, 'uni03A8': 936, 'S': 83, 'uni24C9': 9417, 'uni03A1': 929, 'uni03A0': 928, 'exclam': 33, 'uni03A5': 933, 'uni03A4': 932, 'uni03A7': 935, 'Zcaron': 381, 'uni2133': 8499, 'uni2132': 8498, 'uni0159': 345, 'uni0158': 344, 'uni2137': 8503, 'uni2005': 8197, 'uni2135': 8501, 'uni2134': 8500, 'uni02BA': 698, 'uni2033': 8243, 'uni0151': 337, 
'uni0150': 336, 'uni0157': 343, 'equal': 61, 'uni0155': 341, 'uni0154': 340, 's': 115, 'uni233F': 9023, 'eth': 240, 'uni24BE': 9406, 'uni21E9': 8681, 'uni2060': 8288, 'Egrave': 200, 'uni255D': 9565, 'uni24CD': 9421, 'uni21E1': 8673, 'uni21B9': 8633, 'hyphen': 45, 'uni01BE': 446, 'uni01BB': 443, 'period': 46, 'igrave': 236, 'uni01BA': 442, 'uni2296': 8854, 'uni2297': 8855, 'uni2294': 8852, 'uni2295': 8853, 'colon': 58, 'uni2293': 8851, 'uni2290': 8848, 'uni2291': 8849, 'uni032D': 813, 'uni032E': 814, 'uni032F': 815, 'uni032A': 810, 'uni032B': 811, 'uni032C': 812, 'uni231D': 8989, 'Ecircumflex': 202, 'uni24D7': 9431, 'uni25DD': 9693, 'trademark': 8482, 'Aacute': 193, 'cent': 162, 'uni0445': 1093, 'uni266E': 9838, 'uni266D': 9837, 'uni266B': 9835, 'uni03C9': 969, 'uni2003': 8195, 'uni2047': 8263, 'lslash': 322, 'uni03A6': 934, 'uni2043': 8259, 'uni250C': 9484, 'uni2040': 8256, 'uni255F': 9567, 'uni24CB': 9419, 'uni0472': 1138, 'uni0446': 1094, 'uni0474': 1140, 'uni0475': 1141, 'uni2508': 9480, 'uni2660': 9824, 'uni2506': 9478, 'uni2502': 9474, 'c': 99, 'uni2500': 9472, 'N': 78, 'uni22A6': 8870, 'uni21E7': 8679, 'uni2130': 8496, 'uni2002': 8194, 'breve': 728, 'uni0442': 1090, 'Oacute': 211, 'uni229F': 8863, 'uni25C7': 9671, 'uni229D': 8861, 'uni229E': 8862, 'guillemotleft': 171, 'uni0329': 809, 'uni24E5': 9445, 'uni011F': 287, 'uni0324': 804, 'uni0325': 805, 'uni0326': 806, 'uni0327': 807, 'uni0321': 801, 'uni0322': 802, 'n': 110, 'uni2032': 8242, 'uni2269': 8809, 'uni2268': 8808, 'uni0306': 774, 'uni226B': 8811, 'uni21EA': 8682, 'uni0166': 358, 'uni203B': 8251, 'uni01B5': 437, 'idieresis': 239, 'uni02BC': 700, 'uni01B0': 432, 'braceright': 125, 'seven': 55, 'uni02BB': 699, 'uni011A': 282, 'uni29FB': 10747, 'brokenbar': 166, 'uni2036': 8246, 'uni25C0': 9664, 'uni0156': 342, 'uni22D5': 8917, 'uni0258': 600, 'ugrave': 249, 'uni22D6': 8918, 'uni22D1': 8913, 'uni2034': 8244, 'uni22D3': 8915, 'uni22D2': 8914, 'uni203C': 8252, 'uni223E': 8766, 'uni02BF': 703, 'uni22D9': 8921, 'uni22D8': 8920, 'uni25BD': 9661, 'uni25BE': 9662, 'uni25BF': 9663, 'uni041B': 1051, 'periodcentered': 183, 'uni25BC': 9660, 'uni019E': 414, 'uni019B': 411, 'uni019A': 410, 'uni2007': 8199, 'uni0391': 913, 'uni0390': 912, 'uni0393': 915, 'uni0392': 914, 'uni0395': 917, 'uni0394': 916, 'uni0397': 919, 'uni0396': 918, 'uni0399': 921, 'uni0398': 920, 'uni25C8': 9672, 'uni2468': 9320, 'sterling': 163, 'uni22EB': 8939, 'uni039C': 924, 'uni039B': 923, 'uni039E': 926, 'uni039D': 925, 'uni039F': 927, 'I': 73, 'uni03E1': 993, 'uni03E0': 992, 'uni2319': 8985, 'uni228B': 8843, 'uni25B5': 9653, 'uni25B6': 9654, 'uni22EA': 8938, 'uni24B9': 9401, 'uni044E': 1102, 'uni0199': 409, 'uni2266': 8806, 'Y': 89, 'uni22A2': 8866, 'Eth': 208, 'uni266F': 9839, 'emdash': 8212, 'uni263B': 9787, 'uni24BD': 9405, 'uni22DE': 8926, 'uni0360': 864, 'uni2557': 9559, 'uni22DF': 8927, 'uni22DA': 8922, 'uni22DC': 8924, 'uni0361': 865, 'i': 105, 'uni24BF': 9407, 'uni0362': 866, 'uni263E': 9790, 'uni028D': 653, 'uni2259': 8793, 'uni0323': 803, 'uni2265': 8805, 'daggerdbl': 8225, 'y': 121, 'uni010A': 266, 'plusminus': 177, 'less': 60, 'uni21AE': 8622, 'uni0315': 789, 'uni230B': 8971, 'uni21AF': 8623, 'uni21AA': 8618, 'uni21AC': 8620, 'uni21AB': 8619, 'uni01FB': 507, 'uni01FC': 508, 'uni223A': 8762, 'uni01FA': 506, 'uni01FF': 511, 'uni01FD': 509, 'uni01FE': 510, 'uni2567': 9575, 'uni25E0': 9696, 'uni0104': 260, 'uni0105': 261, 'uni0106': 262, 'uni0107': 263, 'uni0100': 256, 'uni0101': 257, 'uni0102': 258, 'uni0103': 259, 'uni2038': 8248, 'uni2009': 8201, 'uni2008': 
8200, 'uni0108': 264, 'uni0109': 265, 'uni02A1': 673, 'uni223B': 8763, 'uni226C': 8812, 'uni25AC': 9644, 'uni24D3': 9427, 'uni21E0': 8672, 'uni21E3': 8675, 'Udieresis': 220, 'uni21E2': 8674, 'D': 68, 'uni21E5': 8677, 'uni2621': 9761, 'uni21D1': 8657, 'uni203E': 8254, 'uni22C6': 8902, 'uni21E4': 8676, 'uni010D': 269, 'uni010E': 270, 'uni010F': 271, 'five': 53, 'T': 84, 'uni010B': 267, 'uni010C': 268, 'uni2605': 9733, 'uni2663': 9827, 'uni21E6': 8678, 'uni24B6': 9398, 'uni22C1': 8897, 'oslash': 248, 'acute': 180, 'uni01F0': 496, 'd': 100, 'OE': 338, 'uni22E3': 8931, 'Igrave': 204, 'uni2308': 8968, 'uni2309': 8969, 'uni21A9': 8617, 't': 116, 'uni2313': 8979, 'uni03A3': 931, 'uni21A4': 8612, 'uni21A7': 8615, 'uni21A6': 8614, 'uni21A1': 8609, 'uni21A0': 8608, 'uni21A3': 8611, 'uni21A2': 8610, 'parenright': 41, 'uni256A': 9578, 'uni25DC': 9692, 'uni24CE': 9422, 'uni042C': 1068, 'uni24E0': 9440, 'uni042B': 1067, 'uni0409': 1033, 'uni0408': 1032, 'uni24E7': 9447, 'uni25B4': 9652, 'uni042A': 1066, 'uni228E': 8846, 'uni0401': 1025, 'adieresis': 228, 'uni0403': 1027, 'quotesingle': 39, 'uni0405': 1029, 'uni0404': 1028, 'uni0407': 1031, 'uni0406': 1030, 'uni229C': 8860, 'uni2306': 8966, 'uni2253': 8787, 'twodotenleader': 8229, 'uni2131': 8497, 'uni21DA': 8666, 'uni2234': 8756, 'uni2235': 8757, 'uni01A5': 421, 'uni2237': 8759, 'uni2230': 8752, 'uni02CC': 716, 'slash': 47, 'uni01A0': 416, 'ellipsis': 8230, 'uni2299': 8857, 'uni2238': 8760, 'numbersign': 35, 'uni21A8': 8616, 'uni223D': 8765, 'uni01AF': 431, 'uni223F': 8767, 'uni01AD': 429, 'uni01AB': 427, 'odieresis': 246, 'uni223C': 8764, 'uni227D': 8829, 'uni0280': 640, 'O': 79, 'uni227E': 8830, 'uni21A5': 8613, 'uni22D4': 8916, 'uni25D4': 9684, 'uni227F': 8831, 'uni0435': 1077, 'uni2302': 8962, 'uni2669': 9833, 'uni24E3': 9443, 'uni2720': 10016, 'uni22A8': 8872, 'uni22A9': 8873, 'uni040A': 1034, 'uni22A7': 8871, 'oe': 339, 'uni040B': 1035, 'uni040E': 1038, 'uni22A3': 8867, 'o': 111, 'uni040F': 1039, 'Edieresis': 203, 'uni25D5': 9685, 'plus': 43, 'uni044D': 1101, 'uni263C': 9788, 'uni22E6': 8934, 'uni2283': 8835, 'uni258C': 9612, 'uni219E': 8606, 'uni24E4': 9444, 'uni2136': 8502, 'dagger': 8224, 'uni24B7': 9399, 'uni219B': 8603, 'uni22E5': 8933, 'three': 51, 'uni210B': 8459, 'uni2534': 9524, 'uni24B8': 9400, 'uni230A': 8970, 'hungarumlaut': 733, 'parenleft': 40, 'uni0148': 328, 'uni0149': 329, 'uni2124': 8484, 'uni2125': 8485, 'uni2126': 8486, 'uni2127': 8487, 'uni0140': 320, 'uni2129': 8489, 'uni25C5': 9669, 'uni0143': 323, 'uni0144': 324, 'uni0145': 325, 'uni0146': 326, 'uni0147': 327, 'uni210D': 8461, 'fraction': 8260, 'uni2031': 8241, 'uni2196': 8598, 'uni2035': 8245, 'uni24E6': 9446, 'uni016B': 363, 'uni24BA': 9402, 'uni266A': 9834, 'uni0116': 278, 'uni2115': 8469, 'registered': 174, 'J': 74, 'uni25DF': 9695, 'uni25CE': 9678, 'uni273D': 10045, 'dieresis': 168, 'uni212B': 8491, 'uni0114': 276, 'uni212D': 8493, 'uni212E': 8494, 'uni212F': 8495, 'uni014A': 330, 'uni014B': 331, 'uni014C': 332, 'uni014D': 333, 'uni014E': 334, 'uni014F': 335, 'uni025E': 606, 'uni24E8': 9448, 'uni0111': 273, 'uni24E9': 9449, 'Ograve': 210, 'j': 106, 'uni2195': 8597, 'uni2194': 8596, 'uni2197': 8599, 'uni2037': 8247, 'uni2191': 8593, 'uni2190': 8592, 'uni2193': 8595, 'uni2192': 8594, 'uni29FA': 10746, 'uni2713': 10003, 'z': 122, 'uni2199': 8601, 'uni2198': 8600, 'uni2667': 9831, 'ae': 230, 'uni0448': 1096, 'semicolon': 59, 'uni2666': 9830, 'uni038F': 911, 'uni0444': 1092, 'uni0447': 1095, 'uni038E': 910, 'uni0441': 1089, 'uni038C': 908, 'uni0443': 1091, 'uni038A': 906, 
'uni0250': 592, 'uni0251': 593, 'uni0252': 594, 'uni0253': 595, 'uni0254': 596, 'at': 64, 'uni0256': 598, 'uni0257': 599, 'uni0167': 359, 'uni0259': 601, 'uni228C': 8844, 'uni2662': 9826, 'uni0319': 793, 'uni0318': 792, 'uni24BC': 9404, 'uni0402': 1026, 'uni22EF': 8943, 'Iacute': 205, 'uni22ED': 8941, 'uni22EE': 8942, 'uni0311': 785, 'uni0310': 784, 'uni21E8': 8680, 'uni0312': 786, 'percent': 37, 'uni0317': 791, 'uni0316': 790, 'uni21D6': 8662, 'uni21D7': 8663, 'uni21D4': 8660, 'uni21D5': 8661, 'uni21D2': 8658, 'uni21D3': 8659, 'uni21D0': 8656, 'uni2138': 8504, 'uni2270': 8816, 'uni2271': 8817, 'uni2272': 8818, 'uni2273': 8819, 'uni2274': 8820, 'uni2275': 8821, 'bracketright': 93, 'uni21D9': 8665, 'uni21DF': 8671, 'uni21DD': 8669, 'uni21DE': 8670, 'AE': 198, 'uni03AE': 942, 'uni227A': 8826, 'uni227B': 8827, 'uni227C': 8828, 'asterisk': 42, 'aacute': 225, 'uni226F': 8815, 'uni22E2': 8930, 'uni0386': 902, 'uni22E0': 8928, 'uni22E1': 8929, 'U': 85, 'uni22E7': 8935, 'uni22E4': 8932, 'uni0387': 903, 'uni031A': 794, 'eacute': 233, 'uni22E8': 8936, 'uni22E9': 8937, 'uni24D8': 9432, 'uni025A': 602, 'uni025B': 603, 'uni025C': 604, 'e': 101, 'uni0128': 296, 'uni025F': 607, 'uni2665': 9829, 'thorn': 254, 'uni0129': 297, 'uni253C': 9532, 'uni25D7': 9687, 'u': 117, 'uni0388': 904, 'uni0389': 905, 'uni0255': 597, 'uni0171': 369, 'uni0384': 900, 'uni0385': 901, 'uni044A': 1098, 'uni252C': 9516, 'uni044C': 1100, 'uni044B': 1099} uni2type1 = dict([(v,k) for k,v in type12uni.items()]) tex2uni = { 'widehat': 0x0302, 'widetilde': 0x0303, 'langle': 0x27e8, 'rangle': 0x27e9, 'perp': 0x27c2, 'neq': 0x2260, 'Join': 0x2a1d, 'leqslant': 0x2a7d, 'geqslant': 0x2a7e, 'lessapprox': 0x2a85, 'gtrapprox': 0x2a86, 'lesseqqgtr': 0x2a8b, 'gtreqqless': 0x2a8c, 'triangleeq': 0x225c, 'eqslantless': 0x2a95, 'eqslantgtr': 0x2a96, 'backepsilon': 0x03f6, 'precapprox': 0x2ab7, 'succapprox': 0x2ab8, 'fallingdotseq': 0x2252, 'subseteqq': 0x2ac5, 'supseteqq': 0x2ac6, 'varpropto': 0x221d, 'precnapprox': 0x2ab9, 'succnapprox': 0x2aba, 'subsetneqq': 0x2acb, 'supsetneqq': 0x2acc, 'lnapprox': 0x2ab9, 'gnapprox': 0x2aba, 'longleftarrow': 0x27f5, 'longrightarrow': 0x27f6, 'longleftrightarrow': 0x27f7, 'Longleftarrow': 0x27f8, 'Longrightarrow': 0x27f9, 'Longleftrightarrow': 0x27fa, 'longmapsto': 0x27fc, 'leadsto': 0x21dd, 'dashleftarrow': 0x290e, 'dashrightarrow': 0x290f, 'circlearrowleft': 0x21ba, 'circlearrowright': 0x21bb, 'leftrightsquigarrow': 0x21ad, 'leftsquigarrow': 0x219c, 'rightsquigarrow': 0x219d, 'Game': 0x2141, 'hbar': 0x0127, 'hslash': 0x210f, 'ldots': 0x22ef, 'vdots': 0x22ee, 'doteqdot': 0x2251, 'doteq': 8784, 'partial': 8706, 'gg': 8811, 'asymp': 8781, 'blacktriangledown': 9662, 'otimes': 8855, 'nearrow': 8599, 'varpi': 982, 'vee': 8744, 'vec': 8407, 'smile': 8995, 'succnsim': 8937, 'gimel': 8503, 'vert': 124, '|': 124, 'varrho': 1009, 'P': 182, 'approxident': 8779, 'Swarrow': 8665, 'textasciicircum': 94, 'imageof': 8887, 'ntriangleleft': 8938, 'nleq': 8816, 'div': 247, 'nparallel': 8742, 'Leftarrow': 8656, 'lll': 8920, 'oiint': 8751, 'ngeq': 8817, 'Theta': 920, 'origof': 8886, 'blacksquare': 9632, 'solbar': 9023, 'neg': 172, 'sum': 8721, 'Vdash': 8873, 'coloneq': 8788, 'degree': 176, 'bowtie': 8904, 'blacktriangleright': 9654, 'varsigma': 962, 'leq': 8804, 'ggg': 8921, 'lneqq': 8808, 'scurel': 8881, 'stareq': 8795, 'BbbN': 8469, 'nLeftarrow': 8653, 'nLeftrightarrow': 8654, 'k': 808, 'bot': 8869, 'BbbC': 8450, 'Lsh': 8624, 'leftleftarrows': 8647, 'BbbZ': 8484, 'digamma': 989, 'BbbR': 8477, 'BbbP': 8473, 'BbbQ': 8474, 
'vartriangleright': 8883, 'succsim': 8831, 'wedge': 8743, 'lessgtr': 8822, 'veebar': 8891, 'mapsdown': 8615, 'Rsh': 8625, 'chi': 967, 'prec': 8826, 'nsubseteq': 8840, 'therefore': 8756, 'eqcirc': 8790, 'textexclamdown': 161, 'nRightarrow': 8655, 'flat': 9837, 'notin': 8713, 'llcorner': 8990, 'varepsilon': 949, 'bigtriangleup': 9651, 'aleph': 8501, 'dotminus': 8760, 'upsilon': 965, 'Lambda': 923, 'cap': 8745, 'barleftarrow': 8676, 'mu': 956, 'boxplus': 8862, 'mp': 8723, 'circledast': 8859, 'tau': 964, 'in': 8712, 'backslash': 92, 'varnothing': 8709, 'sharp': 9839, 'eqsim': 8770, 'gnsim': 8935, 'Searrow': 8664, 'updownarrows': 8645, 'heartsuit': 9825, 'trianglelefteq': 8884, 'ddag': 8225, 'sqsubseteq': 8849, 'mapsfrom': 8612, 'boxbar': 9707, 'sim': 8764, 'Nwarrow': 8662, 'nequiv': 8802, 'succ': 8827, 'vdash': 8866, 'Leftrightarrow': 8660, 'parallel': 8741, 'invnot': 8976, 'natural': 9838, 'ss': 223, 'uparrow': 8593, 'nsim': 8769, 'hookrightarrow': 8618, 'Equiv': 8803, 'approx': 8776, 'Vvdash': 8874, 'nsucc': 8833, 'leftrightharpoons': 8651, 'Re': 8476, 'boxminus': 8863, 'equiv': 8801, 'Lleftarrow': 8666, 'thinspace': 8201, 'll': 8810, 'Cup': 8915, 'measeq': 8798, 'upharpoonleft': 8639, 'lq': 8216, 'Upsilon': 933, 'subsetneq': 8842, 'greater': 62, 'supsetneq': 8843, 'Cap': 8914, 'L': 321, 'spadesuit': 9824, 'lrcorner': 8991, 'not': 824, 'bar': 772, 'rightharpoonaccent': 8401, 'boxdot': 8865, 'l': 322, 'leftharpoondown': 8637, 'bigcup': 8899, 'iint': 8748, 'bigwedge': 8896, 'downharpoonleft': 8643, 'textasciitilde': 126, 'subset': 8834, 'leqq': 8806, 'mapsup': 8613, 'nvDash': 8877, 'looparrowleft': 8619, 'nless': 8814, 'rightarrowbar': 8677, 'Vert': 8214, 'downdownarrows': 8650, 'uplus': 8846, 'simeq': 8771, 'napprox': 8777, 'ast': 8727, 'twoheaduparrow': 8607, 'doublebarwedge': 8966, 'Sigma': 931, 'leftharpoonaccent': 8400, 'ntrianglelefteq': 8940, 'nexists': 8708, 'times': 215, 'measuredangle': 8737, 'bumpeq': 8783, 'carriagereturn': 8629, 'adots': 8944, 'checkmark': 10003, 'lambda': 955, 'xi': 958, 'rbrace': 125, 'rbrack': 93, 'Nearrow': 8663, 'maltese': 10016, 'clubsuit': 9827, 'top': 8868, 'overarc': 785, 'varphi': 966, 'Delta': 916, 'iota': 953, 'nleftarrow': 8602, 'candra': 784, 'supset': 8835, 'triangleleft': 9665, 'gtreqless': 8923, 'ntrianglerighteq': 8941, 'quad': 8195, 'Xi': 926, 'gtrdot': 8919, 'leftthreetimes': 8907, 'minus': 8722, 'preccurlyeq': 8828, 'nleftrightarrow': 8622, 'lambdabar': 411, 'blacktriangle': 9652, 'kernelcontraction': 8763, 'Phi': 934, 'angle': 8736, 'spadesuitopen': 9828, 'eqless': 8924, 'mid': 8739, 'varkappa': 1008, 'Ldsh': 8626, 'updownarrow': 8597, 'beta': 946, 'textquotedblleft': 8220, 'rho': 961, 'alpha': 945, 'intercal': 8890, 'beth': 8502, 'grave': 768, 'acwopencirclearrow': 8634, 'nmid': 8740, 'nsupset': 8837, 'sigma': 963, 'dot': 775, 'Rightarrow': 8658, 'turnednot': 8985, 'backsimeq': 8909, 'leftarrowtail': 8610, 'approxeq': 8778, 'curlyeqsucc': 8927, 'rightarrowtail': 8611, 'Psi': 936, 'copyright': 169, 'yen': 165, 'vartriangleleft': 8882, 'rasp': 700, 'triangleright': 9655, 'precsim': 8830, 'infty': 8734, 'geq': 8805, 'updownarrowbar': 8616, 'precnsim': 8936, 'H': 779, 'ulcorner': 8988, 'looparrowright': 8620, 'ncong': 8775, 'downarrow': 8595, 'circeq': 8791, 'subseteq': 8838, 'bigstar': 9733, 'prime': 8242, 'lceil': 8968, 'Rrightarrow': 8667, 'oiiint': 8752, 'curlywedge': 8911, 'vDash': 8872, 'lfloor': 8970, 'ddots': 8945, 'exists': 8707, 'underbar': 817, 'Pi': 928, 'leftrightarrows': 8646, 'sphericalangle': 8738, 'coprod': 8720, 'circledcirc': 
8858, 'gtrsim': 8819, 'gneqq': 8809, 'between': 8812, 'theta': 952, 'complement': 8705, 'arceq': 8792, 'nVdash': 8878, 'S': 167, 'wr': 8768, 'wp': 8472, 'backcong': 8780, 'lasp': 701, 'c': 807, 'nabla': 8711, 'dotplus': 8724, 'eta': 951, 'forall': 8704, 'eth': 240, 'colon': 58, 'sqcup': 8852, 'rightrightarrows': 8649, 'sqsupset': 8848, 'mapsto': 8614, 'bigtriangledown': 9661, 'sqsupseteq': 8850, 'propto': 8733, 'pi': 960, 'pm': 177, 'dots': 8230, 'nrightarrow': 8603, 'textasciiacute': 180, 'Doteq': 8785, 'breve': 774, 'sqcap': 8851, 'twoheadrightarrow': 8608, 'kappa': 954, 'vartriangle': 9653, 'diamondsuit': 9826, 'pitchfork': 8916, 'blacktriangleleft': 9664, 'nprec': 8832, 'vdots': 8942, 'curvearrowright': 8631, 'barwedge': 8892, 'multimap': 8888, 'textquestiondown': 191, 'cong': 8773, 'rtimes': 8906, 'rightzigzagarrow': 8669, 'rightarrow': 8594, 'leftarrow': 8592, '__sqrt__': 8730, 'twoheaddownarrow': 8609, 'oint': 8750, 'bigvee': 8897, 'eqdef': 8797, 'sterling': 163, 'phi': 981, 'Updownarrow': 8661, 'backprime': 8245, 'emdash': 8212, 'Gamma': 915, 'i': 305, 'rceil': 8969, 'leftharpoonup': 8636, 'Im': 8465, 'curvearrowleft': 8630, 'wedgeq': 8793, 'fallingdotseq': 8786, 'curlyeqprec': 8926, 'questeq': 8799, 'less': 60, 'upuparrows': 8648, 'tilde': 771, 'textasciigrave': 96, 'smallsetminus': 8726, 'ell': 8467, 'cup': 8746, 'danger': 9761, 'nVDash': 8879, 'cdotp': 183, 'cdots': 8943, 'hat': 770, 'eqgtr': 8925, 'enspace': 8194, 'psi': 968, 'frown': 8994, 'acute': 769, 'downzigzagarrow': 8623, 'ntriangleright': 8939, 'cupdot': 8845, 'circleddash': 8861, 'oslash': 8856, 'mho': 8487, 'd': 803, 'sqsubset': 8847, 'cdot': 8901, 'Omega': 937, 'OE': 338, 'veeeq': 8794, 'Finv': 8498, 't': 865, 'leftrightarrow': 8596, 'swarrow': 8601, 'rightthreetimes': 8908, 'rightleftharpoons': 8652, 'lesssim': 8818, 'searrow': 8600, 'because': 8757, 'gtrless': 8823, 'star': 8902, 'nsubset': 8836, 'zeta': 950, 'dddot': 8411, 'bigcirc': 9675, 'Supset': 8913, 'circ': 8728, 'slash': 8725, 'ocirc': 778, 'prod': 8719, 'twoheadleftarrow': 8606, 'daleth': 8504, 'upharpoonright': 8638, 'odot': 8857, 'Uparrow': 8657, 'O': 216, 'hookleftarrow': 8617, 'trianglerighteq': 8885, 'nsime': 8772, 'oe': 339, 'nwarrow': 8598, 'o': 248, 'ddddot': 8412, 'downharpoonright': 8642, 'succcurlyeq': 8829, 'gamma': 947, 'scrR': 8475, 'dag': 8224, 'thickspace': 8197, 'frakZ': 8488, 'lessdot': 8918, 'triangledown': 9663, 'ltimes': 8905, 'scrB': 8492, 'endash': 8211, 'scrE': 8496, 'scrF': 8497, 'scrH': 8459, 'scrI': 8464, 'rightharpoondown': 8641, 'scrL': 8466, 'scrM': 8499, 'frakC': 8493, 'nsupseteq': 8841, 'circledR': 174, 'circledS': 9416, 'ngtr': 8815, 'bigcap': 8898, 'scre': 8495, 'Downarrow': 8659, 'scrg': 8458, 'overleftrightarrow': 8417, 'scro': 8500, 'lnsim': 8934, 'eqcolon': 8789, 'curlyvee': 8910, 'urcorner': 8989, 'lbrace': 123, 'Bumpeq': 8782, 'delta': 948, 'boxtimes': 8864, 'overleftarrow': 8406, 'prurel': 8880, 'clubsuitopen': 9831, 'cwopencirclearrow': 8635, 'geqq': 8807, 'rightleftarrows': 8644, 'ac': 8766, 'ae': 230, 'int': 8747, 'rfloor': 8971, 'risingdotseq': 8787, 'nvdash': 8876, 'diamond': 8900, 'ddot': 776, 'backsim': 8765, 'oplus': 8853, 'triangleq': 8796, 'check': 780, 'ni': 8715, 'iiint': 8749, 'ne': 8800, 'lesseqgtr': 8922, 'obar': 9021, 'supseteq': 8839, 'nu': 957, 'AA': 8491, 'AE': 198, 'models': 8871, 'ominus': 8854, 'dashv': 8867, 'omega': 969, 'rq': 8217, 'Subset': 8912, 'rightharpoonup': 8640, 'Rdsh': 8627, 'bullet': 8729, 'divideontimes': 8903, 'lbrack': 91, 'textquotedblright': 8221, 'Colon': 8759, '%': 37, '$': 
36, '{': 123, '}': 125, '_': 95, 'imath': 0x131, 'circumflexaccent' : 770, 'combiningbreve' : 774, 'combiningoverline' : 772, 'combininggraveaccent' : 768, 'combiningacuteaccent' : 769, 'combiningdiaeresis' : 776, 'combiningtilde' : 771, 'combiningrightarrowabove' : 8407, 'combiningdotabove' : 775, 'to': 8594, 'succeq': 8829, 'emptyset': 8709, 'leftparen': 40, 'rightparen': 41, 'bigoplus': 10753, 'leftangle': 10216, 'rightangle': 10217, 'leftbrace': 124, 'rightbrace': 125, 'jmath': 567, 'bigodot': 10752, 'preceq': 8828, 'biguplus': 10756, 'epsilon': 949, 'vartheta': 977, 'bigotimes': 10754 } # Each element is a 4-tuple of the form: # src_start, src_end, dst_font, dst_start # stix_virtual_fonts = { 'bb': { 'rm': [ (0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9 (0x0041, 0x0042, 'rm', 0x1d538), # A-B (0x0043, 0x0043, 'rm', 0x2102), # C (0x0044, 0x0047, 'rm', 0x1d53b), # D-G (0x0048, 0x0048, 'rm', 0x210d), # H (0x0049, 0x004d, 'rm', 0x1d540), # I-M (0x004e, 0x004e, 'rm', 0x2115), # N (0x004f, 0x004f, 'rm', 0x1d546), # O (0x0050, 0x0051, 'rm', 0x2119), # P-Q (0x0052, 0x0052, 'rm', 0x211d), # R (0x0053, 0x0059, 'rm', 0x1d54a), # S-Y (0x005a, 0x005a, 'rm', 0x2124), # Z (0x0061, 0x007a, 'rm', 0x1d552), # a-z (0x0393, 0x0393, 'rm', 0x213e), # \Gamma (0x03a0, 0x03a0, 'rm', 0x213f), # \Pi (0x03a3, 0x03a3, 'rm', 0x2140), # \Sigma (0x03b3, 0x03b3, 'rm', 0x213d), # \gamma (0x03c0, 0x03c0, 'rm', 0x213c), # \pi ], 'it': [ (0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9 (0x0041, 0x0042, 'it', 0xe154), # A-B (0x0043, 0x0043, 'it', 0x2102), # C (missing in beta STIX fonts) (0x0044, 0x0044, 'it', 0x2145), # D (0x0045, 0x0047, 'it', 0xe156), # E-G (0x0048, 0x0048, 'it', 0x210d), # H (missing in beta STIX fonts) (0x0049, 0x004d, 'it', 0xe159), # I-M (0x004e, 0x004e, 'it', 0x2115), # N (missing in beta STIX fonts) (0x004f, 0x004f, 'it', 0xe15e), # O (0x0050, 0x0051, 'it', 0x2119), # P-Q (missing in beta STIX fonts) (0x0052, 0x0052, 'it', 0x211d), # R (missing in beta STIX fonts) (0x0053, 0x0059, 'it', 0xe15f), # S-Y (0x005a, 0x005a, 'it', 0x2124), # Z (missing in beta STIX fonts) (0x0061, 0x0063, 'it', 0xe166), # a-c (0x0064, 0x0065, 'it', 0x2146), # d-e (0x0066, 0x0068, 'it', 0xe169), # f-h (0x0069, 0x006a, 'it', 0x2148), # i-j (0x006b, 0x007a, 'it', 0xe16c), # k-z (0x0393, 0x0393, 'it', 0x213e), # \Gamma (missing in beta STIX fonts) (0x03a0, 0x03a0, 'it', 0x213f), # \Pi (0x03a3, 0x03a3, 'it', 0x2140), # \Sigma (missing in beta STIX fonts) (0x03b3, 0x03b3, 'it', 0x213d), # \gamma (missing in beta STIX fonts) (0x03c0, 0x03c0, 'it', 0x213c), # \pi ], 'bf': [ (0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9 (0x0041, 0x005a, 'bf', 0xe38a), # A-Z (0x0061, 0x007a, 'bf', 0xe39d), # a-z (0x0393, 0x0393, 'bf', 0x213e), # \Gamma (0x03a0, 0x03a0, 'bf', 0x213f), # \Pi (0x03a3, 0x03a3, 'bf', 0x2140), # \Sigma (0x03b3, 0x03b3, 'bf', 0x213d), # \gamma (0x03c0, 0x03c0, 'bf', 0x213c), # \pi ], }, 'cal': [ (0x0041, 0x005a, 'it', 0xe22d), # A-Z ], 'circled': { 'rm': [ (0x0030, 0x0030, 'rm', 0x24ea), # 0 (0x0031, 0x0039, 'rm', 0x2460), # 1-9 (0x0041, 0x005a, 'rm', 0x24b6), # A-Z (0x0061, 0x007a, 'rm', 0x24d0) # a-z ], 'it': [ (0x0030, 0x0030, 'rm', 0x24ea), # 0 (0x0031, 0x0039, 'rm', 0x2460), # 1-9 (0x0041, 0x005a, 'it', 0x24b6), # A-Z (0x0061, 0x007a, 'it', 0x24d0) # a-z ], 'bf': [ (0x0030, 0x0030, 'bf', 0x24ea), # 0 (0x0031, 0x0039, 'bf', 0x2460), # 1-9 (0x0041, 0x005a, 'bf', 0x24b6), # A-Z (0x0061, 0x007a, 'bf', 0x24d0) # a-z ], }, 'frak': { 'rm': [ (0x0041, 0x0042, 'rm', 0x1d504), # A-B (0x0043, 0x0043, 'rm', 0x212d), # C (0x0044, 0x0047, 'rm', 
0x1d507), # D-G (0x0048, 0x0048, 'rm', 0x210c), # H (0x0049, 0x0049, 'rm', 0x2111), # I (0x004a, 0x0051, 'rm', 0x1d50d), # J-Q (0x0052, 0x0052, 'rm', 0x211c), # R (0x0053, 0x0059, 'rm', 0x1d516), # S-Y (0x005a, 0x005a, 'rm', 0x2128), # Z (0x0061, 0x007a, 'rm', 0x1d51e), # a-z ], 'it': [ (0x0041, 0x0042, 'rm', 0x1d504), # A-B (0x0043, 0x0043, 'rm', 0x212d), # C (0x0044, 0x0047, 'rm', 0x1d507), # D-G (0x0048, 0x0048, 'rm', 0x210c), # H (0x0049, 0x0049, 'rm', 0x2111), # I (0x004a, 0x0051, 'rm', 0x1d50d), # J-Q (0x0052, 0x0052, 'rm', 0x211c), # R (0x0053, 0x0059, 'rm', 0x1d516), # S-Y (0x005a, 0x005a, 'rm', 0x2128), # Z (0x0061, 0x007a, 'rm', 0x1d51e), # a-z ], 'bf': [ (0x0041, 0x005a, 'bf', 0x1d56c), # A-Z (0x0061, 0x007a, 'bf', 0x1d586), # a-z ], }, 'scr': [ (0x0041, 0x0041, 'it', 0x1d49c), # A (0x0042, 0x0042, 'it', 0x212c), # B (0x0043, 0x0044, 'it', 0x1d49e), # C-D (0x0045, 0x0046, 'it', 0x2130), # E-F (0x0047, 0x0047, 'it', 0x1d4a2), # G (0x0048, 0x0048, 'it', 0x210b), # H (0x0049, 0x0049, 'it', 0x2110), # I (0x004a, 0x004b, 'it', 0x1d4a5), # J-K (0x004c, 0x004c, 'it', 0x2112), # L (0x004d, 0x003d, 'it', 0x2113), # M (0x004e, 0x0051, 'it', 0x1d4a9), # N-Q (0x0052, 0x0052, 'it', 0x211b), # R (0x0053, 0x005a, 'it', 0x1d4ae), # S-Z (0x0061, 0x0064, 'it', 0x1d4b6), # a-d (0x0065, 0x0065, 'it', 0x212f), # e (0x0066, 0x0066, 'it', 0x1d4bb), # f (0x0067, 0x0067, 'it', 0x210a), # g (0x0068, 0x006e, 'it', 0x1d4bd), # h-n (0x006f, 0x006f, 'it', 0x2134), # o (0x0070, 0x007a, 'it', 0x1d4c5), # p-z ], 'sf': { 'rm': [ (0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9 (0x0041, 0x005a, 'rm', 0x1d5a0), # A-Z (0x0061, 0x007a, 'rm', 0x1d5ba), # a-z (0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega (0x03b1, 0x03c9, 'rm', 0xe196), # \alpha-\omega (0x03d1, 0x03d1, 'rm', 0xe1b0), # theta variant (0x03d5, 0x03d5, 'rm', 0xe1b1), # phi variant (0x03d6, 0x03d6, 'rm', 0xe1b3), # pi variant (0x03f1, 0x03f1, 'rm', 0xe1b2), # rho variant (0x03f5, 0x03f5, 'rm', 0xe1af), # lunate epsilon (0x2202, 0x2202, 'rm', 0xe17c), # partial differential ], 'it': [ # These numerals are actually upright. We don't actually # want italic numerals ever. (0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9 (0x0041, 0x005a, 'it', 0x1d608), # A-Z (0x0061, 0x007a, 'it', 0x1d622), # a-z (0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega (0x03b1, 0x03c9, 'it', 0xe1d8), # \alpha-\omega (0x03d1, 0x03d1, 'it', 0xe1f2), # theta variant (0x03d5, 0x03d5, 'it', 0xe1f3), # phi variant (0x03d6, 0x03d6, 'it', 0xe1f5), # pi variant (0x03f1, 0x03f1, 'it', 0xe1f4), # rho variant (0x03f5, 0x03f5, 'it', 0xe1f1), # lunate epsilon ], 'bf': [ (0x0030, 0x0039, 'bf', 0x1d7ec), # 0-9 (0x0041, 0x005a, 'bf', 0x1d5d4), # A-Z (0x0061, 0x007a, 'bf', 0x1d5ee), # a-z (0x0391, 0x03a9, 'bf', 0x1d756), # \Alpha-\Omega (0x03b1, 0x03c9, 'bf', 0x1d770), # \alpha-\omega (0x03d1, 0x03d1, 'bf', 0x1d78b), # theta variant (0x03d5, 0x03d5, 'bf', 0x1d78d), # phi variant (0x03d6, 0x03d6, 'bf', 0x1d78f), # pi variant (0x03f0, 0x03f0, 'bf', 0x1d78c), # kappa variant (0x03f1, 0x03f1, 'bf', 0x1d78e), # rho variant (0x03f5, 0x03f5, 'bf', 0x1d78a), # lunate epsilon (0x2202, 0x2202, 'bf', 0x1d789), # partial differential (0x2207, 0x2207, 'bf', 0x1d76f), # \Nabla ], }, 'tt': [ (0x0030, 0x0039, 'rm', 0x1d7f6), # 0-9 (0x0041, 0x005a, 'rm', 0x1d670), # A-Z (0x0061, 0x007a, 'rm', 0x1d68a) # a-z ], }
agpl-3.0
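The table above maps TeX-style symbol names to Unicode code points. A minimal sketch of how a tex2uni-style mapping can resolve a symbol name to its glyph; the three entries are copied from the table, the helper name tex_symbol is invented for illustration, and the real mathtext machinery does far more than a dictionary lookup.

# Tiny excerpt of the tex2uni table above; illustrative only.
tex2uni_excerpt = {'alpha': 945, 'leq': 8804, 'rightarrow': 8594}

def tex_symbol(name):
    """Return the Unicode character for a TeX symbol name, or None if unknown."""
    codepoint = tex2uni_excerpt.get(name)
    return chr(codepoint) if codepoint is not None else None

print(tex_symbol('alpha'))       # 'α'
print(tex_symbol('rightarrow'))  # '→'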
kevin-intel/scikit-learn
doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py
19
3140
"""Build a sentiment analysis / polarity model Sentiment analysis can be casted as a binary text classification problem, that is fitting a linear classifier on features extracted from the text of the user messages so as to guess whether the opinion of the author is positive or negative. In this examples we will use a movie review dataset. """ # Author: Olivier Grisel <olivier.grisel@ensta.org> # License: Simplified BSD import sys from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.svm import LinearSVC from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV from sklearn.datasets import load_files from sklearn.model_selection import train_test_split from sklearn import metrics if __name__ == "__main__": # NOTE: we put the following in a 'if __name__ == "__main__"' protected # block to be able to use a multi-core grid search that also works under # Windows, see: http://docs.python.org/library/multiprocessing.html#windows # The multiprocessing module is used as the backend of joblib.Parallel # that is used when n_jobs != 1 in GridSearchCV # the training data folder must be passed as first argument movie_reviews_data_folder = sys.argv[1] dataset = load_files(movie_reviews_data_folder, shuffle=False) print("n_samples: %d" % len(dataset.data)) # split the dataset in training and test set: docs_train, docs_test, y_train, y_test = train_test_split( dataset.data, dataset.target, test_size=0.25, random_state=None) # TASK: Build a vectorizer / classifier pipeline that filters out tokens # that are too rare or too frequent pipeline = Pipeline([ ('vect', TfidfVectorizer(min_df=3, max_df=0.95)), ('clf', LinearSVC(C=1000)), ]) # TASK: Build a grid search to find out whether unigrams or bigrams are # more useful. # Fit the pipeline on the training set using grid search for the parameters parameters = { 'vect__ngram_range': [(1, 1), (1, 2)], } grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1) grid_search.fit(docs_train, y_train) # TASK: print the mean and std for each candidate along with the parameter # settings for all the candidates explored by grid search. n_candidates = len(grid_search.cv_results_['params']) for i in range(n_candidates): print(i, 'params - %s; mean - %0.2f; std - %0.2f' % (grid_search.cv_results_['params'][i], grid_search.cv_results_['mean_test_score'][i], grid_search.cv_results_['std_test_score'][i])) # TASK: Predict the outcome on the testing set and store it in a variable # named y_predicted y_predicted = grid_search.predict(docs_test) # Print the classification report print(metrics.classification_report(y_test, y_predicted, target_names=dataset.target_names)) # Print and plot the confusion matrix cm = metrics.confusion_matrix(y_test, y_predicted) print(cm) # import matplotlib.pyplot as plt # plt.matshow(cm) # plt.show()
bsd-3-clause
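The exercise above pipes a TfidfVectorizer into a LinearSVC and grid-searches the n-gram range over the movie review folder passed on the command line. A minimal sketch of the same pipeline on an invented in-memory corpus, so it runs without that dataset; the toy documents, labels, and the omission of min_df/max_df filtering are assumptions for illustration only.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC

docs = ["a wonderful, moving film", "great acting and a great story",
        "a dull, boring mess", "terrible plot and awful pacing"]
labels = [1, 1, 0, 0]  # 1 = positive, 0 = negative (toy data)

pipeline = Pipeline([
    ('vect', TfidfVectorizer()),   # min_df/max_df omitted: the corpus is tiny
    ('clf', LinearSVC(C=1000)),
])
pipeline.fit(docs, labels)
print(pipeline.predict(["what a great film", "what a boring film"]))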
cauchycui/scikit-learn
examples/ensemble/plot_adaboost_hastie_10_2.py
355
3576
""" ============================= Discrete versus Real AdaBoost ============================= This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates the difference in performance between the discrete SAMME [2] boosting algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated on a binary classification task where the target Y is a non-linear function of 10 input features. Discrete SAMME AdaBoost adapts based on errors in predicted class labels whereas real SAMME.R uses the predicted class probabilities. .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical Learning Ed. 2", Springer, 2009. .. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009. """ print(__doc__) # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>, # Noel Dawe <noel.dawe@gmail.com> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import datasets from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import zero_one_loss from sklearn.ensemble import AdaBoostClassifier n_estimators = 400 # A learning rate of 1. may not be optimal for both SAMME and SAMME.R learning_rate = 1. X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1) X_test, y_test = X[2000:], y[2000:] X_train, y_train = X[:2000], y[:2000] dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1) dt_stump.fit(X_train, y_train) dt_stump_err = 1.0 - dt_stump.score(X_test, y_test) dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1) dt.fit(X_train, y_train) dt_err = 1.0 - dt.score(X_test, y_test) ada_discrete = AdaBoostClassifier( base_estimator=dt_stump, learning_rate=learning_rate, n_estimators=n_estimators, algorithm="SAMME") ada_discrete.fit(X_train, y_train) ada_real = AdaBoostClassifier( base_estimator=dt_stump, learning_rate=learning_rate, n_estimators=n_estimators, algorithm="SAMME.R") ada_real.fit(X_train, y_train) fig = plt.figure() ax = fig.add_subplot(111) ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-', label='Decision Stump Error') ax.plot([1, n_estimators], [dt_err] * 2, 'k--', label='Decision Tree Error') ada_discrete_err = np.zeros((n_estimators,)) for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)): ada_discrete_err[i] = zero_one_loss(y_pred, y_test) ada_discrete_err_train = np.zeros((n_estimators,)) for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)): ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train) ada_real_err = np.zeros((n_estimators,)) for i, y_pred in enumerate(ada_real.staged_predict(X_test)): ada_real_err[i] = zero_one_loss(y_pred, y_test) ada_real_err_train = np.zeros((n_estimators,)) for i, y_pred in enumerate(ada_real.staged_predict(X_train)): ada_real_err_train[i] = zero_one_loss(y_pred, y_train) ax.plot(np.arange(n_estimators) + 1, ada_discrete_err, label='Discrete AdaBoost Test Error', color='red') ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train, label='Discrete AdaBoost Train Error', color='blue') ax.plot(np.arange(n_estimators) + 1, ada_real_err, label='Real AdaBoost Test Error', color='orange') ax.plot(np.arange(n_estimators) + 1, ada_real_err_train, label='Real AdaBoost Train Error', color='green') ax.set_ylim((0.0, 0.5)) ax.set_xlabel('n_estimators') ax.set_ylabel('error rate') leg = ax.legend(loc='upper right', fancybox=True) leg.get_frame().set_alpha(0.7) plt.show()
bsd-3-clause
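The example above traces test and train error per boosting round through staged_predict. A condensed sketch of that staged-error idea, using a smaller sample and fewer estimators than the example so it runs quickly; the reduced sizes are assumptions, not the settings used to reproduce Hastie et al.

import numpy as np
from sklearn.datasets import make_hastie_10_2
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import zero_one_loss
from sklearn.tree import DecisionTreeClassifier

X, y = make_hastie_10_2(n_samples=4000, random_state=1)
X_train, y_train = X[:1000], y[:1000]
X_test, y_test = X[1000:], y[1000:]

# Stumps boosted with the real (SAMME.R) variant, as in the example; the
# estimator argument is passed positionally because its keyword name changed
# across scikit-learn releases.
clf = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
                         n_estimators=50, algorithm="SAMME.R")
clf.fit(X_train, y_train)

# staged_predict yields predictions after 1, 2, ..., n_estimators rounds.
errors = [zero_one_loss(y_pred, y_test) for y_pred in clf.staged_predict(X_test)]
print(errors[0], errors[-1])  # error after the first and the last round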
eg-zhang/scikit-learn
benchmarks/bench_glm.py
297
1493
""" A comparison of different methods in GLM Data comes from a random square matrix. """ from datetime import datetime import numpy as np from sklearn import linear_model from sklearn.utils.bench import total_seconds if __name__ == '__main__': import pylab as pl n_iter = 40 time_ridge = np.empty(n_iter) time_ols = np.empty(n_iter) time_lasso = np.empty(n_iter) dimensions = 500 * np.arange(1, n_iter + 1) for i in range(n_iter): print('Iteration %s of %s' % (i, n_iter)) n_samples, n_features = 10 * i + 3, 10 * i + 3 X = np.random.randn(n_samples, n_features) Y = np.random.randn(n_samples) start = datetime.now() ridge = linear_model.Ridge(alpha=1.) ridge.fit(X, Y) time_ridge[i] = total_seconds(datetime.now() - start) start = datetime.now() ols = linear_model.LinearRegression() ols.fit(X, Y) time_ols[i] = total_seconds(datetime.now() - start) start = datetime.now() lasso = linear_model.LassoLars() lasso.fit(X, Y) time_lasso[i] = total_seconds(datetime.now() - start) pl.figure('scikit-learn GLM benchmark results') pl.xlabel('Dimensions') pl.ylabel('Time (s)') pl.plot(dimensions, time_ridge, color='r') pl.plot(dimensions, time_ols, color='g') pl.plot(dimensions, time_lasso, color='b') pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left') pl.axis('tight') pl.show()
bsd-3-clause
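The benchmark times each fit with sklearn.utils.bench.total_seconds, a private helper that may be missing from current scikit-learn releases. A minimal sketch of the same measurement using only the standard library, assumed equivalent for wall-clock timing of a single fit.

import time
import numpy as np
from sklearn import linear_model

X = np.random.randn(300, 300)
y = np.random.randn(300)

start = time.perf_counter()
linear_model.Ridge(alpha=1.).fit(X, y)
elapsed = time.perf_counter() - start   # seconds, as a float
print("Ridge fit took %.4f s" % elapsed)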
Stargrazer82301/CAAPR
CAAPR/CAAPR_AstroMagic/PTS/pts/do/evolve/score.py
1
5232
#!/usr/bin/env python # -*- coding: utf8 -*- # ***************************************************************** # ** PTS -- Python Toolkit for working with SKIRT ** # ** © Astronomical Observatory, Ghent University ** # ***************************************************************** ## \package pts.do.evolve.score Set scores for GA test. # ----------------------------------------------------------------- # Ensure Python 3 compatibility from __future__ import absolute_import, division, print_function # Import standard modules import numpy as np import matplotlib.pyplot as plt from scipy.optimize import curve_fit # Import the relevant PTS classes and modules from pts.core.tools import filesystem as fs from pts.core.tools import tables # ----------------------------------------------------------------- x = np.linspace(12,25,100) test_data_x = [20., 16., 19.79999924, 18.39999962, 17.10000038, 15.5, 14.69999981, 17.10000038, 15.39999962, 16.20000076, 15., 17.20000076, 16., 17., 14.39999962] test_data_y = [88.59999847, 71.59999847, 93.30000305, 84.30000305, 80.59999847, 75.19999695, 69.69999695, 82., 69.40000153, 83.30000305, 79.59999847, 82.59999847, 80.59999847, 83.5, 76.30000305] # ----------------------------------------------------------------- def fit_function(x, a, b): """ This function ... :param x: :param a: :param b: :return: """ return a * x + b # ----------------------------------------------------------------- def chi_squared_function(chromosome): """ This function calculates the chi-squared value for a certain set of parameters (chromosome) :param chromosome: :return: """ chi_squared = 0.0 for i in range(len(test_data_x)): x = test_data_x[i] y = test_data_y[i] chromosome_y = fit_function(x, chromosome[0], chromosome[1]) chi_squared += (y - chromosome_y) ** 2. 
chi_squared /= 2.0 return chi_squared # ----------------------------------------------------------------- last_generation = None # Check the index of the last generation for name in fs.directories_in_path(): if "reference" in name or "original" in name: continue generation = int(name.split("Generation ")[1]) if last_generation is None or generation > last_generation: last_generation = generation # ----------------------------------------------------------------- if last_generation is None: generation_path = fs.cwd() print("current generation: the initial population") else: generation_path = fs.join(fs.cwd(), "Generation " + str(last_generation)) print("Current generation: " + str(last_generation)) # ----------------------------------------------------------------- # Path to the current GA object path = fs.join(generation_path, "ga.pickle") # Path to the parameters table parameters_path = fs.join(generation_path, "parameters.dat") # ----------------------------------------------------------------- # Load the parameters table table = tables.from_file(parameters_path, format="ascii.ecsv") # ----------------------------------------------------------------- names = [] scores = [] lowest_score = None index_lowest = None for index in range(len(table)): # Get the parameter values parameter_a_tab = table["Parameter a"][index] parameter_b_tab = table["Parameter b"][index] # Calculate the score score = chi_squared_function([parameter_a_tab, parameter_b_tab]) # Keep track of index of lowest score if lowest_score is None or score < lowest_score: lowest_score = score index_lowest = index # Add the score to the list name = table["Unique name"][index] names.append(name) scores.append(score) # Create the chi squared table data = [names, scores] names = ["Unique name", "Chi-squared"] chi_squared_table = tables.new(data, names) # Determine the path to the chi squared table chi_squared_path = fs.join(generation_path, "chi_squared.dat") # Write the chi squared table tables.write(chi_squared_table, chi_squared_path, format="ascii.ecsv") # ----------------------------------------------------------------- best_parameter_a = table["Parameter a"][index_lowest] best_parameter_b = table["Parameter b"][index_lowest] best_path = fs.join(generation_path, "best.dat") with open(best_path, 'w') as best_file: best_file.write("Parameter a: " + str(best_parameter_a) + "\n") best_file.write("Parameter b: " + str(best_parameter_b) + "\n") popt, pcov = curve_fit(fit_function, test_data_x, test_data_y) parameter_a_real = popt[0] parameter_b_real = popt[1] print("Best parameter a:", best_parameter_a, " REAL:", parameter_a_real) print("Best parameter b:", best_parameter_b, " REAL:", parameter_b_real) plt.figure() plt.scatter(test_data_x, test_data_y) plt.plot(x, [fit_function(x_i, best_parameter_a, best_parameter_b) for x_i in x]) plt.plot(x, [fit_function(x_i, parameter_a_real, parameter_b_real) for x_i in x]) plt.ylim(65, 95) plt.xlim(12,22) # Save the figure plot_path = fs.join(generation_path, "best.pdf") plt.savefig(plot_path) # -----------------------------------------------------------------
mit
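The scoring script above rates each chromosome by the chi-squared of a straight-line model against fixed test data and compares the winner with scipy's curve_fit. A self-contained sketch of that scoring step; the four data points and the candidate (a, b) pair are invented for illustration, while the linear model and the division by two mirror the script.

import numpy as np
from scipy.optimize import curve_fit

x = np.array([14.0, 16.0, 18.0, 20.0])
y = np.array([70.0, 78.0, 84.0, 90.0])

def fit_function(x, a, b):
    return a * x + b

def chi_squared(params):
    residuals = y - fit_function(x, *params)
    return np.sum(residuals ** 2) / 2.0

popt, _ = curve_fit(fit_function, x, y)
print(chi_squared([3.0, 25.0]))  # an arbitrary candidate
print(chi_squared(popt))         # the least-squares fit scores lower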
virneo/opencog
scripts/make_benchmark_graphs.py
56
3139
#!/usr/bin/env python
# Requires matplotlib for graphing
# reads *_benchmark.csv files as output by atomspace_bm and turns them into
# graphs.
import csv
import numpy as np
import matplotlib.colors as colors
#import matplotlib.finance as finance
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#import matplotlib.font_manager as font_manager
import glob
import pdb

def moving_average(x, n, type='simple'):
    """
    compute an n period moving average.

    type is 'simple' | 'exponential'
    """
    x = np.asarray(x)
    if type == 'simple':
        weights = np.ones(n)
    else:
        weights = np.exp(np.linspace(-1., 0., n))

    weights /= weights.sum()

    a = np.convolve(x, weights, mode='full')[:len(x)]
    a[:n] = a[n]
    return a

def graph_file(fn, delta_rss=True):
    print "Graphing " + fn
    records = csv.reader(open(fn, 'rb'), delimiter=",")
    sizes = []; times = []; times_seconds = []; memories = []
    for row in records:
        sizes.append(int(row[0]))
        times.append(int(row[1]))
        memories.append(int(row[2]))
        times_seconds.append(float(row[3]))

    left, width = 0.1, 0.8
    rect1 = [left, 0.5, width, 0.4]  # left, bottom, width, height
    rect2 = [left, 0.1, width, 0.4]
    fig = plt.figure(facecolor='white')
    axescolor = '#f6f6f6'  # the axes background color

    ax1 = fig.add_axes(rect1, axisbg=axescolor)
    ax2 = fig.add_axes(rect2, axisbg=axescolor, sharex=ax1)

    ax1.plot(sizes, times_seconds, color='black')
    if len(times_seconds) > 1000:
        # smooth the timing curve; the original referenced an undefined
        # "times_second" here, which raised a NameError
        ax1.plot(sizes, moving_average(times_seconds, len(times_seconds) / 100),
                 color='blue')
    if delta_rss:
        # plot the change in RSS between samples rather than the running total
        oldmemories = list(memories)
        for i in range(1, len(memories)):
            memories[i] = oldmemories[i] - oldmemories[i - 1]
    ax2.plot(sizes, memories, color='black')

    for label in ax1.get_xticklabels():
        label.set_visible(False)

    class MyLocator(mticker.MaxNLocator):
        def __init__(self, *args, **kwargs):
            mticker.MaxNLocator.__init__(self, *args, **kwargs)

        def __call__(self, *args, **kwargs):
            return mticker.MaxNLocator.__call__(self, *args, **kwargs)

    # at most 7 ticks, pruning the upper and lower so they don't overlap
    # with other ticks
    fmt = mticker.ScalarFormatter()
    fmt.set_powerlimits((-3, 4))
    ax1.yaxis.set_major_formatter(fmt)

    ax2.yaxis.set_major_locator(MyLocator(7, prune='upper'))
    fmt = mticker.ScalarFormatter()
    fmt.set_powerlimits((-3, 4))
    ax2.yaxis.set_major_formatter(fmt)
    ax2.yaxis.offsetText.set_visible(False)
    fig.show()

    size = int(fmt.orderOfMagnitude) / 3
    labels = ["B", "KB", "MB", "GB"]
    label = labels[size]
    labels = ["", "(10s)", "(100s)"]
    label += " " + labels[int(fmt.orderOfMagnitude) % 3]

    ax2.set_xlabel("AtomSpace Size")
    ax2.set_ylabel("RSS " + label)
    ax1.set_ylabel("Time (seconds)")
    ax1.set_title(fn)

    fig.show()
    fig.savefig(fn + ".png", format="png")

files_to_graph = glob.glob("*_benchmark.csv")

for fn in files_to_graph:
    graph_file(fn)
agpl-3.0
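The graphing script smooths its timing curve with a simple moving average built on np.convolve. A stand-alone sketch of that smoothing step, mirroring the 'simple' branch of the script's helper; the sample series is invented.

import numpy as np

def moving_average(x, n):
    """Simple n-period moving average, as in the script's 'simple' branch."""
    x = np.asarray(x, dtype=float)
    weights = np.ones(n) / n
    a = np.convolve(x, weights, mode='full')[:len(x)]
    a[:n] = a[n]   # backfill the warm-up region with the first full average
    return a

print(moving_average([1, 2, 3, 4, 5, 6, 7, 8], 3))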
JorgeDeLosSantos/NanchiPlot
nanchi/uiaux.py
1
25587
# -*- coding: utf-8 -*- import matplotlib.pyplot as plt import numpy as np import wx import wx.html as html import wx.grid as wxgrid import wx.lib.floatbar as wxfb import webbrowser try: import uibase as ui import iodata as io from _const_ import * from util import isempty except: import nanchi.uibase as ui import nanchi.iodata as io from nanchi._const_ import * from nanchi.util import isempty class FunctionDialog(wx.Dialog): def __init__(self,parent,**kwargs): #_styles = (wx.CLOSE_BOX|wx.CAPTION) wx.Dialog.__init__(self,parent=parent,title=DEFAULT_DIALOG_CAPTION, size=(200,180)) self.LABEL_FONT = wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD) self.initCtrls() self.initSizers() # Output properties self.data = "" self.out_fun = "" self.out_a = "" self.out_b = "" self.out_points = "" self.Centre(True) #self.Show() def initSizers(self): self.mainsz = wx.BoxSizer(wx.VERTICAL) self.pfunsz = wx.BoxSizer(wx.HORIZONTAL) self.prangesz = wx.BoxSizer(wx.HORIZONTAL) self.pointssz = wx.BoxSizer(wx.HORIZONTAL) self.pbuttonsz = wx.BoxSizer(wx.HORIZONTAL) self.pfunsz.Add(self._fun, 1, wx.ALIGN_LEFT|wx.ALL, 5) self.pfunsz.Add(self.fun, 4, wx.ALIGN_LEFT|wx.ALL, 5) self.prangesz.Add(self._a, 1, wx.ALIGN_LEFT|wx.ALL, 5) self.prangesz.Add(self.a, 4, wx.ALIGN_LEFT|wx.ALL, 5) self.prangesz.Add(self._b, 1, wx.ALIGN_LEFT|wx.ALL, 5) self.prangesz.Add(self.b, 4, wx.ALIGN_LEFT|wx.ALL, 5) self.pointssz.Add(self._points, 1, wx.ALIGN_LEFT|wx.ALL, 5) self.pointssz.Add(self.points, 4, wx.ALIGN_LEFT|wx.ALL, 5) self.pbuttonsz.Add(self.okbutton, 1, wx.ALIGN_CENTRE|wx.ALL, 5) self.pbuttonsz.Add(self.cancelbutton, 1, wx.ALIGN_CENTRE|wx.ALL, 5) for panel in [self.pfun, self.prange, self.pointssz, self.pbutton]: self.mainsz.Add(panel, 1, wx.EXPAND) self.pfun.SetSizer(self.pfunsz) self.prange.SetSizer(self.prangesz) self.pbutton.SetSizer(self.pbuttonsz) self.SetSizer(self.mainsz) def initCtrls(self): # Panels self.pfun = wx.Panel(self, -1) self.prange = wx.Panel(self, -1) self.pbutton = wx.Panel(self, -1) # Controls self._fun = wx.StaticText(self.pfun, -1, u"f(x)", size=(-1,25)) self.fun = wx.TextCtrl(self.pfun, -1, u"15*x^2-x^3", size=(-1,25)) self._a = wx.StaticText(self.prange, -1, u"a", size=(-1,25)) self.a = wx.TextCtrl(self.prange, -1, u"0", size=(50,25)) self._b = wx.StaticText(self.prange, -1, u"b", size=(-1,25)) self.b = wx.TextCtrl(self.prange, -1, u"10", size=(50,25)) self._points = wx.StaticText(self, -1, u"Points", size=(-1,25)) self.points = wx.TextCtrl(self, -1, u"100", size=(80,25)) self.okbutton = wx.Button(self.pbutton, wx.ID_OK, size=(-1,25)) self.cancelbutton = wx.Button(self.pbutton, wx.ID_CANCEL, size=(-1,25), style=wx.ID_CANCEL) for ctrl in [self._fun,self._a,self._b, self._points]: ctrl.SetFont(self.LABEL_FONT) def GetData(self): self.out_fun = self.fun.GetValue().replace("^","**") self.out_a = self.a.GetValue() self.out_b = self.b.GetValue() self.out_points = self.points.GetValue() self.data = (self.out_fun, self.out_a, self.out_b, self.out_points) return self.data class BivariableFunctionDialog(wx.Dialog): def __init__(self,parent,**kwargs): #_styles = (wx.CLOSE_BOX|wx.CAPTION) wx.Dialog.__init__(self,parent=parent,title=DEFAULT_DIALOG_CAPTION, size=(220,200)) self.LABEL_FONT = wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD) self.initCtrls() self.initSizers() # Output properties self.data = "" self.out_fun = "" self.out_x = "" self.out_y = "" self.out_points = "" self.Centre(True) #self.Show() def initSizers(self): # Sizers self.mainsz = wx.BoxSizer(wx.VERTICAL) self.pfunsz = wx.BoxSizer(wx.HORIZONTAL) 
self.prangexsz = wx.BoxSizer(wx.HORIZONTAL) self.prangeysz = wx.BoxSizer(wx.HORIZONTAL) self.pointssz = wx.BoxSizer(wx.HORIZONTAL) self.pbuttonsz = wx.BoxSizer(wx.HORIZONTAL) # add to sizers self.pfunsz.Add(self._fun, 1, wx.ALIGN_LEFT|wx.ALL, 5) self.pfunsz.Add(self.fun, 4, wx.ALIGN_LEFT|wx.ALL, 5) self.prangexsz.Add(self._x1, 1, wx.ALIGN_LEFT|wx.ALL, 5) self.prangexsz.Add(self.x1, 4, wx.ALIGN_LEFT|wx.ALL, 5) self.prangexsz.Add(self._x2, 1, wx.ALIGN_LEFT|wx.ALL, 5) self.prangexsz.Add(self.x2, 4, wx.ALIGN_LEFT|wx.ALL, 5) self.prangeysz.Add(self._y1, 1, wx.ALIGN_LEFT|wx.ALL, 5) self.prangeysz.Add(self.y1, 4, wx.ALIGN_LEFT|wx.ALL, 5) self.prangeysz.Add(self._y2, 1, wx.ALIGN_LEFT|wx.ALL, 5) self.prangeysz.Add(self.y2, 4, wx.ALIGN_LEFT|wx.ALL, 5) self.pointssz.Add(self._points, 1, wx.ALIGN_LEFT|wx.ALL, 5) self.pointssz.Add(self.points, 4, wx.ALIGN_LEFT|wx.ALL, 5) self.pbuttonsz.Add(self.okbutton, 1, wx.ALIGN_CENTRE|wx.ALL, 5) self.pbuttonsz.Add(self.cancelbutton, 1, wx.ALIGN_CENTRE|wx.ALL, 5) for panel in [self.pfun, self.prangex, self.prangey, self.pointssz, self.pbutton]: self.mainsz.Add(panel, 1, wx.EXPAND) self.pfun.SetSizer(self.pfunsz) self.prangex.SetSizer(self.prangexsz) self.prangey.SetSizer(self.prangeysz) self.pbutton.SetSizer(self.pbuttonsz) self.SetSizer(self.mainsz) def initCtrls(self): self.pfun = wx.Panel(self, -1) self.prangex = wx.Panel(self, -1) self.prangey = wx.Panel(self, -1) self.pbutton = wx.Panel(self, -1) self._fun = wx.StaticText(self.pfun, -1, u"f(x,y)", size=(-1,25)) self.fun = wx.TextCtrl(self.pfun, -1, u"(x*y)/(x^2+y^2)", size=(-1,25)) self._x1 = wx.StaticText(self.prangex, -1, u"x1", size=(-1,25)) self.x1 = wx.TextCtrl(self.prangex, -1, u"-10", size=(50,25)) self._x2 = wx.StaticText(self.prangex, -1, u"x2", size=(-1,25)) self.x2 = wx.TextCtrl(self.prangex, -1, u"10", size=(50,25)) self._y1 = wx.StaticText(self.prangey, -1, u"y1", size=(-1,25)) self.y1 = wx.TextCtrl(self.prangey, -1, u"-10", size=(50,25)) self._y2 = wx.StaticText(self.prangey, -1, u"y2", size=(-1,25)) self.y2 = wx.TextCtrl(self.prangey, -1, u"10", size=(50,25)) self._points = wx.StaticText(self, -1, u"Points", size=(-1,25)) self.points = wx.TextCtrl(self, -1, u"100", size=(80,25)) self.okbutton = wx.Button(self.pbutton, wx.ID_OK, size=(-1,25)) self.cancelbutton = wx.Button(self.pbutton, wx.ID_CANCEL, size=(-1,25), style=wx.ID_CANCEL) for ctrl in [self._fun,self._x1, self._x2, self._y1, self._y2, self._points]: ctrl.SetFont(self.LABEL_FONT) def GetData(self): self.out_fun = self.fun.GetValue().replace("^","**") self.out_x = [self.x1.GetValue(), self.x2.GetValue()] self.out_y = [self.y1.GetValue(), self.y2.GetValue()] self.out_points = self.points.GetValue() self.data = (self.out_fun, self.out_x, self.out_y, self.out_points) return self.data class AboutDialog(wx.Frame): def __init__(self,parent,*args,**kwargs): _styles = wx.CAPTION|wx.CLOSE_BOX wx.Frame.__init__(self,parent=parent,title=NANCHI_MAIN_CAPTION, size=(350,220), style=_styles) self.winhtml = HTMLWindow(self) self.winhtml.LoadPage(PATH_ABOUT_HTML) self.Centre(True) self.Show() class HTMLWindow(html.HtmlWindow): def __init__(self,parent,**kwargs): html.HtmlWindow.__init__(self,parent=parent,**kwargs) def OnLinkClicked(self, link): webbrowser.open(link.GetHref()) class StatusBar(wx.StatusBar): def __init__(self,*args,**kwargs): wx.StatusBar.__init__(self,*args,**kwargs) class BusyInfo(object): def __init__(self, msg, parent=None, bgColour="#f0f0f0", fgColour="#8080ff"): self.frame = _InfoFrame(parent, msg, bgColour, fgColour) 
self.frame.Show() self.frame.Refresh() self.frame.Update() def __del__(self): self.Close() def Close(self): """ Hide and close the busy info box """ if self.frame: self.frame.Hide() self.frame.Close() self.frame = None # Magic methods for using this class as a Context Manager def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.Close() return False class _InfoFrame(wx.Frame): def __init__(self, parent, msg, bgColour=None, fgColour=None): wx.Frame.__init__(self, parent, style=wx.BORDER_SIMPLE|wx.FRAME_TOOL_WINDOW|wx.STAY_ON_TOP) bgColour = bgColour if bgColour is not None else wx.Colour(253, 255, 225) fgColour = fgColour if fgColour is not None else wx.BLACK panel = wx.Panel(self) text = wx.StaticText(panel, -1, msg) for win in [panel, text]: win.SetCursor(wx.HOURGLASS_CURSOR) win.SetBackgroundColour(bgColour) win.SetForegroundColour(fgColour) size = text.GetBestSize() self.SetClientSize((size.width + 60, size.height + 40)) panel.SetSize(self.GetClientSize()) text.Center() self.Center() class LogCtrl(wx.TextCtrl): def __init__(self,parent,**kwargs): wx.TextCtrl.__init__(self, parent=parent, id=wx.ID_ANY, style=wx.TE_MULTILINE, **kwargs) self.font = wx.Font(9, wx.MODERN, wx.NORMAL, wx.BOLD) self.SetFont(self.font) self.SetForegroundColour("#ff5050") def write(self,string): _nvalue = ">>> %s"%(string) self.SetValue(_nvalue) class ImportDialog(wx.Dialog): def __init__(self,parent,**kwargs): wx.Dialog.__init__(self,parent=parent,title=DEFAULT_DIALOG_CAPTION, size=(800,500)) self.LABEL_FONT = wx.Font(10, wx.SWISS, wx.NORMAL, wx.NORMAL) self.VALUE_FONT = wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD) self.initCtrls() self.initSizers() self.Centre(True) def initSizers(self): self.mainsz = wx.BoxSizer(wx.VERTICAL) self.panelsz = wx.BoxSizer(wx.HORIZONTAL) self.plogsz = wx.BoxSizer(wx.HORIZONTAL) self.pctrlssz = wx.BoxSizer(wx.VERTICAL) self.pbuttonsz = wx.BoxSizer(wx.HORIZONTAL) # self.pctrlssz.Add(self._dlm, 0, wx.EXPAND|wx.ALL, 5) self.pctrlssz.Add(self.dlm, 0, wx.EXPAND|wx.ALL, 5) self.pctrlssz.Add(self._skiprows, 0, wx.EXPAND|wx.ALL, 5) self.pctrlssz.Add(self.skiprows, 0, wx.EXPAND|wx.ALL, 5) self.pctrlssz.Add(self.preview, 0, wx.ALIGN_CENTRE|wx.ALL, 10) self.panelsz.Add(self.fctrl, 1, wx.EXPAND|wx.ALL, 5) self.panelsz.Add(self.pctrls, 1, wx.EXPAND|wx.ALL, 5) self.panelsz.Add(self.grid, 2, wx.EXPAND|wx.ALL, 5) self.plogsz.Add(self.log, 1, wx.EXPAND|wx.ALL, 5) self.pbuttonsz.Add(self.okbutton, 1, wx.ALIGN_CENTRE|wx.ALL, 5) self.pbuttonsz.Add(self.cancelbutton, 1, wx.ALIGN_CENTRE|wx.ALL, 5) self.mainsz.Add(self.panel, 5, wx.EXPAND|wx.ALL, 5) self.mainsz.Add(self.plog, 1, wx.EXPAND|wx.ALL, 5) self.mainsz.Add(self.pbutton, 0, wx.ALIGN_CENTRE|wx.ALL, 5) self.pctrls.SetSizer(self.pctrlssz) self.panel.SetSizer(self.panelsz) self.plog.SetSizer(self.plogsz) self.pbutton.SetSizer(self.pbuttonsz) self.SetSizer(self.mainsz) def initCtrls(self): self.panel = wx.Panel(self, -1) self.plog = wx.Panel(self, -1) self.pbutton = wx.Panel(self, -1) self.pctrls = wx.Panel(self.panel, -1) wc = IMPORT_DIALOG_WILDCARD self.fctrl = wx.FileCtrl(self.panel, -1, wildCard=wc) self.grid = ui.DataGrid(self.panel, (10,1)) self.grid.SetRowLabelSize(0) self.grid.SetColLabelSize(0) # Controles conf. 
self._dlm = wx.StaticText(self.pctrls, -1, u"Delimiter", size=(-1,25)) self.dlm = wx.TextCtrl(self.pctrls, -1, u",", size=(-1,25)) self.dlm.SetFont(self.VALUE_FONT) self._skiprows = wx.StaticText(self.pctrls, -1, u"Start reading from row...", size=(-1,25)) self.skiprows = wx.SpinCtrl(self.pctrls, -1, min=1, max=100) self.preview = wx.Button(self.pctrls, -1, u"Preview") # Set labels for label in [self._dlm, self._skiprows]: label.SetFont(self.LABEL_FONT) label.SetForegroundColour("#556655") # Log self.log = LogCtrl(self.plog) # Botones self.okbutton = wx.Button(self.pbutton, wx.ID_OK, size=(100,25)) self.cancelbutton = wx.Button(self.pbutton, wx.ID_CANCEL, size=(100,25), style=wx.ID_CANCEL) self.Bind(wx.EVT_BUTTON, self.OnPreview, self.preview) def OnPreview(self,event): self.grid.SetArrayData(np.array(([[],[]]))) filename = self.fctrl.GetPath() delimiter = self.dlm.GetValue() skipr = self.skiprows.GetValue() mps = 100 # max preview size try: data = io.read_txt(filename, delimiter=delimiter, skiprows=skipr) if not data is None: if data.shape[0]>mps and data.shape[1]>mps: self.grid.SetArrayData(data[:mps,:mps]) elif data.shape[0]>mps and data.shape[1]<mps: self.grid.SetArrayData(data[:mps,:]) elif data.shape[0]<mps and data.shape[1]>mps: self.grid.SetArrayData(data[:,:mps]) else: self.grid.SetArrayData(data) infostr = u"Preview from '%s'\nSize: (%g,%g)"%( filename, data.shape[0], data.shape[1]) self.log.write(infostr) else: self.log.write(u"Unable to read data") except Exception as exc: self.log.write(exc) def GetData(self): filename = self.fctrl.GetPath() delimiter = self.dlm.GetValue() skiprows = self.skiprows.GetValue() try: data = io.read_txt(filename, delimiter=delimiter, skiprows=skiprows) if not data is None: return data else: self.log.write("Unable to read data") except Exception as exc: self.log.write(exc) class TickDialog(wx.Dialog): def __init__(self,parent,axes,xy,**kwargs): wx.Dialog.__init__(self,parent=parent,title=DEFAULT_DIALOG_CAPTION, size=(200,400)) #~ self.LABEL_FONT = wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD) self.xy = xy self.ctick = axes.get_xticks() if xy=="x" else axes.get_yticks() self.clabel = axes.get_xticklabels() if xy=="x" else axes.get_yticklabels() self.axes = axes self.initCtrls() self.initSizers() self.initConfig() self.Centre(True) def initCtrls(self): self.panel = wx.Panel(self, -1) self.pbutton = wx.Panel(self, -1) self.grid = TickGrid(self.panel) self.okbt = wx.Button(self.pbutton, wx.ID_OK, u"OK") self.cancelbt = wx.Button(self.pbutton, wx.ID_CANCEL, u"Cancel") def initSizers(self): self.sz = wx.BoxSizer(wx.VERTICAL) self.panelsz = wx.BoxSizer(wx.VERTICAL) self.pbuttonsz = wx.BoxSizer(wx.HORIZONTAL) self.panelsz.Add(self.grid, 1, wx.EXPAND|wx.ALL, 5) self.pbuttonsz.Add(self.okbt, 1, wx.EXPAND|wx.ALL, 5) self.pbuttonsz.Add(self.cancelbt, 1, wx.EXPAND|wx.ALL, 5) self.sz.Add(self.panel, 8, wx.EXPAND|wx.ALL, 5) self.sz.Add(self.pbutton, 1, wx.EXPAND|wx.ALL, 5) self.SetSizer(self.sz) self.panel.SetSizer(self.panelsz) self.pbutton.SetSizer(self.pbuttonsz) def initConfig(self): nrows = len(self.ctick) self.grid.UpdateGridSize(nrows,2) for ii in range(nrows): self.grid.SetCellValue(ii,0,str(self.ctick[ii])) label = self.clabel[ii].get_text() if not label: self.grid.SetCellValue(ii,1,str(self.ctick[ii])) else: self.grid.SetCellValue(ii,1,label) def GetData(self): data = zip(*self.grid.GetArrayData()) ticks = [float(xt) for xt in data[0]] labels = data[1] return ticks,labels class TickGrid(wxgrid.Grid): def __init__(self,parent,**kwargs): 
wxgrid.Grid.__init__(self,parent=parent,id=-1,**kwargs) gridsize = (2,2) rows = int(gridsize[0]) cols = int(gridsize[1]) self.CreateGrid(rows,cols) self.SetRowLabelSize(0) self.SetColLabelValue(0, "Tick") self.SetColLabelValue(1, "TickLabel") self.Bind(wxgrid.EVT_GRID_CELL_CHANGE, self.OnCellEdit) self.Bind(wxgrid.EVT_GRID_CELL_RIGHT_CLICK, self.OnRightClick) def GetArrayData(self): nrows = self.GetNumberRows() ncols = self.GetNumberCols() X = [] for i in range(nrows): row = [] for j in range(ncols): cval = self.GetCellValue(i,j) if not isempty(cval): row.append(cval) else: row.append("") X.append(row) return X def UpdateGridSize(self,rows,cols): self.ClearGrid() ccols = self.GetNumberCols() crows = self.GetNumberRows() if rows > crows: self.AppendRows(rows-crows) elif rows < crows: self.DeleteRows(0,crows-rows) if cols > ccols: self.AppendCols(cols-ccols) elif cols < ccols: self.DeleteCols(0,ccols-cols) def GetSelectedRows(self): srows = [] top_left = self.GetSelectionBlockTopLeft() bottom_right = self.GetSelectionBlockBottomRight() if not isempty(bottom_right) and not isempty(top_left): max_row = bottom_right[0][0] min_row = top_left[0][0] srows = range(min_row,max_row+1) return srows def OnCellEdit(self,event): pass def OnRightClick(self,event): pum = wx.Menu() addrow = wx.MenuItem(pum, -1, "Add row...") pum.AppendItem(addrow) pum.AppendSeparator() delrows = wx.MenuItem(pum, -1, "Delete rows") pum.AppendItem(delrows) pum.AppendSeparator() clearcell = wx.MenuItem(pum, -1, "Clear cells") pum.AppendItem(clearcell) # Binds pum.Bind(wx.EVT_MENU, self.del_rows, delrows) pum.Bind(wx.EVT_MENU, self.add_row, addrow) pum.Bind(wx.EVT_MENU, self.clear_cell, clearcell) # Show self.PopupMenu(pum) pum.Destroy() def del_rows(self,event): rows = self.GetSelectedRows() self.DeleteRows(rows[0],len(rows)) def add_row(self,event): self.AppendRows(1) def clear_cell(self,event): top_left = self.GetSelectionBlockTopLeft() bottom_right = self.GetSelectionBlockBottomRight() row_range = range(top_left[0][0], bottom_right[0][0] + 1) col_range = range(top_left[0][1], bottom_right[0][1] + 1) for ii in row_range: for jj in col_range: self.SetCellValue(ii,jj,u"") class LineStyleDialog(wx.Dialog): def __init__(self,parent,**kwargs): wx.Dialog.__init__(self,parent=parent,title=DEFAULT_DIALOG_CAPTION, size=(200,120)) self.LABEL_FONT = wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD) self.initCtrls() self.initSizers() self.Centre(True) def initCtrls(self): self.panel = wx.Panel(self, -1) self.pbutton = wx.Panel(self, -1) self._label = wx.StaticText(self.panel, -1, u"Select a line style") self._lstyles = "-|--|:|-.".split("|") self.options = wx.ComboBox(self.panel, -1, choices=self._lstyles) self.options.SetFont(self.LABEL_FONT) self.okbt = wx.Button(self.pbutton, wx.ID_OK) self.cancelbt = wx.Button(self.pbutton, wx.ID_CANCEL) def initSizers(self): self.sz = wx.BoxSizer(wx.VERTICAL) self.panelsz = wx.BoxSizer(wx.VERTICAL) self.pbuttonsz = wx.BoxSizer(wx.HORIZONTAL) self.panelsz.Add(self._label, 1, wx.EXPAND|wx.ALL, 2) self.panelsz.Add(self.options, 1, wx.EXPAND|wx.ALL, 2) self.pbuttonsz.Add(self.okbt, 1, wx.EXPAND|wx.ALL, 3) self.pbuttonsz.Add(self.cancelbt, 1, wx.EXPAND|wx.ALL, 3) self.sz.Add(self.panel, 2, wx.EXPAND|wx.ALL, 2) self.sz.Add(self.pbutton, 1, wx.EXPAND|wx.ALL, 2) self.SetSizer(self.sz) self.panel.SetSizer(self.panelsz) self.pbutton.SetSizer(self.pbuttonsz) def GetData(self): _ls = self.options.GetValue() if not _ls in self._lstyles: _ls = "-" return _ls class PieLabelsDialog(wx.Dialog): def 
__init__(self,parent,labels,**kwargs): wx.Dialog.__init__(self,parent=parent,title=DEFAULT_DIALOG_CAPTION, size=(200,300)) self.labels = labels self.initCtrls() self.initSizers() self.initConfig() self.Centre(True) def initCtrls(self): self.panel = wx.Panel(self, -1) self.pbutton = wx.Panel(self, -1) self.grid = wxgrid.Grid(self.panel) self.okbt = wx.Button(self.pbutton, wx.ID_OK) self.cancelbt = wx.Button(self.pbutton, wx.ID_CANCEL) def initSizers(self): self.sz = wx.BoxSizer(wx.VERTICAL) self.panelsz = wx.BoxSizer(wx.VERTICAL) self.pbuttonsz = wx.BoxSizer(wx.HORIZONTAL) self.panelsz.Add(self.grid, 1, wx.EXPAND|wx.ALL, 5) self.pbuttonsz.Add(self.okbt, 1, wx.EXPAND|wx.ALL, 5) self.pbuttonsz.Add(self.cancelbt, 1, wx.EXPAND|wx.ALL, 5) self.sz.Add(self.panel, 8, wx.EXPAND|wx.ALL, 5) self.sz.Add(self.pbutton, 1, wx.EXPAND|wx.ALL, 5) self.SetSizer(self.sz) self.panel.SetSizer(self.panelsz) self.pbutton.SetSizer(self.pbuttonsz) def initConfig(self): _rows = len(self.labels) self.grid.CreateGrid(_rows,1) self.grid.SetRowLabelSize(0) self.grid.SetColLabelSize(0) self.grid.SetColSize(0,160) for ii in range(_rows): self.grid.SetCellValue(ii,0,str(self.labels[ii].get_text())) def GetData(self): for k,ii in enumerate(range(len(self.labels))): val = self.grid.GetCellValue(ii,0) self.labels[k].set_text(val) return self.labels def test_about(): app=wx.App() fr = AboutDialog(None) app.MainLoop() def test_function(): app = wx.App() fr = BivariableFunctionDialog(None) if fr.ShowModal() == wx.ID_OK: print fr.GetData() fr.Destroy() app.MainLoop() def test_import(): app = wx.App() fr = ImportDialog(None) if fr.ShowModal() == wx.ID_OK: print fr.GetData() fr.Destroy() app.MainLoop() def test_tick(): f = plt.figure() ax = f.add_subplot(111) app = wx.App() fr = TickDialog(None,ax,"x") if fr.ShowModal() == wx.ID_OK: print fr.GetData() fr.Destroy() app.MainLoop() def test_axestoolbar(): app = wx.App() fr = wx.Frame(None, -1, "Hi !!!", size=(800,600)) sz = wx.BoxSizer(wx.VERTICAL) tb = AxesToolbar(fr) sz.Add(tb, 0, wx.EXPAND) fr.SetSizer(sz) tb.Realize() fr.Show() app.MainLoop() def test_linestyle(): app = wx.App() fr = LineStyleDialog(None) if fr.ShowModal() == wx.ID_OK: print fr.GetData() fr.Destroy() app.MainLoop() def test_pie(): f = plt.figure() ax = f.add_subplot(111) _, lbl = ax.pie([1,2,3]) app = wx.App() fr = PieLabelsDialog(None, lbl) if fr.ShowModal() == wx.ID_OK: print fr.GetData() fr.Destroy() app.MainLoop() if __name__=='__main__': test_function()
mit
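The module's own test helpers show how these dialogs are meant to be used modally. A condensed sketch of that pattern for FunctionDialog, assuming wxPython is installed and the nanchi package is importable.

import wx
from nanchi.uiaux import FunctionDialog

app = wx.App()
dlg = FunctionDialog(None)
if dlg.ShowModal() == wx.ID_OK:
    print(dlg.GetData())   # (function, a, b, points) as strings
dlg.Destroy()
app.MainLoop()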
thilbern/scikit-learn
sklearn/gaussian_process/tests/test_gaussian_process.py
17
6093
""" Testing for Gaussian Process module (sklearn.gaussian_process) """ # Author: Vincent Dubourg <vincent.dubourg@gmail.com> # Licence: BSD 3 clause from nose.tools import raises from nose.tools import assert_true import numpy as np from sklearn.gaussian_process import GaussianProcess from sklearn.gaussian_process import regression_models as regression from sklearn.gaussian_process import correlation_models as correlation from sklearn.utils.testing import assert_greater f = lambda x: x * np.sin(x) X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T y = f(X).ravel() def test_1d(regr=regression.constant, corr=correlation.squared_exponential, random_start=10, beta0=None): """ MLE estimation of a one-dimensional Gaussian Process model. Check random start optimization. Test the interpolating property. """ gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0, theta0=1e-2, thetaL=1e-4, thetaU=1e-1, random_start=random_start, verbose=False).fit(X, y) y_pred, MSE = gp.predict(X, eval_MSE=True) y2_pred, MSE2 = gp.predict(X2, eval_MSE=True) assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.) and np.allclose(MSE2, 0., atol=10)) def test_2d(regr=regression.constant, corr=correlation.squared_exponential, random_start=10, beta0=None): """ MLE estimation of a two-dimensional Gaussian Process model accounting for anisotropy. Check random start optimization. Test the interpolating property. """ b, kappa, e = 5., .5, .1 g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2. X = np.array([[-4.61611719, -6.00099547], [4.10469096, 5.32782448], [0.00000000, -0.50000000], [-6.17289014, -4.6984743], [1.3109306, -6.93271427], [-5.03823144, 3.10584743], [-2.87600388, 6.74310541], [5.21301203, 4.26386883]]) y = g(X).ravel() thetaL = [1e-4] * 2 thetaU = [1e-1] * 2 gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0, theta0=[1e-2] * 2, thetaL=thetaL, thetaU=thetaU, random_start=random_start, verbose=False) gp.fit(X, y) y_pred, MSE = gp.predict(X, eval_MSE=True) assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)) assert_true(np.all(gp.theta_ >= thetaL)) # Lower bounds of hyperparameters assert_true(np.all(gp.theta_ <= thetaU)) # Upper bounds of hyperparameters def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential, random_start=10, beta0=None): """ MLE estimation of a two-dimensional Gaussian Process model accounting for anisotropy. Check random start optimization. Test the GP interpolation for 2D output """ b, kappa, e = 5., .5, .1 g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2. f = lambda x: np.vstack((g(x), g(x))).T X = np.array([[-4.61611719, -6.00099547], [4.10469096, 5.32782448], [0.00000000, -0.50000000], [-6.17289014, -4.6984743], [1.3109306, -6.93271427], [-5.03823144, 3.10584743], [-2.87600388, 6.74310541], [5.21301203, 4.26386883]]) y = f(X) gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0, theta0=[1e-2] * 2, thetaL=[1e-4] * 2, thetaU=[1e-1] * 2, random_start=random_start, verbose=False) gp.fit(X, y) y_pred, MSE = gp.predict(X, eval_MSE=True) assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)) @raises(ValueError) def test_wrong_number_of_outputs(): gp = GaussianProcess() gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3]) def test_more_builtin_correlation_models(random_start=1): """ Repeat test_1d and test_2d for several built-in correlation models specified as strings. 
""" all_corr = ['absolute_exponential', 'squared_exponential', 'cubic', 'linear'] for corr in all_corr: test_1d(regr='constant', corr=corr, random_start=random_start) test_2d(regr='constant', corr=corr, random_start=random_start) test_2d_2d(regr='constant', corr=corr, random_start=random_start) def test_ordinary_kriging(): """ Repeat test_1d and test_2d with given regression weights (beta0) for different regression models (Ordinary Kriging). """ test_1d(regr='linear', beta0=[0., 0.5]) test_1d(regr='quadratic', beta0=[0., 0.5, 0.5]) test_2d(regr='linear', beta0=[0., 0.5, 0.5]) test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5]) test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5]) test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5]) def test_no_normalize(): gp = GaussianProcess(normalize=False).fit(X, y) y_pred = gp.predict(X) assert_true(np.allclose(y_pred, y)) def test_random_starts(): """ Test that an increasing number of random-starts of GP fitting only increases the reduced likelihood function of the optimal theta. """ n_samples, n_features = 50, 3 np.random.seed(0) rng = np.random.RandomState(0) X = rng.randn(n_samples, n_features) * 2 - 1 y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) best_likelihood = -np.inf for random_start in range(1, 5): gp = GaussianProcess(regr="constant", corr="squared_exponential", theta0=[1e-0] * n_features, thetaL=[1e-4] * n_features, thetaU=[1e+1] * n_features, random_start=random_start, random_state=0, verbose=False).fit(X, y) rlf = gp.reduced_likelihood_function()[0] assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps) best_likelihood = rlf
bsd-3-clause
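The tests above exercise the legacy GaussianProcess estimator, which was removed from recent scikit-learn releases in favour of GaussianProcessRegressor; the sketch below therefore only runs on the old versions these tests target. It mirrors the interpolation check from test_1d with the same data and hyperparameter bounds.

import numpy as np
from sklearn.gaussian_process import GaussianProcess  # legacy API

f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
y = f(X).ravel()

gp = GaussianProcess(theta0=1e-2, thetaL=1e-4, thetaU=1e-1).fit(X, y)
y_pred, mse = gp.predict(X, eval_MSE=True)
print(np.allclose(y_pred, y), np.allclose(mse, 0.))  # interpolates the training points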
ron1818/Singaboat_RobotX2016
robotx_nav/nodes/color_totem_planner.py
3
8561
#!/usr/bin/env python import random import itertools import rospy from visualization_msgs.msg import MarkerArray, Marker from geometry_msgs.msg import Point, Quaternion import numpy as np from sklearn.cluster import KMeans, DBSCAN from sklearn import svm import planner_utils class ColorTotemPlanner(object): """ find coordinate of totem for task1 """ isready=False red_list, green_list, yellow_list, blue_list = list(), list(), list(), list() red_center, green_center, yellow_center, blue_center = list(), list(), list(), list() MAX_LENS = 20 # actually 21 def __init__(self, nodename="color_totem_coordinate", assigned=np.array([[2,False],[0,True],[1,False]])): """ assigned is a np.array: np.array[[color_id, is_ccw]], e.g. assigned = np.array([[2, False], [0, True], [1, False]]) """ rospy.init_node(nodename) rospy.on_shutdown(self.shutdown) self.id_counter = 0 self.threshold = 5 self.map_corners = np.array([[0,0], [0,40], [40,40], [40,0]]) self.rate = rospy.get_param("~rate", 1) # self.kmeans = KMeans(n_clusters=2) self.ocsvm = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1) # Subscribe to marker array publisher rospy.Subscriber("filtered_marker_array", MarkerArray, self.markerarray_callback, queue_size=10) self.allvisited = False # is all totems visited? self.assigned = assigned # n * 2 array self.visited = np.array([False] * len(self.assigned)) # n*1 array self.totem_find = np.array([False] * len(self.assigned)) # n*1 array self.center = np.array([[0, 0, 0]] * len(self.assigned)) # initialize center, n*3 array self.radius = np.array([2.5] * len(self.assigned)) # initialize center, n*3 array # self.visited_dict = {"red": False, "green": False, "blue": False, "yellow": False} # self.requested_dict = {"red": False, "green": False, "blue": False, "yellow": False} self.hold_moveto = False # by default, let moveto to work self.hold_loiter = False # by default, let moveto to work # self.requested_moveto = False # by default, moveto is not requested # self.isready = False # is loiter ready? by default not go for loiter, only when loiter target identified self.exit_target = [-30, 15, 0] def planner(self): r = rospy.Rate(self.rate) # while not rospy.is_shutdown(): self.loiter_target = list() self.moveto_target = list() # if not self.totem_find: # self.moveto_target = planner_utils.random_walk(self.map_corners, style="unif", kwargs={"center": self.center}) # need to find centers # else: for i in range(len(self.assigned)): # visit the totems in range # print "totem", i if not self.totem_find[i]: # did not find the totem # print "totem", i, "not find" if not self.hold_moveto: print "can move" kwargs = {"center": self.center, "threshold": self.threshold} self.moveto_target = planner_utils.random_walk(self.map_corners, style="unif", **kwargs) # need to find centers self.hold_moveto = True else: # find the totem # print "find totem", i # print self.visited print "hold loiter?", self.hold_loiter print "visited", i, self.visited[i], self.visited[0:i], all(self.visited[0:i]) if not self.visited[i] and all(self.visited[0:i]): # previously all visited: print "can loiter?" 
if not self.hold_loiter: print "can loiter" # which totem, center, radius, ccw self.loiter_target = [i, self.center[i], self.radius[i], self.assigned[i,1]] self.hold_loiter = True self.hold_moveto = True # else: # if not self.hold_moveto: # kwargs = {"center": self.center, "threshold": self.threshold} # self.moveto_target = planner_utils.random_walk(self.map_corners, style="unif", **kwargs) # need to find centers # self.hold_moveto = True if all(self.visited): self.allvisited = True return self.totem_find, self.loiter_target, self.moveto_target, self.allvisited def markerarray_callback(self, msg): """ calculate average over accumulate """ for i in range(len(msg.markers)): if msg.markers[i].id == 0: # red if len(self.red_list) > self.MAX_LENS: self.red_list.pop(0) self.red_list.append([msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]) elif msg.markers[i].id == 1: # green if len(self.green_list) > self.MAX_LENS: self.green_list.pop(0) self.green_list.append([msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]) elif msg.markers[i].id == 2: # blue if len(self.blue_list) > self.MAX_LENS: self.blue_list.pop(0) self.blue_list.append([msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]) elif msg.markers[i].id == 5: # yellow if len(self.yellow_list) > self.MAX_LENS: self.yellow_list.pop(0) self.yellow_list.append([msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]) self.find_totem_center() def find_totem_center(self): # k means clustering for all color if len(self.red_list) >= self.MAX_LENS: # have red totem info self.red_center = self.one_class_svm(self.red_list) if len(self.green_list) >= self.MAX_LENS: # have green totem info self.green_center = self.one_class_svm(self.green_list) if len(self.blue_list) >= self.MAX_LENS: # have blue totem info self.blue_center = self.one_class_svm(self.blue_list) if len(self.yellow_list) >= self.MAX_LENS: # have yellow totem info self.yellow_center = self.one_class_svm(self.yellow_list) for i in range(len(self.assigned)): # for each color if self.assigned[i,0] == 0 and self.red_center != []: # red self.center[i,:] = self.red_center self.totem_find[i] = True elif self.assigned[i,0] == 1 and self.green_center != []: # green self.center[i,:] = self.green_center self.totem_find[i] = True elif self.assigned[i,0] == 2 and self.blue_center != []: # blue self.center[i,:] = self.blue_center self.totem_find[i] = True elif self.assigned[i,0] == 5 and self.yellow_center != []: # yellow self.center[i,:] = self.yellow_center self.totem_find[i] = True def one_class_svm(self, data_list): """ return support vector and thus cluster center """ data_list = np.array(data_list) self.ocsvm.fit(data_list) sv = self.ocsvm.support_vectors_ # find the sv's centroid, assume only one cluster. return (np.mean(sv[:,0]), np.mean(sv[:,1]), 0) # return (np.median(sv[:,0]), np.median(sv[:,1])) def update_loiter(self, hold_loiter): """ update from external process""" # hold_loiter, visit_id = res self.hold_loiter = hold_loiter self.hold_moveto = hold_loiter # print "visit id", visit_id print "hold_loiter???? 
????", self.hold_loiter self.visited[self.id_counter] = True print self.visited self.id_counter += 1 def update_hold_moveto(self, hold_moveto): """ update from external process""" self.hold_moveto = hold_moveto def random_walk(self, centers): # use planner_utils.random_walk instead """ create random walk points and avoid valid centers """ self.mapsize = (-30, 30) x_range = range(self.mapsize[0], 0, 5) y_range = range(0, self.mapsize[1], 5) grid = list(itertools.product(x_range, y_range)) return random.choice(grid) def shutdown(self): pass if __name__ == "__main__": try: totem=ColorTotemPlanner("color_totem_planner") except rospy.ROSInterruptException: pass
gpl-3.0
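The planner above locates each totem by fitting a one-class SVM to the buffered marker positions and taking the centroid of the support vectors (the one_class_svm method). A minimal, self-contained sketch of that step, with made-up 2-D detections standing in for the marker buffer:

import numpy as np
from sklearn import svm

# fake buffer of noisy (x, y) detections of a single totem (illustrative data only)
rng = np.random.RandomState(0)
points = rng.normal(loc=[12.0, 7.0], scale=0.5, size=(20, 2))

# same estimator settings as the planner uses
ocsvm = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
ocsvm.fit(points)

# the support vectors outline the cloud of detections; their centroid is
# taken as the totem centre (z fixed to 0, as in the planner)
sv = ocsvm.support_vectors_
center = (np.mean(sv[:, 0]), np.mean(sv[:, 1]), 0)
print(center)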
NeurotechBerkeley/bci-course
lab5/lsl-record.py
5
2841
#!/usr/bin/env python ## code by Alexandre Barachant import numpy as np import pandas as pd from time import time, strftime, gmtime from optparse import OptionParser from pylsl import StreamInlet, resolve_byprop from sklearn.linear_model import LinearRegression default_fname = ("data/data_%s.csv" % strftime("%Y-%m-%d-%H.%M.%S", gmtime())) parser = OptionParser() parser.add_option("-d", "--duration", dest="duration", type='int', default=300, help="duration of the recording in seconds.") parser.add_option("-f", "--filename", dest="filename", type='str', default=default_fname, help="Name of the recording file.") # dejitter timestamps dejitter = False (options, args) = parser.parse_args() print("looking for an EEG stream...") streams = resolve_byprop('type', 'EEG', timeout=2) if len(streams) == 0: raise RuntimeError("Can't find EEG stream") print("Start acquiring data") inlet = StreamInlet(streams[0], max_chunklen=12) eeg_time_correction = inlet.time_correction() print("looking for a Markers stream...") marker_streams = resolve_byprop('type', 'Markers', timeout=2) if marker_streams: inlet_marker = StreamInlet(marker_streams[0]) marker_time_correction = inlet_marker.time_correction() else: inlet_marker = False print("Can't find Markers stream") info = inlet.info() description = info.desc() freq = info.nominal_srate() Nchan = info.channel_count() ch = description.child('channels').first_child() ch_names = [ch.child_value('label')] for i in range(1, Nchan): ch = ch.next_sibling() ch_names.append(ch.child_value('label')) res = [] timestamps = [] markers = [] t_init = time() print('Start recording at time t=%.3f' % t_init) while (time() - t_init) < options.duration: try: data, timestamp = inlet.pull_chunk(timeout=1.0, max_samples=12) if timestamp: res.append(data) timestamps.extend(timestamp) if inlet_marker: marker, timestamp = inlet_marker.pull_sample(timeout=0.0) if timestamp: markers.append([marker, timestamp]) except KeyboardInterrupt: break res = np.concatenate(res, axis=0) timestamps = np.array(timestamps) if dejitter: y = timestamps X = np.atleast_2d(np.arange(0, len(y))).T lr = LinearRegression() lr.fit(X, y) timestamps = lr.predict(X) res = np.c_[timestamps, res] data = pd.DataFrame(data=res, columns=['timestamps'] + ch_names) data['Marker'] = 0 # process markers: for marker in markers: # find index of markers ix = np.argmin(np.abs(marker[1] - timestamps)) val = timestamps[ix] data.loc[ix, 'Marker'] = marker[0][0] data.to_csv(options.filename, float_format='%.3f', index=False) print('Done !')
mit
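lsl-record.py keeps an optional dejitter branch that replaces the raw LSL timestamps with a straight-line fit against the sample index. A small stand-alone sketch of that step on synthetic jittery timestamps (the 256 Hz rate and jitter level are assumptions for illustration):

import numpy as np
from sklearn.linear_model import LinearRegression

fs = 256.0
n = 1000
rng = np.random.RandomState(0)
# nominally regular timestamps with a little jitter on each sample
timestamps = np.arange(n) / fs + rng.normal(scale=1e-3, size=n)

# regress time on sample index and use the fitted line as the clock
X = np.arange(n).reshape(-1, 1)
lr = LinearRegression().fit(X, timestamps)
smoothed = lr.predict(X)

print(smoothed[1] - smoothed[0])   # very close to 1/256 s between samples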
oesteban/seaborn
seaborn/timeseries.py
4
15212
"""Timeseries plotting functions.""" from __future__ import division import numpy as np import pandas as pd from scipy import stats, interpolate import matplotlib as mpl import matplotlib.pyplot as plt from .external.six import string_types from . import utils from . import algorithms as algo from .palettes import color_palette def tsplot(data, time=None, unit=None, condition=None, value=None, err_style="ci_band", ci=68, interpolate=True, color=None, estimator=np.mean, n_boot=5000, err_palette=None, err_kws=None, legend=True, ax=None, **kwargs): """Plot one or more timeseries with flexible representation of uncertainty. This function is intended to be used with data where observations are nested within sampling units that were measured at multiple timepoints. It can take data specified either as a long-form (tidy) DataFrame or as an ndarray with dimensions (unit, time) The interpretation of some of the other parameters changes depending on the type of object passed as data. Parameters ---------- data : DataFrame or ndarray Data for the plot. Should either be a "long form" dataframe or an array with dimensions (unit, time, condition). In both cases, the condition field/dimension is optional. The type of this argument determines the interpretation of the next few parameters. When using a DataFrame, the index has to be sequential. time : string or series-like Either the name of the field corresponding to time in the data DataFrame or x values for a plot when data is an array. If a Series, the name will be used to label the x axis. unit : string Field in the data DataFrame identifying the sampling unit (e.g. subject, neuron, etc.). The error representation will collapse over units at each time/condition observation. This has no role when data is an array. value : string Either the name of the field corresponding to the data values in the data DataFrame (i.e. the y coordinate) or a string that forms the y axis label when data is an array. condition : string or Series-like Either the name of the field identifying the condition an observation falls under in the data DataFrame, or a sequence of names with a length equal to the size of the third dimension of data. There will be a separate trace plotted for each condition. If condition is a Series with a name attribute, the name will form the title for the plot legend (unless legend is set to False). err_style : string or list of strings or None Names of ways to plot uncertainty across units from set of {ci_band, ci_bars, boot_traces, boot_kde, unit_traces, unit_points}. Can use one or more than one method. ci : float or list of floats in [0, 100] Confidence interval size(s). If a list, it will stack the error plots for each confidence interval. Only relevant for error styles with "ci" in the name. interpolate : boolean Whether to do a linear interpolation between each timepoint when plotting. The value of this parameter also determines the marker used for the main plot traces, unless marker is specified as a keyword argument. color : seaborn palette or matplotlib color name or dictionary Palette or color for the main plots and error representation (unless plotting by unit, which can be separately controlled with err_palette). If a dictionary, should map condition name to color spec. estimator : callable Function to determine central tendency and to pass to bootstrap must take an ``axis`` argument. n_boot : int Number of bootstrap iterations. err_palette : seaborn palette Palette name or list of colors used when plotting data for each unit. 
err_kws : dict, optional Keyword argument dictionary passed through to matplotlib function generating the error plot, legend : bool, optional If ``True`` and there is a ``condition`` variable, add a legend to the plot. ax : axis object, optional Plot in given axis; if None creates a new figure kwargs : Other keyword arguments are passed to main plot() call Returns ------- ax : matplotlib axis axis with plot data Examples -------- Plot a trace with translucent confidence bands: .. plot:: :context: close-figs >>> import numpy as np; np.random.seed(22) >>> import seaborn as sns; sns.set(color_codes=True) >>> x = np.linspace(0, 15, 31) >>> data = np.sin(x) + np.random.rand(10, 31) + np.random.randn(10, 1) >>> ax = sns.tsplot(data=data) Plot a long-form dataframe with several conditions: .. plot:: :context: close-figs >>> gammas = sns.load_dataset("gammas") >>> ax = sns.tsplot(time="timepoint", value="BOLD signal", ... unit="subject", condition="ROI", ... data=gammas) Use error bars at the positions of the observations: .. plot:: :context: close-figs >>> ax = sns.tsplot(data=data, err_style="ci_bars", color="g") Don't interpolate between the observations: .. plot:: :context: close-figs >>> import matplotlib.pyplot as plt >>> ax = sns.tsplot(data=data, err_style="ci_bars", interpolate=False) Show multiple confidence bands: .. plot:: :context: close-figs >>> ax = sns.tsplot(data=data, ci=[68, 95], color="m") Use a different estimator: .. plot:: :context: close-figs >>> ax = sns.tsplot(data=data, estimator=np.median) Show each bootstrap resample: .. plot:: :context: close-figs >>> ax = sns.tsplot(data=data, err_style="boot_traces", n_boot=500) Show the trace from each sampling unit: .. plot:: :context: close-figs >>> ax = sns.tsplot(data=data, err_style="unit_traces") """ # Sort out default values for the parameters if ax is None: ax = plt.gca() if err_kws is None: err_kws = {} # Handle different types of input data if isinstance(data, pd.DataFrame): xlabel = time ylabel = value # Condition is optional if condition is None: condition = pd.Series(np.ones(len(data))) legend = False legend_name = None n_cond = 1 else: legend = True and legend legend_name = condition n_cond = len(data[condition].unique()) else: data = np.asarray(data) # Data can be a timecourse from a single unit or # several observations in one condition if data.ndim == 1: data = data[np.newaxis, :, np.newaxis] elif data.ndim == 2: data = data[:, :, np.newaxis] n_unit, n_time, n_cond = data.shape # Units are experimental observations. Maybe subjects, or neurons if unit is None: units = np.arange(n_unit) unit = "unit" units = np.repeat(units, n_time * n_cond) ylabel = None # Time forms the xaxis of the plot if time is None: times = np.arange(n_time) else: times = np.asarray(time) xlabel = None if hasattr(time, "name"): xlabel = time.name time = "time" times = np.tile(np.repeat(times, n_cond), n_unit) # Conditions split the timeseries plots if condition is None: conds = range(n_cond) legend = False if isinstance(color, dict): err = "Must have condition names if using color dict." 
raise ValueError(err) else: conds = np.asarray(condition) legend = True and legend if hasattr(condition, "name"): legend_name = condition.name else: legend_name = None condition = "cond" conds = np.tile(conds, n_unit * n_time) # Value forms the y value in the plot if value is None: ylabel = None else: ylabel = value value = "value" # Convert to long-form DataFrame data = pd.DataFrame(dict(value=data.ravel(), time=times, unit=units, cond=conds)) # Set up the err_style and ci arguments for the loop below if isinstance(err_style, string_types): err_style = [err_style] elif err_style is None: err_style = [] if not hasattr(ci, "__iter__"): ci = [ci] # Set up the color palette if color is None: current_palette = utils.get_color_cycle() if len(current_palette) < n_cond: colors = color_palette("husl", n_cond) else: colors = color_palette(n_colors=n_cond) elif isinstance(color, dict): colors = [color[c] for c in data[condition].unique()] else: try: colors = color_palette(color, n_cond) except ValueError: color = mpl.colors.colorConverter.to_rgb(color) colors = [color] * n_cond # Do a groupby with condition and plot each trace for c, (cond, df_c) in enumerate(data.groupby(condition, sort=False)): df_c = df_c.pivot(unit, time, value) x = df_c.columns.values.astype(np.float) # Bootstrap the data for confidence intervals boot_data = algo.bootstrap(df_c.values, n_boot=n_boot, axis=0, func=estimator) cis = [utils.ci(boot_data, v, axis=0) for v in ci] central_data = estimator(df_c.values, axis=0) # Get the color for this condition color = colors[c] # Use subroutines to plot the uncertainty for style in err_style: # Allow for null style (only plot central tendency) if style is None: continue # Grab the function from the global environment try: plot_func = globals()["_plot_%s" % style] except KeyError: raise ValueError("%s is not a valid err_style" % style) # Possibly set up to plot each observation in a different color if err_palette is not None and "unit" in style: orig_color = color color = color_palette(err_palette, len(df_c.values)) # Pass all parameters to the error plotter as keyword args plot_kwargs = dict(ax=ax, x=x, data=df_c.values, boot_data=boot_data, central_data=central_data, color=color, err_kws=err_kws) # Plot the error representation, possibly for multiple cis for ci_i in cis: plot_kwargs["ci"] = ci_i plot_func(**plot_kwargs) if err_palette is not None and "unit" in style: color = orig_color # Plot the central trace kwargs.setdefault("marker", "" if interpolate else "o") ls = kwargs.pop("ls", "-" if interpolate else "") kwargs.setdefault("linestyle", ls) label = cond if legend else "_nolegend_" ax.plot(x, central_data, color=color, label=label, **kwargs) # Pad the sides of the plot only when not interpolating ax.set_xlim(x.min(), x.max()) x_diff = x[1] - x[0] if not interpolate: ax.set_xlim(x.min() - x_diff, x.max() + x_diff) # Add the plot labels if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) if legend: ax.legend(loc=0, title=legend_name) return ax # Subroutines for tsplot errorbar plotting # ---------------------------------------- def _plot_ci_band(ax, x, ci, color, err_kws, **kwargs): """Plot translucent error bands around the central tendancy.""" low, high = ci if "alpha" not in err_kws: err_kws["alpha"] = 0.2 ax.fill_between(x, low, high, facecolor=color, **err_kws) def _plot_ci_bars(ax, x, central_data, ci, color, err_kws, **kwargs): """Plot error bars at each data point.""" for x_i, y_i, (low, high) in zip(x, central_data, ci.T): 
ax.plot([x_i, x_i], [low, high], color=color, solid_capstyle="round", **err_kws) def _plot_boot_traces(ax, x, boot_data, color, err_kws, **kwargs): """Plot 250 traces from bootstrap.""" err_kws.setdefault("alpha", 0.25) err_kws.setdefault("linewidth", 0.25) if "lw" in err_kws: err_kws["linewidth"] = err_kws.pop("lw") ax.plot(x, boot_data.T, color=color, label="_nolegend_", **err_kws) def _plot_unit_traces(ax, x, data, ci, color, err_kws, **kwargs): """Plot a trace for each observation in the original data.""" if isinstance(color, list): if "alpha" not in err_kws: err_kws["alpha"] = .5 for i, obs in enumerate(data): ax.plot(x, obs, color=color[i], label="_nolegend_", **err_kws) else: if "alpha" not in err_kws: err_kws["alpha"] = .2 ax.plot(x, data.T, color=color, label="_nolegend_", **err_kws) def _plot_unit_points(ax, x, data, color, err_kws, **kwargs): """Plot each original data point discretely.""" if isinstance(color, list): for i, obs in enumerate(data): ax.plot(x, obs, "o", color=color[i], alpha=0.8, markersize=4, label="_nolegend_", **err_kws) else: ax.plot(x, data.T, "o", color=color, alpha=0.5, markersize=4, label="_nolegend_", **err_kws) def _plot_boot_kde(ax, x, boot_data, color, **kwargs): """Plot the kernal density estimate of the bootstrap distribution.""" kwargs.pop("data") _ts_kde(ax, x, boot_data, color, **kwargs) def _plot_unit_kde(ax, x, data, color, **kwargs): """Plot the kernal density estimate over the sample.""" _ts_kde(ax, x, data, color, **kwargs) def _ts_kde(ax, x, data, color, **kwargs): """Upsample over time and plot a KDE of the bootstrap distribution.""" kde_data = [] y_min, y_max = data.min(), data.max() y_vals = np.linspace(y_min, y_max, 100) upsampler = interpolate.interp1d(x, data) data_upsample = upsampler(np.linspace(x.min(), x.max(), 100)) for pt_data in data_upsample.T: pt_kde = stats.kde.gaussian_kde(pt_data) kde_data.append(pt_kde(y_vals)) kde_data = np.transpose(kde_data) rgb = mpl.colors.ColorConverter().to_rgb(color) img = np.zeros((kde_data.shape[0], kde_data.shape[1], 4)) img[:, :, :3] = rgb kde_data /= kde_data.max(axis=0) kde_data[kde_data > 1] = 1 img[:, :, 3] = kde_data ax.imshow(img, interpolation="spline16", zorder=2, extent=(x.min(), x.max(), y_min, y_max), aspect="auto", origin="lower")
bsd-3-clause
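tsplot dispatches the uncertainty drawing to helpers such as _plot_ci_band, which shades between bootstrap percentile bounds with fill_between. A minimal sketch of that band on synthetic data, using plain numpy percentiles in place of the library's internal bootstrap helper:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.RandomState(22)
x = np.linspace(0, 15, 31)
data = np.sin(x) + rng.rand(10, 31) + rng.randn(10, 1)    # shape (unit, time)

# bootstrap the across-unit mean at each time point
boot = np.array([data[rng.randint(0, len(data), len(data))].mean(axis=0)
                 for _ in range(1000)])
low, high = np.percentile(boot, [16, 84], axis=0)         # roughly a 68% interval

fig, ax = plt.subplots()
ax.fill_between(x, low, high, alpha=0.2)                  # the translucent band
ax.plot(x, data.mean(axis=0))                             # the central trace
plt.show()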
jpautom/scikit-learn
examples/cluster/plot_dict_face_patches.py
337
2747
""" Online learning of a dictionary of parts of faces ================================================== This example uses a large dataset of faces to learn a set of 20 x 20 images patches that constitute faces. From the programming standpoint, it is interesting because it shows how to use the online API of the scikit-learn to process a very large dataset by chunks. The way we proceed is that we load an image at a time and extract randomly 50 patches from this image. Once we have accumulated 500 of these patches (using 10 images), we run the `partial_fit` method of the online KMeans object, MiniBatchKMeans. The verbose setting on the MiniBatchKMeans enables us to see that some clusters are reassigned during the successive calls to partial-fit. This is because the number of patches that they represent has become too low, and it is better to choose a random new cluster. """ print(__doc__) import time import matplotlib.pyplot as plt import numpy as np from sklearn import datasets from sklearn.cluster import MiniBatchKMeans from sklearn.feature_extraction.image import extract_patches_2d faces = datasets.fetch_olivetti_faces() ############################################################################### # Learn the dictionary of images print('Learning the dictionary... ') rng = np.random.RandomState(0) kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True) patch_size = (20, 20) buffer = [] index = 1 t0 = time.time() # The online learning part: cycle over the whole dataset 6 times index = 0 for _ in range(6): for img in faces.images: data = extract_patches_2d(img, patch_size, max_patches=50, random_state=rng) data = np.reshape(data, (len(data), -1)) buffer.append(data) index += 1 if index % 10 == 0: data = np.concatenate(buffer, axis=0) data -= np.mean(data, axis=0) data /= np.std(data, axis=0) kmeans.partial_fit(data) buffer = [] if index % 100 == 0: print('Partial fit of %4i out of %i' % (index, 6 * len(faces.images))) dt = time.time() - t0 print('done in %.2fs.' % dt) ############################################################################### # Plot the results plt.figure(figsize=(4.2, 4)) for i, patch in enumerate(kmeans.cluster_centers_): plt.subplot(9, 9, i + 1) plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray, interpolation='nearest') plt.xticks(()) plt.yticks(()) plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' % (dt, 8 * len(faces.images)), fontsize=16) plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23) plt.show()
bsd-3-clause
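The point of this example is the online API: accumulate a buffer of patches and call partial_fit on each chunk instead of fitting the whole dataset at once. The same pattern stripped down to random vectors (no image data needed), as a sketch:

import numpy as np
from sklearn.cluster import MiniBatchKMeans

rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=8, random_state=rng)

buffer = []
for i in range(1, 101):
    chunk = rng.randn(50, 16)        # stand-in for 50 patches extracted from one image
    buffer.append(chunk)
    if i % 10 == 0:                  # every 10 "images": fit on the accumulated buffer
        data = np.concatenate(buffer, axis=0)
        data -= np.mean(data, axis=0)
        data /= np.std(data, axis=0)
        kmeans.partial_fit(data)
        buffer = []

print(kmeans.cluster_centers_.shape)   # (8, 16)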
HugoMartin78/bim-classifier
BIM_Classifier_TextOnly.py
1
2963
# -*- coding: utf-8 -*- """ Created on Fri Jun 19 14:07:37 2015 @author: Hugo """ import numpy import re from scipy.sparse import hstack from scipy import sparse from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.linear_model import SGDClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier from sklearn.svm import SVC # Load the data: # Each row represents one object of the BIM model # By column: # 1. Object name # 2. Name of its material(s) # 3. Name of the texture(s) present in the materials # 4. Polygon count # 5. Object volume # 6. Label # 7. Model number data = numpy.load('Data.npy') # Translate the string labels into numeric values y = data[:,5] y[y == 'S'] = 0 y[y == 'F'] = 1 y[y == 'I'] = 2 y[y == 'P'] = 3 y[y == 'M'] = 4 # retrieve the model numbers. models = data[:,6].astype(numpy.float) # data keeps only the name variable data = numpy.delete(data,[1,2,3,4,5,6],1) # For the first three (TXT) columns, keep only standard characters. for i in range(0,len(data)): data[i] = re.sub('[^AZERTYUIOPQSDFGHJKLMWXCVBNazertyuiopqsdfghjklmwxcvbn]', '', data[i]).lower() # Declaration of the pre-processing / classifiers ngram_vectorizer = CountVectorizer(analyzer='char_wb', ngram_range=(3, 3), min_df=0) transformer = TfidfTransformer() # Each set of parameters represents the best performance of the ensemble classifiers = [ SGDClassifier(alpha = 1e-4), DecisionTreeClassifier(max_depth=None), SVC(gamma=2, C=1), RandomForestClassifier(n_estimators=60), AdaBoostClassifier()] Result = numpy.empty((0,3), float) # Loop over the classifiers for clf in classifiers: Scores = numpy.array([]) # Leave-one-out loop over the models for i in range(0,20): # Define the training and test samples Sample = data[numpy.where(models != i)] L_Sample = y[numpy.where(models != i)] Test = data[numpy.where(models == i)] L_Test = y[numpy.where(models == i)] # Text processing: tri-gram + tfidf for each text variable ngram = ngram_vectorizer.fit_transform(Sample) T_ngram = ngram_vectorizer.transform(Test) X_train = transformer.fit_transform(ngram) X_test = transformer.transform(T_ngram) # Classify clf.fit(X_train, L_Sample.astype(numpy.int32)) Scores = numpy.append(Scores,clf.score(X_test,L_Test.astype(numpy.int32))) Result = numpy.append(Result,[numpy.mean(Scores),numpy.median(Scores),numpy.std(Scores)]) numpy.save('Result.npy',Result)
gpl-3.0
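The text pipeline in this classifier is character tri-grams followed by TF-IDF, fed to several classifiers in a leave-one-model-out loop. A tiny end-to-end sketch of that text path, on invented object names and labels (the real script reads them from Data.npy):

from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.linear_model import SGDClassifier

names = ["wallexterior", "windowglazed", "doorwood", "wallinterior", "windowfixed", "doorsteel"]
labels = [0, 1, 2, 0, 1, 2]

ngram_vectorizer = CountVectorizer(analyzer='char_wb', ngram_range=(3, 3), min_df=0)
transformer = TfidfTransformer()

counts = ngram_vectorizer.fit_transform(names)   # character tri-gram counts
X = transformer.fit_transform(counts)            # tf-idf weighting

clf = SGDClassifier(alpha=1e-4).fit(X, labels)
query = transformer.transform(ngram_vectorizer.transform(["windowtilted"]))
print(clf.predict(query))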
marcsans/cnn-physics-perception
phy/lib/python2.7/site-packages/matplotlib/backends/backend_qt4.py
8
2867
from __future__ import (absolute_import, division, print_function, unicode_literals) from matplotlib.externals import six from matplotlib.externals.six import unichr import os import re import signal import sys import matplotlib from matplotlib.cbook import is_string_like from matplotlib.backend_bases import FigureManagerBase from matplotlib.backend_bases import FigureCanvasBase from matplotlib.backend_bases import NavigationToolbar2 from matplotlib.backend_bases import cursors from matplotlib.backend_bases import TimerBase from matplotlib.backend_bases import ShowBase from matplotlib._pylab_helpers import Gcf from matplotlib.figure import Figure from matplotlib.widgets import SubplotTool try: import matplotlib.backends.qt_editor.figureoptions as figureoptions except ImportError: figureoptions = None from .qt_compat import QtCore, QtWidgets, _getSaveFileName, __version__ from matplotlib.backends.qt_editor.formsubplottool import UiSubplotTool from .backend_qt5 import (backend_version, SPECIAL_KEYS, SUPER, ALT, CTRL, SHIFT, MODIFIER_KEYS, fn_name, cursord, draw_if_interactive, _create_qApp, show, TimerQT, MainWindow, FigureManagerQT, NavigationToolbar2QT, SubplotToolQt, error_msg_qt, exception_handler) from .backend_qt5 import FigureCanvasQT as FigureCanvasQT5 DEBUG = False def new_figure_manager(num, *args, **kwargs): """ Create a new figure manager instance """ thisFig = Figure(*args, **kwargs) return new_figure_manager_given_figure(num, thisFig) def new_figure_manager_given_figure(num, figure): """ Create a new figure manager instance for the given figure. """ canvas = FigureCanvasQT(figure) manager = FigureManagerQT(canvas, num) return manager class FigureCanvasQT(FigureCanvasQT5): def __init__(self, figure): if DEBUG: print('FigureCanvasQt qt4: ', figure) _create_qApp() # Note different super-calling style to backend_qt5 QtWidgets.QWidget.__init__(self) FigureCanvasBase.__init__(self, figure) self.figure = figure self.setMouseTracking(True) self._idle = True w, h = self.get_width_height() self.resize(w, h) def wheelEvent(self, event): x = event.x() # flipy so y=0 is bottom of canvas y = self.figure.bbox.height - event.y() # from QWheelEvent::delta doc steps = event.delta()/120 if (event.orientation() == QtCore.Qt.Vertical): FigureCanvasBase.scroll_event(self, x, y, steps) if DEBUG: print('scroll event: delta = %i, ' 'steps = %i ' % (event.delta(), steps)) FigureCanvas = FigureCanvasQT FigureManager = FigureManagerQT
mit
ltiao/scikit-learn
examples/linear_model/plot_sgd_separating_hyperplane.py
260
1219
""" ========================================= SGD: Maximum margin separating hyperplane ========================================= Plot the maximum margin separating hyperplane within a two-class separable dataset using a linear Support Vector Machines classifier trained using SGD. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import SGDClassifier from sklearn.datasets.samples_generator import make_blobs # we create 50 separable points X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60) # fit the model clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True) clf.fit(X, Y) # plot the line, the points, and the nearest vectors to the plane xx = np.linspace(-1, 5, 10) yy = np.linspace(-1, 5, 10) X1, X2 = np.meshgrid(xx, yy) Z = np.empty(X1.shape) for (i, j), val in np.ndenumerate(X1): x1 = val x2 = X2[i, j] p = clf.decision_function([x1, x2]) Z[i, j] = p[0] levels = [-1.0, 0.0, 1.0] linestyles = ['dashed', 'solid', 'dashed'] colors = 'k' plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles) plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired) plt.axis('tight') plt.show()
bsd-3-clause
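One portability note on the mesh evaluation in this example: the loop over np.ndenumerate(X1) calls decision_function once per grid point with a bare 1-D sample, which recent scikit-learn releases reject (they expect a 2-D array of shape (n_samples, n_features)). A hedged, equivalent sketch that evaluates the whole grid in a single vectorized call:

import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn.datasets import make_blobs

X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
clf = SGDClassifier(loss="hinge", alpha=0.01, fit_intercept=True).fit(X, Y)

xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)

# stack the grid into an (n_points, 2) array and evaluate in one call
grid = np.c_[X1.ravel(), X2.ravel()]
Z = clf.decision_function(grid).reshape(X1.shape)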
themrmax/scikit-learn
examples/neighbors/plot_regression.py
349
1402
""" ============================ Nearest Neighbors regression ============================ Demonstrate the resolution of a regression problem using a k-Nearest Neighbor and the interpolation of the target using both barycenter and constant weights. """ print(__doc__) # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Fabian Pedregosa <fabian.pedregosa@inria.fr> # # License: BSD 3 clause (C) INRIA ############################################################################### # Generate sample data import numpy as np import matplotlib.pyplot as plt from sklearn import neighbors np.random.seed(0) X = np.sort(5 * np.random.rand(40, 1), axis=0) T = np.linspace(0, 5, 500)[:, np.newaxis] y = np.sin(X).ravel() # Add noise to targets y[::5] += 1 * (0.5 - np.random.rand(8)) ############################################################################### # Fit regression model n_neighbors = 5 for i, weights in enumerate(['uniform', 'distance']): knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights) y_ = knn.fit(X, y).predict(T) plt.subplot(2, 1, i + 1) plt.scatter(X, y, c='k', label='data') plt.plot(T, y_, c='g', label='prediction') plt.axis('tight') plt.legend() plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors, weights)) plt.show()
bsd-3-clause
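The two weights settings in this example differ only in how the k nearest targets are averaged: 'uniform' takes a plain mean, 'distance' an average weighted by inverse distance. A small sketch of that weighting done by hand for a single query point, on the same kind of synthetic data:

import numpy as np
from sklearn.neighbors import NearestNeighbors

rng = np.random.RandomState(0)
X = np.sort(5 * rng.rand(40, 1), axis=0)
y = np.sin(X).ravel()

k = 5
query = np.array([[2.5]])
dist, ind = NearestNeighbors(n_neighbors=k).fit(X).kneighbors(query)

uniform_pred = y[ind[0]].mean()                   # weights='uniform'
w = 1.0 / dist[0]                                 # weights='distance'
distance_pred = np.sum(w * y[ind[0]]) / np.sum(w)

print(uniform_pred, distance_pred)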
juggernautone/trading-with-python
lib/functions.py
76
11627
# -*- coding: utf-8 -*- """ twp support functions @author: Jev Kuznetsov Licence: GPL v2 """ from scipy import polyfit, polyval import datetime as dt #from datetime import datetime, date from pandas import DataFrame, Index, Series import csv import matplotlib.pyplot as plt import numpy as np import pandas as pd def nans(shape, dtype=float): ''' create a nan numpy array ''' a = np.empty(shape, dtype) a.fill(np.nan) return a def plotCorrelationMatrix(price, thresh = None): ''' plot a correlation matrix as a heatmap image inputs: price: prices DataFrame thresh: correlation threshold to use for checking, default None ''' symbols = price.columns.tolist() R = price.pct_change() correlationMatrix = R.corr() if thresh is not None: correlationMatrix = correlationMatrix > thresh plt.imshow(abs(correlationMatrix.values),interpolation='none') plt.xticks(range(len(symbols)),symbols) plt.yticks(range(len(symbols)),symbols) plt.colorbar() plt.title('Correlation matrix') return correlationMatrix def pca(A): """ performs principal components analysis (PCA) on the n-by-p DataFrame A Rows of A correspond to observations, columns to variables. Returns : coeff : principal components, column-wise transform: A in principal component space latent : eigenvalues """ # computing eigenvalues and eigenvectors of covariance matrix M = (A - A.mean()).T # subtract the mean (along columns) [latent,coeff] = np.linalg.eig(np.cov(M)) # attention:not always sorted idx = np.argsort(latent) # sort eigenvalues idx = idx[::-1] # in ascending order coeff = coeff[:,idx] latent = latent[idx] score = np.dot(coeff.T,A.T) # projection of the data in the new space transform = DataFrame(index = A.index, data = score.T) return coeff,transform,latent def pos2pnl(price,position , ibTransactionCost=False ): """ calculate pnl based on price and position Inputs: --------- price: series or dataframe of price position: number of shares at each time. Column names must be same as in price ibTransactionCost: use bundled Interactive Brokers transaction cost of 0.005$/share Returns a portfolio DataFrame """ delta=position.diff() port = DataFrame(index=price.index) if isinstance(price,Series): # no need to sum along 1 for series port['cash'] = (-delta*price).cumsum() port['stock'] = (position*price) else: # dealing with DataFrame here port['cash'] = (-delta*price).sum(axis=1).cumsum() port['stock'] = (position*price).sum(axis=1) if ibTransactionCost: tc = -0.005*position.diff().abs() # basic transaction cost tc[(tc>-1) & (tc<0)] = -1 # everything under 1$ will be ceil'd to 1$ if isinstance(price,DataFrame): tc = tc.sum(axis=1) port['tc'] = tc.cumsum() else: port['tc'] = 0. port['total'] = port['stock']+port['cash']+port['tc'] return port def tradeBracket(price,entryBar,maxTradeLength,bracket): ''' trade a symmetrical bracket on price series, return price delta and exit bar # Input ------ price : series of price values entryBar: entry bar number maxTradeLength : max trade duration in bars bracket : allowed price deviation ''' lastBar = min(entryBar+maxTradeLength,len(price)-1) p = price[entryBar:lastBar]-price[entryBar] idxOutOfBound = np.nonzero(abs(p)>bracket) # find indices where price comes out of bracket if idxOutOfBound[0].any(): # found match priceDelta = p[idxOutOfBound[0][0]] exitBar = idxOutOfBound[0][0]+entryBar else: # all in bracket, exiting based on time priceDelta = p[-1] exitBar = lastBar return priceDelta, exitBar def estimateBeta(priceY,priceX,algo = 'standard'): ''' estimate stock Y vs stock X beta using iterative linear regression. 
Outliers outside 3 sigma boundary are filtered out Parameters -------- priceX : price series of x (usually market) priceY : price series of y (estimate beta of this price) Returns -------- beta : stockY beta relative to stock X ''' X = DataFrame({'x':priceX,'y':priceY}) if algo=='returns': ret = (X/X.shift(1)-1).dropna().values #print len(ret) x = ret[:,0] y = ret[:,1] # filter high values low = np.percentile(x,20) high = np.percentile(x,80) iValid = (x>low) & (x<high) x = x[iValid] y = y[iValid] iteration = 1 nrOutliers = 1 while iteration < 10 and nrOutliers > 0 : (a,b) = polyfit(x,y,1) yf = polyval([a,b],x) #plot(x,y,'x',x,yf,'r-') err = yf-y idxOutlier = abs(err) > 3*np.std(err) nrOutliers =sum(idxOutlier) beta = a #print 'Iteration: %i beta: %.2f outliers: %i' % (iteration,beta, nrOutliers) x = x[~idxOutlier] y = y[~idxOutlier] iteration += 1 elif algo=='log': x = np.log(X['x']) y = np.log(X['y']) (a,b) = polyfit(x,y,1) beta = a elif algo=='standard': ret =np.log(X).diff().dropna() beta = ret['x'].cov(ret['y'])/ret['x'].var() else: raise TypeError("unknown algorithm type, use 'standard', 'log' or 'returns'") return beta def estimateVolatility(ohlc, N=10, algo='YangZhang'): """ Volatility estimation Possible algorithms: ['YangZhang', 'CC'] """ cc = np.log(ohlc.close/ohlc.close.shift(1)) if algo == 'YangZhang': # Yang-zhang volatility ho = np.log(ohlc.high/ohlc.open) lo = np.log(ohlc.low/ohlc.open) co = np.log(ohlc.close/ohlc.open) oc = np.log(ohlc.open/ohlc.close.shift(1)) oc_sq = oc**2 cc_sq = cc**2 rs = ho*(ho-co)+lo*(lo-co) close_vol = pd.rolling_sum(cc_sq, window=N) * (1.0 / (N - 1.0)) open_vol = pd.rolling_sum(oc_sq, window=N) * (1.0 / (N - 1.0)) window_rs = pd.rolling_sum(rs, window=N) * (1.0 / (N - 1.0)) result = (open_vol + 0.164333 * close_vol + 0.835667 * window_rs).apply(np.sqrt) * np.sqrt(252) result[:N-1] = np.nan elif algo == 'CC': # standard close-close estimator result = np.sqrt(252)*np.sqrt(((pd.rolling_sum(cc**2,N))/N)) else: raise ValueError('Unknown algo type.') return result*100 def rank(current,past): ''' calculate a relative rank 0..1 for a value against series ''' return (current>past).sum()/float(past.count()) def returns(df): return (df/df.shift(1)-1) def logReturns(df): t = np.log(df) return t-t.shift(1) def dateTimeToDate(idx): ''' convert datetime index to date ''' dates = [] for dtm in idx: dates.append(dtm.date()) return dates def readBiggerScreener(fName): ''' import data from Bigger Capital screener ''' with open(fName,'rb') as f: reader = csv.reader(f) rows = [row for row in reader] header = rows[0] data = [[] for i in range(len(header))] for row in rows[1:]: for i,elm in enumerate(row): try: data[i].append(float(elm)) except Exception: data[i].append(str(elm)) return DataFrame(dict(zip(header,data)),index=Index(range(len(data[0]))))[header] def sharpe(pnl): return np.sqrt(250)*pnl.mean()/pnl.std() def drawdown(s): """ calculate max drawdown and duration Input: s, price or cumulative pnl curve $ Returns: drawdown : vector of drawdwon values duration : vector of drawdown duration """ # convert to array if got pandas series, 10x speedup if isinstance(s,pd.Series): idx = s.index s = s.values returnSeries = True else: returnSeries = False if s.min() < 0: # offset if signal minimum is less than zero s = s-s.min() highwatermark = np.zeros(len(s)) drawdown = np.zeros(len(s)) drawdowndur = np.zeros(len(s)) for t in range(1,len(s)): highwatermark[t] = max(highwatermark[t-1], s[t]) drawdown[t] = (highwatermark[t]-s[t]) drawdowndur[t]= (0 if drawdown[t] == 0 else 
drawdowndur[t-1]+1) if returnSeries: return pd.Series(index=idx,data=drawdown), pd.Series(index=idx,data=drawdowndur) else: return drawdown , drawdowndur def profitRatio(pnl): ''' calculate profit ratio as sum(pnl)/drawdown Input: pnl - daily pnl, Series or DataFrame ''' def processVector(pnl): # process a single column s = pnl.fillna(0) dd = drawdown(s)[0] p = s.sum()/dd.max() return p if isinstance(pnl,Series): return processVector(pnl) elif isinstance(pnl,DataFrame): p = Series(index = pnl.columns) for col in pnl.columns: p[col] = processVector(pnl[col]) return p else: raise TypeError("Input must be DataFrame or Series, not "+str(type(pnl))) def candlestick(df,width=0.5, colorup='b', colordown='r'): ''' plot a candlestick chart of a dataframe ''' O = df['open'].values H = df['high'].values L = df['low'].values C = df['close'].values fig = plt.gcf() ax = plt.axes() #ax.hold(True) X = df.index #plot high and low ax.bar(X,height=H-L,bottom=L,width=0.1,color='k') idxUp = C>O ax.bar(X[idxUp],height=(C-O)[idxUp],bottom=O[idxUp],width=width,color=colorup) idxDown = C<=O ax.bar(X[idxDown],height=(O-C)[idxDown],bottom=C[idxDown],width=width,color=colordown) try: fig.autofmt_xdate() except Exception: # pragma: no cover pass ax.grid(True) #ax.bar(x,height=H-L,bottom=L,width=0.01,color='k') def datetime2matlab(t): ''' convert datetime timestamp to matlab numeric timestamp ''' mdn = t + dt.timedelta(days = 366) frac = (t-dt.datetime(t.year,t.month,t.day,0,0,0)).seconds / (24.0 * 60.0 * 60.0) return mdn.toordinal() + frac def getDataSources(fName = None): ''' return data sources directories for this machine. directories are defined in datasources.ini or provided filepath''' import socket from ConfigParser import ConfigParser pcName = socket.gethostname() p = ConfigParser() p.optionxform = str if fName is None: fName = 'datasources.ini' p.read(fName) if pcName not in p.sections(): raise NameError('Host name section %s not found in file %s' %(pcName,fName)) dataSources = {} for option in p.options(pcName): dataSources[option] = p.get(pcName,option) return dataSources if __name__ == '__main__': df = DataFrame({'open':[1,2,3],'high':[5,6,7],'low':[-2,-1,0],'close':[2,1,4]}) plt.clf() candlestick(df)
bsd-3-clause
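The drawdown helper above tracks the running high-water mark sample by sample in a Python loop. For the drawdown depths themselves, an essentially equivalent vectorized formulation with np.maximum.accumulate makes a convenient cross-check (a sketch on a fake pnl curve, not part of the library):

import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
pnl = pd.Series(rng.normal(size=250)).cumsum()    # a fake cumulative pnl curve

s = pnl.values
if s.min() < 0:                                   # same offset convention as drawdown()
    s = s - s.min()

highwatermark = np.maximum.accumulate(s)          # running peak so far
dd = highwatermark - s                            # drawdown depth at each bar
print(dd.max())                                   # maximum drawdown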
macks22/scikit-learn
sklearn/linear_model/tests/test_theil_sen.py
234
9928
""" Testing for Theil-Sen module (sklearn.linear_model.theil_sen) """ # Author: Florian Wilhelm <florian.wilhelm@gmail.com> # License: BSD 3 clause from __future__ import division, print_function, absolute_import import os import sys from contextlib import contextmanager import numpy as np from numpy.testing import assert_array_equal, assert_array_less from numpy.testing import assert_array_almost_equal, assert_warns from scipy.linalg import norm from scipy.optimize import fmin_bfgs from nose.tools import raises, assert_almost_equal from sklearn.utils import ConvergenceWarning from sklearn.linear_model import LinearRegression, TheilSenRegressor from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point from sklearn.linear_model.theil_sen import _modified_weiszfeld_step from sklearn.utils.testing import assert_greater, assert_less @contextmanager def no_stdout_stderr(): old_stdout = sys.stdout old_stderr = sys.stderr sys.stdout = open(os.devnull, 'w') sys.stderr = open(os.devnull, 'w') yield sys.stdout.flush() sys.stderr.flush() sys.stdout = old_stdout sys.stderr = old_stderr def gen_toy_problem_1d(intercept=True): random_state = np.random.RandomState(0) # Linear model y = 3*x + N(2, 0.1**2) w = 3. if intercept: c = 2. n_samples = 50 else: c = 0.1 n_samples = 100 x = random_state.normal(size=n_samples) noise = 0.1 * random_state.normal(size=n_samples) y = w * x + c + noise # Add some outliers if intercept: x[42], y[42] = (-2, 4) x[43], y[43] = (-2.5, 8) x[33], y[33] = (2.5, 1) x[49], y[49] = (2.1, 2) else: x[42], y[42] = (-2, 4) x[43], y[43] = (-2.5, 8) x[53], y[53] = (2.5, 1) x[60], y[60] = (2.1, 2) x[72], y[72] = (1.8, -7) return x[:, np.newaxis], y, w, c def gen_toy_problem_2d(): random_state = np.random.RandomState(0) n_samples = 100 # Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2) X = random_state.normal(size=(n_samples, 2)) w = np.array([5., 10.]) c = 1. noise = 0.1 * random_state.normal(size=n_samples) y = np.dot(X, w) + c + noise # Add some outliers n_outliers = n_samples // 10 ix = random_state.randint(0, n_samples, size=n_outliers) y[ix] = 50 * random_state.normal(size=n_outliers) return X, y, w, c def gen_toy_problem_4d(): random_state = np.random.RandomState(0) n_samples = 10000 # Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2) X = random_state.normal(size=(n_samples, 4)) w = np.array([5., 10., 42., 7.]) c = 1. noise = 0.1 * random_state.normal(size=n_samples) y = np.dot(X, w) + c + noise # Add some outliers n_outliers = n_samples // 10 ix = random_state.randint(0, n_samples, size=n_outliers) y[ix] = 50 * random_state.normal(size=n_outliers) return X, y, w, c def test_modweiszfeld_step_1d(): X = np.array([1., 2., 3.]).reshape(3, 1) # Check startvalue is element of X and solution median = 2. new_y = _modified_weiszfeld_step(X, median) assert_array_almost_equal(new_y, median) # Check startvalue is not the solution y = 2.5 new_y = _modified_weiszfeld_step(X, y) assert_array_less(median, new_y) assert_array_less(new_y, y) # Check startvalue is not the solution but element of X y = 3. 
new_y = _modified_weiszfeld_step(X, y) assert_array_less(median, new_y) assert_array_less(new_y, y) # Check that a single vector is identity X = np.array([1., 2., 3.]).reshape(1, 3) y = X[0, ] new_y = _modified_weiszfeld_step(X, y) assert_array_equal(y, new_y) def test_modweiszfeld_step_2d(): X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2) y = np.array([0.5, 0.5]) # Check first two iterations new_y = _modified_weiszfeld_step(X, y) assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3])) new_y = _modified_weiszfeld_step(X, new_y) assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592])) # Check fix point y = np.array([0.21132505, 0.78867497]) new_y = _modified_weiszfeld_step(X, y) assert_array_almost_equal(new_y, y) def test_spatial_median_1d(): X = np.array([1., 2., 3.]).reshape(3, 1) true_median = 2. _, median = _spatial_median(X) assert_array_almost_equal(median, true_median) # Test larger problem and for exact solution in 1d case random_state = np.random.RandomState(0) X = random_state.randint(100, size=(1000, 1)) true_median = np.median(X.ravel()) _, median = _spatial_median(X) assert_array_equal(median, true_median) def test_spatial_median_2d(): X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2) _, median = _spatial_median(X, max_iter=100, tol=1.e-6) def cost_func(y): dists = np.array([norm(x - y) for x in X]) return np.sum(dists) # Check if median is solution of the Fermat-Weber location problem fermat_weber = fmin_bfgs(cost_func, median, disp=False) assert_array_almost_equal(median, fermat_weber) # Check when maximum iteration is exceeded a warning is emitted assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.) def test_theil_sen_1d(): X, y, w, c = gen_toy_problem_1d() # Check that Least Squares fails lstq = LinearRegression().fit(X, y) assert_greater(np.abs(lstq.coef_ - w), 0.9) # Check that Theil-Sen works theil_sen = TheilSenRegressor(random_state=0).fit(X, y) assert_array_almost_equal(theil_sen.coef_, w, 1) assert_array_almost_equal(theil_sen.intercept_, c, 1) def test_theil_sen_1d_no_intercept(): X, y, w, c = gen_toy_problem_1d(intercept=False) # Check that Least Squares fails lstq = LinearRegression(fit_intercept=False).fit(X, y) assert_greater(np.abs(lstq.coef_ - w - c), 0.5) # Check that Theil-Sen works theil_sen = TheilSenRegressor(fit_intercept=False, random_state=0).fit(X, y) assert_array_almost_equal(theil_sen.coef_, w + c, 1) assert_almost_equal(theil_sen.intercept_, 0.) 
def test_theil_sen_2d(): X, y, w, c = gen_toy_problem_2d() # Check that Least Squares fails lstq = LinearRegression().fit(X, y) assert_greater(norm(lstq.coef_ - w), 1.0) # Check that Theil-Sen works theil_sen = TheilSenRegressor(max_subpopulation=1e3, random_state=0).fit(X, y) assert_array_almost_equal(theil_sen.coef_, w, 1) assert_array_almost_equal(theil_sen.intercept_, c, 1) def test_calc_breakdown_point(): bp = _breakdown_point(1e10, 2) assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6) @raises(ValueError) def test_checksubparams_negative_subpopulation(): X, y, w, c = gen_toy_problem_1d() TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y) @raises(ValueError) def test_checksubparams_too_few_subsamples(): X, y, w, c = gen_toy_problem_1d() TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y) @raises(ValueError) def test_checksubparams_too_many_subsamples(): X, y, w, c = gen_toy_problem_1d() TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y) @raises(ValueError) def test_checksubparams_n_subsamples_if_less_samples_than_features(): random_state = np.random.RandomState(0) n_samples, n_features = 10, 20 X = random_state.normal(size=(n_samples, n_features)) y = random_state.normal(size=n_samples) TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y) def test_subpopulation(): X, y, w, c = gen_toy_problem_4d() theil_sen = TheilSenRegressor(max_subpopulation=250, random_state=0).fit(X, y) assert_array_almost_equal(theil_sen.coef_, w, 1) assert_array_almost_equal(theil_sen.intercept_, c, 1) def test_subsamples(): X, y, w, c = gen_toy_problem_4d() theil_sen = TheilSenRegressor(n_subsamples=X.shape[0], random_state=0).fit(X, y) lstq = LinearRegression().fit(X, y) # Check for exact the same results as Least Squares assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9) def test_verbosity(): X, y, w, c = gen_toy_problem_1d() # Check that Theil-Sen can be verbose with no_stdout_stderr(): TheilSenRegressor(verbose=True, random_state=0).fit(X, y) TheilSenRegressor(verbose=True, max_subpopulation=10, random_state=0).fit(X, y) def test_theil_sen_parallel(): X, y, w, c = gen_toy_problem_2d() # Check that Least Squares fails lstq = LinearRegression().fit(X, y) assert_greater(norm(lstq.coef_ - w), 1.0) # Check that Theil-Sen works theil_sen = TheilSenRegressor(n_jobs=-1, random_state=0, max_subpopulation=2e3).fit(X, y) assert_array_almost_equal(theil_sen.coef_, w, 1) assert_array_almost_equal(theil_sen.intercept_, c, 1) def test_less_samples_than_features(): random_state = np.random.RandomState(0) n_samples, n_features = 10, 20 X = random_state.normal(size=(n_samples, n_features)) y = random_state.normal(size=n_samples) # Check that Theil-Sen falls back to Least Squares if fit_intercept=False theil_sen = TheilSenRegressor(fit_intercept=False, random_state=0).fit(X, y) lstq = LinearRegression(fit_intercept=False).fit(X, y) assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12) # Check fit_intercept=True case. This will not be equal to the Least # Squares solution since the intercept is calculated differently. theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y) y_pred = theil_sen.predict(X) assert_array_almost_equal(y_pred, y, 12)
bsd-3-clause
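The pattern repeated through these tests is: build a toy linear problem, inject gross outliers, then check that ordinary least squares is pulled away from the true coefficients while Theil-Sen is not. The same comparison as a minimal stand-alone sketch:

import numpy as np
from sklearn.linear_model import LinearRegression, TheilSenRegressor

rng = np.random.RandomState(0)
n = 100
x = rng.normal(size=n)
y = 3.0 * x + 2.0 + 0.1 * rng.normal(size=n)   # true model: y = 3x + 2
y[:10] = 50.0                                  # a block of gross outliers

X = x[:, np.newaxis]
lstq = LinearRegression().fit(X, y)
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)

print(lstq.coef_, lstq.intercept_)             # intercept dragged far above 2 by the outliers
print(theil_sen.coef_, theil_sen.intercept_)   # stays close to the true (3, 2)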
architecture-building-systems/CityEnergyAnalyst
cea/analysis/multicriteria/main.py
2
5584
""" Multi criteria decision analysis """ import pandas as pd import cea.config import cea.inputlocator __author__ = "Jimeno A. Fonseca" __copyright__ = "Copyright 2018, Architecture and Building Systems - ETH Zurich" __credits__ = ["Jimeno A. Fonseca", "Shanshan Hsieh", "Sreepathi Bhargava Krishna"] __license__ = "MIT" __version__ = "0.1" __maintainer__ = "Daren Thomas" __email__ = "cea@arch.ethz.ch" __status__ = "Production" def multi_criteria_main(locator, generation, weight_annualized_capital_costs, weight_total_capital_costs, weight_annual_operation_costs, weight_annual_emissions, ): # local variables compiled_data_df = pd.read_csv(locator.get_optimization_generation_total_performance_pareto(generation)) # normalize data compiled_data_df = normalize_compiled_data(compiled_data_df) # rank data compiled_data_df = rank_normalized_data(compiled_data_df, weight_annualized_capital_costs, weight_total_capital_costs, weight_annual_operation_costs, weight_annual_emissions) compiled_data_df.to_csv(locator.get_multi_criteria_analysis(generation), index=False) return def rank_normalized_data(compiled_data_df, weight_annualized_capital_costs, weight_total_capital_costs, weight_annual_operation_costs, weight_annual_emissions): compiled_data_df['TAC_rank'] = compiled_data_df['normalized_TAC'].rank(ascending=True) compiled_data_df['GHG_rank'] = compiled_data_df['normalized_emissions'].rank(ascending=True) ## user defined mcda compiled_data_df['user_MCDA'] = (compiled_data_df['normalized_Capex_total'] * weight_total_capital_costs + compiled_data_df['normalized_Opex'] * weight_annual_operation_costs + compiled_data_df['normalized_TAC'] * weight_annualized_capital_costs + compiled_data_df['normalized_emissions'] * weight_annual_emissions) compiled_data_df['user_MCDA_rank'] = compiled_data_df['user_MCDA'].rank(ascending=True) return compiled_data_df def normalize_compiled_data(compiled_data_df): # TAC if (max(compiled_data_df['TAC_sys_USD']) - min(compiled_data_df['TAC_sys_USD'])) > 1E-8: normalized_TAC = (compiled_data_df['TAC_sys_USD'] - min(compiled_data_df['TAC_sys_USD'])) / ( max(compiled_data_df['TAC_sys_USD']) - min(compiled_data_df['TAC_sys_USD'])) else: normalized_TAC = [1] * len(compiled_data_df['TAC_sys_USD']) # emission if (max(compiled_data_df['GHG_sys_tonCO2']) - min(compiled_data_df['GHG_sys_tonCO2'])) > 1E-8: normalized_emissions = (compiled_data_df['GHG_sys_tonCO2'] - min( compiled_data_df['GHG_sys_tonCO2'])) / ( max(compiled_data_df['GHG_sys_tonCO2']) - min( compiled_data_df['GHG_sys_tonCO2'])) else: normalized_emissions = [1] * len(compiled_data_df['GHG_sys_tonCO2']) # capex if (max(compiled_data_df['Capex_total_sys_USD']) - min(compiled_data_df['Capex_total_sys_USD'])) > 1E-8: normalized_Capex_total = (compiled_data_df['Capex_total_sys_USD'] - min( compiled_data_df['Capex_total_sys_USD'])) / ( max(compiled_data_df['Capex_total_sys_USD']) - min( compiled_data_df['Capex_total_sys_USD'])) else: normalized_Capex_total = [1] * len(compiled_data_df['Capex_total_sys_USD']) # opex if (max(compiled_data_df['Opex_a_sys_USD']) - min(compiled_data_df['Opex_a_sys_USD'])) > 1E-8: normalized_Opex = (compiled_data_df['Opex_a_sys_USD'] - min(compiled_data_df['Opex_a_sys_USD'])) / ( max(compiled_data_df['Opex_a_sys_USD']) - min(compiled_data_df['Opex_a_sys_USD'])) else: normalized_Opex = [1] * len(compiled_data_df['Opex_a_sys_USD']) compiled_data_df = compiled_data_df.assign(normalized_TAC=normalized_TAC) compiled_data_df = compiled_data_df.assign(normalized_emissions=normalized_emissions) 
compiled_data_df = compiled_data_df.assign(normalized_Capex_total=normalized_Capex_total) compiled_data_df = compiled_data_df.assign(normalized_Opex=normalized_Opex) return compiled_data_df def main(config): locator = cea.inputlocator.InputLocator(config.scenario) print("Running multicriteria with scenario = %s" % config.scenario) print("Running multicriteria for generation = %s" % config.multi_criteria.generation) weight_annualized_capital_costs = config.multi_criteria.annualized_capital_costs weight_total_capital_costs = config.multi_criteria.total_capital_costs weight_annual_operation_costs = config.multi_criteria.annual_operation_costs weight_annual_emissions = config.multi_criteria.annual_emissions generation = config.multi_criteria.generation multi_criteria_main(locator, generation, weight_annualized_capital_costs, weight_total_capital_costs, weight_annual_operation_costs, weight_annual_emissions) if __name__ == '__main__': main(cea.config.Configuration())
mit
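The multi-criteria step itself is plain arithmetic: min-max normalize each objective column, combine the normalized columns with user weights, and rank the weighted sum. A compact sketch of that arithmetic on a toy table (column names follow the script; the numbers and equal weights are invented):

import pandas as pd

df = pd.DataFrame({
    'TAC_sys_USD':         [1.0e6, 1.2e6, 0.9e6],
    'GHG_sys_tonCO2':      [500.0, 450.0, 620.0],
    'Capex_total_sys_USD': [4.0e6, 5.0e6, 3.5e6],
    'Opex_a_sys_USD':      [2.0e5, 1.8e5, 2.4e5],
})

def minmax(col):
    # same min-max scaling and near-constant-column guard as normalize_compiled_data
    span = col.max() - col.min()
    return (col - col.min()) / span if span > 1e-8 else pd.Series(1.0, index=col.index)

weights = {'TAC_sys_USD': 0.25, 'GHG_sys_tonCO2': 0.25,
           'Capex_total_sys_USD': 0.25, 'Opex_a_sys_USD': 0.25}

df['user_MCDA'] = sum(minmax(df[c]) * w for c, w in weights.items())
df['user_MCDA_rank'] = df['user_MCDA'].rank(ascending=True)   # rank 1 = best trade-off
print(df[['user_MCDA', 'user_MCDA_rank']])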
stuliveshere/SeismicProcessing2015
prac2_student/toolbox/processing.py
2
16997
import numpy as np from toolbox import io import toolbox import pylab from scipy.signal import butter, lfilter, convolve2d from scipy.interpolate import RectBivariateSpline as RBS from scipy.interpolate import interp2d from scipy.interpolate import interp1d from scipy.interpolate import griddata import matplotlib.patches as patches import numpy.ma as ma import sys import warnings warnings.filterwarnings("ignore") class DraggablePoint: lock = None #only one can be animated at a time def __init__(self, point): self.point = point self.press = None self.background = None def connect(self): 'connect to all the events we need' self.cidpress = self.point.figure.canvas.mpl_connect('button_press_event', self.on_press) self.cidrelease = self.point.figure.canvas.mpl_connect('button_release_event', self.on_release) self.cidmotion = self.point.figure.canvas.mpl_connect('motion_notify_event', self.on_motion) def on_press(self, event): if event.button == 3: if event.inaxes != self.point.axes: return if DraggablePoint.lock is not None: return contains, attrd = self.point.contains(event) if not contains: return self.press = (self.point.center), event.xdata, event.ydata DraggablePoint.lock = self # draw everything but the selected rectangle and store the pixel buffer canvas = self.point.figure.canvas axes = self.point.axes self.point.set_animated(True) canvas.draw() self.background = canvas.copy_from_bbox(self.point.axes.bbox) # now redraw just the rectangle axes.draw_artist(self.point) # and blit just the redrawn area canvas.blit(axes.bbox) def on_motion(self, event): if DraggablePoint.lock is not self: return if event.inaxes != self.point.axes: return self.point.center, xpress, ypress = self.press dx = event.xdata - xpress dy = event.ydata - ypress self.point.center = (self.point.center[0]+dx, self.point.center[1]+dy) canvas = self.point.figure.canvas axes = self.point.axes # restore the background region canvas.restore_region(self.background) # redraw just the current rectangle axes.draw_artist(self.point) # blit just the redrawn area canvas.blit(axes.bbox) def on_release(self, event): 'on release we reset the press data' if DraggablePoint.lock is not self: return self.press = None DraggablePoint.lock = None # turn off the rect animation property and reset the background self.point.set_animated(False) self.background = None # redraw the full figure self.point.figure.canvas.draw() def disconnect(self): 'disconnect all the stored connection ids' self.point.figure.canvas.mpl_disconnect(self.cidpress) self.point.figure.canvas.mpl_disconnect(self.cidrelease) self.point.figure.canvas.mpl_disconnect(self.cidmotion) def initialise(file): #intialise empty parameter dictionary #kwargs stands for keyword arguments kwargs = {} #load file dataset = toolbox.read(file) #allocate stuff #~ ns = kwargs['ns'] = dataset['ns'][0] dt = kwargs['dt'] = dataset['dt'][0]/1e6 #also add the time vector - it's useful later kwargs['times'] = np.arange(0, dt*ns, dt) dataset['trace'] /= np.amax(dataset['trace']) dataset['tracr'] = np.arange(dataset.size) kwargs['primary'] = 'cdp' kwargs['secondary'] = 'offset' kwargs['cdp'] = np.sort(np.unique(dataset['cdp'])) kwargs['step'] = 1 toolbox.scan(dataset) return dataset, kwargs @io def tar(data, **kwargs): #pull some values out of the #paramter dictionary gamma = kwargs['gamma'] t = kwargs['times'] #calculate the correction coeffieicnt r = np.exp(gamma * t) #applyt the correction to the data data['trace'] *= r return data @io def apply_statics(data, **kwargs): for trace in data: shift = 
np.round(trace['tstat']/(kwargs['dt']*1000)).astype(np.int) if np.abs(shift) > 0: if shift > 0: trace['trace'][-shift:] = 0 if shift < 0: trace['trace'][:-shift] = 0 trace['trace'] = np.roll(trace['trace'] , shift) return data def build_vels(vels, **kwargs): from scipy import interpolate cdps = np.array(kwargs['cdp']) times = np.array(kwargs['times']) keys = vels.keys() x = [] t = [] values = [] for i in vels.items(): cdp = i[0] picks= i[1] for pick in picks: x.append(cdp) t.append(pick[1]) values.append(pick[0]) grid_x, grid_y = np.meshgrid(cdps, times) #top left x.append(min(cdps)) t.append(min(times)) values.append(min(values)) #top right t.append(min(times)) x.append(max(cdps)) values.append(min(values)) #bottom left x.append(min(cdps)) t.append(max(times)) values.append(max(values)) #bottom right t.append(max(times)) x.append(max(cdps)) values.append(max(values)) zi = pylab.griddata(x, t, values, grid_x, grid_y, interp='linear') return zi.T def _nmo_calc(tx, vels, offset): '''calculates the zero offset time''' t0 = np.sqrt(tx*tx - (offset*offset)/(vels*vels)) return t0 @io def old_nmo(dataset, **kwargs): if 'smute' not in kwargs.keys(): kwargs['smute'] = 10000. ns = kwargs['ns'] dt = kwargs['dt'] tx = kwargs['times'] minCdp = np.amin(dataset['cdp']) counter = 0 ntraces = dataset.size print "moving out %d traces" %ntraces result = dataset.copy() result['trace'] *= 0 for i in range(dataset.size): trace = dataset[i] counter += 1 if counter > 1000: ntraces -= counter counter = 0 print ntraces aoffset = np.abs(trace['offset'].astype(np.float)) cdp = trace['cdp'] vel = kwargs['vels'][cdp - minCdp] #calculate time shift for each sample in trac t0 = _nmo_calc(tx, vel, aoffset) t0 = np.nan_to_num(t0) #calculate stretch between each sample stretch = 100.0*(np.pad(np.diff(t0),(0,1), 'reflect')-dt)/dt mute = kwargs['smute'] filter = [(stretch >0.0) & ( stretch < mute)] #interpolate result[i]['trace'] = np.interp(tx, t0, trace['trace']) * filter return result @io def nmo(dataset, **kwargs): dataset.sort(order='cdp') cdps = np.unique(dataset['cdp']) minCdp = cdps[0] times = kwargs['times'] dt = kwargs['dt'] ns = kwargs['ns'] nt = dataset.shape[0] traces = np.arange(nt) cdp_columns = dataset['cdp'] - minCdp vels = np.zeros_like(dataset['trace']) for i in range(cdp_columns.size): vels[i] = kwargs['vels'][cdp_columns[i]] tx = np.ones(dataset['trace'].shape) * times offset = dataset['offset'][:, None] t0 = _nmo_calc(tx, vels, offset) t0 = np.nan_to_num(t0) shifts = np.ones(dataset['trace'].shape) * (ns * dt * traces[:, None]) tx += shifts t0 += shifts result = np.interp(tx.ravel(), t0.ravel(), dataset['trace'].flatten()) dataset['trace'] = result.reshape(nt, ns) #calculate stretch between each sample stretch = 100.0*(np.abs(t0 - np.roll(t0, 1, axis=-1))/dt) stretch = np.nan_to_num(stretch) mute = kwargs['smute'] * 1.0 filter = [(stretch >0.0) & ( stretch < mute)][0] dataset['trace'] *= filter return dataset @io def axis_nmo(dataset, **kwargs): pass def _stack_gather(gather): '''stacks a single gather into a trace. uses header of first trace. 
normalises by the number of nonzero samples''' pilot = gather[np.argmin(gather['offset'])] norm = gather['trace'].copy() norm = np.nan_to_num(norm) norm = norm **0 norm = np.sum(norm, axis=-2) pilot['trace'] = np.sum(gather['trace'], axis=-2)/norm return pilot @io def stack(dataset, **kwargs): cdps = np.unique(dataset['cdp']) sutype = np.result_type(dataset) result = np.zeros(cdps.size, dtype=sutype) for index, cdp in enumerate(cdps): gather = dataset[dataset['cdp'] == cdp] trace = _stack_gather(gather) result[index] = trace return result def semb(workspace,**kwargs): print '' def onclick(e): if e.button == 1: print "(%.1f, %.3f)," %(e.xdata, e.ydata), w = np.abs(np.diff(ax.get_xlim())[0])/50. h = np.abs(np.diff(ax.get_ylim())[0])/50. circ= patches.Ellipse((e.xdata, e.ydata), width=w, height=h, fc='k') ax.add_patch(circ) dr = DraggablePoint(circ) dr.connect() drs.append(dr) fig.canvas.draw() vels = kwargs['velocities'] nvels = vels.size ns = kwargs['ns'] result = np.zeros((nvels,ns),'f') loc = np.mean(workspace['cdp']) for v in range(nvels): panel = workspace.copy() kwargs['vels'] = np.ones(kwargs['ns'], 'f') * vels[v] panel = nmo(panel, None, **kwargs) norm = panel['trace'].copy() norm[np.nonzero(norm)] = 1 n = np.sum(norm, axis=0) a = np.sum(panel['trace'], axis=0)**2 b = n * np.sum(panel['trace']**2, axis=0) window = kwargs['smoother']*1.0 kernel = np.ones(window)/window a = np.convolve(a, kernel, mode='same') b = np.convolve(b, kernel, mode='same') result[v:] = np.sqrt(a/b) pylab.imshow(result.T, aspect='auto', extent=(min(vels), max(vels),kwargs['ns']*kwargs['dt'],0.), cmap='jet') pylab.xlabel('velocity') pylab.ylabel('time') pylab.title("cdp = %d" %np.unique(loc)) pylab.colorbar() print "vels[%d]=" %loc, fig = pylab.gcf() ax = fig.gca() fig.canvas.mpl_connect('button_press_event', onclick) drs = [] pylab.show() #print '' #print "vels[%d]=" %loc, #for dr in drs: # print "(%.1f, %.3f)," %dr.point.center, def _lmo_calc(aoffset, velocity): t0 = -1.0*aoffset/velocity return t0 @io def lmo(dataset, **kwargs): offsets = np.unique(dataset['offset']) for offset in offsets: aoffset = np.abs(offset) shift = _lmo_calc(aoffset, kwargs['lmo']) shift = (shift*1000).astype(np.int) inds= [dataset['offset'] == offset] dataset['trace'][inds] = np.roll(dataset['trace'][inds], shift, axis=-1) #results[inds] return dataset @io def trace_mix(dataset, **kwargs): ns = kwargs['ns'] window = np.ones(kwargs['mix'], 'f')/kwargs['mix'] for i in range(ns): dataset['trace'][:,i] = np.convolve(dataset['trace'][:,i], window, mode='same') return dataset def butter_bandpass(lowcut, highcut, fs, order=5): nyq = 0.5 * fs low = lowcut / nyq high = highcut / nyq b, a = butter(order, [low, high], btype='band') return b, a def butter_bandpass_filter(data, lowcut, highcut, fs, order=5): b, a = butter_bandpass(lowcut, highcut, fs, order=order) y = lfilter(b, a, data) return y @io def bandpass(dataset, **kwargs): # Sample rate and desired cutoff frequencies (in Hz). 
fs = 1./kwargs['dt'] lowcut = kwargs['lowcut'] highcut = kwargs['highcut'] dataset['trace'] = butter_bandpass_filter(np.fliplr(dataset['trace']), lowcut, highcut, fs, order=3) dataset['trace'] = butter_bandpass_filter(np.fliplr(dataset['trace']), lowcut, highcut, fs, order=3) return dataset def fk_view(dataset, **kwargs): mid= dataset.size/2 f = np.abs(np.fft.rfft2(dataset['trace'])) freq = np.fft.rfftfreq(kwargs['ns'], d=kwargs['dt']) k = np.fft.rfftfreq(dataset.size, d=kwargs['dx']) kmax = k[-1] f[:mid] = f[:mid][::-1] f[mid:] = f[mid:][::-1] pylab.figure() pylab.imshow(f.T, aspect='auto', extent=[-1*kmax, kmax, freq[-1], freq[0]]) pylab.colorbar() def fk_design(dataset, **kwargs): mid= dataset.size/2 f = np.abs(np.fft.rfft2(dataset['trace'])) freq = np.fft.rfftfreq(kwargs['ns'], d=kwargs['dt']) k = np.fft.rfftfreq(dataset.size, d=kwargs['dx']) k = k[:-1] kmax = k[-1] k_axis = np.hstack([k, k[::-1]])[:, None] column, row = np.indices(f.shape) row = row.astype(np.float) column = column.astype(np.float) column.fill(1.0) row.fill(1.0) row *= freq column *= k_axis m = row/column m[:mid] = m[:mid][::-1] m[mid:] = m[mid:][::-1] mask = m > kwargs['fkVelocity'] m[mask] = 1 m[~mask] = 0 window = kwargs['fkSmooth'] vec= np.ones(window)/(window *1.0) smoothed_m = np.apply_along_axis(lambda m: np.convolve(m, vec, mode='valid'), axis=-1, arr=m) valid = smoothed_m.shape[-1] m[:, :valid] = smoothed_m pylab.figure() pylab.imshow(m.T, aspect='auto', extent=[-1*kmax, kmax, freq[-1], freq[0]]) pylab.colorbar() z = m.copy() z[:mid] = z[:mid][::-1] z[mid:] = z[mid:][::-1] return z @io def fk_filter(dataset, **kwargs): for s in np.unique(dataset['fldr']): shot = dataset['trace'][dataset['fldr'] == s] filter = kwargs['fkFilter'] nt = shot.shape[0] delta = abs(nt - filter.shape[0]) if delta > 0: shot = np.vstack([shot, np.zeros_like(shot[:delta])]) f = np.fft.rfft2(shot) result = np.fft.irfft2(f*filter)[:nt] dataset['trace'] [dataset['fldr'] == s]= 0.0 dataset['trace'] [dataset['fldr'] == s]= result return dataset @io def trim(dataset, **kwargs): dataset['tstat'] = 0 model = kwargs['model'] cdps = np.unique(model['cdp']) start, end = (kwargs['gate'] /kwargs['dt']).astype(np.int) centre = kwargs['ns']/2 m = kwargs['maxshift'] for cdp in cdps: gather = dataset[dataset['cdp'] == cdp].copy() gather['trace'][:,:start] = 0 gather['trace'][:,end:] = 0 pilot = model['trace'][model['cdp'] == cdp].ravel() pilot[:start] = 0 pilot[end:] = 0 result = np.apply_along_axis(lambda m: np.correlate(m, pilot, mode='same'), axis=-1, arr=gather['trace']) result[:,:centre-m] = 0 result[:,centre+m+1:] = 0 peaks = np.argmax(np.abs(result), axis=-1) dataset['tstat'][dataset['cdp'] == cdp] = peaks dataset['tstat'] -= centre.astype(np.int16) dataset['tstat'] *= -1 return dataset
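A minimal sketch of the same Butterworth band-pass design that the bandpass step above applies per trace, run on a synthetic signal. The 2 ms sample interval and 5-60 Hz corner frequencies are illustrative assumptions, not values taken from the toolbox.

import numpy as np
from scipy.signal import butter, lfilter

def butter_bandpass(lowcut, highcut, fs, order=5):
    # normalise the corner frequencies by the Nyquist frequency
    nyq = 0.5 * fs
    b, a = butter(order, [lowcut / nyq, highcut / nyq], btype='band')
    return b, a

dt = 0.002                      # assumed 2 ms sample interval
fs = 1.0 / dt                   # sampling frequency in Hz
t = np.arange(0, 1.0, dt)
trace = np.sin(2 * np.pi * 10 * t) + np.sin(2 * np.pi * 120 * t)  # 10 Hz + 120 Hz

b, a = butter_bandpass(lowcut=5.0, highcut=60.0, fs=fs, order=3)
filtered = lfilter(b, a, trace)  # keeps the 10 Hz component, attenuates 120 Hz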
mit
hainm/statsmodels
statsmodels/tsa/base/datetools.py
27
10629
from statsmodels.compat.python import (lrange, lzip, lmap, string_types, callable, asstr, reduce, zip, map) import re import datetime from pandas import Period from pandas.tseries.frequencies import to_offset from pandas import datetools as pandas_datetools import numpy as np #NOTE: All of these frequencies assume end of period (except wrt time) class _freq_to_pandas_class(object): # being lazy, don't want to replace dictionary below def __getitem__(self, key): return to_offset(key) _freq_to_pandas = _freq_to_pandas_class() def _is_datetime_index(dates): if isinstance(dates[0], (datetime.datetime, Period)): return True # TimeStamp is a datetime subclass else: return False def _index_date(date, dates): """ Gets the index number of a date in a date index. Works in-sample and will return one past the end of the dates since prediction can start one out. Currently used to validate prediction start dates. If there dates are not of a fixed-frequency and date is not on the existing dates, then a ValueError is raised. """ if isinstance(date, string_types): date = date_parser(date) try: if hasattr(dates, 'indexMap'): # 0.7.x return dates.indexMap[date] else: date = dates.get_loc(date) try: # pandas 0.8.0 returns a boolean array len(date) return np.where(date)[0].item() except TypeError: # expected behavior return date except KeyError as err: freq = _infer_freq(dates) if freq is None: #TODO: try to intelligently roll forward onto a date in the # index. Waiting to drop pandas 0.7.x support so this is # cleaner to do. raise ValueError("There is no frequency for these dates and " "date %s is not in dates index. Try giving a " "date that is in the dates index or use " "an integer" % date) # we can start prediction at the end of endog if _idx_from_dates(dates[-1], date, freq) == 1: return len(dates) raise ValueError("date %s not in date index. Try giving a " "date that is in the dates index or use an integer" % date) def _date_from_idx(d1, idx, freq): """ Returns the date from an index beyond the end of a date series. d1 is the datetime of the last date in the series. idx is the index distance of how far the next date should be from d1. Ie., 1 gives the next date from d1 at freq. Notes ----- This does not do any rounding to make sure that d1 is actually on the offset. For now, this needs to be taken care of before you get here. """ return d1 + idx * _freq_to_pandas[freq] def _idx_from_dates(d1, d2, freq): """ Returns an index offset from datetimes d1 and d2. d1 is expected to be the last date in a date series and d2 is the out of sample date. Notes ----- Rounds down the index if the end date is before the next date at freq. Does not check the start date to see whether it is on the offest but assumes that it is. 
""" from pandas import DatetimeIndex return len(DatetimeIndex(start=d1, end=d2, freq = _freq_to_pandas[freq])) - 1 _quarter_to_day = { "1" : (3, 31), "2" : (6, 30), "3" : (9, 30), "4" : (12, 31), "I" : (3, 31), "II" : (6, 30), "III" : (9, 30), "IV" : (12, 31) } _mdays = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] _months_with_days = lzip(lrange(1,13), _mdays) _month_to_day = dict(zip(map(str,lrange(1,13)), _months_with_days)) _month_to_day.update(dict(zip(["I", "II", "III", "IV", "V", "VI", "VII", "VIII", "IX", "X", "XI", "XII"], _months_with_days))) # regex patterns _y_pattern = '^\d?\d?\d?\d$' _q_pattern = ''' ^ # beginning of string \d?\d?\d?\d # match any number 1-9999, includes leading zeros (:?q) # use q or a : as a separator ([1-4]|(I{1,3}V?)) # match 1-4 or I-IV roman numerals $ # end of string ''' _m_pattern = ''' ^ # beginning of string \d?\d?\d?\d # match any number 1-9999, includes leading zeros (:?m) # use m or a : as a separator (([1-9][0-2]?)|(I?XI{0,2}|I?VI{0,3}|I{1,3})) # match 1-12 or # I-XII roman numerals $ # end of string ''' #NOTE: see also ts.extras.isleapyear, which accepts a sequence def _is_leap(year): year = int(year) return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0) def date_parser(timestr, parserinfo=None, **kwargs): """ Uses dateutil.parser.parse, but also handles monthly dates of the form 1999m4, 1999:m4, 1999:mIV, 1999mIV and the same for quarterly data with q instead of m. It is not case sensitive. The default for annual data is the end of the year, which also differs from dateutil. """ flags = re.IGNORECASE | re.VERBOSE if re.search(_q_pattern, timestr, flags): y,q = timestr.replace(":","").lower().split('q') month, day = _quarter_to_day[q.upper()] year = int(y) elif re.search(_m_pattern, timestr, flags): y,m = timestr.replace(":","").lower().split('m') month, day = _month_to_day[m.upper()] year = int(y) if _is_leap(y) and month == 2: day += 1 elif re.search(_y_pattern, timestr, flags): month, day = 12, 31 year = int(timestr) else: if (hasattr(pandas_datetools, 'parser') and not callable(pandas_datetools.parser)): # exists in 0.8.0 pandas, but it's the class not the module return pandas_datetools.parser.parse(timestr, parserinfo, **kwargs) else: # 0.8.1 pandas version didn't import this into namespace from dateutil import parser return parser.parse(timestr, parserinfo, **kwargs) return datetime.datetime(year, month, day) def date_range_str(start, end=None, length=None): """ Returns a list of abbreviated date strings. Parameters ---------- start : str The first abbreviated date, for instance, '1965q1' or '1965m1' end : str, optional The last abbreviated date if length is None. length : int, optional The length of the returned array of end is None. 
Returns ------- date_range : list List of strings """ flags = re.IGNORECASE | re.VERBOSE #_check_range_inputs(end, length, freq) start = start.lower() if re.search(_m_pattern, start, flags): annual_freq = 12 split = 'm' elif re.search(_q_pattern, start, flags): annual_freq = 4 split = 'q' elif re.search(_y_pattern, start, flags): annual_freq = 1 start += 'a1' # hack if end: end += 'a1' split = 'a' else: raise ValueError("Date %s not understood" % start) yr1, offset1 = lmap(int, start.replace(":","").split(split)) if end is not None: end = end.lower() yr2, offset2 = lmap(int, end.replace(":","").split(split)) length = (yr2 - yr1) * annual_freq + offset2 elif length: yr2 = yr1 + length // annual_freq offset2 = length % annual_freq + (offset1 - 1) years = np.repeat(lrange(yr1+1, yr2), annual_freq).tolist() years = np.r_[[str(yr1)]*(annual_freq+1-offset1), years] # tack on first year years = np.r_[years, [str(yr2)]*offset2] # tack on last year if split != 'a': offset = np.tile(np.arange(1, annual_freq+1), yr2-yr1-1) offset = np.r_[np.arange(offset1, annual_freq+1).astype('a2'), offset] offset = np.r_[offset, np.arange(1,offset2+1).astype('a2')] date_arr_range = [''.join([i, split, asstr(j)]) for i,j in zip(years, offset)] else: date_arr_range = years.tolist() return date_arr_range def dates_from_str(dates): """ Turns a sequence of date strings and returns a list of datetime. Parameters ---------- dates : array-like A sequence of abbreviated dates as string. For instance, '1996m1' or '1996Q1'. The datetime dates are at the end of the period. Returns ------- date_list : array A list of datetime types. """ return lmap(date_parser, dates) def dates_from_range(start, end=None, length=None): """ Turns a sequence of date strings and returns a list of datetime. Parameters ---------- start : str The first abbreviated date, for instance, '1965q1' or '1965m1' end : str, optional The last abbreviated date if length is None. length : int, optional The length of the returned array of end is None. Examples -------- >>> import statsmodels.api as sm >>> dates = sm.tsa.datetools.date_range('1960m1', length=nobs) Returns ------- date_list : array A list of datetime types. """ dates = date_range_str(start, end, length) return dates_from_str(dates) def _add_datetimes(dates): return reduce(lambda x, y: y+x, dates) def _infer_freq(dates): maybe_freqstr = getattr(dates, 'freqstr', None) if maybe_freqstr is not None: return maybe_freqstr try: from pandas.tseries.api import infer_freq freq = infer_freq(dates) return freq except ImportError: pass timedelta = datetime.timedelta nobs = min(len(dates), 6) if nobs == 1: raise ValueError("Cannot infer frequency from one date") if hasattr(dates, 'values'): dates = dates.values # can't do a diff on a DateIndex diff = np.diff(dates[:nobs]) delta = _add_datetimes(diff) nobs -= 1 # after diff if delta == timedelta(nobs): #greedily assume 'D' return 'D' elif delta == timedelta(nobs + 2): return 'B' elif delta == timedelta(7*nobs): return 'W' elif delta >= timedelta(28*nobs) and delta <= timedelta(31*nobs): return 'M' elif delta >= timedelta(90*nobs) and delta <= timedelta(92*nobs): return 'Q' elif delta >= timedelta(365 * nobs) and delta <= timedelta(366 * nobs): return 'A' else: return
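A short usage sketch for the abbreviated-date helpers defined above; it assumes a statsmodels build that still exposes statsmodels.tsa.base.datetools.

# Usage sketch (assumed import path from this module's location in the tree).
from statsmodels.tsa.base.datetools import date_parser, dates_from_range

# Quarterly and monthly strings resolve to end-of-period datetimes.
q1 = date_parser('1999q1')    # datetime.datetime(1999, 3, 31, 0, 0)
m4 = date_parser('1999:mIV')  # datetime.datetime(1999, 4, 30, 0, 0)

# Expand an abbreviated range into a list of datetimes.
dates = dates_from_range('1960q1', length=4)
# [1960-03-31, 1960-06-30, 1960-09-30, 1960-12-31]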
bsd-3-clause
robin-lai/scikit-learn
sklearn/tests/test_kernel_ridge.py
342
3027
import numpy as np
import scipy.sparse as sp

from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal


X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T


def test_kernel_ridge():
    pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
    pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
    assert_array_almost_equal(pred, pred2)


def test_kernel_ridge_csr():
    pred = Ridge(alpha=1, fit_intercept=False,
                 solver="cholesky").fit(Xcsr, y).predict(Xcsr)
    pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
    assert_array_almost_equal(pred, pred2)


def test_kernel_ridge_csc():
    pred = Ridge(alpha=1, fit_intercept=False,
                 solver="cholesky").fit(Xcsc, y).predict(Xcsc)
    pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
    assert_array_almost_equal(pred, pred2)


def test_kernel_ridge_singular_kernel():
    # alpha=0 causes a LinAlgError in computing the dual coefficients,
    # which causes a fallback to a lstsq solver. This is tested here.
    pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
    kr = KernelRidge(kernel="linear", alpha=0)
    ignore_warnings(kr.fit)(X, y)
    pred2 = kr.predict(X)
    assert_array_almost_equal(pred, pred2)


def test_kernel_ridge_precomputed():
    for kernel in ["linear", "rbf", "poly", "cosine"]:
        K = pairwise_kernels(X, X, metric=kernel)
        pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
        pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
        assert_array_almost_equal(pred, pred2)


def test_kernel_ridge_precomputed_kernel_unchanged():
    K = np.dot(X, X.T)
    K2 = K.copy()
    KernelRidge(kernel="precomputed").fit(K, y)
    assert_array_almost_equal(K, K2)


def test_kernel_ridge_sample_weights():
    K = np.dot(X, X.T)  # precomputed kernel
    sw = np.random.RandomState(0).rand(X.shape[0])

    pred = Ridge(alpha=1,
                 fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
    pred2 = KernelRidge(kernel="linear",
                        alpha=1).fit(X, y, sample_weight=sw).predict(X)
    pred3 = KernelRidge(kernel="precomputed",
                        alpha=1).fit(K, y, sample_weight=sw).predict(K)
    assert_array_almost_equal(pred, pred2)
    assert_array_almost_equal(pred, pred3)


def test_kernel_ridge_multi_output():
    pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
    pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
    assert_array_almost_equal(pred, pred2)

    pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
    pred3 = np.array([pred3, pred3]).T
    assert_array_almost_equal(pred2, pred3)
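A minimal standalone sketch of the equivalence these tests assert: a linear-kernel KernelRidge should reproduce an intercept-free Ridge fit. The sample sizes and random_state are illustrative.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge

X_demo, y_demo = make_regression(n_samples=50, n_features=10, random_state=0)

# Primal ridge without an intercept vs. dual (kernelised) ridge with a linear kernel.
ridge_pred = Ridge(alpha=1, fit_intercept=False).fit(X_demo, y_demo).predict(X_demo)
kr_pred = KernelRidge(kernel="linear", alpha=1).fit(X_demo, y_demo).predict(X_demo)

print(np.allclose(ridge_pred, kr_pred, atol=1e-6))  # expected: True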
bsd-3-clause
HeraclesHX/scikit-learn
examples/bicluster/bicluster_newsgroups.py
162
7103
""" ================================================================ Biclustering documents with the Spectral Co-clustering algorithm ================================================================ This example demonstrates the Spectral Co-clustering algorithm on the twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is excluded because it contains many posts containing nothing but data. The TF-IDF vectorized posts form a word frequency matrix, which is then biclustered using Dhillon's Spectral Co-Clustering algorithm. The resulting document-word biclusters indicate subsets words used more often in those subsets documents. For a few of the best biclusters, its most common document categories and its ten most important words get printed. The best biclusters are determined by their normalized cut. The best words are determined by comparing their sums inside and outside the bicluster. For comparison, the documents are also clustered using MiniBatchKMeans. The document clusters derived from the biclusters achieve a better V-measure than clusters found by MiniBatchKMeans. Output:: Vectorizing... Coclustering... Done in 9.53s. V-measure: 0.4455 MiniBatchKMeans... Done in 12.00s. V-measure: 0.3309 Best biclusters: ---------------- bicluster 0 : 1951 documents, 4373 words categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment bicluster 1 : 1165 documents, 3304 words categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage bicluster 2 : 2219 documents, 2830 words categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package bicluster 3 : 1860 documents, 2745 words categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes bicluster 4 : 12 documents, 155 words categories : 100% rec.sport.hockey words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved """ from __future__ import print_function print(__doc__) from collections import defaultdict import operator import re from time import time import numpy as np from sklearn.cluster.bicluster import SpectralCoclustering from sklearn.cluster import MiniBatchKMeans from sklearn.externals.six import iteritems from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.cluster import v_measure_score def number_aware_tokenizer(doc): """ Tokenizer that maps all numeric tokens to a placeholder. For many applications, tokens that begin with a number are not directly useful, but the fact that such a token exists can be relevant. By applying this form of dimensionality reduction, some methods may perform better. 
""" token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b') tokens = token_pattern.findall(doc) tokens = ["#NUMBER" if token[0] in "0123456789_" else token for token in tokens] return tokens # exclude 'comp.os.ms-windows.misc' categories = ['alt.atheism', 'comp.graphics', 'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware', 'comp.windows.x', 'misc.forsale', 'rec.autos', 'rec.motorcycles', 'rec.sport.baseball', 'rec.sport.hockey', 'sci.crypt', 'sci.electronics', 'sci.med', 'sci.space', 'soc.religion.christian', 'talk.politics.guns', 'talk.politics.mideast', 'talk.politics.misc', 'talk.religion.misc'] newsgroups = fetch_20newsgroups(categories=categories) y_true = newsgroups.target vectorizer = TfidfVectorizer(stop_words='english', min_df=5, tokenizer=number_aware_tokenizer) cocluster = SpectralCoclustering(n_clusters=len(categories), svd_method='arpack', random_state=0) kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000, random_state=0) print("Vectorizing...") X = vectorizer.fit_transform(newsgroups.data) print("Coclustering...") start_time = time() cocluster.fit(X) y_cocluster = cocluster.row_labels_ print("Done in {:.2f}s. V-measure: {:.4f}".format( time() - start_time, v_measure_score(y_cocluster, y_true))) print("MiniBatchKMeans...") start_time = time() y_kmeans = kmeans.fit_predict(X) print("Done in {:.2f}s. V-measure: {:.4f}".format( time() - start_time, v_measure_score(y_kmeans, y_true))) feature_names = vectorizer.get_feature_names() document_names = list(newsgroups.target_names[i] for i in newsgroups.target) def bicluster_ncut(i): rows, cols = cocluster.get_indices(i) if not (np.any(rows) and np.any(cols)): import sys return sys.float_info.max row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0] col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0] weight = X[rows[:, np.newaxis], cols].sum() cut = (X[row_complement[:, np.newaxis], cols].sum() + X[rows[:, np.newaxis], col_complement].sum()) return cut / weight def most_common(d): """Items of a defaultdict(int) with the highest values. Like Counter.most_common in Python >=2.7. """ return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True) bicluster_ncuts = list(bicluster_ncut(i) for i in range(len(newsgroups.target_names))) best_idx = np.argsort(bicluster_ncuts)[:5] print() print("Best biclusters:") print("----------------") for idx, cluster in enumerate(best_idx): n_rows, n_cols = cocluster.get_shape(cluster) cluster_docs, cluster_words = cocluster.get_indices(cluster) if not len(cluster_docs) or not len(cluster_words): continue # categories counter = defaultdict(int) for i in cluster_docs: counter[document_names[i]] += 1 cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name) for name, c in most_common(counter)[:3]) # words out_of_cluster_docs = cocluster.row_labels_ != cluster out_of_cluster_docs = np.where(out_of_cluster_docs)[0] word_col = X[:, cluster_words] word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) - word_col[out_of_cluster_docs, :].sum(axis=0)) word_scores = word_scores.ravel() important_words = list(feature_names[cluster_words[i]] for i in word_scores.argsort()[:-11:-1]) print("bicluster {} : {} documents, {} words".format( idx, n_rows, n_cols)) print("categories : {}".format(cat_string)) print("words : {}\n".format(', '.join(important_words)))
bsd-3-clause
degoldschmidt/pytrack-analysis
scripts/plot_trajectories.py
2
5487
from pytrack_analysis.profile import get_profile from pytrack_analysis.database import Experiment import pytrack_analysis.plot as plot from pytrack_analysis import Multibench import matplotlib.pyplot as plt from pytrack_analysis.yamlio import read_yaml from pytrack_analysis.array import rle import seaborn as sns import numpy as np import pandas as pd import os import os.path as op from scipy.stats import ranksums import argparse import textwrap defaults = { 'dpi': 900, 'fig_width': 5, 'fig_height': 2.5, 'n_rows': 1, 'n_cols': 2, 'outextension': 'png', 'outfile': 'out', } def parse_yaml(_file): out = read_yaml(_file) for k,v in defaults.items(): if k not in out.keys(): out[k] = v return out def main(): """ --- general parameters * """ ### CLI arguments parser = argparse.ArgumentParser() parser.add_argument('basedir', metavar='basedir', type=str, help='directory where your data files are') #parser.add_argument('infile', metavar='infile', type=str, help='yaml file for plotting') #parser.add_argument('--suffix', type=str) _base = parser.parse_args().basedir _result = op.join(_base, 'pytrack_res') rawfolder = op.join(_result, 'post_tracking') experiment = [_file for _file in os.listdir(rawfolder) if _file.endswith('csv') and not _file.startswith('.') and _file[:-3]+'yaml' in os.listdir(rawfolder)][0][:4] sessions = [_file[:-4] for _file in os.listdir(rawfolder) if experiment in _file and _file.endswith('csv') and not _file.startswith('.') and _file[:-3]+'yaml' in os.listdir(rawfolder)] statdf = pd.DataFrame(data={'duration': [], 'condition': [], 'temperature': [], 'substrate': []}) for i_ses, ses in enumerate(sessions): try: f, ax = plt.subplots(figsize=(10, 10)) ### getting data yamlfile = op.join(rawfolder, ses+'.yaml') meta = read_yaml(yamlfile) dfs = [] cols = [['elapsed_time', 'frame_dt', 'head_x', 'head_y', 'body_x', 'body_y'], ['etho']] for i, module in enumerate(['kinematics', 'classifier']): infolder = op.join(_result, module) _file = "{}_{}.csv".format(ses, module) dfs.append(pd.read_csv(op.join(infolder, _file), index_col='frame').loc[:,cols[i]]) df = pd.concat(dfs, axis=1) ax = plot.arena(meta["arena"], meta["food_spots"], ax=ax) x, y, tx, ty, etho, dt = np.array(df['head_x']), np.array(df['head_y']), np.array(df['body_x']), np.array(df['body_y']), np.array(df['etho']), np.array(df['frame_dt']) ends = 108100 x, y, tx, ty, etho, dt = x[:ends], y[:ends], tx[:ends], ty[:ends], etho[:ends], dt[:ends] ax.plot(x, y, '.', c='#747474', alpha=0.5, ms=3) l, p, v = rle(etho) print(i_ses, meta['condition']) for eachl, eachp, eachv in zip(l, p, v): dur = np.sum(dt[eachp:eachp+eachl+1]) if eachv == 4: sub = 'yeast' if eachv == 5: sub = 'sucrose' if eachv == 4 or eachv == 5: statdf = statdf.append({'duration': dur, 'condition': meta['condition'], 'temperature': meta['setup']['temperature'], 'substrate': sub}, ignore_index=True) if dur > 0.5: print('{}: {} s (len {} @{})'.format(sub, dur, eachl, eachp)) ax.plot(x[etho==4], y[etho==4], '.', c='#ffc632', alpha=0.5, ms=4) ax.plot(x[etho==5], y[etho==5], '.', c='#1faeff', alpha=0.5, ms=4) ax.plot(tx[etho==4], ty[etho==4], '.', c='#af7e00', alpha=0.5, ms=8) ax.plot(tx[etho==5], ty[etho==5], '.', c='#0068a2', alpha=0.5, ms=8) #ax.plot(tx[:ends], ty[:ends], '.', c='#be0000', alpha=0.5, ms=3) ### saving files plt.tight_layout() ax.set_title(meta['condition'], loc='left') _file = os.path.join(_result, 'plots', "head", '{}_head'.format(ses)) plt.savefig(_file+'.png', dpi=600) except FileNotFoundError: pass print(statdf) statdf.to_csv(op.join(_result, 
'stats_dur.csv'), index_label='index') return 1 def replot(): """ --- general parameters * """ ### CLI arguments parser = argparse.ArgumentParser() parser.add_argument('basedir', metavar='basedir', type=str, help='directory where your data files are') #parser.add_argument('infile', metavar='infile', type=str, help='yaml file for plotting') #parser.add_argument('--suffix', type=str) _base = parser.parse_args().basedir _result = op.join(_base, 'pytrack_res') statdf = pd.read_csv(op.join(_result, 'stats_dur.csv'), index_col='index')##.query('duration > 0.75') f, ax = plt.subplots(figsize=(6, 2)) ax = sns.stripplot(x="duration", y="substrate", data=statdf, jitter=True, alpha=0.5, size=1, ax=ax) ax.set_xscale('log') sns.despine(ax=ax, left=True, trim=True) ax.set_ylabel('') ax.set_xlabel('duration [s]') plt.tight_layout() _file = os.path.join(_result, 'plots', 'micromov_durations') plt.savefig(_file+'.png', dpi=900) return 1 if __name__ == '__main__': # runs as benchmark test test = Multibench("", SILENT=False) test(replot) del test
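The script leans on rle from pytrack_analysis.array to turn the ethogram into bouts. The helper below is a hypothetical stand-in (the packaged implementation is not shown here) illustrating how per-bout durations are accumulated from the frame dts.

import numpy as np

def rle(x):
    """Return (lengths, start positions, values) of the runs in a 1-D array."""
    x = np.asarray(x)
    change = np.flatnonzero(x[1:] != x[:-1]) + 1
    starts = np.concatenate(([0], change))
    lengths = np.diff(np.concatenate((starts, [x.size])))
    return lengths, starts, x[starts]

etho = np.array([0, 0, 4, 4, 4, 0, 5, 5, 0, 4])   # toy ethogram codes
dt = np.full(etho.size, 0.02)                      # toy per-frame dt in seconds

for length, start, value in zip(*rle(etho)):
    if value in (4, 5):                            # 4 = yeast, 5 = sucrose
        duration = dt[start:start + length].sum()
        print(value, round(duration, 3))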
gpl-3.0
pmneila/morphsnakes
examples.py
1
8158
import os import logging import numpy as np from imageio import imread import matplotlib from matplotlib import pyplot as plt import morphsnakes as ms # in case you are running on machine without display, e.g. server if os.environ.get('DISPLAY', '') == '': logging.warning('No display found. Using non-interactive Agg backend.') matplotlib.use('Agg') PATH_IMG_NODULE = 'images/mama07ORI.bmp' PATH_IMG_STARFISH = 'images/seastar2.png' PATH_IMG_LAKES = 'images/lakes3.jpg' PATH_IMG_CAMERA = 'images/camera.png' PATH_IMG_COINS = 'images/coins.png' PATH_ARRAY_CONFOCAL = 'images/confocal.npy' def visual_callback_2d(background, fig=None): """ Returns a callback than can be passed as the argument `iter_callback` of `morphological_geodesic_active_contour` and `morphological_chan_vese` for visualizing the evolution of the levelsets. Only works for 2D images. Parameters ---------- background : (M, N) array Image to be plotted as the background of the visual evolution. fig : matplotlib.figure.Figure Figure where results will be drawn. If not given, a new figure will be created. Returns ------- callback : Python function A function that receives a levelset and updates the current plot accordingly. This can be passed as the `iter_callback` argument of `morphological_geodesic_active_contour` and `morphological_chan_vese`. """ # Prepare the visual environment. if fig is None: fig = plt.figure() fig.clf() ax1 = fig.add_subplot(1, 2, 1) ax1.imshow(background, cmap=plt.cm.gray) ax2 = fig.add_subplot(1, 2, 2) ax_u = ax2.imshow(np.zeros_like(background), vmin=0, vmax=1) plt.pause(0.001) def callback(levelset): if ax1.collections: del ax1.collections[0] ax1.contour(levelset, [0.5], colors='r') ax_u.set_data(levelset) fig.canvas.draw() plt.pause(0.001) return callback def visual_callback_3d(fig=None, plot_each=1): """ Returns a callback than can be passed as the argument `iter_callback` of `morphological_geodesic_active_contour` and `morphological_chan_vese` for visualizing the evolution of the levelsets. Only works for 3D images. Parameters ---------- fig : matplotlib.figure.Figure Figure where results will be drawn. If not given, a new figure will be created. plot_each : positive integer The plot will be updated once every `plot_each` calls to the callback function. Returns ------- callback : Python function A function that receives a levelset and updates the current plot accordingly. This can be passed as the `iter_callback` argument of `morphological_geodesic_active_contour` and `morphological_chan_vese`. """ from mpl_toolkits.mplot3d import Axes3D # PyMCubes package is required for `visual_callback_3d` try: import mcubes except ImportError: raise ImportError("PyMCubes is required for 3D `visual_callback_3d`") # Prepare the visual environment. if fig is None: fig = plt.figure() fig.clf() ax = fig.add_subplot(111, projection='3d') plt.pause(0.001) counter = [-1] def callback(levelset): counter[0] += 1 if (counter[0] % plot_each) != 0: return if ax.collections: del ax.collections[0] coords, triangles = mcubes.marching_cubes(levelset, 0.5) ax.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2], triangles=triangles) plt.pause(0.1) return callback def rgb2gray(img): """Convert a RGB image to gray scale.""" return 0.2989 * img[..., 0] + 0.587 * img[..., 1] + 0.114 * img[..., 2] def example_nodule(): logging.info('Running: example_nodule (MorphGAC)...') # Load the image. 
img = imread(PATH_IMG_NODULE)[..., 0] / 255.0 # g(I) gimg = ms.inverse_gaussian_gradient(img, alpha=1000, sigma=5.48) # Initialization of the level-set. init_ls = ms.circle_level_set(img.shape, (100, 126), 20) # Callback for visual plotting callback = visual_callback_2d(img) # MorphGAC. ms.morphological_geodesic_active_contour(gimg, iterations=45, init_level_set=init_ls, smoothing=1, threshold=0.31, balloon=1, iter_callback=callback) def example_starfish(): logging.info('Running: example_starfish (MorphGAC)...') # Load the image. imgcolor = imread(PATH_IMG_STARFISH) / 255.0 img = rgb2gray(imgcolor) # g(I) gimg = ms.inverse_gaussian_gradient(img, alpha=1000, sigma=2) # Initialization of the level-set. init_ls = ms.circle_level_set(img.shape, (163, 137), 135) # Callback for visual plotting callback = visual_callback_2d(imgcolor) # MorphGAC. ms.morphological_geodesic_active_contour(gimg, iterations=100, init_level_set=init_ls, smoothing=2, threshold=0.3, balloon=-1, iter_callback=callback) def example_coins(): logging.info('Running: example_coins (MorphGAC)...') # Load the image. img = imread(PATH_IMG_COINS) / 255.0 # g(I) gimg = ms.inverse_gaussian_gradient(img) # Manual initialization of the level set init_ls = np.zeros(img.shape, dtype=np.int8) init_ls[10:-10, 10:-10] = 1 # Callback for visual plotting callback = visual_callback_2d(img) # MorphGAC. ms.morphological_geodesic_active_contour(gimg, 230, init_ls, smoothing=1, threshold=0.69, balloon=-1, iter_callback=callback) def example_lakes(): logging.info('Running: example_lakes (MorphACWE)...') # Load the image. imgcolor = imread(PATH_IMG_LAKES)/255.0 img = rgb2gray(imgcolor) # MorphACWE does not need g(I) # Initialization of the level-set. init_ls = ms.circle_level_set(img.shape, (80, 170), 25) # Callback for visual plotting callback = visual_callback_2d(imgcolor) # Morphological Chan-Vese (or ACWE) ms.morphological_chan_vese(img, iterations=200, init_level_set=init_ls, smoothing=3, lambda1=1, lambda2=1, iter_callback=callback) def example_camera(): """ Example with `morphological_chan_vese` with using the default initialization of the level-set. """ logging.info('Running: example_camera (MorphACWE)...') # Load the image. img = imread(PATH_IMG_CAMERA)/255.0 # Callback for visual plotting callback = visual_callback_2d(img) # Morphological Chan-Vese (or ACWE) ms.morphological_chan_vese(img, 35, smoothing=3, lambda1=1, lambda2=1, iter_callback=callback) def example_confocal3d(): logging.info('Running: example_confocal3d (MorphACWE)...') # Load the image. img = np.load(PATH_ARRAY_CONFOCAL) # Initialization of the level-set. init_ls = ms.circle_level_set(img.shape, (30, 50, 80), 25) # Callback for visual plotting callback = visual_callback_3d(plot_each=20) # Morphological Chan-Vese (or ACWE) ms.morphological_chan_vese(img, iterations=150, init_level_set=init_ls, smoothing=1, lambda1=1, lambda2=2, iter_callback=callback) if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG) example_nodule() example_starfish() example_coins() example_lakes() example_camera() # Uncomment the following line to see a 3D example # This is skipped by default since mplot3d is VERY slow plotting 3d meshes # example_confocal3d() logging.info("Done.") plt.show()
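A minimal MorphACWE run on a synthetic image without the plotting callbacks; it assumes the local morphsnakes module imported above is on the path, and the disk-shaped test image is made up for illustration.

import numpy as np
import morphsnakes as ms

# Synthetic test image: a bright disk on a noisy background.
yy, xx = np.mgrid[:128, :128]
img = ((xx - 64) ** 2 + (yy - 64) ** 2 < 30 ** 2).astype(float)
img += 0.1 * np.random.RandomState(0).randn(*img.shape)

# Level set initialised as a small circle inside the object.
init_ls = ms.circle_level_set(img.shape, (64, 64), 10)

# Morphological Chan-Vese (ACWE) with a no-op callback instead of the visual one.
seg = ms.morphological_chan_vese(img, iterations=50, init_level_set=init_ls,
                                 smoothing=3, lambda1=1, lambda2=1,
                                 iter_callback=lambda levelset: None)
print(seg.shape)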
bsd-3-clause
ThomasMiconi/htmresearch
projects/sequence_prediction/continuous_sequence/plot_noisy_taxi_experiment.py
12
5167
import numpy as np import pandas as pd from matplotlib import pyplot as plt import matplotlib as mpl from plot import computeSquareDeviation from plot import ExperimentResult from plot import computeLikelihood from nupic.encoders.scalar import ScalarEncoder as NupicScalarEncoder mpl.rcParams['pdf.fonttype'] = 42 plt.ion() plt.close('all') def computeAltMAPE(truth, prediction, startFrom=0): return np.nanmean(np.abs(truth[startFrom:] - prediction[startFrom:]))/np.nanmean(np.abs(truth[startFrom:])) def computeNRMSE(truth, prediction, startFrom=0): square_deviation = computeSquareDeviation(prediction, truth) square_deviation[:startFrom] = None return np.sqrt(np.nanmean(square_deviation))/np.nanstd(truth) # use datetime as x-axis dataSet = 'nyc_taxi' filePath = './data/' + dataSet + '.csv' data = pd.read_csv(filePath, header=0, skiprows=[1, 2], names=['datetime', 'value', 'timeofday', 'dayofweek']) xaxis_datetime = pd.to_datetime(data['datetime']) startFrom = 10000 noiseList = [0, 0.02, 0.04, 0.06, 0.08, 0.1] encoder = NupicScalarEncoder(w=1, minval=0, maxval=40000, n=22, forced=True) nrmseTM = pd.DataFrame([], columns=['TM']) mapeTM = pd.DataFrame([], columns=['TM', 'GT']) negLLTM = pd.DataFrame([], columns=['TM']) noiseStrengthTM = [] for noise in noiseList: if noise > 0: dataSet = 'nyc_taxi' + "noise_{:.2f}".format(noise) filePath = './prediction/nyc_taxi' + "noise_{:.2f}".format(noise) + '_TM_pred.csv' else: dataSet = 'nyc_taxi' filePath = './prediction/nyc_taxi_TM_pred.csv' predData_TM = pd.read_csv(filePath, header=0, skiprows=[1, 2], names=['step', 'value', 'prediction5']) noiseValue = (predData_TM['value'] - data['value']) / data['value'] noiseStrengthTM.append(np.std(noiseValue[8000:])) if not np.isclose(noiseStrengthTM[-1], float(noise), rtol=0.1): print "Warning: Estimated noise strength is different from the given noise" groundTruth = np.roll(data['value'], -5) tmTruth = np.roll(predData_TM['value'], -5) predDataTMFiveStep = np.array(predData_TM['prediction5']) nrmse = computeNRMSE(tmTruth, predDataTMFiveStep, startFrom) altMAPE = computeAltMAPE(tmTruth, predDataTMFiveStep, startFrom) mapeGroundTruth = computeAltMAPE(tmTruth, groundTruth, startFrom) tm_prediction = np.load('./result/'+dataSet+'TMprediction.npy') tmTruth = np.load('./result/' + dataSet + 'TMtruth.npy') negLL = computeLikelihood(tm_prediction, tmTruth, encoder) negLL = np.nanmean(negLL[startFrom:]) nrmseTM = pd.concat([nrmseTM, pd.DataFrame([nrmse], columns=['TM'])]) mapeTM = pd.concat([mapeTM, pd.DataFrame(np.reshape(np.array([altMAPE, mapeGroundTruth]), newshape=(1,2)), columns=['TM', 'GT'])]) negLLTM = pd.concat([negLLTM, pd.DataFrame([negLL], columns=['TM'])]) lstmExptDir = 'results/nyc_taxi_experiment_continuous_likelihood_noise/' noiseList = ['0.0', '0.020', '0.040', '0.060', '0.080', '0.10'] negLLLSTM = pd.DataFrame([], columns=['LSTM']) noiseStrengthLSTM = [] for noise in noiseList: experiment = lstmExptDir + 'noise' + noise expResult = ExperimentResult(experiment) truth = np.concatenate((np.zeros(5333), expResult.truth)) error = np.concatenate((np.zeros(5333), expResult.error)) noiseValue = (truth - data['value']) / data['value'] noiseStrengthLSTM.append(np.std(noiseValue[8000:])) if not np.isclose(noiseStrengthLSTM[-1], float(noise), rtol=0.1): print "Warning: Estimated noise strength is different from the given noise" negLL = np.nanmean(error[startFrom:]) negLLLSTM = pd.concat([negLLLSTM, pd.DataFrame([negLL], columns=['LSTM'])]) lstmExptDir = 'results/nyc_taxi_experiment_continuous_noise/' noiseList = 
['0.0', '0.020', '0.040', '0.060', '0.080', '0.10'] mapeLSTM = pd.DataFrame([], columns=['LSTM']) nrmseLSTM = pd.DataFrame([], columns=['LSTM']) for noise in noiseList: experiment = lstmExptDir + 'noise' + noise expResult = ExperimentResult(experiment) altMAPE = computeAltMAPE(expResult.truth, expResult.predictions, startFrom) nrmse = computeNRMSE(expResult.truth, expResult.predictions, startFrom) nrmseLSTM = pd.concat([nrmseLSTM, pd.DataFrame([nrmse], columns=['LSTM'])]) mapeLSTM = pd.concat([mapeLSTM, pd.DataFrame([altMAPE], columns=['LSTM'])]) plt.figure() plt.plot(noiseList, mapeTM['TM']) plt.plot(noiseList, mapeLSTM) plt.xlabel(' Noise Amount ') plt.ylabel(' MAPE') plt.legend(['HTM', 'LSTM'], loc=2) plt.figure() plt.plot(noiseList, nrmseTM) plt.plot(noiseList, nrmseLSTM) plt.xlabel(' Noise Amount ') plt.legend(['HTM', 'LSTM'], loc=2) plt.ylabel(' NRMSE') plt.figure() plt.plot(noiseList, negLLTM) plt.plot(noiseList, negLLLSTM) plt.xlabel(' Noise Amount ') plt.legend(['HTM', 'LSTM'], loc=2) plt.ylabel(' negLL ') plt.figure() plt.plot(data['value']) plt.plot(predData_TM['value']) plt.legend(['Original', 'Noisy']) plt.title(" Example Noisy Data") plt.xlim([16000, 16500])
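A tiny self-contained check of the two error metrics defined at the top of the script (altMAPE and NRMSE), on made-up truth and prediction vectors.

import numpy as np

def compute_alt_mape(truth, prediction, start_from=0):
    t, p = truth[start_from:], prediction[start_from:]
    return np.nanmean(np.abs(t - p)) / np.nanmean(np.abs(t))

def compute_nrmse(truth, prediction, start_from=0):
    t, p = truth[start_from:], prediction[start_from:]
    return np.sqrt(np.nanmean((t - p) ** 2)) / np.nanstd(t)

truth = np.array([10.0, 12.0, 11.0, 13.0, 12.0])
pred = np.array([11.0, 12.5, 10.0, 12.0, 13.0])
print(compute_alt_mape(truth, pred), compute_nrmse(truth, pred))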
agpl-3.0
PanDAWMS/panda-bigmon-core
core/grafana/StaginDSProgress.py
1
6265
from requests import post, get from json import loads from core.settings.local import GRAFANA as token from django.http import JsonResponse from core.libs.exlib import dictfetchall from django.db import connection from django.utils import timezone from datetime import timedelta from core.settings import defaultDatetimeFormat from django.shortcuts import render_to_response from core.oauth.utils import login_customrequired from core.views import initRequest, setupView import pandas as pd def run_query(rules): base = "https://monit-grafana.cern.ch" url = "api/datasources/proxy/8428/_msearch" rulequery = "" for rule in rules: rulequery += " data.rule_id: %s OR" % rule rulequery = rulequery[:-3] paramQuery = """{"filter":[{"query_string":{"analyze_wildcard":true,"query":"data.event_type:rule_progress AND (%s)"}}]}""" % rulequery query = """{"search_type":"query_then_fetch","ignore_unavailable":true,"index":["monit_prod_ddm_enr_transfer*"]}\n{"size":500,"query":{"bool":"""+paramQuery+"""},"sort":{"metadata.timestamp":{"order":"desc","unmapped_type":"boolean"}},"script_fields":{},"docvalue_fields":["metadata.timestamp"]}\n""" headers = token headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' request_url = "%s/%s" % (base, url) r = post(request_url, headers=headers, data=query) resultdict = {} if r.ok: results = loads(r.text)['responses'][0]['hits']['hits'] for result in results: dictEntry = resultdict.get(result['_source']['data']['rule_id'], {}) dictEntry[result['_source']['data']['created_at']] = result['_source']['data']['progress'] resultdict[result['_source']['data']['rule_id']] = dictEntry result = resultdict else: result = None return result def __getRucioRuleByTaskID(taskid): new_cur = connection.cursor() new_cur.execute(""" SELECT RSE FROM ATLAS_DEFT.T_DATASET_STAGING where DATASET_STAGING_ID IN (select DATASET_STAGING_ID FROM ATLAS_DEFT.T_ACTION_STAGING where TASKID=%i)""" % int(taskid)) rucioRule = dictfetchall(new_cur) if rucioRule and len(rucioRule) > 0: return rucioRule[0]['RSE'] else: return None def __getRucioRulesBySourceSEAndTimeWindow(source, hours): new_cur = connection.cursor() new_cur.execute(""" SELECT RSE FROM ATLAS_DEFT.T_DATASET_STAGING where SOURCE_RSE='%s' and (START_TIME>TO_DATE('%s','YYYY-mm-dd HH24:MI:SS') or END_TIME is NULL)""" % (source, (timezone.now() - timedelta(hours=hours)).strftime(defaultDatetimeFormat))) """ SELECT t1.RSE, t2.taskid FROM ATLAS_DEFT.T_DATASET_STAGING t1 LEFT JOIN ATLAS_DEFT.t_production_task t2 ON t2.PRIMARY_INPUT=t1.DATASET and t1.SOURCE_RSE='BNL-OSG2_DATATAPE' and t1.START_TIME>TO_DATE('2019-07-10 12:57:54','YYYY-mm-dd HH24:MI:SS') """ rucioRulesRows = dictfetchall(new_cur) rucioRules = [] if rucioRulesRows and len(rucioRulesRows) > 0: for rucioRulesRow in rucioRulesRows: rucioRules.append(rucioRulesRow['RSE']) return rucioRules else: return None def getStageProfileData(request): valid, response = initRequest(request) RRules = [] #RuleToTasks = {} if 'jeditaskid' in request.session['requestParams']: rucioRule = __getRucioRuleByTaskID(int(request.session['requestParams']['jeditaskid'])) if rucioRule: RRules.append(rucioRule) #RuleToTasks[rucioRule] = int(request.session['requestParams']['jeditaskid']) elif ('stagesource' in request.session['requestParams'] and 'hours' in request.session['requestParams']): RRules = __getRucioRulesBySourceSEAndTimeWindow( request.session['requestParams']['stagesource'].strip().replace("'","''"), int(request.session['requestParams']['hours'])) chunksize = 50 chunks = 
[RRules[i:i + chunksize] for i in range(0, len(RRules), chunksize)] resDict = {} try: #TODO fix the query to Grafana for chunk in chunks: resDict = {**resDict, **run_query(chunk)} except: resDict = None """ s1 = pd.Series([0,1], index=list('AB')) s2 = pd.Series([2,3], index=list('AC')) result = pd.concat([s1, s2], join='outer', axis=1, sort=False) print(result) """ pandaDFs = {} RRuleNames = [] result = [] if resDict is not None: for RRule, progEvents in resDict.items(): timesList = list(progEvents.keys()) progList = list(progEvents.values()) pandasDF = pd.Series(progList, index=timesList) pandaDFs[RRule] = pandasDF RRuleNames.append(RRule) if pandaDFs: result = pd.concat(pandaDFs.values(), join='outer', axis=1, sort=True) result.index = pd.to_datetime(result.index) result = result.resample('15min').last().reset_index().fillna(method='ffill').fillna(0) result['index'] = result['index'].dt.strftime('%Y-%m-%d %H:%M:%S') result = [['TimeStamp',] + RRuleNames] + result.values.tolist() return JsonResponse(result, safe=False) @login_customrequired def getDATASetsProgressPlot(request): initRequest(request) query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=4, limit=9999999, querytype='task', wildCardExt=True) request.session['viewParams']['selection'] = '' reqparams = '' if 'jeditaskid' in request.session['requestParams']: reqparams = 'jeditaskid='+str(int(request.session['requestParams']['jeditaskid'])) elif ('stagesource' in request.session['requestParams'] and 'hours' in request.session['requestParams']): reqparams = 'stagesource='+request.session['requestParams']['stagesource'] + \ '&hours=' + request.session['requestParams']['hours'] data = { 'request': request, 'reqparams': reqparams, 'viewParams': request.session['viewParams'] if 'viewParams' in request.session else None, } response = render_to_response('DSProgressplot.html', data, content_type='text/html') #patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 5) return response
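A sketch of the resampling step in getStageProfileData on synthetic per-rule progress series: outer-join concatenation, 15-minute resampling, then forward fill. The rule names, timestamps and progress values are invented.

import pandas as pd

rule_a = pd.Series([0.1, 0.4, 0.9],
                   index=pd.to_datetime(['2019-07-10 12:00', '2019-07-10 12:40',
                                         '2019-07-10 13:20']))
rule_b = pd.Series([0.2, 0.7],
                   index=pd.to_datetime(['2019-07-10 12:10', '2019-07-10 13:00']))

# Outer join so every rule keeps its own event times, then snap to a 15-min grid.
result = pd.concat({'rule_a': rule_a, 'rule_b': rule_b}, join='outer',
                   axis=1, sort=True)
result = (result.resample('15min').last()
                .reset_index().fillna(method='ffill').fillna(0))
result['index'] = result['index'].dt.strftime('%Y-%m-%d %H:%M:%S')
print(result)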
apache-2.0
DeplanckeLab/ASAP
R_Python/clustering_scanpy_opti.py
1
8322
# Imports import sys import os import time import h5py import numpy as np import loompy import json from scipy.sparse import issparse, coo_matrix, csr_matrix from sklearn.metrics import pairwise_distances from umap.umap_ import nearest_neighbors from umap.umap_ import fuzzy_simplicial_set # Arguments loom_file = 'MISSING' iAnnot = 'MISSING' oAnnot = 'MISSING' oJSON = 'MISSING' n_neighbors = 'MISSING' method = 'MISSING' metric = 'MISSING' random_state = 'MISSING' time_idle = 0 warnings = [] # Check input args if len(sys.argv) >= 2: loom_file = sys.argv[1] if len(sys.argv) >= 3: iAnnot = sys.argv[2] if len(sys.argv) >= 4: oAnnot = sys.argv[3] if len(sys.argv) >= 5: oJSON = sys.argv[4] if len(sys.argv) >= 6: method = sys.argv[5] if len(sys.argv) >= 7: n_neighbors = int(sys.argv[6]) if len(sys.argv) >= 8: metric = sys.argv[7]) if len(sys.argv) >= 9: random_state = int(sys.argv[8]) # Functions # Handling errors def error_json(message, jsonfile): print(message, file=sys.stderr) if not jsonfile.startswith('MISSING'): data_json = {} data_json['displayed_error'] = message with open(jsonfile, 'w') as outfile: json.dump(data_json, outfile) sys.exit() # Open the Loom file while handling potential locking def open_with_lock(loomfile, mode): global time_idle, oJSON while True: try: return h5py.File(loomfile, mode) except Exception as e: if not "unable to lock file" in format(e): error_json("Error opening Loom file:" + format(e), oJSON) print("Sleeping 1sec for file lock....") time_idle += 1 time.sleep(1) def _compute_connectivities_umap(knn_indices, knn_dists, n_obs, n_neighbors, set_op_mix_ratio=1.0, local_connectivity=1.0): X = coo_matrix(([], ([], [])), shape=(n_obs, 1)) connectivities = fuzzy_simplicial_set(X, n_neighbors, None, None, knn_indices=knn_indices, knn_dists=knn_dists, set_op_mix_ratio=set_op_mix_ratio, local_connectivity=local_connectivity) if isinstance(connectivities, tuple): # In umap-learn 0.4, this returns (result, sigmas, rhos) connectivities = connectivities[0] distances = _get_sparse_matrix_from_indices_distances_umap(knn_indices, knn_dists, n_obs, n_neighbors) return distances, connectivities.tocsr() def _get_sparse_matrix_from_indices_distances_umap(knn_indices, knn_dists, n_obs, n_neighbors): rows = np.zeros((n_obs * n_neighbors), dtype=np.int64) cols = np.zeros((n_obs * n_neighbors), dtype=np.int64) vals = np.zeros((n_obs * n_neighbors), dtype=np.float64) for i in range(knn_indices.shape[0]): for j in range(n_neighbors): if knn_indices[i, j] == -1: continue # We didn't get the full knn for i if knn_indices[i, j] == i: val = 0.0 else: val = knn_dists[i, j] rows[i * n_neighbors + j] = i cols[i * n_neighbors + j] = knn_indices[i, j] vals[i * n_neighbors + j] = val result = coo_matrix((vals, (rows, cols)), shape=(n_obs, n_obs)) result.eliminate_zeros() return result.tocsr() def get_igraph_from_adjacency(adjacency): import igraph as ig sources, targets = adjacency.nonzero() weights = adjacency[sources, targets] if isinstance(weights, np.matrix): weights = weights.A1 g = ig.Graph(directed=True) g.add_vertices(adjacency.shape[0]) # this adds adjacency.shape[0] vertices g.add_edges(list(zip(sources, targets))) try: g.es['weight'] = weights except: pass if g.vcount() != adjacency.shape[0]: print( f'The constructed graph has only {g.vcount()} nodes. ' 'Your adjacency matrix contained redundant nodes.' 
) return g def louvain(adjacency, resolution = 1.0, random_state = 0): import louvain print('running Louvain clustering using the "louvain" package of Traag (2017)') louvain.set_rng_seed(random_state) g = get_igraph_from_adjacency(adjacency) part = louvain.find_partition(g, louvain.RBConfigurationVertexPartition, resolution_parameter = resolution) groups = np.array(part.membership) return groups + 1 # because it starts at clust0 def leiden(adjacency, resolution = 1.0, random_state = 0): use_weights: bool = True import leidenalg print('running Leiden clustering') # convert adjacency to igraph g = get_igraph_from_adjacency(adjacency) # clustering proper part = leidenalg.find_partition(g, partition_type = leidenalg.RBConfigurationVertexPartition, resolution_parameter = resolution, weights = np.array(g.es['weight']).astype(np.float64), n_iterations = -1, seed = random_state) # store output into adata.obs groups = np.array(part.membership) return groups + 1 # because it starts at clust0 # Argument list print('Clustering optimized from scanpy - List of arguments:') print('1. Loom file:', loom_file) print('2. Metadata to read:', iAnnot) print('3. Metadata to write:', oAnnot) print('4. Output JSON file:', oJSON) print('5. Method to use [louvain, leiden]:', method) print('6. Number of neighbors:', n_neighbors) print('7. Distance metric:', metric) print('8. Random seed:', random_state) # Check arguments if(len(sys.argv) < 9): error_json("Some arguments are MISSING. Stopping...", oJSON) # Open Loom file in reading mode f = open_with_lock(loom_file, 'r') # Open dataset is_nn_computed = False if not iAnnot in f: error_json("This dataset is not present in the Loom file", oJSON) m = f[iAnnot][:,:] if n_neighbors > m.shape[0]: # very small datasets [0] = nb cells n_neighbors = 1 + int(0.5*m.shape[0]) warnings.append(f'n_obs too small: adjusting to `n_neighbors = {n_neighbors}`') graphAnnot = "connectivities_{}nn_graph".format(n_neighbors) # For storing the knn graph if "/col_graphs/" + graphAnnot in f: is_nn_computed = True f.close() # Close the Loom file (reading mode) if is_nn_computed: # If already computed/stored in .loom print('Loading ', n_neighbors,'-nearest-neighbor graph from .loom file...') ds = loompy.connect(loom_file) _connectivities = ds.col_graphs[graphAnnot].tocsr() # Need Compressed Sparse Row format, default is COO ds.close() else: # If never computed/stored in .loom for this k # Neighbor search (approx nearest neighbors) print('Computing approx.', n_neighbors,'-nearest-neighbor graph...') if m.shape[0] < 4096: # Nb cells m = pairwise_distances(m, metric=metric) metric = 'precomputed' knn_indices, knn_distances, forest = nearest_neighbors(m, n_neighbors, metric, metric_kwds = {}, angular = False, random_state = random_state) _distances, _connectivities = _compute_connectivities_umap(knn_indices, knn_distances, m.shape[0], n_neighbors) # Note: knn_indices is equal to the the graph computed in R print('Saving graph in .loom file...') f = h5py.File(loom_file, 'r+') # Just for obtaining the lock? Not sure loompy handle this f.close() ds = loompy.connect(loom_file) # I use loompy for saving/loading the graph, coz it is much easier ds.col_graphs[graphAnnot] = _connectivities ds.close() print('Now running clustering method on the computed graph...') if method == 'louvain': clusters = louvain(_connectivities) elif method == 'leiden': clusters = leiden(_connectivities) else: error_json("This method does not exist. 
Use one in [louvain, leiden].", oJSON) # Writing clustering in .loom file f = open_with_lock(loom_file, 'r+') if oAnnot in f: del f[oAnnot] # Delete output dataset if exists f.create_dataset(oAnnot, data=clusters, chunks=(min(clusters.shape[0], 64)), compression="gzip", compression_opts=2, dtype='i8') f.close() # Preparing JSON with results clusts, counts = np.unique(clusters, return_counts=True) data_json = {} data_json['time_idle'] = time_idle if len(warnings) != 0: data_json['warnings'] = warnings data_json['nber_clusters'] = clusts.shape[0] data_json['metadata'] = [{ 'name': oAnnot, 'on': 'CELL', 'type': 'DISCRETE', 'nber_cols' : clusters.shape[0], 'nber_rows' : 1, 'categories' : {str(a):int(b) for a,b in zip(clusts, counts)} }] # Write output.json file with open(oJSON, 'w') as outfile: json.dump(data_json, outfile)
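A sketch of the graph-clustering step on a tiny block-structured adjacency matrix, mirroring get_igraph_from_adjacency and louvain above; it assumes python-igraph and the louvain package are installed, as the script itself requires.

import numpy as np
from scipy.sparse import csr_matrix
import igraph as ig
import louvain

# Two obvious communities: cells 0-2 and cells 3-5.
adj = np.zeros((6, 6))
adj[:3, :3] = 1.0
adj[3:, 3:] = 1.0
np.fill_diagonal(adj, 0.0)
adjacency = csr_matrix(adj)

# Build a weighted igraph from the sparse adjacency, as the script does.
sources, targets = adjacency.nonzero()
g = ig.Graph(directed=True)
g.add_vertices(adjacency.shape[0])
g.add_edges(list(zip(sources.tolist(), targets.tolist())))
g.es['weight'] = adjacency[sources, targets].A1

part = louvain.find_partition(g, louvain.RBConfigurationVertexPartition,
                              resolution_parameter=1.0)
print(np.array(part.membership))  # expected: two groups, e.g. [0 0 0 1 1 1]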
gpl-3.0
Rhoana/rhoana
Renderer/blocking.py
1
11629
#------------------------- #3d Renderer #Daniel Miron #7/5/2013 # #------------------------- import h5py import numpy as np import sys import pickle from OpenGL.GLUT import * from OpenGL.GLU import * from OpenGL.GL import * import arcball as arc import matplotlib.pyplot as plt import cv2 class Viewer: def __init__(self, label_file, chunk_file): self.arcball = None self.label_file = h5py.File(label_file, 'r') self.ds = self.label_file["main"] self.rows = self.ds.shape[0] self.columns = self.ds.shape[1] self.layers = self.ds.shape[2] '''self.chunk_rows = self.ds.chunks[0] self.chunk_columns = self.ds.chunks[1] self.chunk_layers = self.ds.chunks[2]''' self.chunk_rows = 64 self.chunk_columns = 64 self.chunk_layers = 16 self.chunk_map = self.read_chunk_map(chunk_file) self.keys = 0 self.rotation_x = 0 self.rotation_y = 0 self.win_h = 0 self.win_w = 0 self.contours = [] self.left = None self.slice = None self.pick_location = (0,0,0) self.picking_file = open(r"C:\Users\DanielMiron\Documents\3d_rendering\picking.txt", "w") def create_arcball(self): arcball = arc.Arcball() #locate the arcball center at center of window with radius half the width arcball.place([self.win_w/2, self.win_h/2], self.win_w/2) return arcball def make_display_list(self): '''Creates a display list to draw a box and the data scaled to .9*the size of the window''' self.display_list = glGenLists(1) glNewList(self.display_list, GL_COMPILE) glMatrixMode(GL_MODELVIEW) glPushMatrix() glTranslatef(-.9, .9, .9) glScalef(1.8/self.columns, -1.8/self.rows, -1.8/self.layers) #draw the layers for cnt in self.contours: gluTessBeginPolygon(self.tesselator, None) gluTessBeginContour(self.tesselator) for vtx in cnt: gluTessVertex(self.tesselator, vtx, vtx) gluTessEndContour(self.tesselator) gluTessEndPolygon(self.tesselator) #make a box around the image self.axes() glBegin(GL_LINES) glColor3f(1.0, 0, 0) #x in red for line in self.x_axis: glVertex3f(line[0][0], line[0][1], line[0][2]) glVertex3f(line[1][0], line[1][1], line[1][2]) glColor3f(0,1.0, 0) #y in green for line in self.y_axis: glVertex3f(line[0][0], line[0][1], line[0][2]) glVertex3f(line[1][0], line[1][1], line[1][2]) glColor3f(0,0,1.0) #z in blue for line in self.z_axis: glVertex3f(line[0][0], line[0][1], line[0][2]) glVertex3f(line[1][0], line[1][1], line[1][2]) glEnd() #make a back panel for easy orientation glColor3f(.5, .5, .5) glBegin(GL_POLYGON) glVertex3f(*self.x_axis[2][0]) glVertex3f(*self.x_axis[2][1]) glVertex3f(*self.x_axis[3][1]) glVertex3f(*self.x_axis[3][0]) glEnd() glPopMatrix() glEndList() def vertex_callback(self, vertex): '''sets the color of a single vertex and draws it''' #scale by dim-1 to include black #multiply by -1 and add 1 to invert color axis glColor3f(1.0*vertex[0]/(self.columns-1), -1.0*vertex[1]/(self.rows-1)+1.0, -1.0*vertex[2]/(self.layers-1)+1.0) glVertex3f(*vertex) def draw(self): '''draws an image''' glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) glLoadIdentity() gluLookAt(0, 0, 3, 0, 0, 2, 0,1,0) glMultMatrixd(self.arcball.matrix().T) self.draw_marker() glCallList(self.display_list) glutSwapBuffers() return def draw_marker(self): glMatrixMode(GL_MODELVIEW) glPushMatrix() location = self.pick_location glTranslatef(float(1.8*location[0])/self.columns-.9, -(float(1.8*location[1])/self.rows-.9), -(float(1.8*location[2])/self.layers-.9)) glScalef(1.8/self.layers, 1.8/self.layers, 1.8/self.layers) location = self.pick_location glColor3f(1-(1.0*location[0]/(self.columns-1)), 1-(-1.0*location[1]/(self.rows-1)+1.0), 
1-(-1.0*location[2]/(self.layers-1)+1.0)) glutSolidSphere(10, 50, 50) glPopMatrix() def keyboard(self, key, x, y): return def on_click(self, button, state, x, y): #Left click for arcball rotation if (button == GLUT_LEFT_BUTTON and state == GLUT_DOWN): self.left = True #turn on dragging rotation self.arcball.down((x,y)) #right click to select a pixel location elif (button == GLUT_RIGHT_BUTTON and state == GLUT_DOWN): self.left = False #turn off dragging rotation self.pick_location = self.pick(x,y) self.has_marker = True self.slice = self.show_slice(self.pick_location).astype(np.uint8) def show_slice(self, location): '''displays a single selected z slice in 2-d''' full_layer = self.ds[:, :, location[2]][...] layer =np.zeros(np.shape(full_layer)) max_key = 0 #used to scale colors to 256 colors for key in self.keys: if key > max_key: max_key = key layer[full_layer == key] = key layer = 255*layer/key plt.imshow(layer) return layer def draw_slice(self): '''draws a single z slice''' if self.slice != None: width = float(np.shape(self.slice)[1]) height = float(np.shape(self.slice)[0]) glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) #scale window to size of slice glPixelZoom(self.win_w/width, self.win_h/height) glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, self.slice) glutSwapBuffers() def pick(self, x,y): '''gets the (x,y,z) location in the full volume of a chosen pixel''' click_color = None click_color = glReadPixels(x,self.win_h-y, 1,1, GL_RGB, GL_FLOAT)[0][0] location = [int(click_color[0]*(self.columns-1)), int(-(click_color[1]-1)*(self.rows-1)), int(-(click_color[2]-1)*((self.layers-1)))] return location def on_drag(self, x, y): if self.left: self.arcball.drag((x,y)) self.draw() def main(self, window_height, window_width, keys, contour_file): self.keys = keys self.contours = self.load_contours(contour_file) self.win_h = window_height self.win_w = window_width self.arcball = self.create_arcball() glutInit(sys.argv) glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH) glutInitWindowSize(self.win_w, self.win_h) #width, height glutCreateWindow("Nerve Cord") glMatrixMode(GL_PROJECTION) glLoadIdentity() gluPerspective(65, 1, 1, 10) glMatrixMode(GL_MODELVIEW) self.tesselator = gluNewTess() gluTessCallback(self.tesselator, GLU_TESS_BEGIN, glBegin) gluTessCallback(self.tesselator, GLU_TESS_END, glEnd) gluTessCallback(self.tesselator, GLU_TESS_VERTEX, self.vertex_callback) glEnable(GL_DEPTH_TEST) self.make_display_list() glutDisplayFunc(self.draw) glutKeyboardFunc(self.keyboard) glutMouseFunc(self.on_click) glutMotionFunc(self.on_drag) glutCreateWindow("single layer") glutDisplayFunc(self.draw_slice) glutMainLoop() return def get_contours(self, keys, contour_file): chunk_list = self.organize_chunks(keys) self.keys = keys for chunk in chunk_list: for layer in reversed(range(self.chunk_layers)): for key in keys: if not layer+chunk[2]>= self.layers: #make sure we stay within bounds labels = self.ds[chunk[0]:chunk[0]+self.chunk_rows, chunk[1]:chunk[1]+self.chunk_columns, chunk[2]+layer][...] 
labels[labels!=key] = 0 labels[labels==key] = 255 labels = labels.astype(np.uint8) buffer_array = np.zeros((np.shape(labels)[0] +2, np.shape(labels)[1]+2), np.uint8) #buffer by one pixel on each side buffer_array[1:-1, 1:-1] = labels contours, hierarchy = cv2.findContours(buffer_array, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) if not contours==[]: contours_3d = [] for cnt in contours: cnt_3d = [] for vtx in cnt: cnt_3d += [[vtx[0][0]-1+chunk[1],vtx[0][1]-1+chunk[0], layer+chunk[2]]] #subtract 1 to adjust back after buffer contours_3d += [cnt_3d] self.contours +=contours_3d self.save_contours(contour_file) def organize_chunks(self, keys): chunk_list = [] for key in keys: chunk_list += self.chunk_map[key] chunk_list.sort(key=lambda x: x[2]) #sort w/respect to z chunk_list.reverse() #make back to front return chunk_list def read_chunk_map(self, chunk_file): return pickle.load(open(chunk_file, "rb")) def save_contours(self, contour_file): pickle.dump(self.contours, open(contour_file, "wb")) return def load_contours(self, contour_file): return pickle.load(open(contour_file, "rb")) def axes(self): self.x_axis = [[[0,0,0], [self.columns, 0,0]], [[0,self.rows,0], [self.columns, self.rows, 0]], [[0,0,self.layers], [self.columns,0,self.layers]], [[0, self.rows, self.layers], [self.columns, self.rows, self.layers]]] self.y_axis = [[[0,0,0], [0, self.rows,0]], [[self.columns,0,0],[self.columns, self.rows, 0]], [[0,0,self.layers], [0,self.rows, self.layers]], [[self.columns, 0, self.layers],[self.columns, self.rows, self.layers]]] self.z_axis = [[[0,0,0], [0,0,self.layers]], [[self.columns,0,0],[self.columns, 0, self.layers]], [[0, self.rows,0], [0, self.rows, self.layers]],[[self.columns, self.rows, 0],[self.columns, self.rows, self.layers]]] '''for lines in [self.x_axis, self.y_axis, self.z_axis]: for line in lines: for vtx in line: vtx[0] = 1.8*(float(vtx[0])/float(self.columns)-0.5) vtx[1] = -1.8*(float(vtx[1])/float(self.rows)-0.5) vtx[2] = -1.8*(float(vtx[2])/float(self.layers)-0.5)''' viewer = Viewer(r'C:\Users\DanielMiron\Documents\3d_rendering\labels_full.h5', r'C:\Users\DanielMiron\Documents\3d_rendering\label_full_chunk_map.p') viewer.main(1000,1000, [6642,4627], r'C:\Users\DanielMiron\Documents\3d_rendering\contours_full.p')
mit
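The blocking.py renderer above extracts per-label outlines by turning each chunk slice into a binary mask, padding it by one pixel, calling cv2.findContours, and shifting the resulting vertices back into volume coordinates. A minimal standalone sketch of that mask-pad-contour step, assuming OpenCV 4.x (where findContours returns two values) and a toy 2-D label array standing in for an HDF5 chunk slice:

    import numpy as np
    import cv2

    # Toy stand-in for one z-slice of a chunk read from the labels HDF5 file.
    labels = np.zeros((64, 64), dtype=np.uint32)
    labels[10:30, 10:30] = 6642  # one labelled object

    key = 6642
    mask = np.where(labels == key, 255, 0).astype(np.uint8)

    # Pad by one pixel on every side so objects touching the chunk border still
    # close properly, mirroring the buffer_array used in get_contours above.
    padded = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
    padded[1:-1, 1:-1] = mask

    contours, hierarchy = cv2.findContours(padded, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)

    # Undo the one-pixel pad; in the renderer the chunk offset is also added here.
    contours_2d = [[(int(v[0][0]) - 1, int(v[0][1]) - 1) for v in cnt]
                   for cnt in contours]
    print(len(contours_2d), "contour(s), first has", len(contours_2d[0]), "vertices")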
jjx02230808/project0223
sklearn/manifold/isomap.py
229
7169
"""Isomap for manifold learning""" # Author: Jake Vanderplas -- <vanderplas@astro.washington.edu> # License: BSD 3 clause (C) 2011 import numpy as np from ..base import BaseEstimator, TransformerMixin from ..neighbors import NearestNeighbors, kneighbors_graph from ..utils import check_array from ..utils.graph import graph_shortest_path from ..decomposition import KernelPCA from ..preprocessing import KernelCenterer class Isomap(BaseEstimator, TransformerMixin): """Isomap Embedding Non-linear dimensionality reduction through Isometric Mapping Read more in the :ref:`User Guide <isomap>`. Parameters ---------- n_neighbors : integer number of neighbors to consider for each point. n_components : integer number of coordinates for the manifold eigen_solver : ['auto'|'arpack'|'dense'] 'auto' : Attempt to choose the most efficient solver for the given problem. 'arpack' : Use Arnoldi decomposition to find the eigenvalues and eigenvectors. 'dense' : Use a direct solver (i.e. LAPACK) for the eigenvalue decomposition. tol : float Convergence tolerance passed to arpack or lobpcg. not used if eigen_solver == 'dense'. max_iter : integer Maximum number of iterations for the arpack solver. not used if eigen_solver == 'dense'. path_method : string ['auto'|'FW'|'D'] Method to use in finding shortest path. 'auto' : attempt to choose the best algorithm automatically. 'FW' : Floyd-Warshall algorithm. 'D' : Dijkstra's algorithm. neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree'] Algorithm to use for nearest neighbors search, passed to neighbors.NearestNeighbors instance. Attributes ---------- embedding_ : array-like, shape (n_samples, n_components) Stores the embedding vectors. kernel_pca_ : object `KernelPCA` object used to implement the embedding. training_data_ : array-like, shape (n_samples, n_features) Stores the training data. nbrs_ : sklearn.neighbors.NearestNeighbors instance Stores nearest neighbors instance, including BallTree or KDtree if applicable. dist_matrix_ : array-like, shape (n_samples, n_samples) Stores the geodesic distance matrix of training data. References ---------- .. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric framework for nonlinear dimensionality reduction. Science 290 (5500) """ def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto', tol=0, max_iter=None, path_method='auto', neighbors_algorithm='auto'): self.n_neighbors = n_neighbors self.n_components = n_components self.eigen_solver = eigen_solver self.tol = tol self.max_iter = max_iter self.path_method = path_method self.neighbors_algorithm = neighbors_algorithm self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors, algorithm=neighbors_algorithm) def _fit_transform(self, X): X = check_array(X) self.nbrs_.fit(X) self.training_data_ = self.nbrs_._fit_X self.kernel_pca_ = KernelPCA(n_components=self.n_components, kernel="precomputed", eigen_solver=self.eigen_solver, tol=self.tol, max_iter=self.max_iter) kng = kneighbors_graph(self.nbrs_, self.n_neighbors, mode='distance') self.dist_matrix_ = graph_shortest_path(kng, method=self.path_method, directed=False) G = self.dist_matrix_ ** 2 G *= -0.5 self.embedding_ = self.kernel_pca_.fit_transform(G) def reconstruction_error(self): """Compute the reconstruction error for the embedding. 
Returns ------- reconstruction_error : float Notes ------- The cost function of an isomap embedding is ``E = frobenius_norm[K(D) - K(D_fit)] / n_samples`` Where D is the matrix of distances for the input data X, D_fit is the matrix of distances for the output embedding X_fit, and K is the isomap kernel: ``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)`` """ G = -0.5 * self.dist_matrix_ ** 2 G_center = KernelCenterer().fit_transform(G) evals = self.kernel_pca_.lambdas_ return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0] def fit(self, X, y=None): """Compute the embedding vectors for data X Parameters ---------- X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors} Sample data, shape = (n_samples, n_features), in the form of a numpy array, precomputed tree, or NearestNeighbors object. Returns ------- self : returns an instance of self. """ self._fit_transform(X) return self def fit_transform(self, X, y=None): """Fit the model from data in X and transform X. Parameters ---------- X: {array-like, sparse matrix, BallTree, KDTree} Training vector, where n_samples in the number of samples and n_features is the number of features. Returns ------- X_new: array-like, shape (n_samples, n_components) """ self._fit_transform(X) return self.embedding_ def transform(self, X): """Transform X. This is implemented by linking the points X into the graph of geodesic distances of the training data. First the `n_neighbors` nearest neighbors of X are found in the training data, and from these the shortest geodesic distances from each point in X to each point in the training data are computed in order to construct the kernel. The embedding of X is the projection of this kernel onto the embedding vectors of the training set. Parameters ---------- X: array-like, shape (n_samples, n_features) Returns ------- X_new: array-like, shape (n_samples, n_components) """ X = check_array(X) distances, indices = self.nbrs_.kneighbors(X, return_distance=True) #Create the graph of shortest distances from X to self.training_data_ # via the nearest neighbors of X. #This can be done as a single array operation, but it potentially # takes a lot of memory. To avoid that, use a loop: G_X = np.zeros((X.shape[0], self.training_data_.shape[0])) for i in range(X.shape[0]): G_X[i] = np.min((self.dist_matrix_[indices[i]] + distances[i][:, None]), 0) G_X **= 2 G_X *= -0.5 return self.kernel_pca_.transform(G_X)
bsd-3-clause
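The isomap.py module above explains that transform() embeds unseen samples by linking them through their n_neighbors nearest neighbours into the geodesic-distance graph of the training data. A minimal usage sketch of the estimator as documented, assuming a scikit-learn installation that exposes it as sklearn.manifold.Isomap:

    from sklearn.datasets import make_swiss_roll
    from sklearn.manifold import Isomap

    # A classic non-linear manifold: a 3-D swiss roll flattened to 2 components.
    X, _ = make_swiss_roll(n_samples=1000, random_state=0)

    iso = Isomap(n_neighbors=10, n_components=2)
    X_embedded = iso.fit_transform(X)
    print(X_embedded.shape)            # (1000, 2)
    print(iso.reconstruction_error())  # Frobenius-norm cost described in the docstring

    # New points are projected via their nearest neighbours in the training set,
    # without recomputing the shortest-path matrix.
    X_new = iso.transform(X[:5])
    print(X_new.shape)                 # (5, 2)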
smartscheduling/scikit-learn-categorical-tree
examples/calibration/plot_calibration_curve.py
225
5903
""" ============================== Probability Calibration curves ============================== When performing classification one often wants to predict not only the class label, but also the associated probability. This probability gives some kind of confidence on the prediction. This example demonstrates how to display how well calibrated the predicted probabilities are and how to calibrate an uncalibrated classifier. The experiment is performed on an artificial dataset for binary classification with 100.000 samples (1.000 of them are used for model fitting) with 20 features. Of the 20 features, only 2 are informative and 10 are redundant. The first figure shows the estimated probabilities obtained with logistic regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic calibration and sigmoid calibration. The calibration performance is evaluated with Brier score, reported in the legend (the smaller the better). One can observe here that logistic regression is well calibrated while raw Gaussian naive Bayes performs very badly. This is because of the redundant features which violate the assumption of feature-independence and result in an overly confident classifier, which is indicated by the typical transposed-sigmoid curve. Calibration of the probabilities of Gaussian naive Bayes with isotonic regression can fix this issue as can be seen from the nearly diagonal calibration curve. Sigmoid calibration also improves the brier score slightly, albeit not as strongly as the non-parametric isotonic regression. This can be attributed to the fact that we have plenty of calibration data such that the greater flexibility of the non-parametric model can be exploited. The second figure shows the calibration curve of a linear support-vector classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian naive Bayes: the calibration curve has a sigmoid curve, which is typical for an under-confident classifier. In the case of LinearSVC, this is caused by the margin property of the hinge loss, which lets the model focus on hard samples that are close to the decision boundary (the support vectors). Both kinds of calibration can fix this issue and yield nearly identical results. This shows that sigmoid calibration can deal with situations where the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC) but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes). """ print(__doc__) # Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # License: BSD Style. import matplotlib.pyplot as plt from sklearn import datasets from sklearn.naive_bayes import GaussianNB from sklearn.svm import LinearSVC from sklearn.linear_model import LogisticRegression from sklearn.metrics import (brier_score_loss, precision_score, recall_score, f1_score) from sklearn.calibration import CalibratedClassifierCV, calibration_curve from sklearn.cross_validation import train_test_split # Create dataset of classification task with many redundant and few # informative features X, y = datasets.make_classification(n_samples=100000, n_features=20, n_informative=2, n_redundant=10, random_state=42) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99, random_state=42) def plot_calibration_curve(est, name, fig_index): """Plot calibration curve for est w/o and with calibration. 
""" # Calibrated with isotonic calibration isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic') # Calibrated with sigmoid calibration sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid') # Logistic regression with no calibration as baseline lr = LogisticRegression(C=1., solver='lbfgs') fig = plt.figure(fig_index, figsize=(10, 10)) ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2) ax2 = plt.subplot2grid((3, 1), (2, 0)) ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated") for clf, name in [(lr, 'Logistic'), (est, name), (isotonic, name + ' + Isotonic'), (sigmoid, name + ' + Sigmoid')]: clf.fit(X_train, y_train) y_pred = clf.predict(X_test) if hasattr(clf, "predict_proba"): prob_pos = clf.predict_proba(X_test)[:, 1] else: # use decision function prob_pos = clf.decision_function(X_test) prob_pos = \ (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min()) clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max()) print("%s:" % name) print("\tBrier: %1.3f" % (clf_score)) print("\tPrecision: %1.3f" % precision_score(y_test, y_pred)) print("\tRecall: %1.3f" % recall_score(y_test, y_pred)) print("\tF1: %1.3f\n" % f1_score(y_test, y_pred)) fraction_of_positives, mean_predicted_value = \ calibration_curve(y_test, prob_pos, n_bins=10) ax1.plot(mean_predicted_value, fraction_of_positives, "s-", label="%s (%1.3f)" % (name, clf_score)) ax2.hist(prob_pos, range=(0, 1), bins=10, label=name, histtype="step", lw=2) ax1.set_ylabel("Fraction of positives") ax1.set_ylim([-0.05, 1.05]) ax1.legend(loc="lower right") ax1.set_title('Calibration plots (reliability curve)') ax2.set_xlabel("Mean predicted value") ax2.set_ylabel("Count") ax2.legend(loc="upper center", ncol=2) plt.tight_layout() # Plot calibration cuve for Gaussian Naive Bayes plot_calibration_curve(GaussianNB(), "Naive Bayes", 1) # Plot calibration cuve for Linear SVC plot_calibration_curve(LinearSVC(), "SVC", 2) plt.show()
bsd-3-clause
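The calibration example above relies on calibration_curve to compare predicted probabilities with observed frequencies and on brier_score_loss to score them. A condensed sketch of that evaluation loop for Gaussian naive Bayes with and without isotonic calibration; it assumes a current scikit-learn, so train_test_split comes from sklearn.model_selection rather than the older sklearn.cross_validation import used in the file:

    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split
    from sklearn.naive_bayes import GaussianNB
    from sklearn.calibration import CalibratedClassifierCV, calibration_curve
    from sklearn.metrics import brier_score_loss

    X, y = make_classification(n_samples=5000, n_features=20, n_informative=2,
                               n_redundant=10, random_state=42)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
                                                        random_state=42)

    raw = GaussianNB().fit(X_train, y_train)
    iso = CalibratedClassifierCV(GaussianNB(), cv=3,
                                 method='isotonic').fit(X_train, y_train)

    for name, clf in [('raw NB', raw), ('isotonic NB', iso)]:
        prob_pos = clf.predict_proba(X_test)[:, 1]
        frac_pos, mean_pred = calibration_curve(y_test, prob_pos, n_bins=10)
        # A well-calibrated model keeps frac_pos close to mean_pred in every bin.
        print(name, 'Brier: %.3f' % brier_score_loss(y_test, prob_pos))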
ephes/scikit-learn
examples/model_selection/grid_search_text_feature_extraction.py
253
4158
""" ========================================================== Sample pipeline for text feature extraction and evaluation ========================================================== The dataset used in this example is the 20 newsgroups dataset which will be automatically downloaded and then cached and reused for the document classification example. You can adjust the number of categories by giving their names to the dataset loader or setting them to None to get the 20 of them. Here is a sample output of a run on a quad-core machine:: Loading 20 newsgroups dataset for categories: ['alt.atheism', 'talk.religion.misc'] 1427 documents 2 categories Performing grid search... pipeline: ['vect', 'tfidf', 'clf'] parameters: {'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07), 'clf__n_iter': (10, 50, 80), 'clf__penalty': ('l2', 'elasticnet'), 'tfidf__use_idf': (True, False), 'vect__max_n': (1, 2), 'vect__max_df': (0.5, 0.75, 1.0), 'vect__max_features': (None, 5000, 10000, 50000)} done in 1737.030s Best score: 0.940 Best parameters set: clf__alpha: 9.9999999999999995e-07 clf__n_iter: 50 clf__penalty: 'elasticnet' tfidf__use_idf: True vect__max_n: 2 vect__max_df: 0.75 vect__max_features: 50000 """ # Author: Olivier Grisel <olivier.grisel@ensta.org> # Peter Prettenhofer <peter.prettenhofer@gmail.com> # Mathieu Blondel <mathieu@mblondel.org> # License: BSD 3 clause from __future__ import print_function from pprint import pprint from time import time import logging from sklearn.datasets import fetch_20newsgroups from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.linear_model import SGDClassifier from sklearn.grid_search import GridSearchCV from sklearn.pipeline import Pipeline print(__doc__) # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s') ############################################################################### # Load some categories from the training set categories = [ 'alt.atheism', 'talk.religion.misc', ] # Uncomment the following to do the analysis on all the categories #categories = None print("Loading 20 newsgroups dataset for categories:") print(categories) data = fetch_20newsgroups(subset='train', categories=categories) print("%d documents" % len(data.filenames)) print("%d categories" % len(data.target_names)) print() ############################################################################### # define a pipeline combining a text feature extractor with a simple # classifier pipeline = Pipeline([ ('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', SGDClassifier()), ]) # uncommenting more parameters will give better exploring power but will # increase processing time in a combinatorial way parameters = { 'vect__max_df': (0.5, 0.75, 1.0), #'vect__max_features': (None, 5000, 10000, 50000), 'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams #'tfidf__use_idf': (True, False), #'tfidf__norm': ('l1', 'l2'), 'clf__alpha': (0.00001, 0.000001), 'clf__penalty': ('l2', 'elasticnet'), #'clf__n_iter': (10, 50, 80), } if __name__ == "__main__": # multiprocessing requires the fork to happen in a __main__ protected # block # find the best parameters for both the feature extraction and the # classifier grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1) print("Performing grid search...") print("pipeline:", [name for name, _ in pipeline.steps]) print("parameters:") pprint(parameters) t0 = time() 
grid_search.fit(data.data, data.target) print("done in %0.3fs" % (time() - t0)) print() print("Best score: %0.3f" % grid_search.best_score_) print("Best parameters set:") best_parameters = grid_search.best_estimator_.get_params() for param_name in sorted(parameters.keys()): print("\t%s: %r" % (param_name, best_parameters[param_name]))
bsd-3-clause
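The grid-search example above builds its parameter grid with the step-name__parameter-name convention so that GridSearchCV can reach inside the Pipeline. A shorter sketch of the same idea, hedged to current scikit-learn releases where GridSearchCV lives in sklearn.model_selection instead of the deprecated sklearn.grid_search module imported in the file:

    from pprint import pprint
    from sklearn.datasets import fetch_20newsgroups
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.linear_model import SGDClassifier
    from sklearn.model_selection import GridSearchCV
    from sklearn.pipeline import Pipeline

    data = fetch_20newsgroups(subset='train',
                              categories=['alt.atheism', 'talk.religion.misc'])

    pipeline = Pipeline([('tfidf', TfidfVectorizer()),
                         ('clf', SGDClassifier(max_iter=20))])

    # '<step>__<param>' addresses a parameter of a named pipeline step.
    param_grid = {
        'tfidf__ngram_range': [(1, 1), (1, 2)],
        'clf__alpha': [1e-4, 1e-5],
    }

    if __name__ == '__main__':   # keep multiprocessing happy, as noted above
        search = GridSearchCV(pipeline, param_grid, n_jobs=-1, verbose=1)
        search.fit(data.data, data.target)
        print('best score: %.3f' % search.best_score_)
        pprint(search.best_params_)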
cbertinato/pandas
pandas/tests/test_take.py
1
16636
from datetime import datetime import re import numpy as np import pytest from pandas._libs.tslib import iNaT import pandas.core.algorithms as algos import pandas.util.testing as tm @pytest.fixture(params=[True, False]) def writeable(request): return request.param # Check that take_nd works both with writeable arrays # (in which case fast typed memory-views implementation) # and read-only arrays alike. @pytest.fixture(params=[ (np.float64, True), (np.float32, True), (np.uint64, False), (np.uint32, False), (np.uint16, False), (np.uint8, False), (np.int64, False), (np.int32, False), (np.int16, False), (np.int8, False), (np.object_, True), (np.bool, False), ]) def dtype_can_hold_na(request): return request.param @pytest.fixture(params=[ (np.int8, np.int16(127), np.int8), (np.int8, np.int16(128), np.int16), (np.int32, 1, np.int32), (np.int32, 2.0, np.float64), (np.int32, 3.0 + 4.0j, np.complex128), (np.int32, True, np.object_), (np.int32, "", np.object_), (np.float64, 1, np.float64), (np.float64, 2.0, np.float64), (np.float64, 3.0 + 4.0j, np.complex128), (np.float64, True, np.object_), (np.float64, "", np.object_), (np.complex128, 1, np.complex128), (np.complex128, 2.0, np.complex128), (np.complex128, 3.0 + 4.0j, np.complex128), (np.complex128, True, np.object_), (np.complex128, "", np.object_), (np.bool_, 1, np.object_), (np.bool_, 2.0, np.object_), (np.bool_, 3.0 + 4.0j, np.object_), (np.bool_, True, np.bool_), (np.bool_, '', np.object_), ]) def dtype_fill_out_dtype(request): return request.param class TestTake: # Standard incompatible fill error. fill_error = re.compile("Incompatible type for fill_value") def test_1d_with_out(self, dtype_can_hold_na, writeable): dtype, can_hold_na = dtype_can_hold_na data = np.random.randint(0, 2, 4).astype(dtype) data.flags.writeable = writeable indexer = [2, 1, 0, 1] out = np.empty(4, dtype=dtype) algos.take_1d(data, indexer, out=out) expected = data.take(indexer) tm.assert_almost_equal(out, expected) indexer = [2, 1, 0, -1] out = np.empty(4, dtype=dtype) if can_hold_na: algos.take_1d(data, indexer, out=out) expected = data.take(indexer) expected[3] = np.nan tm.assert_almost_equal(out, expected) else: with pytest.raises(TypeError, match=self.fill_error): algos.take_1d(data, indexer, out=out) # No Exception otherwise. 
data.take(indexer, out=out) def test_1d_fill_nonna(self, dtype_fill_out_dtype): dtype, fill_value, out_dtype = dtype_fill_out_dtype data = np.random.randint(0, 2, 4).astype(dtype) indexer = [2, 1, 0, -1] result = algos.take_1d(data, indexer, fill_value=fill_value) assert ((result[[0, 1, 2]] == data[[2, 1, 0]]).all()) assert (result[3] == fill_value) assert (result.dtype == out_dtype) indexer = [2, 1, 0, 1] result = algos.take_1d(data, indexer, fill_value=fill_value) assert ((result[[0, 1, 2, 3]] == data[indexer]).all()) assert (result.dtype == dtype) def test_2d_with_out(self, dtype_can_hold_na, writeable): dtype, can_hold_na = dtype_can_hold_na data = np.random.randint(0, 2, (5, 3)).astype(dtype) data.flags.writeable = writeable indexer = [2, 1, 0, 1] out0 = np.empty((4, 3), dtype=dtype) out1 = np.empty((5, 4), dtype=dtype) algos.take_nd(data, indexer, out=out0, axis=0) algos.take_nd(data, indexer, out=out1, axis=1) expected0 = data.take(indexer, axis=0) expected1 = data.take(indexer, axis=1) tm.assert_almost_equal(out0, expected0) tm.assert_almost_equal(out1, expected1) indexer = [2, 1, 0, -1] out0 = np.empty((4, 3), dtype=dtype) out1 = np.empty((5, 4), dtype=dtype) if can_hold_na: algos.take_nd(data, indexer, out=out0, axis=0) algos.take_nd(data, indexer, out=out1, axis=1) expected0 = data.take(indexer, axis=0) expected1 = data.take(indexer, axis=1) expected0[3, :] = np.nan expected1[:, 3] = np.nan tm.assert_almost_equal(out0, expected0) tm.assert_almost_equal(out1, expected1) else: for i, out in enumerate([out0, out1]): with pytest.raises(TypeError, match=self.fill_error): algos.take_nd(data, indexer, out=out, axis=i) # No Exception otherwise. data.take(indexer, out=out, axis=i) def test_2d_fill_nonna(self, dtype_fill_out_dtype): dtype, fill_value, out_dtype = dtype_fill_out_dtype data = np.random.randint(0, 2, (5, 3)).astype(dtype) indexer = [2, 1, 0, -1] result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value) assert ((result[[0, 1, 2], :] == data[[2, 1, 0], :]).all()) assert ((result[3, :] == fill_value).all()) assert (result.dtype == out_dtype) result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value) assert ((result[:, [0, 1, 2]] == data[:, [2, 1, 0]]).all()) assert ((result[:, 3] == fill_value).all()) assert (result.dtype == out_dtype) indexer = [2, 1, 0, 1] result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value) assert ((result[[0, 1, 2, 3], :] == data[indexer, :]).all()) assert (result.dtype == dtype) result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value) assert ((result[:, [0, 1, 2, 3]] == data[:, indexer]).all()) assert (result.dtype == dtype) def test_3d_with_out(self, dtype_can_hold_na): dtype, can_hold_na = dtype_can_hold_na data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype) indexer = [2, 1, 0, 1] out0 = np.empty((4, 4, 3), dtype=dtype) out1 = np.empty((5, 4, 3), dtype=dtype) out2 = np.empty((5, 4, 4), dtype=dtype) algos.take_nd(data, indexer, out=out0, axis=0) algos.take_nd(data, indexer, out=out1, axis=1) algos.take_nd(data, indexer, out=out2, axis=2) expected0 = data.take(indexer, axis=0) expected1 = data.take(indexer, axis=1) expected2 = data.take(indexer, axis=2) tm.assert_almost_equal(out0, expected0) tm.assert_almost_equal(out1, expected1) tm.assert_almost_equal(out2, expected2) indexer = [2, 1, 0, -1] out0 = np.empty((4, 4, 3), dtype=dtype) out1 = np.empty((5, 4, 3), dtype=dtype) out2 = np.empty((5, 4, 4), dtype=dtype) if can_hold_na: algos.take_nd(data, indexer, out=out0, axis=0) algos.take_nd(data, indexer, 
out=out1, axis=1) algos.take_nd(data, indexer, out=out2, axis=2) expected0 = data.take(indexer, axis=0) expected1 = data.take(indexer, axis=1) expected2 = data.take(indexer, axis=2) expected0[3, :, :] = np.nan expected1[:, 3, :] = np.nan expected2[:, :, 3] = np.nan tm.assert_almost_equal(out0, expected0) tm.assert_almost_equal(out1, expected1) tm.assert_almost_equal(out2, expected2) else: for i, out in enumerate([out0, out1, out2]): with pytest.raises(TypeError, match=self.fill_error): algos.take_nd(data, indexer, out=out, axis=i) # No Exception otherwise. data.take(indexer, out=out, axis=i) def test_3d_fill_nonna(self, dtype_fill_out_dtype): dtype, fill_value, out_dtype = dtype_fill_out_dtype data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype) indexer = [2, 1, 0, -1] result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value) assert ((result[[0, 1, 2], :, :] == data[[2, 1, 0], :, :]).all()) assert ((result[3, :, :] == fill_value).all()) assert (result.dtype == out_dtype) result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value) assert ((result[:, [0, 1, 2], :] == data[:, [2, 1, 0], :]).all()) assert ((result[:, 3, :] == fill_value).all()) assert (result.dtype == out_dtype) result = algos.take_nd(data, indexer, axis=2, fill_value=fill_value) assert ((result[:, :, [0, 1, 2]] == data[:, :, [2, 1, 0]]).all()) assert ((result[:, :, 3] == fill_value).all()) assert (result.dtype == out_dtype) indexer = [2, 1, 0, 1] result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value) assert ((result[[0, 1, 2, 3], :, :] == data[indexer, :, :]).all()) assert (result.dtype == dtype) result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value) assert ((result[:, [0, 1, 2, 3], :] == data[:, indexer, :]).all()) assert (result.dtype == dtype) result = algos.take_nd(data, indexer, axis=2, fill_value=fill_value) assert ((result[:, :, [0, 1, 2, 3]] == data[:, :, indexer]).all()) assert (result.dtype == dtype) def test_1d_other_dtypes(self): arr = np.random.randn(10).astype(np.float32) indexer = [1, 2, 3, -1] result = algos.take_1d(arr, indexer) expected = arr.take(indexer) expected[-1] = np.nan tm.assert_almost_equal(result, expected) def test_2d_other_dtypes(self): arr = np.random.randn(10, 5).astype(np.float32) indexer = [1, 2, 3, -1] # axis=0 result = algos.take_nd(arr, indexer, axis=0) expected = arr.take(indexer, axis=0) expected[-1] = np.nan tm.assert_almost_equal(result, expected) # axis=1 result = algos.take_nd(arr, indexer, axis=1) expected = arr.take(indexer, axis=1) expected[:, -1] = np.nan tm.assert_almost_equal(result, expected) def test_1d_bool(self): arr = np.array([0, 1, 0], dtype=bool) result = algos.take_1d(arr, [0, 2, 2, 1]) expected = arr.take([0, 2, 2, 1]) tm.assert_numpy_array_equal(result, expected) result = algos.take_1d(arr, [0, 2, -1]) assert result.dtype == np.object_ def test_2d_bool(self): arr = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 1]], dtype=bool) result = algos.take_nd(arr, [0, 2, 2, 1]) expected = arr.take([0, 2, 2, 1], axis=0) tm.assert_numpy_array_equal(result, expected) result = algos.take_nd(arr, [0, 2, 2, 1], axis=1) expected = arr.take([0, 2, 2, 1], axis=1) tm.assert_numpy_array_equal(result, expected) result = algos.take_nd(arr, [0, 2, -1]) assert result.dtype == np.object_ def test_2d_float32(self): arr = np.random.randn(4, 3).astype(np.float32) indexer = [0, 2, -1, 1, -1] # axis=0 result = algos.take_nd(arr, indexer, axis=0) result2 = np.empty_like(result) algos.take_nd(arr, indexer, axis=0, out=result2) 
tm.assert_almost_equal(result, result2) expected = arr.take(indexer, axis=0) expected[[2, 4], :] = np.nan tm.assert_almost_equal(result, expected) # this now accepts a float32! # test with float64 out buffer out = np.empty((len(indexer), arr.shape[1]), dtype='float32') algos.take_nd(arr, indexer, out=out) # it works! # axis=1 result = algos.take_nd(arr, indexer, axis=1) result2 = np.empty_like(result) algos.take_nd(arr, indexer, axis=1, out=result2) tm.assert_almost_equal(result, result2) expected = arr.take(indexer, axis=1) expected[:, [2, 4]] = np.nan tm.assert_almost_equal(result, expected) def test_2d_datetime64(self): # 2005/01/01 - 2006/01/01 arr = np.random.randint(11045376, 11360736, (5, 3)) * 100000000000 arr = arr.view(dtype='datetime64[ns]') indexer = [0, 2, -1, 1, -1] # axis=0 result = algos.take_nd(arr, indexer, axis=0) result2 = np.empty_like(result) algos.take_nd(arr, indexer, axis=0, out=result2) tm.assert_almost_equal(result, result2) expected = arr.take(indexer, axis=0) expected.view(np.int64)[[2, 4], :] = iNaT tm.assert_almost_equal(result, expected) result = algos.take_nd(arr, indexer, axis=0, fill_value=datetime(2007, 1, 1)) result2 = np.empty_like(result) algos.take_nd(arr, indexer, out=result2, axis=0, fill_value=datetime(2007, 1, 1)) tm.assert_almost_equal(result, result2) expected = arr.take(indexer, axis=0) expected[[2, 4], :] = datetime(2007, 1, 1) tm.assert_almost_equal(result, expected) # axis=1 result = algos.take_nd(arr, indexer, axis=1) result2 = np.empty_like(result) algos.take_nd(arr, indexer, axis=1, out=result2) tm.assert_almost_equal(result, result2) expected = arr.take(indexer, axis=1) expected.view(np.int64)[:, [2, 4]] = iNaT tm.assert_almost_equal(result, expected) result = algos.take_nd(arr, indexer, axis=1, fill_value=datetime(2007, 1, 1)) result2 = np.empty_like(result) algos.take_nd(arr, indexer, out=result2, axis=1, fill_value=datetime(2007, 1, 1)) tm.assert_almost_equal(result, result2) expected = arr.take(indexer, axis=1) expected[:, [2, 4]] = datetime(2007, 1, 1) tm.assert_almost_equal(result, expected) def test_take_axis_0(self): arr = np.arange(12).reshape(4, 3) result = algos.take(arr, [0, -1]) expected = np.array([[0, 1, 2], [9, 10, 11]]) tm.assert_numpy_array_equal(result, expected) # allow_fill=True result = algos.take(arr, [0, -1], allow_fill=True, fill_value=0) expected = np.array([[0, 1, 2], [0, 0, 0]]) tm.assert_numpy_array_equal(result, expected) def test_take_axis_1(self): arr = np.arange(12).reshape(4, 3) result = algos.take(arr, [0, -1], axis=1) expected = np.array([[0, 2], [3, 5], [6, 8], [9, 11]]) tm.assert_numpy_array_equal(result, expected) # allow_fill=True result = algos.take(arr, [0, -1], axis=1, allow_fill=True, fill_value=0) expected = np.array([[0, 0], [3, 0], [6, 0], [9, 0]]) tm.assert_numpy_array_equal(result, expected) class TestExtensionTake: # The take method found in pd.api.extensions def test_bounds_check_large(self): arr = np.array([1, 2]) with pytest.raises(IndexError): algos.take(arr, [2, 3], allow_fill=True) with pytest.raises(IndexError): algos.take(arr, [2, 3], allow_fill=False) def test_bounds_check_small(self): arr = np.array([1, 2, 3], dtype=np.int64) indexer = [0, -1, -2] with pytest.raises(ValueError): algos.take(arr, indexer, allow_fill=True) result = algos.take(arr, indexer) expected = np.array([1, 3, 2], dtype=np.int64) tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize('allow_fill', [True, False]) def test_take_empty(self, allow_fill): arr = np.array([], dtype=np.int64) # empty 
take is ok result = algos.take(arr, [], allow_fill=allow_fill) tm.assert_numpy_array_equal(arr, result) with pytest.raises(IndexError): algos.take(arr, [0], allow_fill=allow_fill) def test_take_na_empty(self): result = algos.take(np.array([]), [-1, -1], allow_fill=True, fill_value=0.0) expected = np.array([0., 0.]) tm.assert_numpy_array_equal(result, expected) def test_take_coerces_list(self): arr = [1, 2, 3] result = algos.take(arr, [0, 0]) expected = np.array([1, 1]) tm.assert_numpy_array_equal(result, expected)
bsd-3-clause
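The TestExtensionTake cases above target the public take helper re-exported under pd.api.extensions, where a -1 index is positional (last element) by default but marks a missing slot once allow_fill=True. A small sketch of that behaviour, grounded in the expectations encoded in the tests and assuming a pandas version that exposes pandas.api.extensions.take:

    import numpy as np
    from pandas.api.extensions import take

    arr = np.array([10, 20, 30], dtype=np.int64)

    # allow_fill=False (default): -1 wraps around like NumPy's positional take.
    print(take(arr, [0, -1]))                                  # [10 30]

    # allow_fill=True: -1 is a missing slot; the default NaN fill upcasts to float64.
    print(take(arr, [0, -1], allow_fill=True))                 # [10. nan]

    # An integer fill_value keeps the int64 dtype, matching test_take_axis_0 above.
    print(take(arr, [0, -1], allow_fill=True, fill_value=0))   # [10  0]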
JRoehrig/GIRS
girs/feat/layers.py
1
85451
from __future__ import print_function from builtins import zip from builtins import str from builtins import range from past.builtins import basestring from builtins import object import os import collections import numpy as np import pandas as pd import datetime from abc import ABCMeta from osgeo import gdal, ogr, osr from girs import srs from girs.feat.geom import is_topology_2d, is_topology_0d, geometries_to_geometry_collection from future.utils import with_metaclass ogr.UseExceptions() # ogr.DontUseExceptions() class DataFrameFeature(object): """ An OGRLayer in LayersSet can be transformed into a pandas DataFrame, each row corresponding to a feature. DataFrameFeature is used as geometry field in pandas DataFrame and Series. It encapsulates the geometry and shows its name in the pandas column. DataFrameFeature has two attributes: - the feature ID of the object in this layer. - A DataFrameLayer. Each row in the Series has one and the same DataFrameLayer object """ _format_to_export = {'kml': 'ExportToKML', 'isowkt': 'ExportToIsoWkt', 'gml': 'ExportToGML', 'wkb': 'ExportToWkb', 'json': 'ExportToJson', 'isowkb': 'ExportToIsoWkb', 'wkt': 'ExportToWkt'} geometry_fieldname = '_GEOM_' def __init__(self, data_frame_layer, feature_id): """ :param data_frame_layer: a layer :type data_frame_layer: OGRLayer :param feature_id: feature ID :type feature_id: int """ self.data_frame_layer = data_frame_layer self.feature_id = feature_id def __repr__(self): return ogr.GeometryTypeToName(self.get_layer().GetGeomType()) def get_layer(self): return self.data_frame_layer.get_layer() def get_geometry(self, fmt='wkb'): """Return the geometry as string. Default is wkb format :param fmt: format: gml, isowkb, isowkt, json, kml, wkb, or wkt :type fmt: str :return: OGRGeometry :rtype: str """ feat = self.get_layer().GetFeature(self.feature_id) g = feat.GetGeometryRef() return getattr(g, DataFrameFeature._format_to_export[fmt])() def get_geometry_type(self): """Return the layer geometry type (`OGRGeometry.GetGeomType()`) :return: geometry type :rtype: int """ return self.get_layer().GetGeomType() def get_id(self): """Return the feature ID :return: feature ID :rtype: int """ return self.feature_id def apply(self, method, *args, **kwargs): """Apply the given `method` with given `args` and `kwargs` to the geometry. In the following example, ``dfg.apply('GetArea')`` returns ``geom.GetArea()``. `dfg` is an instance of `DataFrameGeometry`. 
``df['area_km2'] = df['geom'].apply(lambda dfg: dfg.apply('GetArea'))`` The OGRGeometry methods are described in - http://gdal.org/python/ and - http://www.gdal.org/classOGRGeometry.html: ``', '.join([m for m in dir(ogr.Geometry) if not m.startswith('_')])`` shows the available methods: :: AddGeometry, AddGeometryDirectly, AddPoint, AddPointM, AddPointZM, AddPoint_2D, Area, AssignSpatialReference, Boundary, Buffer, Centroid, Clone, CloseRings, Contains, ConvexHull, CoordinateDimension, Crosses, DelaunayTriangulation, Destroy, Difference, Disjoint, Distance, Distance3D, Empty, Equal, Equals, ExportToGML, ExportToIsoWkb, ExportToIsoWkt, ExportToJson, ExportToKML, ExportToWkb, ExportToWkt, FlattenTo2D, GetArea, GetBoundary, GetCoordinateDimension, GetCurveGeometry, GetDimension, GetEnvelope, GetEnvelope3D, GetGeometryCount, GetGeometryName, GetGeometryRef, GetGeometryType, GetLinearGeometry, GetM, GetPoint, GetPointCount, GetPointZM, GetPoint_2D, GetPoints, GetSpatialReference, GetX, GetY, GetZ, HasCurveGeometry, Intersect, Intersection, Intersects, Is3D, IsEmpty, IsMeasured, IsRing, IsSimple, IsValid, Length, Overlaps, PointOnSurface, Segmentize, Set3D, SetCoordinateDimension, SetMeasured, SetPoint, SetPointM, SetPointZM, SetPoint_2D, Simplify, SimplifyPreserveTopology, SymDifference, SymmetricDifference, Touches, Transform, TransformTo, Union, UnionCascaded, Value, Within, WkbSize, next, thisown :param method: an OGRGeometry method :type method: str :param args: args for the OGRGeometry method :param kwargs: kwargs for the OGRGeometry method :return: the return value of the applied method """ feat = self.get_layer().GetFeature(self.feature_id) g = feat.GetGeometryRef() result = getattr(g, method)(*args, **kwargs) del feat return result class DataFrameLayer(object): def __init__(self, lrs, layer_number): self.lrs = lrs # keep dataset alive self.layer_number = layer_number # Allocate this instance here in order to have only one reference along all geom column self.layer = self.lrs.get_layer(layer_number=self.layer_number) def get_layer(self): # Don't use self.lrs.get_layer(layer_number=self.layer_number) here. 
Doing that, each return from get_layer() # will have a different reference return self.layer def to_dtype(self): return FieldDefinition.oft2numpy(self.field_definition.oft_type) # t0 = self.field_definition.oft_type # return DataFrameOGRColumn._ogr_to_dtype_dict[t0] if t0 in DataFrameOGRColumn._ogr_to_dtype_dict else object def _get_data_frame_geometry_indices(df): """Return a list of geometry columns indices :param df: pandas DataFrame or Series :return: list of indices (int) """ try: df = df.to_frame() except AttributeError: pass return [idx for idx, field_name in enumerate(df.columns) if df[field_name].apply(lambda v: isinstance(v, DataFrameFeature)).any()] def _remove_data_frame_geometry_column(df): """Return a DataFrame with no geometry column :param df: pandas DataFrame or Series :return: pandas DataFrame """ try: df = df.to_frame() except AttributeError: pass idx = _get_data_frame_geometry_indices(df) if idx: columns = df.columns df.columns = list(range(len(columns))) columns = [c for c in columns if c not in columns[idx]] for c in sorted(idx, reverse=True): del df[c] df.columns = columns return df def _get_extension_shortname_dict(): def _insert_extensions(drv_dict, short_name, extensions=None): """ Extension is set to lower case :param drv_dict: :param short_name: :param extensions: :return: """ extensions = extensions.split() if extensions else [None] for ext in extensions: if ext: if ext.startswith('.'): ext = ext[1:] ext = ext.lower() for ext1 in [e for e in ext.split('/')]: if ext1 not in drv_dict: drv_dict[ext1] = [] drv_dict[ext].append(short_name) else: if None not in drv_dict: drv_dict[None] = [] drv_dict[None].append(short_name) gdal.AllRegister() drivers_dict = {} for i in range(ogr.GetDriverCount()): drv = ogr.GetDriver(i) metadata = drv.GetMetadata() driver_name = drv.GetName() if gdal.DMD_EXTENSION in metadata: _insert_extensions(drivers_dict, driver_name, metadata[gdal.DMD_EXTENSION]) elif gdal.DMD_EXTENSIONS in metadata: _insert_extensions(drivers_dict, driver_name, metadata[gdal.DMD_EXTENSIONS]) else: _insert_extensions(drivers_dict, driver_name) return drivers_dict class FeatDrivers(object): """ Example of driver names obtained with ``', '.join(FeatDrivers.get_driver_names())``: :: PCIDSK, netCDF, JP2OpenJPEG, JPEG2000, PDF, DB2ODBC, ESRI Shapefile, MapInfo File, UK .NTF, OGR_SDTS, S57, DGN, OGR_VRT, REC, Memory, BNA, CSV, GML, GPX, LIBKML, KML, GeoJSON, OGR_GMT, GPKG, SQLite, ODBC, WAsP, PGeo, MSSQLSpatial, PostgreSQL, OpenFileGDB, XPlane, DXF, Geoconcept, GeoRSS, GPSTrackMaker, VFK, PGDUMP, OSM, GPSBabel, SUA, OpenAir, OGR_PDS, WFS, HTF, AeronavFAA, Geomedia, EDIGEO, GFT, SVG, CouchDB, Cloudant, Idrisi, ARCGEN, SEGUKOOA, SEGY, XLS, ODS, XLSX, ElasticSearch, Walk, Carto, AmigoCloud, SXF, Selafin, JML, PLSCENES, CSW, VDV, TIGER, AVCBin, AVCE00, HTTP Unambiguous relation between file suffix and driver. 
The driver is retrieved directly from filename: :: Suffix Drivers 0 000 S57 1 bna BNA 2 csv CSV 3 dat XPlane 4 db SQLite 5 dgn DGN 6 dxf DXF 7 e00 AVCE00 8 gdb OpenFileGDB 9 geojson GeoJSON 10 gml GML 11 gmt OGR_GMT 12 gpkg GPKG 13 gpx GPX 14 gtm GPSTrackMaker 15 gtz GPSTrackMaker 16 gxt Geoconcept 17 jml JML 18 json GeoJSON 19 kmz LIBKML 20 map WAsP 21 mid MapInfo File 22 mif MapInfo File 23 nc netCDF 24 ods ODS 25 osm OSM 26 pbf OSM 27 pdf PDF 28 pix PCIDSK 29 rec REC 30 shp ESRI Shapefile 31 sql PGDUMP 32 sqlite SQLite 33 svg SVG 34 sxf SXF 35 tab MapInfo File 36 thf EDIGEO 37 topojson GeoJSON 38 vct Idrisi 39 vfk VFK 40 vrt OGR_VRT 41 x10 VDV 42 xls XLS 43 xlsx XLSX Ambiguous relation between file suffix and driver. The driver must be retrieved from filename and suffix: :: Suffix Drivers 0 jp2 JP2OpenJPEG, JPEG2000 1 kml LIBKML, KML 2 mdb PGeo, Geomedia 3 txt Geoconcept, VDV No file suffix. The driver must be retrieved from filename and suffix. In the case of Memory the filename can be empty: :: DB2ODBC, UK .NTF, OGR_SDTS, Memory, ODBC, MSSQLSpatial, PostgreSQL, GeoRSS, GPSBabel, SUA, OpenAir, OGR_PDS, WFS, HTF, AeronavFAA, GFT, CouchDB, Cloudant, ARCGEN, SEGUKOOA, SEGY, ElasticSearch, Walk, Carto, AmigoCloud, Selafin, PLSCENES, CSW, TIGER, AVCBin, HTTP """ _extension_to_driver_name = _get_extension_shortname_dict() @staticmethod def get_driver(source='', drivername=None): """Returns an object `osgeo.ogr.Driver` or `None`. Returns a Memory driver: 1- If drivername='Memory', 2- If drivername=``None`` and source=``None`` 3- If drivername=``None`` and source='' :param source: file name, data base name, or any other OGRDriver source :type source: str :param drivername: :type drivername: str :return: ``osgeo.ogr.Driver`` or ``None`` """ if drivername: drv = ogr.GetDriverByName(drivername) if source: extension0 = FeatDrivers.get_extension_by_driver_name(drivername) if extension0: extension1 = source.split('.')[-1].strip().lower() if extension0 != extension1: msg = 'Extension "{}" does not match required extension "{}"'.format(extension1, extension0) raise ValueError(msg) return drv elif source: extension = source.split('.')[-1].strip().lower() driver_names = FeatDrivers._extension_to_driver_name[extension] if len(driver_names) == 1: return ogr.GetDriverByName(driver_names[0]) else: raise ValueError('Filename extension ambiguous: {}'.format(', '.join(driver_names))) else: return ogr.GetDriverByName('Memory') @staticmethod def get_driver_names(): """Return all available ogr driver names :return: """ return [ogr.GetDriver(i).GetName() for i in range(ogr.GetDriverCount())] @staticmethod def get_extension_by_driver_name(drivername): """Return the extension of the driver given in `drivername` :param drivername: :return: """ for extension, drivernames in list(FeatDrivers._extension_to_driver_name.items()): if drivername in drivernames: return extension return None class FieldDefinition (object): oft2numpy_dict = { ogr.OFTInteger: np.int64, ogr.OFTInteger64: np.int64, ogr.OFTIntegerList: str, ogr.OFTInteger64List: str, ogr.OFTReal: np.float64, ogr.OFTRealList: str, ogr.OFTString: str, ogr.OFTStringList: str, ogr.OFTBinary: object, ogr.OFTDate: datetime.date, ogr.OFTTime: datetime.time, ogr.OFTDateTime: datetime.datetime, int: int, # FID has type long DataFrameFeature: object } numpy2oft_dict = { np.uint8: ogr.OFSTBoolean, np.uint16: ogr.OFTInteger, np.uint32: ogr.OFTInteger64, np.uint64: ogr.OFTInteger64List, np.int8: ogr.OFSTInt16, np.int16: ogr.OFSTInt16, np.int32: ogr.OFTInteger, np.int64: 
ogr.OFTInteger64, np.float64: ogr.OFTReal, np.datetime64: ogr.OFTDateTime, str: ogr.OFTString, DataFrameFeature: ogr.OFTString, list: list # np.uint128: ogr.OFTInteger64List, # np.int128: ogr.OFTInteger64List, # np.: ogr.OFTStringList, # np.: ogr.OFTBinary, # np.datetime64: ogr.OFTDate, # np.: ogr.OFTTime, # np.: ogr.OFTInteger64List, } def __init__(self, name, oft_type, width=None, precision=None, oft_subtype=None, nullable=True, default=None): """ :param name: name :type name: str :param oft_type: ogr.OFTString, ogr.OFTInteger, ogr.OFTReal, ... :type oft_type: int :param width: maximal number of characters (optional) :type width: int :param precision: number of digits after decimal point (optional) :type precision: int :param oft_subtype: ogr.OFSTBoolean, ... (optional) :type oft_subtype: str :param nullable: a NOT NULL constraint (optional) :type nullable: bool :param default: a default value (optional) """ self.name = name self.oft_type = oft_type self.width = width self.precision = precision self.oft_subtype = oft_subtype self.nullable = nullable self.default = default def __repr__(self): if not self.oft_subtype: return '{} ({})'.format(self.name, self.get_field_type_name()) else: return '{} ({})'.format(self.name, self.get_field_subtype_name()) def get_field_type_name(self): return ogr.GetFieldTypeName(self.oft_type) def get_field_subtype_name(self): return ogr.GetFieldSubTypeName(self.oft_subtype) def to_ogr(self): fd1 = ogr.FieldDefn(self.name, self.oft_type) if self.width: fd1.SetWidth(self.width) if self.precision: fd1.SetPrecision(self.precision) if self.oft_subtype: fd1.SetSubType(self.oft_subtype) if self.nullable: fd1.SetNullable(self.nullable) if self.default is not None: fd1.SetDefault(str(self.default)) return fd1 @staticmethod def from_ogr(fd): return FieldDefinition(name=fd.GetName(), oft_type=fd.GetType(), width=fd.GetWidth(), precision=fd.GetPrecision(), oft_subtype=fd.GetSubType(), nullable=fd.IsNullable(), default=fd.GetDefault()) @staticmethod def oft2numpy(oft_type): return FieldDefinition.oft2numpy_dict[oft_type] @staticmethod def numpy2oft(numpy_type): return FieldDefinition.numpy2oft_dict[numpy_type] class LayersSet(with_metaclass(ABCMeta, object)): """`LayersSet` is the basis class for all layers subclasses (read, update, and write). Many methods in `LayersSet` apply to a specific layer number, which is defined by the parameter `layer_number`. The default layer number is always zero, i.e., the first layer is used whenever `layer_number` is unset. `LayersSet` has an attribute `dataset`, which is an instance of OGRDataSource. A data source consists of one more layers (OGRLayer). If a layer instance is retrieved from `LayersSet` and the `LayersSet` instance is destroyed, an error will occur. The same applies to the features of a layer. Do not destroy or lose the scope of `LayersSet` before the layer instance or its features was used. .. seealso:: - LayersReader - LayersEditor - LayersUpdate - LayersWriter """ def __init__(self): """Initialize the abstract class by setting self.dataset = None """ self.dataset = None def get_source(self): """Return the dataset source :return: the dataset source (e.g., file name) :rtype: str """ return self.dataset.GetName() def get_description(self, layer_number=0): """Return the layer description for the given layer number Call `OGRLayer.GetDescription()` for the given layer number. :param layer_number: layer number. Default is zero :type layer_number: int :return: `OGRLayer.GetDescription()` for the given layer number. 
:rtype: """ return self.get_layer(layer_number).GetDescription() def show(self, layer_number=0, width=200, max_rows=60): """Show the layer as pandas DataFrame :param layer_number: layer number. Default is zero :type layer_number: int :param width: display width :type width: int :param max_rows: maximal number of rows displayed :type max_rows: int :return: """ pd.set_option("display.width", width) pd.set_option("display.max_rows", max_rows) print(self.data_frame(layer_number=layer_number)) pd.reset_option("display.width") pd.reset_option("display.max_rows") def copy(self, target='', **kwargs): """Return a copy of the layers The copy can be: - a full copy of all fields and field values - a copy of all field values of selected attribute fields - a copy of selected field values (filter) from selected attribute fields If target is given, save the copy before returning the new LayersWriter object. As per default, all fields are copied. The kwargs key `ofields` (output fields) defines the field name or list of field names to be copied. Per default this applies to layer number zero. Output fields for specific layers are defined by the layer number appended to `ofields`. For example, `ofields2=['NAME', 'AREA']` applies to layer number two (which is the third layer). `ofields0=['NAME', 'AREA']` is equivalent to `ofields=['NAME', 'AREA']`. As per default, all field values are copied. The kwargs key `filter` defines a filter to be applied to the features. Per default this applies to layer number zero. Filters for specific layers are defined by the layer number appended to `filter`. For example, `filter2=lambda a: a < 1000` applies to all features in layer number two (which is the third layer). If `filter` is used, then `ffields` (filter fields) must be also defined. In the example above this could be `filter2=lambda a: a < 1000, ffields2=['AREA']`. Another example: `lrs.copy(filter2=lambda a, n: a < 1000 and n.startswith('A'), ffields2=['AREA', 'NAME'])`. The filter parameters follow the same order of the list in `ffields`. :param target: file name. Default on memory :param **kwargs: ofields: name, list of names, list of (name, new name), dictionary {layer number: name}, dictionary {layer number: list of names}, or dictionary {layer number: list of (name, new name)} filter: filter function ffields: list of field names to apply the filter function, or a single field name :return: LayerWriter """ target = '' if target is None else target.strip() if target and not os.path.exists(os.path.dirname(target)): os.makedirs(os.path.dirname(target)) lrs_out = LayersWriter(source=target) number_of_layers = self.dataset.GetLayerCount() layers_dict = {layer_number: [None]*3 for layer_number in range(number_of_layers)} for k, parameters_list in list(kwargs.items()): if k.startswith('ofields'): idx = 0 elif k.startswith('filter'): idx = 1 elif k.startswith('ffields'): idx = 2 else: msg = 'option {} does not exist. 
Options are: target, ofields, filter, and ffields'.format(k) raise ValueError(msg) if not k[-1].isdigit(): layer_number, k = 0, k + '0' else: f, n = k, '' while f and f[-1].isdigit(): n, f = f[-1] + n, f[:-1] layer_number = int(n) if layer_number >= number_of_layers: msg = 'field {}: layer {} greater then the maximum number of layers'.format(k, str(layer_number)) raise ValueError(msg) layers_dict[layer_number][idx] = parameters_list def create_feature(lyr_out, fd0_out, feat_in, indices_in): feat_new = ogr.Feature(fd0_out) for idx_out, idx_in in enumerate(indices_in): feat_new.SetField(idx_out, feat_in.GetField(idx_in)) g = feat_in.GetGeometryRef() feat_new.SetGeometry(g) lyr_out.CreateFeature(feat_new) for ilc in range(number_of_layers): ly_in = self.dataset.GetLayer(ilc) ld_in = ly_in.GetLayerDefn() parameters = layers_dict[ilc] o_fields = parameters[0] f_filter = parameters[1] f_fields = parameters[2] if o_fields: # try: # o_fields = parameters[0][ilc] # except KeyError: # pass if isinstance(o_fields, basestring): o_fields = [o_fields] for i in range(len(o_fields)): if isinstance(o_fields[i], basestring): o_fields[i] = [o_fields[i], o_fields[i]] try: o_fields_names = dict() for fn in o_fields: if isinstance(fn, basestring): o_fields_names[fn] = fn elif len(fn) == 1: o_fields_names[fn[0]] = fn[0] else: o_fields_names[fn[0]] = fn[1] ofields_indices = [ld_in.GetFieldIndex(fn[0]) for fn in o_fields] except RuntimeError as e: msg = 'Error in ofields ({}): {}'.format(', '.join(str(f) for f in o_fields), str(e.message)) raise RuntimeError(msg) if -1 in ofields_indices: fn = o_fields[ofields_indices.index(-1)] if fn.upper() == 'FID': msg = 'ERROR: FID used as output field' else: msg = 'Output field {} does not exist'.format(fn) raise ValueError(msg) else: ofields_indices = list(range(ld_in.GetFieldCount())) o_fields_names = [ld_in.GetFieldDefn(i).GetName() for i in ofields_indices] o_fields_names = {fn: fn for fn in o_fields_names} # Get filter fields indices if f_fields: if isinstance(f_fields, basestring): f_fields = [f_fields] ffields_indices = sorted([ld_in.GetFieldIndex(fn) if fn != 'FID' else -2 for fn in f_fields]) if -1 in ffields_indices: msg = 'Filter field {} does not exist'.format(f_fields[ffields_indices.index(-1)]) raise ValueError(msg) else: ffields_indices = list() fd_out = [FieldDefinition.from_ogr(ld_in.GetFieldDefn(ifd)) for ifd in ofields_indices] for fd in fd_out: fd.name = o_fields_names[fd.name] ly_out = lrs_out.create_layer(str(ilc), ly_in.GetSpatialRef(), ly_in.GetGeomType(), fd_out) ld_out = ly_out.GetLayerDefn() # copy features ly_in.ResetReading() for feat in ly_in: if not f_filter or f_filter(*[feat.GetFID() if idx == -2 else feat.GetField(idx) for idx in ffields_indices]): create_feature(ly_out, ld_out, feat, ofields_indices) feat = None ly_in.ResetReading() lrs_out.dataset.FlushCache() return lrs_out def join(self, this_fieldname, other, other_fieldname, target=None, this_layer_number=0, other_layer_number=0): """Return a LayerWriter with joined features The (other) features to append to this features may be: 1- a pandas DataFrame or Series 2- a LayerSet 3- a source (file name): a LayerReader instance will be created In the last two cases: - the layer number of the LayerSet/LayerReader can be set with other_layer_number - the geometry will be removed from the DataFrame/Series :param this_fieldname: field name in this LayersSet to be used to join to the other object :type this_fieldname: str :param other: data to be joined to this LayersSet :type other: 
pandas.DataFrame, pandas.Series, LayerSet, or LayersReader file name :param other_fieldname: field name used to join to this LayersSet :type other_fieldname: str :param target: target :type target: str :param this_layer_number: layer number of this LayerSet (default 0) :type this_layer_number: int :param other_layer_number: layer number of other LayerSet (default 0) :type other_layer_number: int :return: :rtype: """ df_this = self.data_frame(this_layer_number) try: lrs_other = LayersReader(other) df_other = lrs_other.data_frame(other_layer_number) del df_other[DataFrameFeature.geometry_fieldname] except RuntimeError: try: df_other = other.data_frame() del df_other[DataFrameFeature.geometry_fieldname] except AttributeError: df_other = other df_other = _remove_data_frame_geometry_column(df_other) df_this = df_this.set_index(this_fieldname) df_other = df_other.set_index(other_fieldname) df_this = pd.concat([df_this, df_other], axis=1).reset_index() # df_this = df_this.fillna(nan) df_this.columns = _get_unique_field_names(df_this.columns) lrs_out = data_frame_to_layer(df_this, target=target) return lrs_out def data_frame(self, layer_number=0): """Return a layer as pandas DataFrame :param layer_number: layer number. Default is zero :type layer_number: int :return: OGRLayer as pandas DataFrame """ ld = self.get_layer_definition(layer_number) if not ld: return None nf = ld.GetFieldCount() # Get fields names and types from the layer column_name_geo = DataFrameFeature.geometry_fieldname column_name_fid = 'FID' names = [column_name_fid, column_name_geo] types = [int, DataFrameFeature] if nf > 0: fds = [FieldDefinition.from_ogr(ld.GetFieldDefn(i)) for i in range(ld.GetFieldCount())] names0, types0 = list(zip(*[[fd.name, fd.oft_type] for fd in fds])) names += names0 types += types0 names_types = collections.OrderedDict(list(zip(names, types))) # Retrieve values from layer ogr_layer = self.get_layer(layer_number) dfl = DataFrameLayer(self, layer_number) ogr_layer.ResetReading() values = [[f.GetFID(), DataFrameFeature(dfl, f.GetFID())] + [f.GetField(i) for i in range(nf)] for f in ogr_layer] ogr_layer.ResetReading() # Create DataFrame df = pd.DataFrame(values, columns=names) for name in names: df[name] = df[name].astype(FieldDefinition.oft2numpy(names_types[name])) return df.set_index(column_name_fid) def get_layer_count(self): """Return the number of layers :return: number of layers """ return self.dataset.GetLayerCount() def get_layer_name(self, layer_number=0): """Return the layer name for the given layer number :param layer_number: default is zero :return: layer name """ return self.get_layer(layer_number).GetName() def get_layer(self, layer_number=0): """Return the OGRLayer instance for the given layer number Use this method with care. If the layer is used after this instance (`LayersSet`) is destroyed, the system will probably crash. 
The following may also crash the system:: lyr = LayersReader(source='D:/tmp/test.shp').get_layer() In order to avoid it, use:: lrs = LayersReader(source='D:/tmp/test.shp') lyr = lrs.get_layer() # lrs should be deleted only after lyr is used del lrs :param layer_number: default is zero :return: OGRLayer """ return self.dataset.GetLayer(layer_number) def layers(self): """ Iterator over all layers :return: """ for i in range(self.dataset.GetLayerCount()): yield self.dataset.GetLayer(i) def get_layer_definition(self, layer_number=0): """Return the `OGRLayerDefn` instance for the given layer number :param layer_number: default is zero :return: OGRLayerDefn """ lyr = self.get_layer(layer_number) return lyr.GetLayerDefn() if lyr else None def layers_definitions(self): """Iterator over layer definitions, one definition for each layer The iterations yield the `OGRLayerDefn` instance of each layer :return: yield OGRLayerDefn """ for i in range(self.dataset.GetLayerCount()): ly = self.dataset.GetLayer(i) yield ly.GetLayerDefn() def get_features_count(self, layer_number=0): """Return the number of features for the given layer number :param layer_number: :return: """ return self.get_layer(layer_number).GetFeatureCount() def create_feature(self, **kwargs): """Create a feature and insert it into the layer :param kwargs: :key geom: geometry as OGRGeometry, wkt, or wkb :key layer_number: default 0 "field name": field name (str) as key and field type as values :return: """ lyr = self.get_layer(layer_number=kwargs.pop('layer_number', 0)) feature = ogr.Feature(lyr.GetLayerDefn()) geom = kwargs.pop('geom') try: feature.SetGeometry(geom) except TypeError: try: feature.SetGeometry(ogr.CreateGeometryFromWkb(geom)) except RuntimeError: feature.SetGeometry(ogr.CreateGeometryFromWkt(geom)) fieldnames_in = list(kwargs.keys()) for k in fieldnames_in: try: feature.SetField(k, kwargs[k]) except (NotImplementedError, RuntimeError) as e: print(k, kwargs[k], type(kwargs[k])) raise e ldf = lyr.GetLayerDefn() for i in range(ldf.GetFieldCount()): fd = ldf.GetFieldDefn(i) if fd.GetName() not in fieldnames_in: df = fd.GetDefault() if df is not None: feature.SetField(i, df) lyr.CreateFeature(feature) del feature def get_field_count(self, layer_number=0): """Return number of fields for the given layer number. FID is not taken into consideration. :param layer_number: default is zero :return: """ return self.get_layer_definition(layer_number).GetFieldCount() def get_field_names(self, layer_number=0): """Return the field names for the given layer number :param layer_number: :return: """ ld = self.get_layer_definition(layer_number) result = [] for i in range(ld.GetFieldCount()): fd = ld.GetFieldDefn(i) result.append(fd.GetName()) return result def get_field_numbers(self, field_names=None, layer_number=0): """Return the field numbers for the given field names. Set field number = -1 if field name does not exist :param field_names: :param layer_number: default is zero :return: list of int """ if isinstance(field_names, basestring): field_names = [field_names] if not field_names: field_names = self.get_field_names(layer_number) ld = self.get_layer_definition(layer_number=layer_number) return [ld.GetFieldIndex(field_name) for field_name in field_names] def get_field_definition(self, field_name, layer_number=0): """Return the FieldDefinition for this field name :param field_name: field name :type field_name: str :param layer_number: layer number. 
Default is zero :type layer_number: int :return: """ if field_name: ld = self.get_layer_definition(layer_number) for i in range(ld.GetFieldCount()): fd = ld.GetFieldDefn(i) if field_name == fd.GetName(): return FieldDefinition.from_ogr(fd) return None def get_field_definitions(self, field_names=None, layer_number=0): """Returns a list of field definitions :param field_names: :param layer_number: default is zero :return: list of field definitions :rtype: list of FieldDefinition """ ld = self.get_layer_definition(layer_number) result = [] for i in range(ld.GetFieldCount()): fd = ld.GetFieldDefn(i) if not field_names or fd.GetName() in field_names: result.append(FieldDefinition.from_ogr(fd)) return result def get_field_definitions_data_frame(self, field_names=None, layer_number=0): """Return the field definitions in a pandas DataFrame :param field_names: field names. Default None means all fields :param layer_number: layer number, default is zero :return: a pandas DataFrame with field definitions :rtype: pandas.DataFrame """ name_list = list() oft_type_list = list() oft_type_name_list = list() width_list = list() precision_list = list() oft_subtype_list = list() oft_subtype_name_list = list() nullable_list = list() default_list = list() for fd in self.get_field_definitions(field_names=field_names, layer_number=layer_number): name_list.append(fd.name) oft_type_list.append(fd.oft_type) oft_type_name_list.append(fd.get_field_type_name()) width_list.append(fd.width) precision_list.append(fd.precision) oft_subtype_list.append(fd.oft_subtype) oft_subtype_name_list.append(fd.get_field_subtype_name()) nullable_list.append(fd.nullable) default_list.append(fd.default) return pd.DataFrame({'name': name_list, 'type': oft_type_list, 'type_name': oft_type_name_list, 'width': width_list, 'precision': precision_list, 'subtype': oft_subtype_list, 'subtype_name': oft_subtype_name_list, 'nullable': nullable_list, 'default': default_list}, columns=['name', 'type', 'type_name', 'width', 'precision', 'subtype', 'subtype_name', 'nullable', 'default']) def get_field_ids(self, layer_number=0): """Return field ids :param layer_number: :return: field ids :rtype: list """ ly = self.get_layer(layer_number) ly.ResetReading() ids = [feat.GetFID() for feat in ly] ly.ResetReading() return ids def get_field_values(self, field_names=None, layer_number=0): """Return field values :param field_names: :param layer_number: :return: """ if isinstance(field_names, basestring): field_names = [field_names] if not field_names: field_names = self.get_field_names(layer_number) field_numbers = self.get_field_numbers(field_names, layer_number) if -1 in field_numbers: for i in field_numbers: if i == -1: msg = 'Field {} does not exist'.format(field_names[i]) raise AttributeError(msg) ly = self.get_layer(layer_number) ly.ResetReading() feat_list = [(feat.GetFID(), [feat.GetField(fn) for fn in field_numbers]) for feat in ly] ly.ResetReading() if feat_list: feat_list, values_list = list(zip(*feat_list)) else: feat_list, values_list = list(), list() values_list = list(zip(*values_list)) df = pd.DataFrame({field_names[i]: v for i, v in enumerate(values_list)}, index=feat_list, columns=[field_names[i] for i in range(len(values_list))]) df.index.name = 'FID' return df def has_field_names(self, field_names, layer_number=0): """Return a list of bool values with True for existing fields :param field_names: :param layer_number: default is zero :return: list of bool """ if isinstance(field_names, basestring): field_names = [field_names] ld = 
self.get_layer_definition(layer_number) return [ld.GetFieldIndex(field_name) > -1 for field_name in field_names] def generate_field_name(self, field_name, layer_number=0): """Returns field_name if it does not exist, else append a digit from 0 to n to field_name and return it :param field_name: :param layer_number: default = 0 :return: """ ld = self.get_layer_definition(layer_number) if ld.GetFieldIndex(field_name) == -1: return field_name fn = str(field_name) digits = [] while fn[-1].isdigit(): digits.append(fn[-1]) fn = fn[:-1] if not digits: return field_name + '0' else: i = int(''.join(digits)) field_name = fn fn = field_name + str(i+1) while ld.GetFieldIndex(fn) > -1: i += 1 fn = field_name + str(i) return fn def rename_fields(self, target='', **kwargs): """Rename fields in a new `LayersWriter` Create a copy of this LayersSet with the new field names given in kwargs als `fieldsX`, where X is the layer number. In the following example the field `AREA` from layer number one will be renamed to 'AREAkm2'. The new LayerWriter created in the memory and returned to `lrs1`:: lrs1 = lrs0.rename_fields(fields1={'AREA': 'AREAkm2'}) For different layers:: lrs1 = lrs0.rename_fields(fields0={'N': 'NAME'}, fields1={'AREA': 'AREAkm2'}) Saving the new LayerWriter:: lrs1 = lrs0.rename_fields(fields1={'AREA': 'AREAkm2'}, target='D:/tmp/example.shp', ) :param target: file name or other source used in `LayerWriter`. If empty, creates a `LayerWriter` on memory. :param kwargs: - fields<X>: key used to address layer number <X> - all other kwargs are passed to LayerWriter :return: LayersWriter with the new field names """ field_x_dict = {} for field_x in [f for f in list(kwargs.keys()) if f.startswith('fields')]: field_x_dict[field_x] = kwargs.pop(field_x) if not field_x_dict: msg = 'No parameter starting with field found: {}'.format(sorted(kwargs.keys())) raise ValueError(msg) target = target.strip() if target is None: target = '' if target and not os.path.exists(os.path.dirname(target)): os.makedirs(os.path.dirname(target)) lrs = LayersWriter(source=target) number_of_layers = self.dataset.GetLayerCount() layers_dict = dict() for field_x, fieldname_dict in list(field_x_dict.items()): if not field_x[-1].isdigit(): layer_number0, field_x = 0, field_x + '0' else: f, n = field_x, '' while f and f[-1].isdigit(): n, f = f[-1] + n, f[:-1] layer_number0 = int(n) if layer_number0 >= number_of_layers: msg = 'fields {}: layer {} greater then the maximum number of layers'.format(field_x, str(layer_number0)) raise ValueError(msg) layers_dict[layer_number0] = fieldname_dict for ilc in range(number_of_layers): ly_in = self.dataset.GetLayer(ilc) lrs.dataset.CopyLayer(ly_in, ly_in.GetName(), ['OVERWRITE=YES']) for ilc in range(number_of_layers): ly_out = lrs.dataset.GetLayer(ilc) ld_out = ly_out.GetLayerDefn() n_fields = ld_out.GetFieldCount() fd_out = [ld_out.GetFieldDefn(ifd) for ifd in range(n_fields)] for fd in fd_out: if fd.GetName() in layers_dict[ilc]: fd.SetName(layers_dict[ilc][fd.GetName()]) lrs.dataset.FlushCache() return lrs def fields(self, layer_number=0): """Returns a list of lists containing field name, type code, type name, width, and precision :param layer_number: default is zero :return: list of lists """ ld = self.get_layer_definition(layer_number) for i in range(ld.GetFieldCount()): fd = ld.GetFieldDefn(i) yield fd # ========================================================================= # Geometries # ========================================================================= def _get_geometries(self, 
method, name, layer_number): """ :param method: :param layer_number: :return: """ ly = self.get_layer(layer_number) ly.ResetReading() feat_list = [(feat.GetFID(), getattr(feat.GetGeometryRef(), method)()) for feat in ly] ly.ResetReading() feat_list, values_list = list(zip(*feat_list)) sr = pd.Series(values_list, index=feat_list) sr.index.name = 'FID' sr.name = name return sr def get_geometry_type(self, layer_number=0): """Return the geometry type for the given layer number :param layer_number: :return: """ return self.get_layer(layer_number).GetGeomType() def get_geometries(self, layer_number=0, fmt='wkb'): """Return the geometries :param layer_number: :param fmt: 'wkb' or 'wkt' :return: :rtype: pandas Series """ if fmt.lower() == 'wkb': sr = self._get_geometries('ExportToWkb', name=DataFrameFeature.geometry_fieldname, layer_number=layer_number) elif fmt.lower() == 'wkt': sr = self._get_geometries('ExportToWkt', name=DataFrameFeature.geometry_fieldname, layer_number=layer_number) else: msg = 'Unknown geometry format {}: valid format are "wkt" or "wkb"'.format(fmt) raise ValueError(msg) return sr def get_geometries_and_field_values(self, field_names=None, layer_number=0, geometry_format='wkb'): """Return geometries and field values for the given layer number :param field_names: :param layer_number: :param geometry_format: :return: """ if isinstance(field_names, basestring): field_names = [field_names] ly = self.get_layer(layer_number) if not field_names: field_names = self.get_field_names(layer_number) field_numbers = self.get_field_numbers(field_names, layer_number) field_names = [DataFrameFeature.geometry_fieldname] + field_names ly.ResetReading() geometry_format = str(geometry_format) if geometry_format.lower() == 'wkb': feat_list = [(feat.GetFID(), [feat.GetGeometryRef().ExportToWkb()] + [feat.GetField(fn) for fn in field_numbers]) for feat in ly] elif geometry_format.lower() == 'wkt': feat_list = [(feat.GetFID(), [feat.GetGeometryRef().ExportToWkt()] + [feat.GetField(fn) for fn in field_numbers]) for feat in ly] else: msg = 'Unknown geometry format {}: valid format are "wkt" or "wkb"'.format(geometry_format) raise ValueError(msg) ly.ResetReading() feat_list, values_list = list(zip(*feat_list)) values_list = list(zip(*values_list)) df = pd.DataFrame({field_names[i]: v for i, v in enumerate(values_list)}, index=feat_list, columns=field_names) df.index.name = 'FID' return df def get_geometries_areas(self, data_frame=True, layer_number=0): sr = self._get_geometries('GetArea', 'area', layer_number=layer_number) if data_frame: df = pd.DataFrame(sr.values.tolist(), columns=['area']) df.index.name = sr.index.name return df else: return sr def get_geometries_boundaries(self, fmt='wkt', layer_number=0): fmt = fmt.lower() geo_formats = {'gml': 'ExportToGML', 'isowkb': 'ExportToIsoWkb', 'isowkt': 'ExportToIsoWkt', 'json': 'ExportToJson', 'kml': 'ExportToKML', 'wkb': 'ExportToWkb', 'wkt': 'ExportToWkt'} if fmt not in list(geo_formats.keys()): msg = 'Format {} unknown'.format(str(fmt)) raise ValueError(msg) sr = self._get_geometries('GetBoundary', 'boundary', layer_number=layer_number) return sr.apply(lambda g: getattr(g, geo_formats[fmt])()) def get_geometries_coordinate_dimensions(self, layer_number=0): return self._get_geometries('GetCoordinateDimension', 'coordinate dimension', layer_number=layer_number) def get_geometries_curve(self, layer_number=0): return self._get_geometries('GetCurveGeometry', 'curve geometry', layer_number=layer_number) def get_geometries_dimensions(self, layer_number=0): 
return self._get_geometries('GetDimension', 'dimension', layer_number=layer_number) def geometries_export(self, fmt='wkt', layer_number=0): fmt = fmt.lower() geo_formats = {'gml': 'ExportToGML', 'isowkb': 'ExportToIsoWkb', 'isowkt': 'ExportToIsoWkt', 'json': 'ExportToJson', 'kml': 'ExportToKML', 'wkb': 'ExportToWkb', 'wkt': 'ExportToWkt'} if fmt not in list(geo_formats.keys()): msg = 'Format {} unknown'.format(str(fmt)) raise ValueError(msg) return self._get_geometries(geo_formats[fmt], DataFrameFeature.geometry_fieldname, layer_number=layer_number) def get_geometries_centroids(self, data_frame=True, layer_number=0): sr = self._get_geometries('Centroid', 'centroid', layer_number=layer_number) index_name = sr.index.name sr = sr.apply(lambda p: p.GetPoints()[0]) if data_frame: df = pd.DataFrame(sr.values.tolist(), columns=['x_center', 'y_center']) df.index.name = index_name return df else: return sr def get_geometries_envelopes(self, data_frame=True, layer_number=0): sr = self._get_geometries('GetEnvelope', 'envelope', layer_number=layer_number) return pd.DataFrame(sr.values.tolist(), columns=['xmin', 'xmax', 'ymin', 'ymax']) if data_frame else sr def get_geometries_envelopes_3d(self, data_frame=True, layer_number=0): sr = self._get_geometries('GetEnvelope3D', 'envelope 3D', layer_number=layer_number) columns = ['xmin', 'xmax', 'ymin', 'ymax', 'zmin', 'zmax'] return pd.DataFrame(sr.values.tolist(), columns=columns) if data_frame else sr def get_geometries_count(self, layer_number=0): """Return the number of geometries for each record :param layer_number: :return: number of geometries in each feature :rtype: pandas Series """ return self._get_geometries('GetGeometryCount', 'geom. count', layer_number=layer_number) def get_geometries_linear(self, layer_number=0): return self._get_geometries('GetLinearGeometry', 'linear geometry', layer_number=layer_number) def get_geometries_names(self, layer_number=0): """Return the geometries names for each record :param layer_number: :return: names of the geometries in each feature :rtype: pandas Series """ return self._get_geometries('GetGeometryName', 'geom. name', layer_number=layer_number) def get_geometries_types(self, layer_number=0): return self._get_geometries('GetGeometryType', 'geom. 
type', layer_number=layer_number) def get_geometries_m(self, layer_number=0): return self._get_geometries('GetM', 'm', layer_number=layer_number) def get_geometries_x(self, layer_number=0): return self._get_geometries('GetX', 'x', layer_number=layer_number) def get_geometries_y(self, layer_number=0): return self._get_geometries('GetY', 'y', layer_number=layer_number) def get_geometries_z(self, layer_number=0): return self._get_geometries('GetZ', 'z', layer_number=layer_number) def get_geometries_point_count(self, layer_number=0): """Return zeros if geometries are not points, else the number of points in each geometry :param layer_number: :return: """ return self._get_geometries('GetPointCount', 'point count', layer_number=layer_number) def get_geometries_points_2d(self, data_frame=True, layer_number=0): sr = self._get_geometries('GetPoint_2D', 'point 2D', layer_number=layer_number) if data_frame: columns = ['x', 'y'] df = pd.DataFrame(sr.values.tolist(), columns=columns) df.index.name = 'FID' return df else: return sr def get_geometries_points(self, data_frame=True, layer_number=0): sr = self._get_geometries('GetPoint', 'point', layer_number=layer_number) if data_frame: columns = ['x', 'y', 'z'] df = pd.DataFrame(sr.values.tolist(), columns=columns) df.index.name = 'FID' return df else: return sr def get_geometries_points_zm(self, data_frame=True, layer_number=0): sr = self._get_geometries('GetPointZM', 'point ZM', layer_number=layer_number) if data_frame: columns = ['x', 'y', 'z', 'm'] df = pd.DataFrame(sr.values.tolist(), columns=columns) df.index.name = 'FID' return df else: return sr def get_geometries_spatial_references(self, layer_number=0): return self._get_geometries('GetSpatialReference', 'spatial reference', layer_number=layer_number) def get_geometries_wkb_size(self, layer_number=0): return self._get_geometries('WkbSize', 'wkb size', layer_number=layer_number) def get_geometry_union_as_wkb(self, layer_number=0): """Return the geometry union :param layer_number: :return: """ ly = self.get_layer(layer_number) union = None ly.ResetReading() for feat in ly: geom = feat.GetGeometryRef().ExportToWkb() if geom: union = ogr.CreateGeometryFromWkb(geom) if not union else union.Union(ogr.CreateGeometryFromWkb(geom)) return union.ExportToWkb() def is_geometry_3d(self, layer_number=0): return self._get_geometries('Is3D', 'Is3D', layer_number=layer_number) def is_geometry_empty(self, layer_number=0): return self._get_geometries('IsEmpty', 'IsEmpty', layer_number=layer_number) def is_geometry_measured(self, layer_number=0): return self._get_geometries('IsMeasured', 'IsMeasured', layer_number=layer_number) def is_geometry_ring(self, layer_number=0): return self._get_geometries('IsRing', 'IsRing', layer_number=layer_number) def is_geometry_simple(self, layer_number=0): return self._get_geometries('IsSimple', 'IsSimple', layer_number=layer_number) def is_geometry_valid(self, layer_number=0): return self._get_geometries('IsValid', 'IsValid', layer_number=layer_number) def is_geometry_point(self, layer_number=0): """Check whether the layer has a point geometry :param layer_number: :return: """ return self.get_geometries_types(layer_number=layer_number).apply(lambda t: is_topology_0d(t)) def is_geometry_polygon(self, layer_number=0): """Check whether the layer has a polygon geometry :param layer_number: :return: """ g_types = self.get_geometries_types(layer_number=layer_number).apply(lambda t: is_topology_2d(t)) return g_types def get_coordinate_system(self, fmt='wkt', layer_number=0): """Return the 
coordinate system Possible formats: * 'xml' * 'wkt' * 'usgs' * 'proj4' * 'prettywkt' * 'pci' * 'micoordsys' :param fmt: :param layer_number: :return: """ ly_in = self.dataset.GetLayer(layer_number) return srs.export(ly_in.GetSpatialRef(), fmt=fmt) def get_extent(self, layer_number=0, scale=0.0): """Return the layer extension (bounding box) :param layer_number: default is zero :param scale: fraction of width and height to scale. Default is zero (no scaling). Negative values shrink the extention. :return: (xmin, xmax, ymin, ymax) as floats """ xmin, xmax, ymin, ymax = self.get_layer(layer_number).GetExtent() if scale and scale != 0.0: dx, dy = xmax - xmin, ymax - ymin xmin -= dx * scale xmax += dx * scale ymin -= dy * scale ymax += dy * scale return xmin, xmax, ymin, ymax def transform(self, other=None, **kwargs): """Transform to another coordinate system :param other: another object returning srs as wkt from 'get_coordinate-system()' :param kwargs: Keys/values for the transformation: :key srs: (osgeo.osr.SpatialReference) srs object :key epsg: (str) :key epsga: (str) :key erm: (str) :key esri: (str) :key mi: (str) :key ozi: (str) :key pci: (str) :key proj4: (str) :key url: (str) :key usgs: (str) :key wkt: (str) :key xml: (str) Further key/values are used in LayersWriter :return: """ sr_out = None if other: sr_out = srs.srs_from_wkt(other.get_coordinate_system()) if not sr_out: sr_out = kwargs.pop('srs', None) if not sr_out: keys = ['epsg', 'epsga', 'erm', 'esri', 'mi', 'ozi', 'pci', 'proj4', 'url', 'usgs', 'wkt', 'xml'] sr_out = [srs.get_srs(**{k: kwargs.pop(k, None)}) for k in keys if k in list(kwargs.keys())][0] if not sr_out: return None lrs_out = LayersWriter(**kwargs) for ilc, lyr_self in enumerate(self.layers()): ld_in = self.get_layer_definition(ilc) sr_self = lyr_self.GetSpatialRef() coord_trans = osr.CoordinateTransformation(sr_self, sr_out) fields_indices = self.get_field_numbers() fd_out = [FieldDefinition.from_ogr(ld_in.GetFieldDefn(i)) for i in fields_indices] ly_out = lrs_out.create_layer(str(ilc), sr_out, lyr_self.GetGeomType(), fd_out) ld_out = ly_out.GetLayerDefn() lyr_self.ResetReading() for feat in lyr_self: feat_new = ogr.Feature(ld_out) for idx in fields_indices: feat_new.SetField(idx, feat.GetField(idx)) g = feat.GetGeometryRef() geom_out = g.Clone() geom_out.Transform(coord_trans) feat_new.SetGeometry(geom_out) ly_out.CreateFeature(feat_new) feat = None lyr_self.ResetReading() lrs_out.dataset.FlushCache() return lrs_out def set_attribute_filter(self, filter_string, layer_number=0): """Set an attribute filter :param filter_string: :param layer_number: :return: """ ly = self.get_layer(layer_number) ly.ResetReading() ly.SetAttributeFilter(filter_string) def set_spatial_filter(self, lrs, layer_number=0): """Set a spatial filter :param lrs: :param layer_number: :return: """ try: lrs = LayersReader(lrs) geometries = lrs.get_geometries(layer_number) except (TypeError, RuntimeError): try: geometries = lrs.get_geometries(layer_number) except (TypeError, RuntimeError): geometries = lrs geom = geometries_to_geometry_collection(geometries) ly = self.get_layer(layer_number) ly.ResetReading() g = ogr.CreateGeometryFromWkb(geom) ly.SetSpatialFilter(g) def spatial_filter(self, lrs, layer_number=0, method='Intersects'): """Yield features according to a spatial filter :param lrs: :param layer_number: :param method: :return: """ ly = self.get_layer(layer_number) if method == 'Disjoint': instc_set = set([feat.GetFID() for feat in self.spatial_filter(lrs, layer_number, 
method='Intersects')]) ly.ResetReading() for feat in ly: if feat.GetFID() not in instc_set: yield feat else: geometries = lrs.get_geometries(layer_number) geometries = [ogr.CreateGeometryFromWkb(g) for g in geometries] envelopes = [g.GetEnvelope() for g in geometries] ly.ResetReading() for feat in ly: geom0 = feat.GetGeometryRef() e0 = geom0.GetEnvelope() for i, e1 in enumerate(envelopes): if not (e0[0] > e1[1] or e0[1] < e1[0] or e0[2] > e1[3] or e0[3] < e1[2]): if getattr(geom0, method)(geometries[i]): yield feat feat = None break ly.ResetReading() def spatial_filter_to_layer(self, lrs, layer_number=0, method='Intersects', output_layers=''): """Copy the result of the spatial filter to a layer :param lrs: :param layer_number: :param method: :param output_layers: :return: """ lrs = lrs.transform(other=self) fids = set([feat.GetFID() for feat in self.spatial_filter(lrs, layer_number, method)]) return self.copy(target=output_layers, filter=lambda fid: fid in fids, ffields='FID') def buffer(self, buffer_dist, target=None, layer_number=0): """Return buffer :param buffer_dist: :param target: :param layer_number: :return: """ lyr_in = self.get_layer(layer_number) lrs_out = LayersWriter(source=target) lyr_out = lrs_out.create_layer(str(layer_number), lyr_in.GetSpatialRef(), geom=ogr.wkbPolygon) fd_out = lyr_out.GetLayerDefn() lyr_in.ResetReading() for feat_in in lyr_in: geom_in = feat_in.GetGeometryRef() geom_out = geom_in.Buffer(buffer_dist) feat_out = ogr.Feature(fd_out) feat_out.SetGeometry(geom_out) lyr_out.CreateFeature(feat_out) feat_out = None return lrs_out def centroid(self, target=None, drivername=None, **kwargs): """Return a new `LayersWriter` with centroids Return a LayersWriter with the centroids of the geometries in this LayersSet :param target: see `source` in LayersWriter :param drivername: see LayersWriter :param kwargs: see LayersWriter :return: """ lrs_point = LayersWriter(source=target, drivername=drivername, **kwargs) for i in range(self.get_layer_count()): lyr_self = self.get_layer(i) ld_self = self.get_layer_definition(layer_number=i) field_definitions0 = [] for ifd in range(ld_self.GetFieldCount()): field_definitions0.append(FieldDefinition.from_ogr(ld_self.GetFieldDefn(ifd))) lyr_point = lrs_point.create_layer(lyr_self.GetName(), prj=lyr_self.GetSpatialRef(), geom=ogr.wkbPoint, field_definitions=field_definitions0) feature_def = lyr_point.GetLayerDefn() lyr_self.ResetReading() for feat in lyr_self: feature = ogr.Feature(feature_def) for idx in range(len(field_definitions0)): feature.SetField(idx, feat.GetField(idx)) g = feat.GetGeometryRef().Centroid() feature.SetGeometry(g) lyr_point.CreateFeature(feature) return lrs_point def convex_hull(self, target=None): """Return the convex hull :param target: :return: """ lrs = LayersWriter(source=target) for ilc in range(self.dataset.GetLayerCount()): ly_in = self.get_layer(ilc) geom = self.get_convex_hull_as_wkb(ilc) id_field = ogr.FieldDefn('id', ogr.OFTInteger) ly_out = lrs.create_layer(str(ilc), ly_in.GetSpatialRef(), ly_in.GetGeomType()) ly_out.CreateField(id_field) feature_def = ly_out.GetLayerDefn() feature = ogr.Feature(feature_def) feature.SetGeometry(ogr.CreateGeometryFromWkb(geom)) feature.SetField("id", 1) ly_out.CreateFeature(feature) ly_out.SyncToDisk() return lrs def get_convex_hull_as_wkb(self, layer_number=0): """Return the convex hull as well known binary format :param layer_number: :return: """ geom_col = ogr.Geometry(ogr.wkbGeometryCollection) ly = self.get_layer(layer_number) ly.ResetReading() for feat in ly: 
geom_col.AddGeometry(feat.GetGeometryRef()) # Calculate convex hull return geom_col.ConvexHull().ExportToWkb() def dissolve(self, **kwargs): """Dissolve to a new `LayersWriter` :param kwargs: :return: """ from girs.feat.proc import dissolve return dissolve(self, **kwargs) class LayersReader(LayersSet): """`LayersReader` inherits from `LayersSet`. It is responsible for creating a read only instance of a `OGRDataSet`. It has only a constructor. .. seealso:: LayersSet """ def __init__(self, source, drivername=None): """Create a read only instance of a `OGRDataSet` :param source: file name, database or any other valid source in `ogr.Open(source, 0)` :type: str """ super(LayersReader, self).__init__() if drivername: drv = ogr.GetDriverByName(drivername) self.dataset = drv.Open(source, 0) else: self.dataset = ogr.Open(source, 0) if self.dataset is None: msg = 'Could not open {}'.format(str(source)) raise ValueError(msg) class LayersEditor(with_metaclass(ABCMeta, LayersSet)): """`LayersEditor` is an abstract class that inherits from `LayersSet` and is basis class of `LayersUpdate` and `LayersWrite`. It contains methods to edit an existing or new dataset. """ def __init__(self): """Initializes the upper class """ super(LayersEditor, self).__init__() def create_layer(self, name, prj, geom, field_definitions=()): """Create a new layer :param name: (str) name of the layer :param prj: :param geom: :param field_definitions: :return: """ ly = self.dataset.CreateLayer(name, prj, geom) if not ly: raise Exception('Layer {} with projection {} and geometry {} could not be created'.format(name, prj, geom)) for fd in field_definitions: try: ly.CreateField(fd.to_ogr()) except TypeError as e: ly.CreateField(fd) return ly def field_calculator(self, field_name, calc_function, calc_field_names, layer_number=0): """Calculate field values for given field names. 
:param field_name: :param calc_function: :param calc_field_names: :param layer_number: :return: """ ly = self.get_layer(layer_number) ly.ResetReading() idx = self.get_field_numbers(calc_field_names) for feat in ly: feat.SetField(field_name, calc_function(*[feat.GetField(i) for i in idx])) ly.SetFeature(feat) ly.ResetReading() def calculate_area(self, field_name='area', scale=1.0, layer_number=0): """Calculate feature areas and set them to the given field name :param field_name: :param scale: :param layer_number: :return: """ if self.is_geometry_polygon(layer_number).all(): ld = self.get_layer_definition(layer_number) fidx = ld.GetFieldIndex(field_name) if fidx > -1: ly = self.get_layer(layer_number) ly.ResetReading() for feat in ly: area = feat.GetGeometryRef().GetArea() * scale feat.SetField(fidx, area) ly.SetFeature(feat) del feat ly.SyncToDisk() def add_fields(self, **kwargs): """Add fields to this layers set :param kwargs: the key starting with `fields` :return: """ fields_dict = {int(k[6:]) if k[6:] else 0: v for k, v in list(kwargs.items()) if k.lower().startswith('fields')} for layer_number in sorted(fields_dict.keys()): ly = self.get_layer(layer_number) try: field_definitions0 = fields_dict[layer_number] _ = iter(field_definitions0) except TypeError: field_definitions0 = [fields_dict[layer_number]] for fd0 in field_definitions0: fd1 = ogr.FieldDefn(fd0.name, fd0.oft_type) if fd0.width: fd1.SetWidth(fd0.width) if fd0.precision: fd1.SetPrecision(fd0.precision) if fd0.oft_subtype: fd1.SetSubType(fd0.oft_subtype) if fd0.nullable: fd1.SetNullable(fd0.nullable) if fd0.default: fd1.SetDefault(fd0.default) ly.CreateField(fd1) def delete_fields(self, field_names, layer_number=0): """Delete fields from this layers set :param field_names: :param layer_number: :return: """ ly = self.get_layer(layer_number) if ly.TestCapability(ogr.OLCDeleteFeature): for idx in sorted([i for i in self.get_field_numbers(field_names=field_names, layer_number=layer_number) if i > -1], reverse=True): ly.DeleteField(idx) def delete_features(self, **kwargs): """ :param kwargs: filter: filter function ffields: list of field names to apply the filter function, or a single field name """ number_of_layers = self.dataset.GetLayerCount() layers_dict = {layer_number: [None]*3 for layer_number in range(number_of_layers)} for k, parameters_list in list(kwargs.items()): if k.startswith('filter'): idx = 0 elif k.startswith('ffields'): idx = 1 else: msg = 'option {} does not exist. 
Options are filter, and ffields'.format(k) raise ValueError(msg) if not k[-1].isdigit(): layer_number = 0 k = k + '0' else: f = k n = '' while f and f[-1].isdigit(): n = f[-1] + n f = f[:-1] layer_number = int(n) if layer_number >= number_of_layers: msg = 'field {}: layer {} greater then the maximum number of layers'.format(k, str(layer_number)) raise ValueError(msg) layers_dict[layer_number][idx] = parameters_list for ilc in range(number_of_layers): ly_in = self.dataset.GetLayer(ilc) ld_in = ly_in.GetLayerDefn() parameters = layers_dict[ilc] f_filter = parameters[0] f_fields = parameters[1] # Get filter fields indices if isinstance(f_fields, basestring): f_fields = [f_fields] ffields_indices = sorted([ld_in.GetFieldIndex(fn) if fn != 'FID' else 'FID' for fn in f_fields]) if -1 in ffields_indices: msg = 'Filter field {} does not exist'.format(f_fields[ffields_indices.index(-1)]) raise ValueError(msg) for feat in ly_in: feat_id = feat.GetFID() if f_filter and f_filter(*[feat_id if idx == 'FID' else feat.GetField(idx) for idx in ffields_indices]): ly_in.DeleteFeature(feat_id) ly_in.ResetReading() class LayersUpdate(LayersEditor): """`LayersUpdate` inherits from `LayersEditor`. It is responsible for creating an update instance of a `OGRDataSet`. It has only a constructor. .. seealso:: - LayersSet - LayersEditor """ def __init__(self, source): """ :param source: """ super(LayersUpdate, self).__init__() try: source = source.get_source() except AttributeError: pass self.dataset = ogr.Open(source, 1) class LayersWriter(LayersEditor): """`LayersWriter` inherits from `LayersEditor`. It is responsible for creating a new instance of a `OGRDataSet`. It has only a constructor. .. seealso:: - LayersSet - LayersEditor """ def __init__(self, *args, **kwargs): """Instantiate a LayersWriter object Example: .. code-block:: python srs0 = get_srs(epsg=4326) fields0 = [FieldDefinition("Name", ogr.OFTString), FieldDefinition("Area", ogr.OFTReal)] fields1 = [FieldDefinition("Code", ogr.OFTInteger), FieldDefinition("Capital", ogr.OFTString)] LayersWriter(None, [ogr.wkbPolygon, srs0, fields0], [ogr.wkbPoint, srs0, fields1]) LayersWriter('', [ogr.wkbPolygon, srs0, fields0], [ogr.wkbPoint, srs0, fields1]) LayersWriter(D:/tmp/mfile.shp, ['', ogr.wkbPolygon, srs0, fields0]) LayersWriter(D:/tmp/mfile.gml, ['park', ogr.wkbPolygon, srs0, fields0], ['area', ogr.wkbPoint, srs0, fields1], drivername='GML') :param args: layer parameters with at least name and geometry type. Any of the following lists is valid: - [name, geometry type] - [name, geometry type, spatial reference system] - [name, geometry type, spatial reference system, list of girs.feat.layers.FieldDefinition] :param kwargs: :key source: (str) source name (e.g., file name with extension) :key drivername: a short driver name (see :meth:`FeatDrivers.get_driver_names()`). 
E.g., 'Memory' """ super(LayersWriter, self).__init__() source = kwargs.pop('source', '') if source is None: source = '' dr = FeatDrivers.get_driver(source, kwargs.pop('drivername', None)) self.dataset = dr.CreateDataSource(source) if self.dataset is None: if os.path.exists(source): raise ValueError("Data source {} already exists and will not be overwritten".format(str(source))) else: raise ValueError("Data source {} could not be created".format(str(source))) for parameters in args: n = len(parameters) name = parameters[0] if parameters is not None else '' geom = parameters[1] prj = parameters[2] if n > 2 else None field_definitions0 = parameters[3] if n > 3 else None self.create_layer(name, prj, geom, field_definitions0) def delete_layers(source): """Delete layers defined in `source` Example:: delete_layers('D:/tmp/country.shp') :param source: a data source, typically a file name (full path) :type source: str :return: """ if os.path.exists(source): driver = FeatDrivers.get_driver(source=source) driver.DeleteDataSource(source) def _get_unique_field_names(field_names): """Append a digit to fieldname duplicates :param field_names: :return: list of field names without duplicates """ import collections repeated_fieldnames = {c: -2 for c, count in list(collections.Counter(field_names).items()) if count > 1} new_filed_names = list() for field_name in field_names: if field_name in repeated_fieldnames: repeated_fieldnames[field_name] += 1 if repeated_fieldnames[field_name] >= 0: field_name += str(repeated_fieldnames[field_name]) new_filed_names.append(field_name) return new_filed_names def data_frame_to_layer(df, target=''): """Return a `LayersWriter` from the pandas DataFrame. The columns must have unique names. One column must be denominated according to DataFrameFeature.geometry_fieldname '_GEOM_' :param df: the DataFrame :type df: pandas.DataFrame :param target: an object LayerWriter, a file name of a new LayerWriter, or a dataset from a LayerWriter (writable) :type target: LayerWriter, str, or OGRDataSet :return: LayerWriter instance """ # Check unique column names column_names = df.columns.tolist() if len(column_names) != len(set(column_names)): msg = 'Non unique column names: {}'.format( set([str(c) for c in column_names if column_names.count(c) > 1])) raise RuntimeError(msg) field_definitions0 = get_field_definitions_from_data_frame(df) df_geom_field_definition = [fd for fd in field_definitions0 if fd.name == DataFrameFeature.geometry_fieldname] if len(df_geom_field_definition) != 1: msg = 'No geometry field found' if len(df_geom_field_definition) == 0 else 'Geometry field not unique' raise RuntimeError(msg) else: df_geom_field_definition = df_geom_field_definition[0] # The geometry column may have one and the same layer in all records, as well as no duplicate FID inside the # columns' objects DataFrameGeometry sr_geom = df[df_geom_field_definition.name] if len(set([geom.get_layer() for geom in sr_geom])) != 1: msg = 'DataFrame contains different layers: {}'.format(set([geom.get_layer() for geom in sr_geom])) raise RuntimeError(msg) if len(set([geom.feature_id for geom in sr_geom])) != len(sr_geom): msg = 'DataFrame contains duplicated FIDs' raise RuntimeError(msg) # Get field names from the layer and from the data frame, deleting the geom column of the data frame lyr_in = sr_geom.iloc[0].get_layer() target = '' if target is None else target.strip() if target and not os.path.exists(os.path.dirname(target)): os.makedirs(os.path.dirname(target)) lrs_out = LayersWriter(source=target) lyr_out 
= lrs_out.create_layer('', lyr_in.GetSpatialRef(), lyr_in.GetGeomType()) field_definitions1 = list(field_definitions0) field_definitions1.remove(df_geom_field_definition) lrs_out.add_fields(fields0=field_definitions1) ldf_out = lrs_out.get_layer_definition() # copy features def create_feature(sr): g_wkb = sr.loc[df_geom_field_definition.name].get_geometry() sr = sr.drop(df_geom_field_definition.name) feat_new = ogr.Feature(ldf_out) for fn0, value in zip(sr.index, sr): feat_new.SetField(fn0, value) feat_new.SetGeometry(ogr.CreateGeometryFromWkb(g_wkb)) lyr_out.CreateFeature(feat_new) # This is the slow part: df.apply(create_feature, axis=1) lrs_out.dataset.FlushCache() return lrs_out # def data_frame_to_layer(df, target=''): # """Return a `LayersWriter` from the pandas DataFrame. # # :param df: the DataFrame # :type df: pandas.DataFrame # :param target: an object LayerWriter, a file name of a new LayerWriter, or a dataset from a LayerWriter (writable) # :type target: LayerWriter, str, or OGRDataSet # :return: LayerWriter instance # """ # # TODO: if column names in DataFrame are the same as in layers, use layer's field types instead of DataFrame types # # if df is None or df.dropna().empty: # # return None # # # Get column names repetitions # df.columns = _get_unique_field_names(df.columns.tolist()) # # field_definitions0 = get_field_definitions_from_data_frame(df) # # df_field_names = df.columns.tolist() # df_geom_field_index_name = [(idx, field_name) for idx, field_name in enumerate(df_field_names) # if isinstance(df[field_name].iloc[0], DataFrameGeometry)] # if not df_geom_field_index_name: # msg = 'Column with type DataFrameGeometry not found' # raise RuntimeError(msg) # df_geom_field_index = df_geom_field_index_name[0][0] # df_geom_field_name = df_geom_field_index_name[0][1] # # The geometry column may have one and the same layer in all records, as well as no duplicate FID inside the # # columns' objects DataFrameGeometry # # df_geom = df[df_geom_field_name] # if len(set([geom.get_layer() for geom in df_geom])) != 1: # msg = 'DataFrame contains different layers' # raise RuntimeError(msg) # if len(set([geom.feature_id for geom in df_geom])) != len(df_geom): # msg = 'DataFrame contains duplicated FIDs' # raise RuntimeError(msg) # # Get field names from the layer and from the data frame, deleting the geom column of the data frame # geom0 = df_geom.iloc[0] # lyr_in = geom0.get_layer() # field_names_df = df.columns.values.tolist() # del field_names_df[df_geom_field_index] # # Check same field type in the column # # field_definitions0 = [series_to_field_definition(df[fn]) for fn in field_names_df] # # target = '' if target is None else target.strip() # if target and not os.path.exists(os.path.dirname(target)): # os.makedirs(os.path.dirname(target)) # lrs_out = LayersWriter(target) # lyr_out = lrs_out.create_layer('', lyr_in.GetSpatialRef(), lyr_in.GetGeomType()) # lrs_out.add_fields(fields0=field_definitions0) # ldf_out = lrs_out.get_layer_definition() # # # copy features # def create_feature(sr): # g_wkb = sr.loc['GEOM'].get_geometry() # sr = sr.drop('GEOM') # feat_new = ogr.Feature(ldf_out) # for fn0, value in zip(sr.index, sr): # feat_new.SetField(fn0, value) # feat_new.SetGeometry(ogr.CreateGeometryFromWkb(g_wkb)) # lyr_out.CreateFeature(feat_new) # # This is the slow part: # df.apply(create_feature, axis=1) # lrs_out.dataset.FlushCache() # return lrs_out # def field_definitions(layer_definition): # """Yield field definitions # # :param layer_definition: a layer definition # :type 
layer_definition: ogr.FeatureDefn # :return: # """ # for i in range(layer_definition.GetFieldCount()): # yield layer_definition.GetFieldDefn(i) # def series_to_field_definition(sr): # """Transform pandas Series name into field definition instances # # Transform numpy types of pandas DataFrame columns into field # # :param sr: # :return: # """ # col_types = set([type(v) for v in sr.values]) # field_name = sr.name # if len(col_types) != 1: # msg = 'Different types in column {}: {}'.format(field_name, ', '.join(list(col_types))) # raise ValueError(msg) # oft_type = FieldDefinition.data_frame_to_ogr_type_dict[type(sr.values[0])] # fd = FieldDefinition(name=field_name, oft_type=oft_type) # return fd # def get_field_definitions_from_data_frame(df): """Return a list of field definitions to the DataFrame columns. Return one instance of FieldDefinition for each column. For the geometry column, return a FieldDefinition with name 'DataFrameFeature.geometry_fieldname' and oft_type ogr.OFTString :param df: data frame with a geometry column :return: list of field definitions """ sr_geom = df[DataFrameFeature.geometry_fieldname] geom_col_types = set([type(v) for v in sr_geom.values]) if len(geom_col_types) != 1: msg = 'Different types in the geometry column: {}'.format(', '.join(list(geom_col_types))) raise ValueError(msg) data_frame_geom = sr_geom.values[0] data_frame_layer = data_frame_geom.data_frame_layer lyr = data_frame_layer.layer ldf = lyr.GetLayerDefn() layer_field_names = list() for i in range(ldf.GetFieldCount()): fd = ldf.GetFieldDefn(i) layer_field_names.append(fd.GetName()) result = list() for field_name_df in df.columns: sr = df[field_name_df] col_types = set([type(v) for v in sr.values]) if len(col_types) != 1: msg = 'Different types in column {}: {}'.format(field_name_df, ', '.join(list(col_types))) raise ValueError(msg) idx_layer = [idx for idx, field_name_lyr in enumerate(layer_field_names) if field_name_lyr == field_name_df] if idx_layer: fd = ldf.GetFieldDefn(idx_layer[0]) result.append(FieldDefinition.from_ogr(fd)) else: oft_type = FieldDefinition.numpy2oft(type(sr.values[0])) result.append(FieldDefinition(name=field_name_df, oft_type=oft_type)) return result
mit
adamrvfisher/TechnicalAnalysisLibrary
IncrementalStrategyOptimizer.py
1
8252
# -*- coding: utf-8 -*- """ Created on Sun Jul 22 19:13:12 2018 @author: AmatVictoriaCuramIII """ #Drag w/ Increment import numpy as np import random as rand import pandas as pd import time as t from DatabaseGrabber import DatabaseGrabber from YahooGrabber import YahooGrabber #Inputs - OHLC data Ticker1 = 'UVXY' Asset1 = DatabaseGrabber(Ticker1) Asset1 = Asset1[:] #In Iterations = range(0,100) Counter = 1 #Numbered subindex Asset1['SubIndex'] = range(1,len(Asset1)+1) Empty = [] Dataset = pd.DataFrame() for n in Iterations: #Variable windows MinWindow = rand.randint(50,200) HoldPeriod = rand.randint(25,200) ATRWindow = 20 PositionSize = 1 + (rand.random() * 5) # 8 = 8% of account per leg UniformMove = rand.random() * .6 # .5 = 1.5 highoverrollingmin for first unit to be active PositionScale = rand.random() * 4 # 8 = add 8% to each new leg over previous leg #Log Returns Asset1['LogRet'] = np.log(Asset1['Adj Close']/Asset1['Adj Close'].shift(1)) Asset1['LogRet'] = Asset1['LogRet'].fillna(0) # Asset1['Method1'] = Asset1['High'] - Asset1['Low'] # Asset1['Method2'] = abs((Asset1['High'] - Asset1['Close'].shift(1))) # Asset1['Method3'] = abs((Asset1['Low'] - Asset1['Close'].shift(1))) # Asset1['Method1'] = Asset1['Method1'].fillna(0) # Asset1['Method2'] = Asset1['Method2'].fillna(0) # Asset1['Method3'] = Asset1['Method3'].fillna(0) # Asset1['TrueRange'] = Asset1[['Method1','Method2','Method3']].max(axis = 1) # Asset1['ATR'] = Asset1['TrueRange'].rolling(window = ATRWindow, # center=False).mean() ##Market top and bottom calculation #Asset1['RollingMax'] = Asset1['High'].rolling(window=donchianwindow, center=False).max() Asset1['RollingMin'] = Asset1['Low'].rolling(window=MinWindow, center=False).min() Asset1['HighOverRollingMin'] = Asset1['High']/Asset1['RollingMin'] #Unit 1 Asset1['UnitOne'] = 0 Asset1['UnitOne'] = np.where(Asset1['HighOverRollingMin'] > 1 + (1 * UniformMove), PositionSize, 0) for i in range(0,HoldPeriod): Asset1['UnitOne'] = np.where(Asset1['UnitOne'].shift(1) == PositionSize, PositionSize, Asset1['UnitOne']) #Unit 2 Asset1['UnitTwo'] = 0 Asset1['UnitTwo'] = np.where(Asset1['HighOverRollingMin'] > 1 + (2 * UniformMove), (PositionSize + (1 * PositionScale)), 0) for i in range(0,HoldPeriod): Asset1['UnitTwo'] = np.where(Asset1['UnitTwo'].shift(1) == (PositionSize + (1 * PositionScale)), (PositionSize + (1 * PositionScale)), Asset1['UnitTwo']) #Unit 3 Asset1['UnitThree'] = 0 Asset1['UnitThree'] = np.where(Asset1['HighOverRollingMin'] > 1 + (3 * UniformMove), (PositionSize + (2 * PositionScale)), 0) for i in range(0,HoldPeriod): Asset1['UnitThree'] = np.where(Asset1['UnitThree'].shift(1) == (PositionSize + (2 * PositionScale)), (PositionSize + (2 * PositionScale)), Asset1['UnitThree']) #Unit 4 Asset1['UnitFour'] = 0 Asset1['UnitFour'] = np.where(Asset1['HighOverRollingMin'] > 1 + (4 * UniformMove), (PositionSize + (3 * PositionScale)), 0) for i in range(0,HoldPeriod): Asset1['UnitFour'] = np.where(Asset1['UnitFour'].shift(1) == (PositionSize + (3 * PositionScale)), (PositionSize + (3 * PositionScale)), Asset1['UnitFour']) #Unit 5 Asset1['UnitFive'] = 0 Asset1['UnitFive'] = np.where(Asset1['HighOverRollingMin'] > 1 + (5 * UniformMove), (PositionSize + (4 * PositionScale)), 0) for i in range(0,HoldPeriod): Asset1['UnitFive'] = np.where(Asset1['UnitFive'].shift(1) == (PositionSize + (4 * PositionScale)), (PositionSize + (4 * PositionScale)), Asset1['UnitFive']) #Unit 6 Asset1['UnitSix'] = 0 Asset1['UnitSix'] = np.where(Asset1['HighOverRollingMin'] > 1 + (6 * UniformMove), (PositionSize 
+ (5 * PositionScale)), 0) for i in range(0,HoldPeriod): Asset1['UnitSix'] = np.where(Asset1['UnitSix'].shift(1) == (PositionSize + (5 * PositionScale)), (PositionSize + (5 * PositionScale)), Asset1['UnitSix']) #Unit 7 Asset1['UnitSeven'] = 0 Asset1['UnitSeven'] = np.where(Asset1['HighOverRollingMin'] > 1 + (7 * UniformMove), (PositionSize + (6 * PositionScale)), 0) for i in range(0,HoldPeriod): Asset1['UnitSeven'] = np.where(Asset1['UnitSeven'].shift(1) == (PositionSize + (6 * PositionScale)), (PositionSize + (6 * PositionScale)), Asset1['UnitSeven']) #Unit 8 Asset1['UnitEight'] = 0 Asset1['UnitEight'] = np.where(Asset1['HighOverRollingMin'] > 1 + (8 * UniformMove), (PositionSize + (7 * PositionScale)), 0) for i in range(0,HoldPeriod): Asset1['UnitEight'] = np.where(Asset1['UnitEight'].shift(1) == (PositionSize + (7 * PositionScale)), (PositionSize + (7 * PositionScale)), Asset1['UnitEight']) #Unit 9 Asset1['UnitNine'] = 0 Asset1['UnitNine'] = np.where(Asset1['HighOverRollingMin'] > 1 + (9 * UniformMove), (PositionSize + (8 * PositionScale)), 0) for i in range(0,HoldPeriod): Asset1['UnitNine'] = np.where(Asset1['UnitNine'].shift(1) == (PositionSize + (8 * PositionScale)), (PositionSize + (8 * PositionScale)), Asset1['UnitNine']) #Unit 10 Asset1['UnitTen'] = 0 Asset1['UnitTen'] = np.where(Asset1['HighOverRollingMin'] > 1 + (10 * UniformMove), (PositionSize + (9 * PositionScale)), 0) for i in range(0,HoldPeriod): Asset1['UnitTen'] = np.where(Asset1['UnitTen'].shift(1) == (PositionSize + (9 * PositionScale)), (PositionSize + (9 * PositionScale)), Asset1['UnitTen']) #Unit 11 Asset1['UnitEleven'] = 0 Asset1['UnitEleven'] = np.where(Asset1['HighOverRollingMin'] > 1 + (11 * UniformMove), PositionSize, 0) for i in range(0,HoldPeriod): Asset1['UnitEleven'] = np.where(Asset1['UnitEleven'].shift(1) == PositionSize, PositionSize, Asset1['UnitEleven']) Asset1['SumUnits'] = Asset1[['UnitOne','UnitTwo','UnitThree','UnitFour',#]].sum(axis = 1) 'UnitFive','UnitSix','UnitSeven','UnitEight','UnitNine','UnitTen','UnitEleven']].sum(axis = 1) Asset1['Regime'] = np.where(Asset1['SumUnits'] >= 1, -1,0) Asset1['Strategy'] = Asset1['Regime'].shift(1) * Asset1['LogRet'] * (Asset1['SumUnits']/100) #Asset1['Strategy'].cumsum().apply(np.exp).plot(grid=True, # figsize=(8,5)) Asset1['Multiplier'] = Asset1['Strategy'].cumsum().apply(np.exp) drawdown = 1 - Asset1['Multiplier'].div(Asset1['Multiplier'].cummax()) drawdown = drawdown.fillna(0) #s['drawdown'] = 1 - s['Multiplier'].div(s['Multiplier'].cummax()) MaxDD = max(drawdown) Counter = Counter + 1 if MaxDD > .4: continue dailyreturn = Asset1['Strategy'].mean() if dailyreturn < .0015: continue dailyvol = Asset1['Strategy'].std() if dailyvol == 0: continue Sharpe = dailyreturn/dailyvol SharpeOverMaxDD = Sharpe/MaxDD Empty.append(MinWindow) Empty.append(HoldPeriod) Empty.append(PositionSize) Empty.append(UniformMove) Empty.append(PositionScale) Empty.append(dailyreturn) Empty.append(dailyvol) Empty.append(Sharpe) Empty.append(SharpeOverMaxDD) Empty.append(MaxDD) Emptyseries = pd.Series(Empty) Dataset[n] = Emptyseries.values Empty[:] = [] print(Counter) #Trades = Trades.rename(index={0: "ExitTaken", 1: "LengthOfTrade", 2: "EntryPriceUnitOne", # 3: "StopPriceUnitOne", 4: "SubIndexOfEntry", 5: "SubIndexOfExit", # 6: "TradeDirection", 7: "OpenPriceOnGap", 8: "TradeReturn"}) z1 = Dataset.iloc[7] w1 = np.percentile(z1, 80) v1 = [] #this variable stores the Nth percentile of top performers DS1W = pd.DataFrame() #this variable stores your financial advisors for specific dataset 
for h in z1: if h > w1: v1.append(h) for j in v1: r = Dataset.columns[(Dataset == j).iloc[7]] DS1W = pd.concat([DS1W,Dataset[r]], axis = 1) y = max(z1) k = Dataset.columns[(Dataset == y).iloc[7]] #this is the column number kfloat = float(k[0]) End = t.time() #print(End-Start, 'seconds later') print(Dataset[k])
apache-2.0
ElDeveloper/scikit-learn
sklearn/tests/test_kernel_approximation.py
244
7588
import numpy as np
from scipy.sparse import csr_matrix

from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal

from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel

# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))

X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]


def test_additive_chi2_sampler():
    # test that AdditiveChi2Sampler approximates kernel on random data

    # compute exact kernel
    # abbreviations for an easier formula
    X_ = X[:, np.newaxis, :]
    Y_ = Y[np.newaxis, :, :]

    large_kernel = 2 * X_ * Y_ / (X_ + Y_)

    # reduce to n_samples_x x n_samples_y by summing over features
    kernel = (large_kernel.sum(axis=2))

    # approximate kernel mapping
    transform = AdditiveChi2Sampler(sample_steps=3)
    X_trans = transform.fit_transform(X)
    Y_trans = transform.transform(Y)

    kernel_approx = np.dot(X_trans, Y_trans.T)

    assert_array_almost_equal(kernel, kernel_approx, 1)

    X_sp_trans = transform.fit_transform(csr_matrix(X))
    Y_sp_trans = transform.transform(csr_matrix(Y))

    assert_array_equal(X_trans, X_sp_trans.A)
    assert_array_equal(Y_trans, Y_sp_trans.A)

    # test error is raised on negative input
    Y_neg = Y.copy()
    Y_neg[0, 0] = -1
    assert_raises(ValueError, transform.transform, Y_neg)

    # test error on invalid sample_steps
    transform = AdditiveChi2Sampler(sample_steps=4)
    assert_raises(ValueError, transform.fit, X)

    # test that the sample interval is set correctly
    sample_steps_available = [1, 2, 3]
    for sample_steps in sample_steps_available:

        # test that the sample_interval is initialized correctly
        transform = AdditiveChi2Sampler(sample_steps=sample_steps)
        assert_equal(transform.sample_interval, None)

        # test that the sample_interval is changed in the fit method
        transform.fit(X)
        assert_not_equal(transform.sample_interval_, None)

    # test that the sample_interval is set correctly
    sample_interval = 0.3
    transform = AdditiveChi2Sampler(sample_steps=4,
                                    sample_interval=sample_interval)
    assert_equal(transform.sample_interval, sample_interval)
    transform.fit(X)
    assert_equal(transform.sample_interval_, sample_interval)


def test_skewed_chi2_sampler():
    # test that SkewedChi2Sampler approximates kernel on random data

    # compute exact kernel
    c = 0.03
    # abbreviations for an easier formula
    X_c = (X + c)[:, np.newaxis, :]
    Y_c = (Y + c)[np.newaxis, :, :]

    # we do it in log-space in the hope that it's more stable
    # this array is n_samples_x x n_samples_y big x n_features
    log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
                  np.log(X_c + Y_c))
    # reduce to n_samples_x x n_samples_y by summing over features in log-space
    kernel = np.exp(log_kernel.sum(axis=2))

    # approximate kernel mapping
    transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
                                  random_state=42)
    X_trans = transform.fit_transform(X)
    Y_trans = transform.transform(Y)

    kernel_approx = np.dot(X_trans, Y_trans.T)
    assert_array_almost_equal(kernel, kernel_approx, 1)

    # test error is raised on negative input
    Y_neg = Y.copy()
    Y_neg[0, 0] = -1
    assert_raises(ValueError, transform.transform, Y_neg)


def test_rbf_sampler():
    # test that RBFSampler approximates kernel on random data
    # compute exact kernel
    gamma = 10.
    kernel = rbf_kernel(X, Y, gamma=gamma)

    # approximate kernel mapping
    rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
    X_trans = rbf_transform.fit_transform(X)
    Y_trans = rbf_transform.transform(Y)
    kernel_approx = np.dot(X_trans, Y_trans.T)

    error = kernel - kernel_approx
    assert_less_equal(np.abs(np.mean(error)), 0.01)  # close to unbiased
    np.abs(error, out=error)
    assert_less_equal(np.max(error), 0.1)  # nothing too far off
    assert_less_equal(np.mean(error), 0.05)  # mean is fairly close


def test_input_validation():
    # Regression test: kernel approx. transformers should work on lists
    # No assertions; the old versions would simply crash
    X = [[1, 2], [3, 4], [5, 6]]
    AdditiveChi2Sampler().fit(X).transform(X)
    SkewedChi2Sampler().fit(X).transform(X)
    RBFSampler().fit(X).transform(X)

    X = csr_matrix(X)
    RBFSampler().fit(X).transform(X)


def test_nystroem_approximation():
    # some basic tests
    rnd = np.random.RandomState(0)
    X = rnd.uniform(size=(10, 4))

    # With n_components = n_samples this is exact
    X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
    K = rbf_kernel(X)
    assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)

    trans = Nystroem(n_components=2, random_state=rnd)
    X_transformed = trans.fit(X).transform(X)
    assert_equal(X_transformed.shape, (X.shape[0], 2))

    # test callable kernel
    linear_kernel = lambda X, Y: np.dot(X, Y.T)
    trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
    X_transformed = trans.fit(X).transform(X)
    assert_equal(X_transformed.shape, (X.shape[0], 2))

    # test that available kernels fit and transform
    kernels_available = kernel_metrics()
    for kern in kernels_available:
        trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
        X_transformed = trans.fit(X).transform(X)
        assert_equal(X_transformed.shape, (X.shape[0], 2))


def test_nystroem_singular_kernel():
    # test that nystroem works with singular kernel matrix
    rng = np.random.RandomState(0)
    X = rng.rand(10, 20)
    X = np.vstack([X] * 2)  # duplicate samples

    gamma = 100
    N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
    X_transformed = N.transform(X)

    K = rbf_kernel(X, gamma=gamma)

    assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert_true(np.all(np.isfinite(Y)))


def test_nystroem_poly_kernel_params():
    # Non-regression: Nystroem should pass other parameters besides gamma.
    rnd = np.random.RandomState(37)
    X = rnd.uniform(size=(10, 4))
    K = polynomial_kernel(X, degree=3.1, coef0=.1)
    nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
                        degree=3.1, coef0=.1)
    X_transformed = nystroem.fit_transform(X)
    assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)


def test_nystroem_callable():
    # Test Nystroem on a callable.
    rnd = np.random.RandomState(42)
    n_samples = 10
    X = rnd.uniform(size=(n_samples, 4))

    def logging_histogram_kernel(x, y, log):
        """Histogram kernel that writes to a log."""
        log.append(1)
        return np.minimum(x, y).sum()

    kernel_log = []
    X = list(X)  # test input validation
    Nystroem(kernel=logging_histogram_kernel,
             n_components=(n_samples - 1),
             kernel_params={'log': kernel_log}).fit(X)
    assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
bsd-3-clause
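The tests in the record above all reduce to one check: the inner product of the approximate feature maps should reproduce the exact kernel matrix. A minimal, self-contained sketch of that same check; the data shape, gamma and component counts below are illustrative choices, not values taken from the test file.

import numpy as np
from sklearn.kernel_approximation import RBFSampler, Nystroem
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.RandomState(0)
X = rng.random_sample(size=(200, 20))
gamma = 1.0
K_exact = rbf_kernel(X, gamma=gamma)

# Random Fourier features (RBFSampler): Monte Carlo approximation of the RBF kernel.
X_rff = RBFSampler(gamma=gamma, n_components=500, random_state=0).fit_transform(X)

# Nystroem: low-rank approximation built from a subset of landmark samples.
X_nys = Nystroem(gamma=gamma, n_components=100, random_state=0).fit_transform(X)

print("RFF max abs error:     ", np.abs(K_exact - X_rff @ X_rff.T).max())
print("Nystroem max abs error:", np.abs(K_exact - X_nys @ X_nys.T).max())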
bnaul/scikit-learn
examples/cross_decomposition/plot_compare_cross_decomposition.py
17
4897
""" =================================== Compare cross decomposition methods =================================== Simple usage of various cross decomposition algorithms: - PLSCanonical - PLSRegression, with multivariate response, a.k.a. PLS2 - PLSRegression, with univariate response, a.k.a. PLS1 - CCA Given 2 multivariate covarying two-dimensional datasets, X, and Y, PLS extracts the 'directions of covariance', i.e. the components of each datasets that explain the most shared variance between both datasets. This is apparent on the **scatterplot matrix** display: components 1 in dataset X and dataset Y are maximally correlated (points lie around the first diagonal). This is also true for components 2 in both dataset, however, the correlation across datasets for different components is weak: the point cloud is very spherical. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA # ############################################################################# # Dataset based latent variables model n = 500 # 2 latents vars: l1 = np.random.normal(size=n) l2 = np.random.normal(size=n) latents = np.array([l1, l1, l2, l2]).T X = latents + np.random.normal(size=4 * n).reshape((n, 4)) Y = latents + np.random.normal(size=4 * n).reshape((n, 4)) X_train = X[:n // 2] Y_train = Y[:n // 2] X_test = X[n // 2:] Y_test = Y[n // 2:] print("Corr(X)") print(np.round(np.corrcoef(X.T), 2)) print("Corr(Y)") print(np.round(np.corrcoef(Y.T), 2)) # ############################################################################# # Canonical (symmetric) PLS # Transform data # ~~~~~~~~~~~~~~ plsca = PLSCanonical(n_components=2) plsca.fit(X_train, Y_train) X_train_r, Y_train_r = plsca.transform(X_train, Y_train) X_test_r, Y_test_r = plsca.transform(X_test, Y_test) # Scatter plot of scores # ~~~~~~~~~~~~~~~~~~~~~~ # 1) On diagonal plot X vs Y scores on each components plt.figure(figsize=(12, 8)) plt.subplot(221) plt.scatter(X_train_r[:, 0], Y_train_r[:, 0], label="train", marker="o", s=25) plt.scatter(X_test_r[:, 0], Y_test_r[:, 0], label="test", marker="o", s=25) plt.xlabel("x scores") plt.ylabel("y scores") plt.title('Comp. 1: X vs Y (test corr = %.2f)' % np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1]) plt.xticks(()) plt.yticks(()) plt.legend(loc="best") plt.subplot(224) plt.scatter(X_train_r[:, 1], Y_train_r[:, 1], label="train", marker="o", s=25) plt.scatter(X_test_r[:, 1], Y_test_r[:, 1], label="test", marker="o", s=25) plt.xlabel("x scores") plt.ylabel("y scores") plt.title('Comp. 2: X vs Y (test corr = %.2f)' % np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1]) plt.xticks(()) plt.yticks(()) plt.legend(loc="best") # 2) Off diagonal plot components 1 vs 2 for X and Y plt.subplot(222) plt.scatter(X_train_r[:, 0], X_train_r[:, 1], label="train", marker="*", s=50) plt.scatter(X_test_r[:, 0], X_test_r[:, 1], label="test", marker="*", s=50) plt.xlabel("X comp. 1") plt.ylabel("X comp. 2") plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)' % np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1]) plt.legend(loc="best") plt.xticks(()) plt.yticks(()) plt.subplot(223) plt.scatter(Y_train_r[:, 0], Y_train_r[:, 1], label="train", marker="*", s=50) plt.scatter(Y_test_r[:, 0], Y_test_r[:, 1], label="test", marker="*", s=50) plt.xlabel("Y comp. 1") plt.ylabel("Y comp. 2") plt.title('Y comp. 1 vs Y comp. 
2 , (test corr = %.2f)' % np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1]) plt.legend(loc="best") plt.xticks(()) plt.yticks(()) plt.show() # ############################################################################# # PLS regression, with multivariate response, a.k.a. PLS2 n = 1000 q = 3 p = 10 X = np.random.normal(size=n * p).reshape((n, p)) B = np.array([[1, 2] + [0] * (p - 2)] * q).T # each Yj = 1*X1 + 2*X2 + noize Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5 pls2 = PLSRegression(n_components=3) pls2.fit(X, Y) print("True B (such that: Y = XB + Err)") print(B) # compare pls2.coef_ with B print("Estimated B") print(np.round(pls2.coef_, 1)) pls2.predict(X) # PLS regression, with univariate response, a.k.a. PLS1 n = 1000 p = 10 X = np.random.normal(size=n * p).reshape((n, p)) y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5 pls1 = PLSRegression(n_components=3) pls1.fit(X, y) # note that the number of components exceeds 1 (the dimension of y) print("Estimated betas") print(np.round(pls1.coef_, 1)) # ############################################################################# # CCA (PLS mode B with symmetric deflation) cca = CCA(n_components=2) cca.fit(X_train, Y_train) X_train_r, Y_train_r = cca.transform(X_train, Y_train) X_test_r, Y_test_r = cca.transform(X_test, Y_test)
bsd-3-clause
kenshay/ImageScript
ProgramData/SystemFiles/Python/Lib/site-packages/IPython/terminal/ipapp.py
7
13910
#!/usr/bin/env python # encoding: utf-8 """ The :class:`~IPython.core.application.Application` object for the command line :command:`ipython` program. """ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import absolute_import from __future__ import print_function import logging import os import sys import warnings from traitlets.config.loader import Config from traitlets.config.application import boolean_flag, catch_config_error, Application from IPython.core import release from IPython.core import usage from IPython.core.completer import IPCompleter from IPython.core.crashhandler import CrashHandler from IPython.core.formatters import PlainTextFormatter from IPython.core.history import HistoryManager from IPython.core.application import ( ProfileDir, BaseIPythonApplication, base_flags, base_aliases ) from IPython.core.magics import ScriptMagics from IPython.core.shellapp import ( InteractiveShellApp, shell_flags, shell_aliases ) from IPython.extensions.storemagic import StoreMagics from .interactiveshell import TerminalInteractiveShell from IPython.paths import get_ipython_dir from traitlets import ( Bool, List, Dict, default, observe, ) #----------------------------------------------------------------------------- # Globals, utilities and helpers #----------------------------------------------------------------------------- _examples = """ ipython --matplotlib # enable matplotlib integration ipython --matplotlib=qt # enable matplotlib integration with qt4 backend ipython --log-level=DEBUG # set logging to DEBUG ipython --profile=foo # start with profile foo ipython profile create foo # create profile foo w/ default config files ipython help profile # show the help for the profile subcmd ipython locate # print the path to the IPython directory ipython locate profile foo # print the path to the directory for profile `foo` """ #----------------------------------------------------------------------------- # Crash handler for this application #----------------------------------------------------------------------------- class IPAppCrashHandler(CrashHandler): """sys.excepthook for IPython itself, leaves a detailed report on disk.""" def __init__(self, app): contact_name = release.author contact_email = release.author_email bug_tracker = 'https://github.com/ipython/ipython/issues' super(IPAppCrashHandler,self).__init__( app, contact_name, contact_email, bug_tracker ) def make_report(self,traceback): """Return a string containing a crash report.""" sec_sep = self.section_sep # Start with parent report report = [super(IPAppCrashHandler, self).make_report(traceback)] # Add interactive-specific info we may have rpt_add = report.append try: rpt_add(sec_sep+"History of session input:") for line in self.app.shell.user_ns['_ih']: rpt_add(line) rpt_add('\n*** Last line of input (may not be in above history):\n') rpt_add(self.app.shell._last_input_line+'\n') except: pass return ''.join(report) #----------------------------------------------------------------------------- # Aliases and Flags #----------------------------------------------------------------------------- flags = dict(base_flags) flags.update(shell_flags) frontend_flags = {} addflag = lambda *args: frontend_flags.update(boolean_flag(*args)) addflag('autoedit-syntax', 'TerminalInteractiveShell.autoedit_syntax', 'Turn on auto editing of files with syntax errors.', 'Turn off auto editing of files with syntax errors.' 
) addflag('simple-prompt', 'TerminalInteractiveShell.simple_prompt', "Force simple minimal prompt using `raw_input`", "Use a rich interactive prompt with prompt_toolkit", ) addflag('banner', 'TerminalIPythonApp.display_banner', "Display a banner upon starting IPython.", "Don't display a banner upon starting IPython." ) addflag('confirm-exit', 'TerminalInteractiveShell.confirm_exit', """Set to confirm when you try to exit IPython with an EOF (Control-D in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a direct exit without any confirmation.""", "Don't prompt the user when exiting." ) addflag('term-title', 'TerminalInteractiveShell.term_title', "Enable auto setting the terminal title.", "Disable auto setting the terminal title." ) classic_config = Config() classic_config.InteractiveShell.cache_size = 0 classic_config.PlainTextFormatter.pprint = False classic_config.TerminalInteractiveShell.prompts_class='IPython.terminal.prompts.ClassicPrompts' classic_config.InteractiveShell.separate_in = '' classic_config.InteractiveShell.separate_out = '' classic_config.InteractiveShell.separate_out2 = '' classic_config.InteractiveShell.colors = 'NoColor' classic_config.InteractiveShell.xmode = 'Plain' frontend_flags['classic']=( classic_config, "Gives IPython a similar feel to the classic Python prompt." ) # # log doesn't make so much sense this way anymore # paa('--log','-l', # action='store_true', dest='InteractiveShell.logstart', # help="Start logging to the default log file (./ipython_log.py).") # # # quick is harder to implement frontend_flags['quick']=( {'TerminalIPythonApp' : {'quick' : True}}, "Enable quick startup with no config files." ) frontend_flags['i'] = ( {'TerminalIPythonApp' : {'force_interact' : True}}, """If running code from the command line, become interactive afterwards. It is often useful to follow this with `--` to treat remaining flags as script arguments. """ ) flags.update(frontend_flags) aliases = dict(base_aliases) aliases.update(shell_aliases) #----------------------------------------------------------------------------- # Main classes and functions #----------------------------------------------------------------------------- class LocateIPythonApp(BaseIPythonApplication): description = """print the path to the IPython dir""" subcommands = Dict(dict( profile=('IPython.core.profileapp.ProfileLocate', "print the path to an IPython profile directory", ), )) def start(self): if self.subapp is not None: return self.subapp.start() else: print(self.ipython_dir) class TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp): name = u'ipython' description = usage.cl_usage crash_handler_class = IPAppCrashHandler examples = _examples flags = Dict(flags) aliases = Dict(aliases) classes = List() @default('classes') def _classes_default(self): """This has to be in a method, for TerminalIPythonApp to be available.""" return [ InteractiveShellApp, # ShellApp comes before TerminalApp, because self.__class__, # it will also affect subclasses (e.g. 
QtConsole) TerminalInteractiveShell, HistoryManager, ProfileDir, PlainTextFormatter, IPCompleter, ScriptMagics, StoreMagics, ] deprecated_subcommands = dict( qtconsole=('qtconsole.qtconsoleapp.JupyterQtConsoleApp', """DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter Qt Console.""" ), notebook=('notebook.notebookapp.NotebookApp', """DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter HTML Notebook Server.""" ), console=('jupyter_console.app.ZMQTerminalIPythonApp', """DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter terminal-based Console.""" ), nbconvert=('nbconvert.nbconvertapp.NbConvertApp', "DEPRECATED, Will be removed in IPython 6.0 : Convert notebooks to/from other formats." ), trust=('nbformat.sign.TrustNotebookApp', "DEPRECATED, Will be removed in IPython 6.0 : Sign notebooks to trust their potentially unsafe contents at load." ), kernelspec=('jupyter_client.kernelspecapp.KernelSpecApp', "DEPRECATED, Will be removed in IPython 6.0 : Manage Jupyter kernel specifications." ), ) subcommands = dict( profile = ("IPython.core.profileapp.ProfileApp", "Create and manage IPython profiles." ), kernel = ("ipykernel.kernelapp.IPKernelApp", "Start a kernel without an attached frontend." ), locate=('IPython.terminal.ipapp.LocateIPythonApp', LocateIPythonApp.description ), history=('IPython.core.historyapp.HistoryApp', "Manage the IPython history database." ), ) deprecated_subcommands['install-nbextension'] = ( "notebook.nbextensions.InstallNBExtensionApp", "DEPRECATED, Will be removed in IPython 6.0 : Install Jupyter notebook extension files" ) subcommands.update(deprecated_subcommands) # *do* autocreate requested profile, but don't create the config file. auto_create=Bool(True) # configurables quick = Bool(False, help="""Start IPython quickly by skipping the loading of config files.""" ).tag(config=True) @observe('quick') def _quick_changed(self, change): if change['new']: self.load_config_file = lambda *a, **kw: None display_banner = Bool(True, help="Whether to display a banner upon starting IPython." ).tag(config=True) # if there is code of files to run from the cmd line, don't interact # unless the --i flag (App.force_interact) is true. force_interact = Bool(False, help="""If a command or file is given via the command-line, e.g. 
'ipython foo.py', start an interactive shell after executing the file or command.""" ).tag(config=True) @observe('force_interact') def _force_interact_changed(self, change): if change['new']: self.interact = True @observe('file_to_run', 'code_to_run', 'module_to_run') def _file_to_run_changed(self, change): new = change['new'] if new: self.something_to_run = True if new and not self.force_interact: self.interact = False # internal, not-configurable something_to_run=Bool(False) def parse_command_line(self, argv=None): """override to allow old '-pylab' flag with deprecation warning""" argv = sys.argv[1:] if argv is None else argv if '-pylab' in argv: # deprecated `-pylab` given, # warn and transform into current syntax argv = argv[:] # copy, don't clobber idx = argv.index('-pylab') warnings.warn("`-pylab` flag has been deprecated.\n" " Use `--matplotlib <backend>` and import pylab manually.") argv[idx] = '--pylab' return super(TerminalIPythonApp, self).parse_command_line(argv) @catch_config_error def initialize(self, argv=None): """Do actions after construct, but before starting the app.""" super(TerminalIPythonApp, self).initialize(argv) if self.subapp is not None: # don't bother initializing further, starting subapp return # print self.extra_args if self.extra_args and not self.something_to_run: self.file_to_run = self.extra_args[0] self.init_path() # create the shell self.init_shell() # and draw the banner self.init_banner() # Now a variety of things that happen after the banner is printed. self.init_gui_pylab() self.init_extensions() self.init_code() def init_shell(self): """initialize the InteractiveShell instance""" # Create an InteractiveShell instance. # shell.display_banner should always be False for the terminal # based app, because we call shell.show_banner() by hand below # so the banner shows *before* all extension loading stuff. self.shell = TerminalInteractiveShell.instance(parent=self, profile_dir=self.profile_dir, ipython_dir=self.ipython_dir, user_ns=self.user_ns) self.shell.configurables.append(self) def init_banner(self): """optionally display the banner""" if self.display_banner and self.interact: self.shell.show_banner() # Make sure there is a space below the banner. if self.log_level <= logging.INFO: print() def _pylab_changed(self, name, old, new): """Replace --pylab='inline' with --pylab='auto'""" if new == 'inline': warnings.warn("'inline' not available as pylab backend, " "using 'auto' instead.") self.pylab = 'auto' def start(self): if self.subapp is not None: return self.subapp.start() # perform any prexec steps: if self.interact: self.log.debug("Starting IPython's mainloop...") self.shell.mainloop() else: self.log.debug("IPython not interactive...") def load_default_config(ipython_dir=None): """Load the default config file from the default ipython_dir. This is useful for embedded shells. """ if ipython_dir is None: ipython_dir = get_ipython_dir() profile_dir = os.path.join(ipython_dir, 'profile_default') config = Config() for cf in Application._load_config_files("ipython_config", path=profile_dir): config.update(cf) return config launch_new_instance = TerminalIPythonApp.launch_instance if __name__ == '__main__': launch_new_instance()
gpl-3.0
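A hedged sketch of how the pieces defined at the bottom of ipapp.py fit together when starting IPython programmatically: load_default_config() returns a Config built from the profile's config files, and TerminalIPythonApp can then be instantiated and started with overrides. The particular overrides below are illustrative, not required.

from IPython.terminal.ipapp import TerminalIPythonApp, load_default_config

config = load_default_config()                        # reads ipython_config from profile_default
config.TerminalIPythonApp.display_banner = False      # configurable declared above
config.TerminalInteractiveShell.confirm_exit = False  # same trait the 'confirm-exit' flag toggles

app = TerminalIPythonApp.instance(config=config)
app.initialize(argv=[])   # no command-line arguments to parse
app.start()               # runs shell.mainloop() since nothing was given to execute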
steffengraber/nest-simulator
pynest/examples/spatial/test_3d_gauss.py
14
2306
# -*- coding: utf-8 -*-
#
# test_3d_gauss.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST.  If not, see <http://www.gnu.org/licenses/>.

"""
A spatial network in 3D with Gaussian connection probabilities
---------------------------------------------------------------

Hans Ekkehard Plesser, UMB
"""

import nest
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

nest.ResetKernel()

pos = nest.spatial.free(nest.random.uniform(-0.5, 0.5), extent=[1.5, 1.5, 1.5])

l1 = nest.Create('iaf_psc_alpha', 1000, positions=pos)

# visualize
# extract position information, transpose to list of x, y and z positions
xpos, ypos, zpos = zip(*nest.GetPosition(l1))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xpos, ypos, zpos, s=15, facecolor='b')

# Gaussian connections in full box volume [-0.75,0.75]**3
nest.Connect(l1, l1,
             {'rule': 'pairwise_bernoulli',
              'p': nest.spatial_distributions.gaussian(nest.spatial.distance, std=0.25),
              'allow_autapses': False,
              'mask': {'box': {'lower_left': [-0.75, -0.75, -0.75],
                               'upper_right': [0.75, 0.75, 0.75]}}})

# show connections from center element
# sender shown in red, targets in green
ctr = nest.FindCenterElement(l1)
xtgt, ytgt, ztgt = zip(*nest.GetTargetPositions(ctr, l1)[0])
xctr, yctr, zctr = nest.GetPosition(ctr)
ax.scatter([xctr], [yctr], [zctr], s=40, facecolor='r')
ax.scatter(xtgt, ytgt, ztgt, s=40, facecolor='g', edgecolor='g')

tgts = nest.GetTargetNodes(ctr, l1)[0]
distances = nest.Distance(ctr, l1)
tgt_distances = [d for i, d in enumerate(distances) if i + 1 in tgts]

plt.figure()
plt.hist(tgt_distances, 25)
plt.show()
gpl-2.0
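As a quick sanity check on the network built in the example above, the centre neuron's outgoing connections can be counted and their reach compared against the box mask. This sketch assumes the script above has already run (so nest, l1 and the connections exist) and uses nest.GetConnections from the standard NEST 3 API.

ctr = nest.FindCenterElement(l1)
outgoing = nest.GetConnections(source=ctr)
print("centre neuron has", len(outgoing), "outgoing connections")

# targets are confined to the [-0.75, 0.75]**3 box mask around the sender,
# so the largest Euclidean distance should stay below ~1.3
xc, yc, zc = nest.GetPosition(ctr)
max_dist = max(((x - xc) ** 2 + (y - yc) ** 2 + (z - zc) ** 2) ** 0.5
               for x, y, z in nest.GetTargetPositions(ctr, l1)[0])
print("farthest target at distance", max_dist)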
sternb0t/django-pandas
setup.py
2
1324
from setuptools import setup, find_packages

long_description = (
    open('README.rst').read() + '\n\n' + open('CHANGES.rst').read()
)

MAJOR = 0
MINOR = 3
MICRO = 0
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)

setup(
    name='django-pandas',
    version=VERSION,
    description='Tools for working with pydata.pandas in your Django projects',
    long_description=long_description,
    author='Christopher Clarke',
    author_email='cclarke@chrisdev.com',
    url='https://github.com/chrisdev/django-pandas/',
    packages=find_packages(),
    install_requires=[
        'Django>=1.4.2',
        'django-model-utils>=1.4.0',
        'pandas>=0.12.0',
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Framework :: Django',
    ],
    zip_safe=False,
    tests_require=[
        "Django>=1.4.2",
        "django-model-utils>=1.4.0",
        "pandas>=0.12.0",
    ],
    test_suite="runtests.runtests"
)
bsd-3-clause
rvraghav93/scikit-learn
examples/ensemble/plot_bias_variance.py
357
7324
""" ============================================================ Single estimator versus bagging: bias-variance decomposition ============================================================ This example illustrates and compares the bias-variance decomposition of the expected mean squared error of a single estimator against a bagging ensemble. In regression, the expected mean squared error of an estimator can be decomposed in terms of bias, variance and noise. On average over datasets of the regression problem, the bias term measures the average amount by which the predictions of the estimator differ from the predictions of the best possible estimator for the problem (i.e., the Bayes model). The variance term measures the variability of the predictions of the estimator when fit over different instances LS of the problem. Finally, the noise measures the irreducible part of the error which is due the variability in the data. The upper left figure illustrates the predictions (in dark red) of a single decision tree trained over a random dataset LS (the blue dots) of a toy 1d regression problem. It also illustrates the predictions (in light red) of other single decision trees trained over other (and different) randomly drawn instances LS of the problem. Intuitively, the variance term here corresponds to the width of the beam of predictions (in light red) of the individual estimators. The larger the variance, the more sensitive are the predictions for `x` to small changes in the training set. The bias term corresponds to the difference between the average prediction of the estimator (in cyan) and the best possible model (in dark blue). On this problem, we can thus observe that the bias is quite low (both the cyan and the blue curves are close to each other) while the variance is large (the red beam is rather wide). The lower left figure plots the pointwise decomposition of the expected mean squared error of a single decision tree. It confirms that the bias term (in blue) is low while the variance is large (in green). It also illustrates the noise part of the error which, as expected, appears to be constant and around `0.01`. The right figures correspond to the same plots but using instead a bagging ensemble of decision trees. In both figures, we can observe that the bias term is larger than in the previous case. In the upper right figure, the difference between the average prediction (in cyan) and the best possible model is larger (e.g., notice the offset around `x=2`). In the lower right figure, the bias curve is also slightly higher than in the lower left figure. In terms of variance however, the beam of predictions is narrower, which suggests that the variance is lower. Indeed, as the lower right figure confirms, the variance term (in green) is lower than for single decision trees. Overall, the bias- variance decomposition is therefore no longer the same. The tradeoff is better for bagging: averaging several decision trees fit on bootstrap copies of the dataset slightly increases the bias term but allows for a larger reduction of the variance, which results in a lower overall mean squared error (compare the red curves int the lower figures). The script output also confirms this intuition. The total error of the bagging ensemble is lower than the total error of a single decision tree, and this difference indeed mainly stems from a reduced variance. For further details on bias-variance decomposition, see section 7.3 of [1]_. References ---------- .. [1] T. Hastie, R. Tibshirani and J. 
Friedman, "Elements of Statistical Learning", Springer, 2009. """ print(__doc__) # Author: Gilles Louppe <g.louppe@gmail.com> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.ensemble import BaggingRegressor from sklearn.tree import DecisionTreeRegressor # Settings n_repeat = 50 # Number of iterations for computing expectations n_train = 50 # Size of the training set n_test = 1000 # Size of the test set noise = 0.1 # Standard deviation of the noise np.random.seed(0) # Change this for exploring the bias-variance decomposition of other # estimators. This should work well for estimators with high variance (e.g., # decision trees or KNN), but poorly for estimators with low variance (e.g., # linear models). estimators = [("Tree", DecisionTreeRegressor()), ("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))] n_estimators = len(estimators) # Generate data def f(x): x = x.ravel() return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2) def generate(n_samples, noise, n_repeat=1): X = np.random.rand(n_samples) * 10 - 5 X = np.sort(X) if n_repeat == 1: y = f(X) + np.random.normal(0.0, noise, n_samples) else: y = np.zeros((n_samples, n_repeat)) for i in range(n_repeat): y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples) X = X.reshape((n_samples, 1)) return X, y X_train = [] y_train = [] for i in range(n_repeat): X, y = generate(n_samples=n_train, noise=noise) X_train.append(X) y_train.append(y) X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat) # Loop over estimators to compare for n, (name, estimator) in enumerate(estimators): # Compute predictions y_predict = np.zeros((n_test, n_repeat)) for i in range(n_repeat): estimator.fit(X_train[i], y_train[i]) y_predict[:, i] = estimator.predict(X_test) # Bias^2 + Variance + Noise decomposition of the mean squared error y_error = np.zeros(n_test) for i in range(n_repeat): for j in range(n_repeat): y_error += (y_test[:, j] - y_predict[:, i]) ** 2 y_error /= (n_repeat * n_repeat) y_noise = np.var(y_test, axis=1) y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2 y_var = np.var(y_predict, axis=1) print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) " " + {3:.4f} (var) + {4:.4f} (noise)".format(name, np.mean(y_error), np.mean(y_bias), np.mean(y_var), np.mean(y_noise))) # Plot figures plt.subplot(2, n_estimators, n + 1) plt.plot(X_test, f(X_test), "b", label="$f(x)$") plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$") for i in range(n_repeat): if i == 0: plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$") else: plt.plot(X_test, y_predict[:, i], "r", alpha=0.05) plt.plot(X_test, np.mean(y_predict, axis=1), "c", label="$\mathbb{E}_{LS} \^y(x)$") plt.xlim([-5, 5]) plt.title(name) if n == 0: plt.legend(loc="upper left", prop={"size": 11}) plt.subplot(2, n_estimators, n_estimators + n + 1) plt.plot(X_test, y_error, "r", label="$error(x)$") plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"), plt.plot(X_test, y_var, "g", label="$variance(x)$"), plt.plot(X_test, y_noise, "c", label="$noise(x)$") plt.xlim([-5, 5]) plt.ylim([0, 0.1]) if n == 0: plt.legend(loc="upper left", prop={"size": 11}) plt.show()
bsd-3-clause
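The decomposition that the script above estimates numerically is the standard pointwise identity (cf. the cited Hastie, Tibshirani and Friedman reference); writing it out makes explicit what the y_noise, y_bias and y_var arrays in the code correspond to:

\mathbb{E}_{LS,\epsilon}\left[ \left( y - \hat{y}_{LS}(x) \right)^2 \right]
    = \underbrace{\sigma_\epsilon^2}_{\text{noise}(x)}
    + \underbrace{\left( f(x) - \mathbb{E}_{LS}\left[ \hat{y}_{LS}(x) \right] \right)^2}_{\text{bias}^2(x)}
    + \underbrace{\mathbb{E}_{LS}\left[ \left( \hat{y}_{LS}(x) - \mathbb{E}_{LS}\left[ \hat{y}_{LS}(x) \right] \right)^2 \right]}_{\text{variance}(x)}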
solarjoe/numpy
numpy/lib/function_base.py
3
169215
from __future__ import division, absolute_import, print_function import collections import re import sys import warnings import numpy as np import numpy.core.numeric as _nx from numpy.core import linspace, atleast_1d, atleast_2d, transpose from numpy.core.numeric import ( ones, zeros, arange, concatenate, array, asarray, asanyarray, empty, empty_like, ndarray, around, floor, ceil, take, dot, where, intp, integer, isscalar, absolute, AxisError ) from numpy.core.umath import ( pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, mod, exp, log10, not_equal, subtract ) from numpy.core.fromnumeric import ( ravel, nonzero, sort, partition, mean, any, sum ) from numpy.core.numerictypes import typecodes, number from numpy.lib.twodim_base import diag from .utils import deprecate from numpy.core.multiarray import ( _insert, add_docstring, digitize, bincount, normalize_axis_index, interp as compiled_interp, interp_complex as compiled_interp_complex ) from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc from numpy.compat import long from numpy.compat.py3k import basestring if sys.version_info[0] < 3: # Force range to be a generator, for np.delete's usage. range = xrange import __builtin__ as builtins else: import builtins __all__ = [ 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'flip', 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', 'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef', 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring', 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc' ] def rot90(m, k=1, axes=(0,1)): """ Rotate an array by 90 degrees in the plane specified by axes. Rotation direction is from the first towards the second axis. .. versionadded:: 1.12.0 Parameters ---------- m : array_like Array of two or more dimensions. k : integer Number of times the array is rotated by 90 degrees. axes: (2,) array_like The array is rotated in the plane defined by the axes. Axes must be different. Returns ------- y : ndarray A rotated view of `m`. See Also -------- flip : Reverse the order of elements in an array along the given axis. fliplr : Flip an array horizontally. flipud : Flip an array vertically. Notes ----- rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1)) rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1)) Examples -------- >>> m = np.array([[1,2],[3,4]], int) >>> m array([[1, 2], [3, 4]]) >>> np.rot90(m) array([[2, 4], [1, 3]]) >>> np.rot90(m, 2) array([[4, 3], [2, 1]]) >>> m = np.arange(8).reshape((2,2,2)) >>> np.rot90(m, 1, (1,2)) array([[[1, 3], [0, 2]], [[5, 7], [4, 6]]]) """ axes = tuple(axes) if len(axes) != 2: raise ValueError("len(axes) must be 2.") m = asanyarray(m) if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim: raise ValueError("Axes must be different.") if (axes[0] >= m.ndim or axes[0] < -m.ndim or axes[1] >= m.ndim or axes[1] < -m.ndim): raise ValueError("Axes={} out of range for array of ndim={}." 
.format(axes, m.ndim)) k %= 4 if k == 0: return m[:] if k == 2: return flip(flip(m, axes[0]), axes[1]) axes_list = arange(0, m.ndim) (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], axes_list[axes[0]]) if k == 1: return transpose(flip(m,axes[1]), axes_list) else: # k == 3 return flip(transpose(m, axes_list), axes[1]) def flip(m, axis): """ Reverse the order of elements in an array along the given axis. The shape of the array is preserved, but the elements are reordered. .. versionadded:: 1.12.0 Parameters ---------- m : array_like Input array. axis : integer Axis in array, which entries are reversed. Returns ------- out : array_like A view of `m` with the entries of axis reversed. Since a view is returned, this operation is done in constant time. See Also -------- flipud : Flip an array vertically (axis=0). fliplr : Flip an array horizontally (axis=1). Notes ----- flip(m, 0) is equivalent to flipud(m). flip(m, 1) is equivalent to fliplr(m). flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n. Examples -------- >>> A = np.arange(8).reshape((2,2,2)) >>> A array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) >>> flip(A, 0) array([[[4, 5], [6, 7]], [[0, 1], [2, 3]]]) >>> flip(A, 1) array([[[2, 3], [0, 1]], [[6, 7], [4, 5]]]) >>> A = np.random.randn(3,4,5) >>> np.all(flip(A,2) == A[:,:,::-1,...]) True """ if not hasattr(m, 'ndim'): m = asarray(m) indexer = [slice(None)] * m.ndim try: indexer[axis] = slice(None, None, -1) except IndexError: raise ValueError("axis=%i is invalid for the %i-dimensional input array" % (axis, m.ndim)) return m[tuple(indexer)] def iterable(y): """ Check whether or not an object can be iterated over. Parameters ---------- y : object Input object. Returns ------- b : bool Return ``True`` if the object has an iterator method or is a sequence and ``False`` otherwise. Examples -------- >>> np.iterable([1, 2, 3]) True >>> np.iterable(2) False """ try: iter(y) except TypeError: return False return True def _hist_bin_sqrt(x): """ Square root histogram bin estimator. Bin width is inversely proportional to the data size. Used by many programs for its simplicity. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ return x.ptp() / np.sqrt(x.size) def _hist_bin_sturges(x): """ Sturges histogram bin estimator. A very simplistic estimator based on the assumption of normality of the data. This estimator has poor performance for non-normal data, which becomes especially obvious for large data sets. The estimate depends only on size of the data. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ return x.ptp() / (np.log2(x.size) + 1.0) def _hist_bin_rice(x): """ Rice histogram bin estimator. Another simple estimator with no normality assumption. It has better performance for large data than Sturges, but tends to overestimate the number of bins. The number of bins is proportional to the cube root of data size (asymptotically optimal). The estimate depends only on size of the data. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ return x.ptp() / (2.0 * x.size ** (1.0 / 3)) def _hist_bin_scott(x): """ Scott histogram bin estimator. 
The binwidth is proportional to the standard deviation of the data and inversely proportional to the cube root of data size (asymptotically optimal). Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x) def _hist_bin_doane(x): """ Doane's histogram bin estimator. Improved version of Sturges' formula which works better for non-normal data. See stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ if x.size > 2: sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3))) sigma = np.std(x) if sigma > 0.0: # These three operations add up to # g1 = np.mean(((x - np.mean(x)) / sigma)**3) # but use only one temp array instead of three temp = x - np.mean(x) np.true_divide(temp, sigma, temp) np.power(temp, 3, temp) g1 = np.mean(temp) return x.ptp() / (1.0 + np.log2(x.size) + np.log2(1.0 + np.absolute(g1) / sg1)) return 0.0 def _hist_bin_fd(x): """ The Freedman-Diaconis histogram bin estimator. The Freedman-Diaconis rule uses interquartile range (IQR) to estimate binwidth. It is considered a variation of the Scott rule with more robustness as the IQR is less affected by outliers than the standard deviation. However, the IQR depends on fewer points than the standard deviation, so it is less accurate, especially for long tailed distributions. If the IQR is 0, this function returns 1 for the number of bins. Binwidth is inversely proportional to the cube root of data size (asymptotically optimal). Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. """ iqr = np.subtract(*np.percentile(x, [75, 25])) return 2.0 * iqr * x.size ** (-1.0 / 3.0) def _hist_bin_auto(x): """ Histogram bin estimator that uses the minimum width of the Freedman-Diaconis and Sturges estimators. The FD estimator is usually the most robust method, but its width estimate tends to be too large for small `x`. The Sturges estimator is quite good for small (<1000) datasets and is the default in the R language. This method gives good off the shelf behaviour. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. Returns ------- h : An estimate of the optimal bin width for the given data. See Also -------- _hist_bin_fd, _hist_bin_sturges """ # There is no need to check for zero here. If ptp is, so is IQR and # vice versa. Either both are zero or neither one is. return min(_hist_bin_fd(x), _hist_bin_sturges(x)) # Private dict initialized at module load time _hist_bin_selectors = {'auto': _hist_bin_auto, 'doane': _hist_bin_doane, 'fd': _hist_bin_fd, 'rice': _hist_bin_rice, 'scott': _hist_bin_scott, 'sqrt': _hist_bin_sqrt, 'sturges': _hist_bin_sturges} def histogram(a, bins=10, range=None, normed=False, weights=None, density=None): r""" Compute the histogram of a set of data. Parameters ---------- a : array_like Input data. The histogram is computed over the flattened array. bins : int or sequence of scalars or str, optional If `bins` is an int, it defines the number of equal-width bins in the given range (10, by default). 
If `bins` is a sequence, it defines the bin edges, including the rightmost edge, allowing for non-uniform bin widths. .. versionadded:: 1.11.0 If `bins` is a string from the list below, `histogram` will use the method chosen to calculate the optimal bin width and consequently the number of bins (see `Notes` for more detail on the estimators) from the data that falls within the requested range. While the bin width will be optimal for the actual data in the range, the number of bins will be computed to fill the entire range, including the empty portions. For visualisation, using the 'auto' option is suggested. Weighted data is not supported for automated bin size selection. 'auto' Maximum of the 'sturges' and 'fd' estimators. Provides good all around performance. 'fd' (Freedman Diaconis Estimator) Robust (resilient to outliers) estimator that takes into account data variability and data size. 'doane' An improved version of Sturges' estimator that works better with non-normal datasets. 'scott' Less robust estimator that that takes into account data variability and data size. 'rice' Estimator does not take variability into account, only data size. Commonly overestimates number of bins required. 'sturges' R's default method, only accounts for data size. Only optimal for gaussian data and underestimates number of bins for large non-gaussian datasets. 'sqrt' Square root (of data size) estimator, used by Excel and other programs for its speed and simplicity. range : (float, float), optional The lower and upper range of the bins. If not provided, range is simply ``(a.min(), a.max())``. Values outside the range are ignored. The first element of the range must be less than or equal to the second. `range` affects the automatic bin computation as well. While bin width is computed to be optimal based on the actual data within `range`, the bin count will fill the entire range including portions containing no data. normed : bool, optional This keyword is deprecated in NumPy 1.6.0 due to confusing/buggy behavior. It will be removed in NumPy 2.0.0. Use the ``density`` keyword instead. If ``False``, the result will contain the number of samples in each bin. If ``True``, the result is the value of the probability *density* function at the bin, normalized such that the *integral* over the range is 1. Note that this latter behavior is known to be buggy with unequal bin widths; use ``density`` instead. weights : array_like, optional An array of weights, of the same shape as `a`. Each value in `a` only contributes its associated weight towards the bin count (instead of 1). If `density` is True, the weights are normalized, so that the integral of the density over the range remains 1. density : bool, optional If ``False``, the result will contain the number of samples in each bin. If ``True``, the result is the value of the probability *density* function at the bin, normalized such that the *integral* over the range is 1. Note that the sum of the histogram values will not be equal to 1 unless bins of unity width are chosen; it is not a probability *mass* function. Overrides the ``normed`` keyword if given. Returns ------- hist : array The values of the histogram. See `density` and `weights` for a description of the possible semantics. bin_edges : array of dtype float Return the bin edges ``(length(hist)+1)``. See Also -------- histogramdd, bincount, searchsorted, digitize Notes ----- All but the last (righthand-most) bin is half-open. 
In other words, if `bins` is:: [1, 2, 3, 4] then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes* 4. .. versionadded:: 1.11.0 The methods to estimate the optimal number of bins are well founded in literature, and are inspired by the choices R provides for histogram visualisation. Note that having the number of bins proportional to :math:`n^{1/3}` is asymptotically optimal, which is why it appears in most estimators. These are simply plug-in methods that give good starting points for number of bins. In the equations below, :math:`h` is the binwidth and :math:`n_h` is the number of bins. All estimators that compute bin counts are recast to bin width using the `ptp` of the data. The final bin count is obtained from ``np.round(np.ceil(range / h))`. 'Auto' (maximum of the 'Sturges' and 'FD' estimators) A compromise to get a good value. For small datasets the Sturges value will usually be chosen, while larger datasets will usually default to FD. Avoids the overly conservative behaviour of FD and Sturges for small and large datasets respectively. Switchover point is usually :math:`a.size \approx 1000`. 'FD' (Freedman Diaconis Estimator) .. math:: h = 2 \frac{IQR}{n^{1/3}} The binwidth is proportional to the interquartile range (IQR) and inversely proportional to cube root of a.size. Can be too conservative for small datasets, but is quite good for large datasets. The IQR is very robust to outliers. 'Scott' .. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}} The binwidth is proportional to the standard deviation of the data and inversely proportional to cube root of ``x.size``. Can be too conservative for small datasets, but is quite good for large datasets. The standard deviation is not very robust to outliers. Values are very similar to the Freedman-Diaconis estimator in the absence of outliers. 'Rice' .. math:: n_h = 2n^{1/3} The number of bins is only proportional to cube root of ``a.size``. It tends to overestimate the number of bins and it does not take into account data variability. 'Sturges' .. math:: n_h = \log _{2}n+1 The number of bins is the base 2 log of ``a.size``. This estimator assumes normality of data and is too conservative for larger, non-normal datasets. This is the default method in R's ``hist`` method. 'Doane' .. math:: n_h = 1 + \log_{2}(n) + \log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}}) g_1 = mean[(\frac{x - \mu}{\sigma})^3] \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}} An improved version of Sturges' formula that produces better estimates for non-normal datasets. This estimator attempts to account for the skew of the data. 'Sqrt' .. math:: n_h = \sqrt n The simplest and fastest estimator. Only takes into account the data size. Examples -------- >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) (array([0, 2, 1]), array([0, 1, 2, 3])) >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) (array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) (array([1, 4, 1]), array([0, 1, 2, 3])) >>> a = np.arange(5) >>> hist, bin_edges = np.histogram(a, density=True) >>> hist array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5]) >>> hist.sum() 2.4999999999999996 >>> np.sum(hist * np.diff(bin_edges)) 1.0 .. 
versionadded:: 1.11.0 Automated Bin Selection Methods example, using 2 peak random data with 2000 points: >>> import matplotlib.pyplot as plt >>> rng = np.random.RandomState(10) # deterministic random data >>> a = np.hstack((rng.normal(size=1000), ... rng.normal(loc=5, scale=2, size=1000))) >>> plt.hist(a, bins='auto') # arguments are passed to np.histogram >>> plt.title("Histogram with 'auto' bins") >>> plt.show() """ a = asarray(a) if weights is not None: weights = asarray(weights) if np.any(weights.shape != a.shape): raise ValueError( 'weights should have the same shape as a.') weights = weights.ravel() a = a.ravel() # Do not modify the original value of range so we can check for `None` if range is None: if a.size == 0: # handle empty arrays. Can't determine range, so use 0-1. mn, mx = 0.0, 1.0 else: mn, mx = a.min() + 0.0, a.max() + 0.0 else: mn, mx = [mi + 0.0 for mi in range] if mn > mx: raise ValueError( 'max must be larger than min in range parameter.') if not np.all(np.isfinite([mn, mx])): raise ValueError( 'range parameter must be finite.') if mn == mx: mn -= 0.5 mx += 0.5 if isinstance(bins, basestring): # if `bins` is a string for an automatic method, # this will replace it with the number of bins calculated if bins not in _hist_bin_selectors: raise ValueError("{0} not a valid estimator for bins".format(bins)) if weights is not None: raise TypeError("Automated estimation of the number of " "bins is not supported for weighted data") # Make a reference to `a` b = a # Update the reference if the range needs truncation if range is not None: keep = (a >= mn) keep &= (a <= mx) if not np.logical_and.reduce(keep): b = a[keep] if b.size == 0: bins = 1 else: # Do not call selectors on empty arrays width = _hist_bin_selectors[bins](b) if width: bins = int(np.ceil((mx - mn) / width)) else: # Width can be zero for some estimators, e.g. FD when # the IQR of the data is zero. bins = 1 # Histogram is an integer or a float array depending on the weights. if weights is None: ntype = np.dtype(np.intp) else: ntype = weights.dtype # We set a block size, as this allows us to iterate over chunks when # computing histograms, to minimize memory usage. BLOCK = 65536 if not iterable(bins): if np.isscalar(bins) and bins < 1: raise ValueError( '`bins` should be a positive integer.') # At this point, if the weights are not integer, floating point, or # complex, we have to use the slow algorithm. if weights is not None and not (np.can_cast(weights.dtype, np.double) or np.can_cast(weights.dtype, complex)): bins = linspace(mn, mx, bins + 1, endpoint=True) if not iterable(bins): # We now convert values of a to bin indices, under the assumption of # equal bin widths (which is valid here). # Initialize empty histogram n = np.zeros(bins, ntype) # Pre-compute histogram scaling factor norm = bins / (mx - mn) # Compute the bin edges for potential correction. bin_edges = linspace(mn, mx, bins + 1, endpoint=True) # We iterate over blocks here for two reasons: the first is that for # large arrays, it is actually faster (for example for a 10^8 array it # is 2x as fast) and it results in a memory footprint 3x lower in the # limit of large arrays. 
for i in arange(0, len(a), BLOCK): tmp_a = a[i:i+BLOCK] if weights is None: tmp_w = None else: tmp_w = weights[i:i + BLOCK] # Only include values in the right range keep = (tmp_a >= mn) keep &= (tmp_a <= mx) if not np.logical_and.reduce(keep): tmp_a = tmp_a[keep] if tmp_w is not None: tmp_w = tmp_w[keep] tmp_a_data = tmp_a.astype(float) tmp_a = tmp_a_data - mn tmp_a *= norm # Compute the bin indices, and for values that lie exactly on mx we # need to subtract one indices = tmp_a.astype(np.intp) indices[indices == bins] -= 1 # The index computation is not guaranteed to give exactly # consistent results within ~1 ULP of the bin edges. decrement = tmp_a_data < bin_edges[indices] indices[decrement] -= 1 # The last bin includes the right edge. The other bins do not. increment = ((tmp_a_data >= bin_edges[indices + 1]) & (indices != bins - 1)) indices[increment] += 1 # We now compute the histogram using bincount if ntype.kind == 'c': n.real += np.bincount(indices, weights=tmp_w.real, minlength=bins) n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins) else: n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype) # Rename the bin edges for return. bins = bin_edges else: bins = asarray(bins) if (np.diff(bins) < 0).any(): raise ValueError( 'bins must increase monotonically.') # Initialize empty histogram n = np.zeros(bins.shape, ntype) if weights is None: for i in arange(0, len(a), BLOCK): sa = sort(a[i:i+BLOCK]) n += np.r_[sa.searchsorted(bins[:-1], 'left'), sa.searchsorted(bins[-1], 'right')] else: zero = array(0, dtype=ntype) for i in arange(0, len(a), BLOCK): tmp_a = a[i:i+BLOCK] tmp_w = weights[i:i+BLOCK] sorting_index = np.argsort(tmp_a) sa = tmp_a[sorting_index] sw = tmp_w[sorting_index] cw = np.concatenate(([zero, ], sw.cumsum())) bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), sa.searchsorted(bins[-1], 'right')] n += cw[bin_index] n = np.diff(n) if density is not None: if density: db = array(np.diff(bins), float) return n/db/n.sum(), bins else: return n, bins else: # deprecated, buggy behavior. Remove for NumPy 2.0.0 if normed: db = array(np.diff(bins), float) return n/(n*db).sum(), bins else: return n, bins def histogramdd(sample, bins=10, range=None, normed=False, weights=None): """ Compute the multidimensional histogram of some data. Parameters ---------- sample : array_like The data to be histogrammed. It must be an (N,D) array or data that can be converted to such. The rows of the resulting array are the coordinates of points in a D dimensional polytope. bins : sequence or int, optional The bin specification: * A sequence of arrays describing the bin edges along each dimension. * The number of bins for each dimension (nx, ny, ... =bins) * The number of bins for all dimensions (nx=ny=...=bins). range : sequence, optional A sequence of lower and upper bin edges to be used if the edges are not given explicitly in `bins`. Defaults to the minimum and maximum values along each dimension. normed : bool, optional If False, returns the number of samples in each bin. If True, returns the bin density ``bin_count / sample_count / bin_volume``. weights : (N,) array_like, optional An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. Weights are normalized to 1 if normed is True. If normed is False, the values of the returned histogram are equal to the sum of the weights belonging to the samples falling into each bin. Returns ------- H : ndarray The multidimensional histogram of sample x. See normed and weights for the different possible semantics. 
edges : list A list of D arrays describing the bin edges for each dimension. See Also -------- histogram: 1-D histogram histogram2d: 2-D histogram Examples -------- >>> r = np.random.randn(100,3) >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) >>> H.shape, edges[0].size, edges[1].size, edges[2].size ((5, 8, 4), 6, 9, 5) """ try: # Sample is an ND-array. N, D = sample.shape except (AttributeError, ValueError): # Sample is a sequence of 1D arrays. sample = atleast_2d(sample).T N, D = sample.shape nbin = empty(D, int) edges = D*[None] dedges = D*[None] if weights is not None: weights = asarray(weights) try: M = len(bins) if M != D: raise ValueError( 'The dimension of bins must be equal to the dimension of the ' ' sample x.') except TypeError: # bins is an integer bins = D*[bins] # Select range for each dimension # Used only if number of bins is given. if range is None: # Handle empty input. Range can't be determined in that case, use 0-1. if N == 0: smin = zeros(D) smax = ones(D) else: smin = atleast_1d(array(sample.min(0), float)) smax = atleast_1d(array(sample.max(0), float)) else: if not np.all(np.isfinite(range)): raise ValueError( 'range parameter must be finite.') smin = zeros(D) smax = zeros(D) for i in arange(D): smin[i], smax[i] = range[i] # Make sure the bins have a finite width. for i in arange(len(smin)): if smin[i] == smax[i]: smin[i] = smin[i] - .5 smax[i] = smax[i] + .5 # avoid rounding issues for comparisons when dealing with inexact types if np.issubdtype(sample.dtype, np.inexact): edge_dt = sample.dtype else: edge_dt = float # Create edge arrays for i in arange(D): if isscalar(bins[i]): if bins[i] < 1: raise ValueError( "Element at index %s in `bins` should be a positive " "integer." % i) nbin[i] = bins[i] + 2 # +2 for outlier bins edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt) else: edges[i] = asarray(bins[i], edge_dt) nbin[i] = len(edges[i]) + 1 # +1 for outlier bins dedges[i] = diff(edges[i]) if np.any(np.asarray(dedges[i]) <= 0): raise ValueError( "Found bin edge of size <= 0. Did you specify `bins` with" "non-monotonic sequence?") nbin = asarray(nbin) # Handle empty input. if N == 0: return np.zeros(nbin-2), edges # Compute the bin number each sample falls into. Ncount = {} for i in arange(D): Ncount[i] = digitize(sample[:, i], edges[i]) # Using digitize, values that fall on an edge are put in the right bin. # For the rightmost bin, we want values equal to the right edge to be # counted in the last bin, and not as an outlier. for i in arange(D): # Rounding precision mindiff = dedges[i].min() if not np.isinf(mindiff): decimal = int(-log10(mindiff)) + 6 # Find which points are on the rightmost edge. not_smaller_than_edge = (sample[:, i] >= edges[i][-1]) on_edge = (around(sample[:, i], decimal) == around(edges[i][-1], decimal)) # Shift these points one bin to the left. Ncount[i][nonzero(on_edge & not_smaller_than_edge)[0]] -= 1 # Flattened histogram matrix (1D) # Reshape is used so that overlarge arrays # will raise an error. hist = zeros(nbin, float).reshape(-1) # Compute the sample indices in the flattened histogram matrix. ni = nbin.argsort() xy = zeros(N, int) for i in arange(0, D-1): xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod() xy += Ncount[ni[-1]] # Compute the number of repetitions in xy and assign it to the # flattened histmat. 
if len(xy) == 0: return zeros(nbin-2, int), edges flatcount = bincount(xy, weights) a = arange(len(flatcount)) hist[a] = flatcount # Shape into a proper matrix hist = hist.reshape(sort(nbin)) for i in arange(nbin.size): j = ni.argsort()[i] hist = hist.swapaxes(i, j) ni[i], ni[j] = ni[j], ni[i] # Remove outliers (indices 0 and -1 for each dimension). core = D*[slice(1, -1)] hist = hist[core] # Normalize if normed is True if normed: s = hist.sum() for i in arange(D): shape = ones(D, int) shape[i] = nbin[i] - 2 hist = hist / dedges[i].reshape(shape) hist /= s if (hist.shape != nbin - 2).any(): raise RuntimeError( "Internal Shape Error") return hist, edges def average(a, axis=None, weights=None, returned=False): """ Compute the weighted average along the specified axis. Parameters ---------- a : array_like Array containing data to be averaged. If `a` is not an array, a conversion is attempted. axis : None or int or tuple of ints, optional Axis or axes along which to average `a`. The default, axis=None, will average over all of the elements of the input array. If axis is negative it counts from the last to the first axis. .. versionadded:: 1.7.0 If axis is a tuple of ints, averaging is performed on all of the axes specified in the tuple instead of a single axis or all the axes as before. weights : array_like, optional An array of weights associated with the values in `a`. Each value in `a` contributes to the average according to its associated weight. The weights array can either be 1-D (in which case its length must be the size of `a` along the given axis) or of the same shape as `a`. If `weights=None`, then all data in `a` are assumed to have a weight equal to one. returned : bool, optional Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) is returned, otherwise only the average is returned. If `weights=None`, `sum_of_weights` is equivalent to the number of elements over which the average is taken. Returns ------- average, [sum_of_weights] : array_type or double Return the average along the specified axis. When returned is `True`, return a tuple with the average as the first element and the sum of the weights as the second element. The return type is `Float` if `a` is of integer type, otherwise it is of the same type as `a`. `sum_of_weights` is of the same type as `average`. Raises ------ ZeroDivisionError When all weights along axis are zero. See `numpy.ma.average` for a version robust to this type of error. TypeError When the length of 1D `weights` is not the same as the shape of `a` along axis. See Also -------- mean ma.average : average for masked arrays -- useful if your data contains "missing" values Examples -------- >>> data = range(1,5) >>> data [1, 2, 3, 4] >>> np.average(data) 2.5 >>> np.average(range(1,11), weights=range(10,0,-1)) 4.0 >>> data = np.arange(6).reshape((3,2)) >>> data array([[0, 1], [2, 3], [4, 5]]) >>> np.average(data, axis=1, weights=[1./4, 3./4]) array([ 0.75, 2.75, 4.75]) >>> np.average(data, weights=[1./4, 3./4]) Traceback (most recent call last): ... TypeError: Axis must be specified when shapes of a and weights differ. 
""" a = np.asanyarray(a) if weights is None: avg = a.mean(axis) scl = avg.dtype.type(a.size/avg.size) else: wgt = np.asanyarray(weights) if issubclass(a.dtype.type, (np.integer, np.bool_)): result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8') else: result_dtype = np.result_type(a.dtype, wgt.dtype) # Sanity checks if a.shape != wgt.shape: if axis is None: raise TypeError( "Axis must be specified when shapes of a and weights " "differ.") if wgt.ndim != 1: raise TypeError( "1D weights expected when shapes of a and weights differ.") if wgt.shape[0] != a.shape[axis]: raise ValueError( "Length of weights not compatible with specified axis.") # setup wgt to broadcast along axis wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape) wgt = wgt.swapaxes(-1, axis) scl = wgt.sum(axis=axis, dtype=result_dtype) if np.any(scl == 0.0): raise ZeroDivisionError( "Weights sum to zero, can't be normalized") avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl if returned: if scl.shape != avg.shape: scl = np.broadcast_to(scl, avg.shape).copy() return avg, scl else: return avg def asarray_chkfinite(a, dtype=None, order=None): """Convert the input to an array, checking for NaNs or Infs. Parameters ---------- a : array_like Input data, in any form that can be converted to an array. This includes lists, lists of tuples, tuples, tuples of tuples, tuples of lists and ndarrays. Success requires no NaNs or Infs. dtype : data-type, optional By default, the data-type is inferred from the input data. order : {'C', 'F'}, optional Whether to use row-major (C-style) or column-major (Fortran-style) memory representation. Defaults to 'C'. Returns ------- out : ndarray Array interpretation of `a`. No copy is performed if the input is already an ndarray. If `a` is a subclass of ndarray, a base class ndarray is returned. Raises ------ ValueError Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity). See Also -------- asarray : Create and array. asanyarray : Similar function which passes through subclasses. ascontiguousarray : Convert input to a contiguous array. asfarray : Convert input to a floating point ndarray. asfortranarray : Convert input to an ndarray with column-major memory order. fromiter : Create an array from an iterator. fromfunction : Construct an array by executing a function on grid positions. Examples -------- Convert a list into an array. If all elements are finite ``asarray_chkfinite`` is identical to ``asarray``. >>> a = [1, 2] >>> np.asarray_chkfinite(a, dtype=float) array([1., 2.]) Raises ValueError if array_like contains Nans or Infs. >>> a = [1, 2, np.inf] >>> try: ... np.asarray_chkfinite(a) ... except ValueError: ... print('ValueError') ... ValueError """ a = asarray(a, dtype=dtype, order=order) if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all(): raise ValueError( "array must not contain infs or NaNs") return a def piecewise(x, condlist, funclist, *args, **kw): """ Evaluate a piecewise-defined function. Given a set of conditions and corresponding functions, evaluate each function on the input data wherever its condition is true. Parameters ---------- x : ndarray or scalar The input domain. condlist : list of bool arrays or bool scalars Each boolean array corresponds to a function in `funclist`. Wherever `condlist[i]` is True, `funclist[i](x)` is used as the output value. Each boolean array in `condlist` selects a piece of `x`, and should therefore be of the same shape as `x`. The length of `condlist` must correspond to that of `funclist`. 
If one extra function is given, i.e. if ``len(funclist) - len(condlist) == 1``, then that extra function is the default value, used wherever all conditions are false. funclist : list of callables, f(x,*args,**kw), or scalars Each function is evaluated over `x` wherever its corresponding condition is True. It should take an array as input and give an array or a scalar value as output. If, instead of a callable, a scalar is provided then a constant function (``lambda x: scalar``) is assumed. args : tuple, optional Any further arguments given to `piecewise` are passed to the functions upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then each function is called as ``f(x, 1, 'a')``. kw : dict, optional Keyword arguments used in calling `piecewise` are passed to the functions upon execution, i.e., if called ``piecewise(..., ..., alpha=1)``, then each function is called as ``f(x, alpha=1)``. Returns ------- out : ndarray The output is the same shape and type as x and is found by calling the functions in `funclist` on the appropriate portions of `x`, as defined by the boolean arrays in `condlist`. Portions not covered by any condition have a default value of 0. See Also -------- choose, select, where Notes ----- This is similar to choose or select, except that functions are evaluated on elements of `x` that satisfy the corresponding condition from `condlist`. The result is:: |-- |funclist[0](x[condlist[0]]) out = |funclist[1](x[condlist[1]]) |... |funclist[n2](x[condlist[n2]]) |-- Examples -------- Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. >>> x = np.linspace(-2.5, 2.5, 6) >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) array([-1., -1., -1., 1., 1., 1.]) Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for ``x >= 0``. >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) Apply the same function to a scalar value. >>> y = -2 >>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x]) array(2) """ x = asanyarray(x) n2 = len(funclist) if (isscalar(condlist) or not (isinstance(condlist[0], list) or isinstance(condlist[0], ndarray))): if not isscalar(condlist) and x.size == 1 and x.ndim == 0: condlist = [[c] for c in condlist] else: condlist = [condlist] condlist = array(condlist, dtype=bool) n = len(condlist) # This is a hack to work around problems with NumPy's # handling of 0-d arrays and boolean indexing with # numpy.bool_ scalars zerod = False if x.ndim == 0: x = x[None] zerod = True if n == n2 - 1: # compute the "otherwise" condition. condelse = ~np.any(condlist, axis=0, keepdims=True) condlist = np.concatenate([condlist, condelse], axis=0) n += 1 y = zeros(x.shape, x.dtype) for k in range(n): item = funclist[k] if not isinstance(item, collections.Callable): y[condlist[k]] = item else: vals = x[condlist[k]] if vals.size > 0: y[condlist[k]] = item(vals, *args, **kw) if zerod: y = y.squeeze() return y def select(condlist, choicelist, default=0): """ Return an array drawn from elements in choicelist, depending on conditions. Parameters ---------- condlist : list of bool ndarrays The list of conditions which determine from which array in `choicelist` the output elements are taken. When multiple conditions are satisfied, the first one encountered in `condlist` is used. choicelist : list of ndarrays The list of arrays from which the output elements are taken. It has to be of the same length as `condlist`. 
default : scalar, optional The element inserted in `output` when all conditions evaluate to False. Returns ------- output : ndarray The output at position m is the m-th element of the array in `choicelist` where the m-th element of the corresponding array in `condlist` is True. See Also -------- where : Return elements from one of two arrays depending on condition. take, choose, compress, diag, diagonal Examples -------- >>> x = np.arange(10) >>> condlist = [x<3, x>5] >>> choicelist = [x, x**2] >>> np.select(condlist, choicelist) array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81]) """ # Check the size of condlist and choicelist are the same, or abort. if len(condlist) != len(choicelist): raise ValueError( 'list of cases must be same length as list of conditions') # Now that the dtype is known, handle the deprecated select([], []) case if len(condlist) == 0: # 2014-02-24, 1.9 warnings.warn("select with an empty condition list is not possible" "and will be deprecated", DeprecationWarning, stacklevel=2) return np.asarray(default)[()] choicelist = [np.asarray(choice) for choice in choicelist] choicelist.append(np.asarray(default)) # need to get the result type before broadcasting for correct scalar # behaviour dtype = np.result_type(*choicelist) # Convert conditions to arrays and broadcast conditions and choices # as the shape is needed for the result. Doing it separately optimizes # for example when all choices are scalars. condlist = np.broadcast_arrays(*condlist) choicelist = np.broadcast_arrays(*choicelist) # If cond array is not an ndarray in boolean format or scalar bool, abort. deprecated_ints = False for i in range(len(condlist)): cond = condlist[i] if cond.dtype.type is not np.bool_: if np.issubdtype(cond.dtype, np.integer): # A previous implementation accepted int ndarrays accidentally. # Supported here deliberately, but deprecated. condlist[i] = condlist[i].astype(bool) deprecated_ints = True else: raise ValueError( 'invalid entry in choicelist: should be boolean ndarray') if deprecated_ints: # 2014-02-24, 1.9 msg = "select condlists containing integer ndarrays is deprecated " \ "and will be removed in the future. Use `.astype(bool)` to " \ "convert to bools." warnings.warn(msg, DeprecationWarning, stacklevel=2) if choicelist[0].ndim == 0: # This may be common, so avoid the call. result_shape = condlist[0].shape else: result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape result = np.full(result_shape, choicelist[-1], dtype) # Use np.copyto to burn each choicelist array onto result, using the # corresponding condlist as a boolean mask. This is done in reverse # order since the first choice should take precedence. choicelist = choicelist[-2::-1] condlist = condlist[::-1] for choice, cond in zip(choicelist, condlist): np.copyto(result, choice, where=cond) return result def copy(a, order='K'): """ Return an array copy of the given object. Parameters ---------- a : array_like Input data. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the copy. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. (Note that this function and :meth:`ndarray.copy` are very similar, but have different default values for their order= arguments.) Returns ------- arr : ndarray Array interpretation of `a`. 
Notes ----- This is equivalent to: >>> np.array(a, copy=True) #doctest: +SKIP Examples -------- Create an array x, with a reference y and a copy z: >>> x = np.array([1, 2, 3]) >>> y = x >>> z = np.copy(x) Note that, when we modify x, y changes, but not z: >>> x[0] = 10 >>> x[0] == y[0] True >>> x[0] == z[0] False """ return array(a, order=order, copy=True) # Basic operations def gradient(f, *varargs, **kwargs): """ Return the gradient of an N-dimensional array. The gradient is computed using second order accurate central differences in the interior points and either first or second order accurate one-sided (forward or backward) differences at the boundaries. The returned gradient hence has the same shape as the input array. Parameters ---------- f : array_like An N-dimensional array containing samples of a scalar function. varargs : list of scalar or array, optional Spacing between f values. Default unitary spacing for all dimensions. Spacing can be specified using: 1. single scalar to specify a sample distance for all dimensions. 2. N scalars to specify a constant sample distance for each dimension. i.e. `dx`, `dy`, `dz`, ... 3. N arrays to specify the coordinates of the values along each dimension of F. The length of the array must match the size of the corresponding dimension. 4. Any combination of N scalars/arrays with the meaning of 2. and 3. If `axis` is given, the number of varargs must equal the number of axes. Default: 1. edge_order : {1, 2}, optional Gradient is calculated using N-th order accurate differences at the boundaries. Default: 1. .. versionadded:: 1.9.1 axis : None or int or tuple of ints, optional Gradient is calculated only along the given axis or axes. The default (axis = None) is to calculate the gradient for all the axes of the input array. axis may be negative, in which case it counts from the last to the first axis. .. versionadded:: 1.11.0 Returns ------- gradient : ndarray or list of ndarray A set of ndarrays (or a single ndarray if there is only one dimension) corresponding to the derivatives of f with respect to each dimension. Each derivative has the same shape as f. Examples -------- >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float) >>> np.gradient(f) array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) >>> np.gradient(f, 2) array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) Spacing can also be specified with an array that represents the coordinates of the values F along the dimensions. For instance a uniform spacing: >>> x = np.arange(f.size) >>> np.gradient(f, x) array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) Or a non-uniform one: >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float) >>> np.gradient(f, x) array([ 1. , 3. , 3.5, 6.7, 6.9, 2.5]) For two-dimensional arrays, the return will be two arrays ordered by axis. In this example the first array stands for the gradient in rows and the second one in columns direction: >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float)) [array([[ 2., 2., -1.], [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ], [ 1. , 1. , 1. ]])] In this example the spacing is also specified: uniform for axis=0 and non-uniform for axis=1 >>> dx = 2. >>> y = [1., 1.5, 3.5] >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y) [array([[ 1. , 1. , -0.5], [ 1. , 1. , -0.5]]), array([[ 2. , 2. , 2. ], [ 2.
, 1.7, 0.5]])] It is possible to specify how boundaries are treated using `edge_order` >>> x = np.array([0, 1, 2, 3, 4]) >>> f = x**2 >>> np.gradient(f, edge_order=1) array([ 1., 2., 4., 6., 7.]) >>> np.gradient(f, edge_order=2) array([-0., 2., 4., 6., 8.]) The `axis` keyword can be used to specify a subset of axes of which the gradient is calculated >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0) array([[ 2., 2., -1.], [ 2., 2., -1.]]) Notes ----- Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous derivatives) and let be :math:`h_{*}` a non homogeneous stepsize, the spacing the finite difference coefficients are computed by minimising the consistency error :math:`\\eta_{i}`: .. math:: \\eta_{i} = f_{i}^{\\left(1\\right)} - \\left[ \\alpha f\\left(x_{i}\\right) + \\beta f\\left(x_{i} + h_{d}\\right) + \\gamma f\\left(x_{i}-h_{s}\\right) \\right] By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})` with their Taylor series expansion, this translates into solving the following the linear system: .. math:: \\left\\{ \\begin{array}{r} \\alpha+\\beta+\\gamma=0 \\\\ -\\beta h_{d}+\\gamma h_{s}=1 \\\\ \\beta h_{d}^{2}+\\gamma h_{s}^{2}=0 \\end{array} \\right. The resulting approximation of :math:`f_{i}^{(1)}` is the following: .. math:: \\hat f_{i}^{(1)} = \\frac{ h_{s}^{2}f\\left(x_{i} + h_{d}\\right) + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right) - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)} { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)} + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2} + h_{s}h_{d}^{2}}{h_{d} + h_{s}}\\right) It is worth noting that if :math:`h_{s}=h_{d}` (i.e., data are evenly spaced) we find the standard second order approximation: .. math:: \\hat f_{i}^{(1)}= \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h} + \\mathcal{O}\\left(h^{2}\\right) With a similar procedure the forward/backward approximations used for boundaries can be derived. References ---------- .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics (Texts in Applied Mathematics). New York: Springer. .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations in Geophysical Fluid Dynamics. New York: Springer. .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on Arbitrarily Spaced Grids, Mathematics of Computation 51, no. 184 : 699-706. `PDF <http://www.ams.org/journals/mcom/1988-51-184/ S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_. 
""" f = np.asanyarray(f) N = f.ndim # number of dimensions axes = kwargs.pop('axis', None) if axes is None: axes = tuple(range(N)) else: axes = _nx.normalize_axis_tuple(axes, N) len_axes = len(axes) n = len(varargs) if n == 0: dx = [1.0] * len_axes elif n == len_axes or (n == 1 and np.isscalar(varargs[0])): dx = list(varargs) for i, distances in enumerate(dx): if np.isscalar(distances): continue if len(distances) != f.shape[axes[i]]: raise ValueError("distances must be either scalars or match " "the length of the corresponding dimension") diffx = np.diff(dx[i]) # if distances are constant reduce to the scalar case # since it brings a consistent speedup if (diffx == diffx[0]).all(): diffx = diffx[0] dx[i] = diffx if len(dx) == 1: dx *= len_axes else: raise TypeError("invalid number of arguments") edge_order = kwargs.pop('edge_order', 1) if kwargs: raise TypeError('"{}" are not valid keyword arguments.'.format( '", "'.join(kwargs.keys()))) if edge_order > 2: raise ValueError("'edge_order' greater than 2 not supported") # use central differences on interior and one-sided differences on the # endpoints. This preserves second order-accuracy over the full domain. outvals = [] # create slice objects --- initially all are [:, :, ..., :] slice1 = [slice(None)]*N slice2 = [slice(None)]*N slice3 = [slice(None)]*N slice4 = [slice(None)]*N otype = f.dtype if otype.type is np.datetime64: # the timedelta dtype with the same unit information otype = np.dtype(otype.name.replace('datetime', 'timedelta')) # view as timedelta to allow addition f = f.view(otype) elif otype.type is np.timedelta64: pass elif np.issubdtype(otype, np.inexact): pass else: # all other types convert to floating point otype = np.double for i, axis in enumerate(axes): if f.shape[axis] < edge_order + 1: raise ValueError( "Shape of array too small to calculate a numerical gradient, " "at least (edge_order + 1) elements are required.") # result allocation out = np.empty_like(f, dtype=otype) uniform_spacing = np.isscalar(dx[i]) # Numerical differentiation: 2nd order interior slice1[axis] = slice(1, -1) slice2[axis] = slice(None, -2) slice3[axis] = slice(1, -1) slice4[axis] = slice(2, None) if uniform_spacing: out[slice1] = (f[slice4] - f[slice2]) / (2. * dx[i]) else: dx1 = dx[i][0:-1] dx2 = dx[i][1:] a = -(dx2)/(dx1 * (dx1 + dx2)) b = (dx2 - dx1) / (dx1 * dx2) c = dx1 / (dx2 * (dx1 + dx2)) # fix the shape for broadcasting shape = np.ones(N, dtype=int) shape[axis] = -1 a.shape = b.shape = c.shape = shape # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4] # Numerical differentiation: 1st order edges if edge_order == 1: slice1[axis] = 0 slice2[axis] = 1 slice3[axis] = 0 dx_0 = dx[i] if uniform_spacing else dx[i][0] # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0]) out[slice1] = (f[slice2] - f[slice3]) / dx_0 slice1[axis] = -1 slice2[axis] = -1 slice3[axis] = -2 dx_n = dx[i] if uniform_spacing else dx[i][-1] # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2]) out[slice1] = (f[slice2] - f[slice3]) / dx_n # Numerical differentiation: 2nd order edges else: slice1[axis] = 0 slice2[axis] = 0 slice3[axis] = 1 slice4[axis] = 2 if uniform_spacing: a = -1.5 / dx[i] b = 2. / dx[i] c = -0.5 / dx[i] else: dx1 = dx[i][0] dx2 = dx[i][1] a = -(2. 
* dx1 + dx2)/(dx1 * (dx1 + dx2)) b = (dx1 + dx2) / (dx1 * dx2) c = - dx1 / (dx2 * (dx1 + dx2)) # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2] out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4] slice1[axis] = -1 slice2[axis] = -3 slice3[axis] = -2 slice4[axis] = -1 if uniform_spacing: a = 0.5 / dx[i] b = -2. / dx[i] c = 1.5 / dx[i] else: dx1 = dx[i][-2] dx2 = dx[i][-1] a = (dx2) / (dx1 * (dx1 + dx2)) b = - (dx2 + dx1) / (dx1 * dx2) c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2)) # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4] outvals.append(out) # reset the slice object in this dimension to ":" slice1[axis] = slice(None) slice2[axis] = slice(None) slice3[axis] = slice(None) slice4[axis] = slice(None) if len_axes == 1: return outvals[0] else: return outvals def diff(a, n=1, axis=-1): """ Calculate the n-th discrete difference along the given axis. The first difference is given by ``out[n] = a[n+1] - a[n]`` along the given axis, higher differences are calculated by using `diff` recursively. Parameters ---------- a : array_like Input array n : int, optional The number of times values are differenced. If zero, the input is returned as-is. axis : int, optional The axis along which the difference is taken, default is the last axis. Returns ------- diff : ndarray The n-th differences. The shape of the output is the same as `a` except along `axis` where the dimension is smaller by `n`. The type of the output is the same as the type of the difference between any two elements of `a`. This is the same as the type of `a` in most cases. A notable exception is `datetime64`, which results in a `timedelta64` output array. See Also -------- gradient, ediff1d, cumsum Notes ----- Type is preserved for boolean arrays, so the result will contain `False` when consecutive elements are the same and `True` when they differ. For unsigned integer arrays, the results will also be unsigned. This should not be surprising, as the result is consistent with calculating the difference directly: >>> u8_arr = np.array([1, 0], dtype=np.uint8) >>> np.diff(u8_arr) array([255], dtype=uint8) >>> u8_arr[1,...] - u8_arr[0,...] array(255, np.uint8) If this is not desirable, then the array should be cast to a larger integer type first: >>> i16_arr = u8_arr.astype(np.int16) >>> np.diff(i16_arr) array([-1], dtype=int16) Examples -------- >>> x = np.array([1, 2, 4, 7, 0]) >>> np.diff(x) array([ 1, 2, 3, -7]) >>> np.diff(x, n=2) array([ 1, 1, -10]) >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) >>> np.diff(x) array([[2, 3, 4], [5, 1, 2]]) >>> np.diff(x, axis=0) array([[-1, 2, 0, -2]]) >>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64) >>> np.diff(x) array([1, 1], dtype='timedelta64[D]') """ if n == 0: return a if n < 0: raise ValueError( "order must be non-negative but got " + repr(n)) a = asanyarray(a) nd = a.ndim axis = normalize_axis_index(axis, nd) slice1 = [slice(None)] * nd slice2 = [slice(None)] * nd slice1[axis] = slice(1, None) slice2[axis] = slice(None, -1) slice1 = tuple(slice1) slice2 = tuple(slice2) op = not_equal if a.dtype == np.bool_ else subtract for _ in range(n): a = op(a[slice1], a[slice2]) return a def interp(x, xp, fp, left=None, right=None, period=None): """ One-dimensional linear interpolation. Returns the one-dimensional piecewise linear interpolant to a function with given values at discrete data-points. Parameters ---------- x : array_like The x-coordinates of the interpolated values. 
xp : 1-D sequence of floats The x-coordinates of the data points, must be increasing if argument `period` is not specified. Otherwise, `xp` is internally sorted after normalizing the periodic boundaries with ``xp = xp % period``. fp : 1-D sequence of float or complex The y-coordinates of the data points, same length as `xp`. left : optional float or complex corresponding to fp Value to return for `x < xp[0]`, default is `fp[0]`. right : optional float or complex corresponding to fp Value to return for `x > xp[-1]`, default is `fp[-1]`. period : None or float, optional A period for the x-coordinates. This parameter allows the proper interpolation of angular x-coordinates. Parameters `left` and `right` are ignored if `period` is specified. .. versionadded:: 1.10.0 Returns ------- y : float or complex (corresponding to fp) or ndarray The interpolated values, same shape as `x`. Raises ------ ValueError If `xp` and `fp` have different length If `xp` or `fp` are not 1-D sequences If `period == 0` Notes ----- Does not check that the x-coordinate sequence `xp` is increasing. If `xp` is not increasing, the results are nonsense. A simple check for increasing is:: np.all(np.diff(xp) > 0) Examples -------- >>> xp = [1, 2, 3] >>> fp = [3, 2, 0] >>> np.interp(2.5, xp, fp) 1.0 >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) array([ 3. , 3. , 2.5 , 0.56, 0. ]) >>> UNDEF = -99.0 >>> np.interp(3.14, xp, fp, right=UNDEF) -99.0 Plot an interpolant to the sine function: >>> x = np.linspace(0, 2*np.pi, 10) >>> y = np.sin(x) >>> xvals = np.linspace(0, 2*np.pi, 50) >>> yinterp = np.interp(xvals, x, y) >>> import matplotlib.pyplot as plt >>> plt.plot(x, y, 'o') [<matplotlib.lines.Line2D object at 0x...>] >>> plt.plot(xvals, yinterp, '-x') [<matplotlib.lines.Line2D object at 0x...>] >>> plt.show() Interpolation with periodic x-coordinates: >>> x = [-180, -170, -185, 185, -10, -5, 0, 365] >>> xp = [190, -190, 350, -350] >>> fp = [5, 10, 3, 4] >>> np.interp(x, xp, fp, period=360) array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75]) Complex interpolation >>> x = [1.5, 4.0] >>> xp = [2,3,5] >>> fp = [1.0j, 0, 2+3j] >>> np.interp(x, xp, fp) array([ 0.+1.j , 1.+1.5j]) """ fp = np.asarray(fp) if np.iscomplexobj(fp): interp_func = compiled_interp_complex input_dtype = np.complex128 else: interp_func = compiled_interp input_dtype = np.float64 if period is None: if isinstance(x, (float, int, number)): return interp_func([x], xp, fp, left, right).item() elif isinstance(x, np.ndarray) and x.ndim == 0: return interp_func([x], xp, fp, left, right).item() else: return interp_func(x, xp, fp, left, right) else: if period == 0: raise ValueError("period must be a non-zero value") period = abs(period) left = None right = None return_array = True if isinstance(x, (float, int, number)): return_array = False x = [x] x = np.asarray(x, dtype=np.float64) xp = np.asarray(xp, dtype=np.float64) fp = np.asarray(fp, dtype=input_dtype) if xp.ndim != 1 or fp.ndim != 1: raise ValueError("Data points must be 1-D sequences") if xp.shape[0] != fp.shape[0]: raise ValueError("fp and xp are not of the same length") # normalizing periodic boundaries x = x % period xp = xp % period asort_xp = np.argsort(xp) xp = xp[asort_xp] fp = fp[asort_xp] xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period)) fp = np.concatenate((fp[-1:], fp, fp[0:1])) if return_array: return interp_func(x, xp, fp, left, right) else: return interp_func(x, xp, fp, left, right).item() def angle(z, deg=0): """ Return the angle of the complex argument. 
Parameters ---------- z : array_like A complex number or sequence of complex numbers. deg : bool, optional Return angle in degrees if True, radians if False (default). Returns ------- angle : ndarray or scalar The counterclockwise angle from the positive real axis on the complex plane, with dtype as numpy.float64. See Also -------- arctan2 absolute Examples -------- >>> np.angle([1.0, 1.0j, 1+1j]) # in radians array([ 0. , 1.57079633, 0.78539816]) >>> np.angle(1+1j, deg=True) # in degrees 45.0 """ if deg: fact = 180/pi else: fact = 1.0 z = asarray(z) if (issubclass(z.dtype.type, _nx.complexfloating)): zimag = z.imag zreal = z.real else: zimag = 0 zreal = z return arctan2(zimag, zreal) * fact def unwrap(p, discont=pi, axis=-1): """ Unwrap by changing deltas between values to 2*pi complement. Unwrap radian phase `p` by changing absolute jumps greater than `discont` to their 2*pi complement along the given axis. Parameters ---------- p : array_like Input array. discont : float, optional Maximum discontinuity between values, default is ``pi``. axis : int, optional Axis along which unwrap will operate, default is the last axis. Returns ------- out : ndarray Output array. See Also -------- rad2deg, deg2rad Notes ----- If the discontinuity in `p` is smaller than ``pi``, but larger than `discont`, no unwrapping is done because taking the 2*pi complement would only make the discontinuity larger. Examples -------- >>> phase = np.linspace(0, np.pi, num=5) >>> phase[3:] += np.pi >>> phase array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) >>> np.unwrap(phase) array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) """ p = asarray(p) nd = p.ndim dd = diff(p, axis=axis) slice1 = [slice(None, None)]*nd # full slices slice1[axis] = slice(1, None) ddmod = mod(dd + pi, 2*pi) - pi _nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0)) ph_correct = ddmod - dd _nx.copyto(ph_correct, 0, where=abs(dd) < discont) up = array(p, copy=True, dtype='d') up[slice1] = p[slice1] + ph_correct.cumsum(axis) return up def sort_complex(a): """ Sort a complex array using the real part first, then the imaginary part. Parameters ---------- a : array_like Input array Returns ------- out : complex ndarray Always returns a sorted complex array. Examples -------- >>> np.sort_complex([5, 3, 6, 2, 1]) array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) """ b = array(a, copy=True) b.sort() if not issubclass(b.dtype.type, _nx.complexfloating): if b.dtype.char in 'bhBH': return b.astype('F') elif b.dtype.char == 'g': return b.astype('G') else: return b.astype('D') else: return b def trim_zeros(filt, trim='fb'): """ Trim the leading and/or trailing zeros from a 1-D array or sequence. Parameters ---------- filt : 1-D array or sequence Input array. trim : str, optional A string with 'f' representing trim from front and 'b' to trim from back. Default is 'fb', trim zeros from both front and back of the array. Returns ------- trimmed : 1-D array or sequence The result of trimming the input. The input data type is preserved. Examples -------- >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) >>> np.trim_zeros(a) array([1, 2, 3, 0, 2, 1]) >>> np.trim_zeros(a, 'b') array([0, 0, 0, 1, 2, 3, 0, 2, 1]) The input data type is preserved, list/tuple in means list/tuple out. 
>>> np.trim_zeros([0, 1, 2, 0]) [1, 2] """ first = 0 trim = trim.upper() if 'F' in trim: for i in filt: if i != 0.: break else: first = first + 1 last = len(filt) if 'B' in trim: for i in filt[::-1]: if i != 0.: break else: last = last - 1 return filt[first:last] @deprecate def unique(x): """ This function is deprecated. Use numpy.lib.arraysetops.unique() instead. """ try: tmp = x.flatten() if tmp.size == 0: return tmp tmp.sort() idx = concatenate(([True], tmp[1:] != tmp[:-1])) return tmp[idx] except AttributeError: items = sorted(set(x)) return asarray(items) def extract(condition, arr): """ Return the elements of an array that satisfy some condition. This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. Note that `place` does the exact opposite of `extract`. Parameters ---------- condition : array_like An array whose nonzero or True entries indicate the elements of `arr` to extract. arr : array_like Input array of the same size as `condition`. Returns ------- extract : ndarray Rank 1 array of values from `arr` where `condition` is True. See Also -------- take, put, copyto, compress, place Examples -------- >>> arr = np.arange(12).reshape((3, 4)) >>> arr array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> condition = np.mod(arr, 3)==0 >>> condition array([[ True, False, False, True], [False, False, True, False], [False, True, False, False]], dtype=bool) >>> np.extract(condition, arr) array([0, 3, 6, 9]) If `condition` is boolean: >>> arr[condition] array([0, 3, 6, 9]) """ return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) def place(arr, mask, vals): """ Change elements of an array based on conditional and input values. Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that `place` uses the first N elements of `vals`, where N is the number of True values in `mask`, while `copyto` uses the elements where `mask` is True. Note that `extract` does the exact opposite of `place`. Parameters ---------- arr : ndarray Array to put data into. mask : array_like Boolean mask array. Must have the same size as `a`. vals : 1-D sequence Values to put into `a`. Only the first N elements are used, where N is the number of True values in `mask`. If `vals` is smaller than N, it will be repeated, and if elements of `a` are to be masked, this sequence must be non-empty. See Also -------- copyto, put, take, extract Examples -------- >>> arr = np.arange(6).reshape(2, 3) >>> np.place(arr, arr>2, [44, 55]) >>> arr array([[ 0, 1, 2], [44, 55, 44]]) """ if not isinstance(arr, np.ndarray): raise TypeError("argument 1 must be numpy.ndarray, " "not {name}".format(name=type(arr).__name__)) return _insert(arr, mask, vals) def disp(mesg, device=None, linefeed=True): """ Display a message on a device. Parameters ---------- mesg : str Message to display. device : object Device to write message. If None, defaults to ``sys.stdout`` which is very similar to ``print``. `device` needs to have ``write()`` and ``flush()`` methods. linefeed : bool, optional Option whether to print a line feed or not. Defaults to True. Raises ------ AttributeError If `device` does not have a ``write()`` or ``flush()`` method. 
Examples -------- Besides ``sys.stdout``, a file-like object can also be used as it has both required methods: >>> from StringIO import StringIO >>> buf = StringIO() >>> np.disp('"Display" in a file', device=buf) >>> buf.getvalue() '"Display" in a file\\n' """ if device is None: device = sys.stdout if linefeed: device.write('%s\n' % mesg) else: device.write('%s' % mesg) device.flush() return # See http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html _DIMENSION_NAME = r'\w+' _CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME) _ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST) _ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT) _SIGNATURE = '^{0:}->{0:}$'.format(_ARGUMENT_LIST) def _parse_gufunc_signature(signature): """ Parse string signatures for a generalized universal function. Arguments --------- signature : string Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)`` for ``np.matmul``. Returns ------- Tuple of input and output core dimensions parsed from the signature, each of the form List[Tuple[str, ...]]. """ if not re.match(_SIGNATURE, signature): raise ValueError( 'not a valid gufunc signature: {}'.format(signature)) return tuple([tuple(re.findall(_DIMENSION_NAME, arg)) for arg in re.findall(_ARGUMENT, arg_list)] for arg_list in signature.split('->')) def _update_dim_sizes(dim_sizes, arg, core_dims): """ Incrementally check and update core dimension sizes for a single argument. Arguments --------- dim_sizes : Dict[str, int] Sizes of existing core dimensions. Will be updated in-place. arg : ndarray Argument to examine. core_dims : Tuple[str, ...] Core dimensions for this argument. """ if not core_dims: return num_core_dims = len(core_dims) if arg.ndim < num_core_dims: raise ValueError( '%d-dimensional argument does not have enough ' 'dimensions for all core dimensions %r' % (arg.ndim, core_dims)) core_shape = arg.shape[-num_core_dims:] for dim, size in zip(core_dims, core_shape): if dim in dim_sizes: if size != dim_sizes[dim]: raise ValueError( 'inconsistent size for core dimension %r: %r vs %r' % (dim, size, dim_sizes[dim])) else: dim_sizes[dim] = size def _parse_input_dimensions(args, input_core_dims): """ Parse broadcast and core dimensions for vectorize with a signature. Arguments --------- args : Tuple[ndarray, ...] Tuple of input arguments to examine. input_core_dims : List[Tuple[str, ...]] List of core dimensions corresponding to each input. Returns ------- broadcast_shape : Tuple[int, ...] Common shape to broadcast all non-core dimensions to. dim_sizes : Dict[str, int] Common sizes for named core dimensions. 
""" broadcast_args = [] dim_sizes = {} for arg, core_dims in zip(args, input_core_dims): _update_dim_sizes(dim_sizes, arg, core_dims) ndim = arg.ndim - len(core_dims) dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim]) broadcast_args.append(dummy_array) broadcast_shape = np.lib.stride_tricks._broadcast_shape(*broadcast_args) return broadcast_shape, dim_sizes def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims): """Helper for calculating broadcast shapes with core dimensions.""" return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims) for core_dims in list_of_core_dims] def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes): """Helper for creating output arrays in vectorize.""" shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims) arrays = tuple(np.empty(shape, dtype=dtype) for shape, dtype in zip(shapes, dtypes)) return arrays class vectorize(object): """ vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False, signature=None) Generalized function class. Define a vectorized function which takes a nested sequence of objects or numpy arrays as inputs and returns an single or tuple of numpy array as output. The vectorized function evaluates `pyfunc` over successive tuples of the input arrays like the python map function, except it uses the broadcasting rules of numpy. The data type of the output of `vectorized` is determined by calling the function with the first element of the input. This can be avoided by specifying the `otypes` argument. Parameters ---------- pyfunc : callable A python function or method. otypes : str or list of dtypes, optional The output data type. It must be specified as either a string of typecode characters or a list of data type specifiers. There should be one data type specifier for each output. doc : str, optional The docstring for the function. If `None`, the docstring will be the ``pyfunc.__doc__``. excluded : set, optional Set of strings or integers representing the positional or keyword arguments for which the function will not be vectorized. These will be passed directly to `pyfunc` unmodified. .. versionadded:: 1.7.0 cache : bool, optional If `True`, then cache the first function call that determines the number of outputs if `otypes` is not provided. .. versionadded:: 1.7.0 signature : string, optional Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for vectorized matrix-vector multiplication. If provided, ``pyfunc`` will be called with (and expected to return) arrays with shapes given by the size of corresponding core dimensions. By default, ``pyfunc`` is assumed to take scalars as input and output. .. versionadded:: 1.12.0 Returns ------- vectorized : callable Vectorized function. Examples -------- >>> def myfunc(a, b): ... "Return a-b if a>b, otherwise return a+b" ... if a > b: ... return a - b ... else: ... 
return a + b >>> vfunc = np.vectorize(myfunc) >>> vfunc([1, 2, 3, 4], 2) array([3, 4, 1, 2]) The docstring is taken from the input function to `vectorize` unless it is specified: >>> vfunc.__doc__ 'Return a-b if a>b, otherwise return a+b' >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') >>> vfunc.__doc__ 'Vectorized `myfunc`' The output type is determined by evaluating the first element of the input, unless it is specified: >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) <type 'numpy.int32'> >>> vfunc = np.vectorize(myfunc, otypes=[float]) >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) <type 'numpy.float64'> The `excluded` argument can be used to prevent vectorizing over certain arguments. This can be useful for array-like arguments of a fixed length such as the coefficients for a polynomial as in `polyval`: >>> def mypolyval(p, x): ... _p = list(p) ... res = _p.pop(0) ... while _p: ... res = res*x + _p.pop(0) ... return res >>> vpolyval = np.vectorize(mypolyval, excluded=['p']) >>> vpolyval(p=[1, 2, 3], x=[0, 1]) array([3, 6]) Positional arguments may also be excluded by specifying their position: >>> vpolyval.excluded.add(0) >>> vpolyval([1, 2, 3], x=[0, 1]) array([3, 6]) The `signature` argument allows for vectorizing functions that act on non-scalar arrays of fixed length. For example, you can use it for a vectorized calculation of Pearson correlation coefficient and its p-value: >>> import scipy.stats >>> pearsonr = np.vectorize(scipy.stats.pearsonr, ... signature='(n),(n)->(),()') >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]]) (array([ 1., -1.]), array([ 0., 0.])) Or for a vectorized convolution: >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)') >>> convolve(np.eye(4), [1, 2, 1]) array([[ 1., 2., 1., 0., 0., 0.], [ 0., 1., 2., 1., 0., 0.], [ 0., 0., 1., 2., 1., 0.], [ 0., 0., 0., 1., 2., 1.]]) See Also -------- frompyfunc : Takes an arbitrary Python function and returns a ufunc Notes ----- The `vectorize` function is provided primarily for convenience, not for performance. The implementation is essentially a for loop. If `otypes` is not specified, then a call to the function with the first argument will be used to determine the number of outputs. The results of this call will be cached if `cache` is `True` to prevent calling the function twice. However, to implement the cache, the original function must be wrapped which will slow down subsequent calls, so only do this if your function is expensive. The new keyword argument interface and `excluded` argument support further degrades performance. References ---------- .. [1] NumPy Reference, section `Generalized Universal Function API <http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html>`_. 
""" def __init__(self, pyfunc, otypes=None, doc=None, excluded=None, cache=False, signature=None): self.pyfunc = pyfunc self.cache = cache self.signature = signature self._ufunc = None # Caching to improve default performance if doc is None: self.__doc__ = pyfunc.__doc__ else: self.__doc__ = doc if isinstance(otypes, str): for char in otypes: if char not in typecodes['All']: raise ValueError("Invalid otype specified: %s" % (char,)) elif iterable(otypes): otypes = ''.join([_nx.dtype(x).char for x in otypes]) elif otypes is not None: raise ValueError("Invalid otype specification") self.otypes = otypes # Excluded variable support if excluded is None: excluded = set() self.excluded = set(excluded) if signature is not None: self._in_and_out_core_dims = _parse_gufunc_signature(signature) else: self._in_and_out_core_dims = None def __call__(self, *args, **kwargs): """ Return arrays with the results of `pyfunc` broadcast (vectorized) over `args` and `kwargs` not in `excluded`. """ excluded = self.excluded if not kwargs and not excluded: func = self.pyfunc vargs = args else: # The wrapper accepts only positional arguments: we use `names` and # `inds` to mutate `the_args` and `kwargs` to pass to the original # function. nargs = len(args) names = [_n for _n in kwargs if _n not in excluded] inds = [_i for _i in range(nargs) if _i not in excluded] the_args = list(args) def func(*vargs): for _n, _i in enumerate(inds): the_args[_i] = vargs[_n] kwargs.update(zip(names, vargs[len(inds):])) return self.pyfunc(*the_args, **kwargs) vargs = [args[_i] for _i in inds] vargs.extend([kwargs[_n] for _n in names]) return self._vectorize_call(func=func, args=vargs) def _get_ufunc_and_otypes(self, func, args): """Return (ufunc, otypes).""" # frompyfunc will fail if args is empty if not args: raise ValueError('args can not be empty') if self.otypes is not None: otypes = self.otypes nout = len(otypes) # Note logic here: We only *use* self._ufunc if func is self.pyfunc # even though we set self._ufunc regardless. if func is self.pyfunc and self._ufunc is not None: ufunc = self._ufunc else: ufunc = self._ufunc = frompyfunc(func, len(args), nout) else: # Get number of outputs and output types by calling the function on # the first entries of args. We also cache the result to prevent # the subsequent call when the ufunc is evaluated. # Assumes that ufunc first evaluates the 0th elements in the input # arrays (the input values are not checked to ensure this) args = [asarray(arg) for arg in args] if builtins.any(arg.size == 0 for arg in args): raise ValueError('cannot call `vectorize` on size 0 inputs ' 'unless `otypes` is set') inputs = [arg.flat[0] for arg in args] outputs = func(*inputs) # Performance note: profiling indicates that -- for simple # functions at least -- this wrapping can almost double the # execution time. # Hence we make it optional. if self.cache: _cache = [outputs] def _func(*vargs): if _cache: return _cache.pop() else: return func(*vargs) else: _func = func if isinstance(outputs, tuple): nout = len(outputs) else: nout = 1 outputs = (outputs,) otypes = ''.join([asarray(outputs[_k]).dtype.char for _k in range(nout)]) # Performance note: profiling indicates that creating the ufunc is # not a significant cost compared with wrapping so it seems not # worth trying to cache this. 
ufunc = frompyfunc(_func, len(args), nout) return ufunc, otypes def _vectorize_call(self, func, args): """Vectorized call to `func` over positional `args`.""" if self.signature is not None: res = self._vectorize_call_with_signature(func, args) elif not args: res = func() else: ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) # Convert args to object arrays first inputs = [array(a, copy=False, subok=True, dtype=object) for a in args] outputs = ufunc(*inputs) if ufunc.nout == 1: res = array(outputs, copy=False, subok=True, dtype=otypes[0]) else: res = tuple([array(x, copy=False, subok=True, dtype=t) for x, t in zip(outputs, otypes)]) return res def _vectorize_call_with_signature(self, func, args): """Vectorized call over positional arguments with a signature.""" input_core_dims, output_core_dims = self._in_and_out_core_dims if len(args) != len(input_core_dims): raise TypeError('wrong number of positional arguments: ' 'expected %r, got %r' % (len(input_core_dims), len(args))) args = tuple(asanyarray(arg) for arg in args) broadcast_shape, dim_sizes = _parse_input_dimensions( args, input_core_dims) input_shapes = _calculate_shapes(broadcast_shape, dim_sizes, input_core_dims) args = [np.broadcast_to(arg, shape, subok=True) for arg, shape in zip(args, input_shapes)] outputs = None otypes = self.otypes nout = len(output_core_dims) for index in np.ndindex(*broadcast_shape): results = func(*(arg[index] for arg in args)) n_results = len(results) if isinstance(results, tuple) else 1 if nout != n_results: raise ValueError( 'wrong number of outputs from pyfunc: expected %r, got %r' % (nout, n_results)) if nout == 1: results = (results,) if outputs is None: for result, core_dims in zip(results, output_core_dims): _update_dim_sizes(dim_sizes, result, core_dims) if otypes is None: otypes = [asarray(result).dtype for result in results] outputs = _create_arrays(broadcast_shape, dim_sizes, output_core_dims, otypes) for output, result in zip(outputs, results): output[index] = result if outputs is None: # did not call the function even once if otypes is None: raise ValueError('cannot call `vectorize` on size 0 inputs ' 'unless `otypes` is set') if builtins.any(dim not in dim_sizes for dims in output_core_dims for dim in dims): raise ValueError('cannot call `vectorize` with a signature ' 'including new output dimensions on size 0 ' 'inputs') outputs = _create_arrays(broadcast_shape, dim_sizes, output_core_dims, otypes) return outputs[0] if nout == 1 else outputs def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None): """ Estimate a covariance matrix, given data and weights. Covariance indicates the level to which two variables vary together. If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, then the covariance matrix element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance of :math:`x_i`. See the notes for an outline of the algorithm. Parameters ---------- m : array_like A 1-D or 2-D array containing multiple variables and observations. Each row of `m` represents a variable, and each column a single observation of all those variables. Also see `rowvar` below. y : array_like, optional An additional set of variables and observations. `y` has the same form as that of `m`. rowvar : bool, optional If `rowvar` is True (default), then each row represents a variable, with observations in the columns. 
Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. bias : bool, optional Default normalization (False) is by ``(N - 1)``, where ``N`` is the number of observations given (unbiased estimate). If `bias` is True, then normalization is by ``N``. These values can be overridden by using the keyword ``ddof`` in numpy versions >= 1.5. ddof : int, optional If not ``None`` the default value implied by `bias` is overridden. Note that ``ddof=1`` will return the unbiased estimate, even if both `fweights` and `aweights` are specified, and ``ddof=0`` will return the simple average. See the notes for the details. The default value is ``None``. .. versionadded:: 1.5 fweights : array_like, int, optional 1-D array of integer frequency weights; the number of times each observation vector should be repeated. .. versionadded:: 1.10 aweights : array_like, optional 1-D array of observation vector weights. These relative weights are typically large for observations considered "important" and smaller for observations considered less "important". If ``ddof=0`` the array of weights can be used to assign probabilities to observation vectors. .. versionadded:: 1.10 Returns ------- out : ndarray The covariance matrix of the variables. See Also -------- corrcoef : Normalized covariance matrix Notes ----- Assume that the observations are in the columns of the observation array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The steps to compute the weighted covariance are as follows:: >>> w = f * a >>> v1 = np.sum(w) >>> v2 = np.sum(w * a) >>> m -= np.sum(m * w, axis=1, keepdims=True) / v1 >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2) Note that when ``a == 1``, the normalization factor ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)`` as it should. Examples -------- Consider two variables, :math:`x_0` and :math:`x_1`, which correlate perfectly, but in opposite directions: >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T >>> x array([[0, 1, 2], [2, 1, 0]]) Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance matrix shows this clearly: >>> np.cov(x) array([[ 1., -1.], [-1., 1.]]) Note that element :math:`C_{0,1}`, which shows the correlation between :math:`x_0` and :math:`x_1`, is negative.
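For instance, normalizing by ``N`` instead of ``N - 1`` with the ``bias`` keyword described above (same ``x``; the printed spacing may vary between NumPy versions):

>>> np.cov(x, bias=True)
array([[ 0.66666667, -0.66666667],
       [-0.66666667,  0.66666667]])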
Further, note how `x` and `y` are combined: >>> x = [-2.1, -1, 4.3] >>> y = [3, 1.1, 0.12] >>> X = np.stack((x, y), axis=0) >>> print(np.cov(X)) [[ 11.71 -4.286 ] [ -4.286 2.14413333]] >>> print(np.cov(x, y)) [[ 11.71 -4.286 ] [ -4.286 2.14413333]] >>> print(np.cov(x)) 11.71 """ # Check inputs if ddof is not None and ddof != int(ddof): raise ValueError( "ddof must be integer") # Handles complex arrays too m = np.asarray(m) if m.ndim > 2: raise ValueError("m has more than 2 dimensions") if y is None: dtype = np.result_type(m, np.float64) else: y = np.asarray(y) if y.ndim > 2: raise ValueError("y has more than 2 dimensions") dtype = np.result_type(m, y, np.float64) X = array(m, ndmin=2, dtype=dtype) if not rowvar and X.shape[0] != 1: X = X.T if X.shape[0] == 0: return np.array([]).reshape(0, 0) if y is not None: y = array(y, copy=False, ndmin=2, dtype=dtype) if not rowvar and y.shape[0] != 1: y = y.T X = np.concatenate((X, y), axis=0) if ddof is None: if bias == 0: ddof = 1 else: ddof = 0 # Get the product of frequencies and weights w = None if fweights is not None: fweights = np.asarray(fweights, dtype=float) if not np.all(fweights == np.around(fweights)): raise TypeError( "fweights must be integer") if fweights.ndim > 1: raise RuntimeError( "cannot handle multidimensional fweights") if fweights.shape[0] != X.shape[1]: raise RuntimeError( "incompatible numbers of samples and fweights") if any(fweights < 0): raise ValueError( "fweights cannot be negative") w = fweights if aweights is not None: aweights = np.asarray(aweights, dtype=float) if aweights.ndim > 1: raise RuntimeError( "cannot handle multidimensional aweights") if aweights.shape[0] != X.shape[1]: raise RuntimeError( "incompatible numbers of samples and aweights") if any(aweights < 0): raise ValueError( "aweights cannot be negative") if w is None: w = aweights else: w *= aweights avg, w_sum = average(X, axis=1, weights=w, returned=True) w_sum = w_sum[0] # Determine the normalization if w is None: fact = X.shape[1] - ddof elif ddof == 0: fact = w_sum elif aweights is None: fact = w_sum - ddof else: fact = w_sum - ddof*sum(w*aweights)/w_sum if fact <= 0: warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, stacklevel=2) fact = 0.0 X -= avg[:, None] if w is None: X_T = X.T else: X_T = (X*w).T c = dot(X, X_T.conj()) c *= 1. / np.float64(fact) return c.squeeze() def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue): """ Return Pearson product-moment correlation coefficients. Please refer to the documentation for `cov` for more detail. The relationship between the correlation coefficient matrix, `R`, and the covariance matrix, `C`, is .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } } The values of `R` are between -1 and 1, inclusive. Parameters ---------- x : array_like A 1-D or 2-D array containing multiple variables and observations. Each row of `x` represents a variable, and each column a single observation of all those variables. Also see `rowvar` below. y : array_like, optional An additional set of variables and observations. `y` has the same shape as `x`. rowvar : bool, optional If `rowvar` is True (default), then each row represents a variable, with observations in the columns. Otherwise, the relationship is transposed: each column represents a variable, while the rows contain observations. bias : _NoValue, optional Has no effect, do not use. .. deprecated:: 1.10.0 ddof : _NoValue, optional Has no effect, do not use. .. 
deprecated:: 1.10.0 Returns ------- R : ndarray The correlation coefficient matrix of the variables. See Also -------- cov : Covariance matrix Notes ----- Due to floating point rounding the resulting array may not be Hermitian, the diagonal elements may not be 1, and the elements may not satisfy the inequality abs(a) <= 1. The real and imaginary parts are clipped to the interval [-1, 1] in an attempt to improve on that situation but is not much help in the complex case. This function accepts but discards arguments `bias` and `ddof`. This is for backwards compatibility with previous versions of this function. These arguments had no effect on the return values of the function and can be safely ignored in this and previous versions of numpy. """ if bias is not np._NoValue or ddof is not np._NoValue: # 2015-03-15, 1.10 warnings.warn('bias and ddof have no effect and are deprecated', DeprecationWarning, stacklevel=2) c = cov(x, y, rowvar) try: d = diag(c) except ValueError: # scalar covariance # nan if incorrect value (nan, inf, 0), 1 otherwise return c / c stddev = sqrt(d.real) c /= stddev[:, None] c /= stddev[None, :] # Clip real and imaginary parts to [-1, 1]. This does not guarantee # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without # excessive work. np.clip(c.real, -1, 1, out=c.real) if np.iscomplexobj(c): np.clip(c.imag, -1, 1, out=c.imag) return c def blackman(M): """ Return the Blackman window. The Blackman window is a taper formed by using the first three terms of a summation of cosines. It was designed to have close to the minimal leakage possible. It is close to optimal, only slightly worse than a Kaiser window. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- bartlett, hamming, hanning, kaiser Notes ----- The Blackman window is defined as .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) Most references to the Blackman window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. It is known as a "near optimal" tapering function, almost as good (by some measures) as the kaiser window. References ---------- Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. 
Examples -------- >>> np.blackman(12) array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01, 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.blackman(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Blackman window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Blackman window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1)) def bartlett(M): """ Return the Bartlett window. The Bartlett window is very similar to a triangular window, except that the end points are at zero. It is often used in signal processing for tapering a signal, without generating too much ripple in the frequency domain. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : array The triangular window, with the maximum value normalized to one (the value one appears only if the number of samples is odd), with the first and last samples equal to zero. See Also -------- blackman, hamming, hanning, kaiser Notes ----- The Bartlett window is defined as .. math:: w(n) = \\frac{2}{M-1} \\left( \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| \\right) Most references to the Bartlett window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. Note that convolution with this window produces linear interpolation. It is also known as an apodization (which means"removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. The fourier transform of the Bartlett is the product of two sinc functions. Note the excellent discussion in Kanasewich. References ---------- .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", Biometrika 37, 1-16, 1950. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 109-110. .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal Processing", Prentice-Hall, 1999, pp. 468-471. .. [4] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 429. Examples -------- >>> np.bartlett(12) array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, 0.18181818, 0. 
]) Plot the window and its frequency response (requires SciPy and matplotlib): >>> from numpy.fft import fft, fftshift >>> window = np.bartlett(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Bartlett window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Bartlett window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1)) def hanning(M): """ Return the Hanning window. The Hanning window is a taper formed by using a weighted cosine. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray, shape(M,) The window, with the maximum value normalized to one (the value one appears only if `M` is odd). See Also -------- bartlett, blackman, hamming, kaiser Notes ----- The Hanning window is defined as .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right) \\qquad 0 \\leq n \\leq M-1 The Hanning was named for Julius von Hann, an Austrian meteorologist. It is also known as the Cosine Bell. Some authors prefer that it be called a Hann window, to help avoid confusion with the very similar Hamming window. Most references to the Hanning window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 106-108. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 425. Examples -------- >>> np.hanning(12) array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, 0.07937323, 0. 
]) Plot the window and its frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.hanning(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Hann window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of the Hann window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.5 - 0.5*cos(2.0*pi*n/(M-1)) def hamming(M): """ Return the Hamming window. The Hamming window is a taper formed by using a weighted cosine. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : ndarray The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- bartlett, blackman, hanning, kaiser Notes ----- The Hamming window is defined as .. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right) \\qquad 0 \\leq n \\leq M-1 The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and is described in Blackman and Tukey. It was recommended for smoothing the truncated autocovariance function in the time domain. Most references to the Hamming window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 109-110. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 425. 
Examples -------- >>> np.hamming(12) array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, 0.15302337, 0.08 ]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.hamming(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Hamming window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Hamming window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0, M) return 0.54 - 0.46*cos(2.0*pi*n/(M-1)) ## Code from cephes for i0 _i0A = [ -4.41534164647933937950E-18, 3.33079451882223809783E-17, -2.43127984654795469359E-16, 1.71539128555513303061E-15, -1.16853328779934516808E-14, 7.67618549860493561688E-14, -4.85644678311192946090E-13, 2.95505266312963983461E-12, -1.72682629144155570723E-11, 9.67580903537323691224E-11, -5.18979560163526290666E-10, 2.65982372468238665035E-9, -1.30002500998624804212E-8, 6.04699502254191894932E-8, -2.67079385394061173391E-7, 1.11738753912010371815E-6, -4.41673835845875056359E-6, 1.64484480707288970893E-5, -5.75419501008210370398E-5, 1.88502885095841655729E-4, -5.76375574538582365885E-4, 1.63947561694133579842E-3, -4.32430999505057594430E-3, 1.05464603945949983183E-2, -2.37374148058994688156E-2, 4.93052842396707084878E-2, -9.49010970480476444210E-2, 1.71620901522208775349E-1, -3.04682672343198398683E-1, 6.76795274409476084995E-1 ] _i0B = [ -7.23318048787475395456E-18, -4.83050448594418207126E-18, 4.46562142029675999901E-17, 3.46122286769746109310E-17, -2.82762398051658348494E-16, -3.42548561967721913462E-16, 1.77256013305652638360E-15, 3.81168066935262242075E-15, -9.55484669882830764870E-15, -4.15056934728722208663E-14, 1.54008621752140982691E-14, 3.85277838274214270114E-13, 7.18012445138366623367E-13, -1.79417853150680611778E-12, -1.32158118404477131188E-11, -3.14991652796324136454E-11, 1.18891471078464383424E-11, 4.94060238822496958910E-10, 3.39623202570838634515E-9, 2.26666899049817806459E-8, 2.04891858946906374183E-7, 2.89137052083475648297E-6, 6.88975834691682398426E-5, 3.36911647825569408990E-3, 8.04490411014108831608E-1 ] def _chbevl(x, vals): b0 = vals[0] b1 = 0.0 for i in range(1, len(vals)): b2 = b1 b1 = b0 b0 = x*b1 - b2 + vals[i] return 0.5*(b0 - b2) def _i0_1(x): return exp(x) * _chbevl(x/2.0-2, _i0A) def _i0_2(x): return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x) def i0(x): """ Modified Bessel function of the first kind, order 0. Usually denoted :math:`I_0`. This function does broadcast, but will *not* "up-cast" int dtype arguments unless accompanied by at least one float or complex dtype argument (see Raises below). Parameters ---------- x : array_like, dtype float or complex Argument of the Bessel function. 
Returns ------- out : ndarray, shape = x.shape, dtype = x.dtype The modified Bessel function evaluated at each of the elements of `x`. Raises ------ TypeError: array cannot be safely cast to required type If argument consists exclusively of int dtypes. See Also -------- scipy.special.iv, scipy.special.ive Notes ----- We use the algorithm published by Clenshaw [1]_ and referenced by Abramowitz and Stegun [2]_, for which the function domain is partitioned into the two intervals [0,8] and (8,inf), and Chebyshev polynomial expansions are employed in each interval. Relative error on the domain [0,30] using IEEE arithmetic is documented [3]_ as having a peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). References ---------- .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in *National Physical Laboratory Mathematical Tables*, vol. 5, London: Her Majesty's Stationery Office, 1962. .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical Functions*, 10th printing, New York: Dover, 1964, pp. 379. http://www.math.sfu.ca/~cbm/aands/page_379.htm .. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html Examples -------- >>> np.i0([0.]) array(1.0) >>> np.i0([0., 1. + 2j]) array([ 1.00000000+0.j , 0.18785373+0.64616944j]) """ x = atleast_1d(x).copy() y = empty_like(x) ind = (x < 0) x[ind] = -x[ind] ind = (x <= 8.0) y[ind] = _i0_1(x[ind]) ind2 = ~ind y[ind2] = _i0_2(x[ind2]) return y.squeeze() ## End of cephes code for i0 def kaiser(M, beta): """ Return the Kaiser window. The Kaiser window is a taper formed by using a Bessel function. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. beta : float Shape parameter for window. Returns ------- out : array The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- bartlett, blackman, hamming, hanning Notes ----- The Kaiser window is defined as .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} \\right)/I_0(\\beta) with .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, where :math:`I_0` is the modified zeroth-order Bessel function. The Kaiser was named for Jim Kaiser, who discovered a simple approximation to the DPSS window based on Bessel functions. The Kaiser window is a very good approximation to the Digital Prolate Spheroidal Sequence, or Slepian window, which is the transform which maximizes the energy in the main lobe of the window relative to total energy. The Kaiser can approximate many other windows by varying the beta parameter. ==== ======================= beta Window shape ==== ======================= 0 Rectangular 5 Similar to a Hamming 6 Similar to a Hanning 8.6 Similar to a Blackman ==== ======================= A beta value of 14 is probably a good starting point. Note that as beta gets large, the window narrows, and so the number of samples needs to be large enough to sample the increasingly narrow spike, otherwise NaNs will get returned. Most references to the Kaiser window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. 
John Wiley and Sons, New York, (1966). .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 177-178. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function Examples -------- >>> np.kaiser(12, 14) array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02, 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) Plot the window and the frequency response: >>> from numpy.fft import fft, fftshift >>> window = np.kaiser(51, 14) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Kaiser window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(mag) >>> response = np.clip(response, -100, 100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Kaiser window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ from numpy.dual import i0 if M == 1: return np.array([1.]) n = arange(0, M) alpha = (M-1)/2.0 return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta)) def sinc(x): """ Return the sinc function. The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`. Parameters ---------- x : ndarray Array (possibly multi-dimensional) of values for which to to calculate ``sinc(x)``. Returns ------- out : ndarray ``sinc(x)``, which has the same shape as the input. Notes ----- ``sinc(0)`` is the limit value 1. The name sinc is short for "sine cardinal" or "sinus cardinalis". The sinc function is used in various signal processing applications, including in anti-aliasing, in the construction of a Lanczos resampling filter, and in interpolation. For bandlimited interpolation of discrete-time signals, the ideal interpolation kernel is proportional to the sinc function. References ---------- .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/SincFunction.html .. 
[2] Wikipedia, "Sinc function", http://en.wikipedia.org/wiki/Sinc_function Examples -------- >>> x = np.linspace(-4, 4, 41) >>> np.sinc(x) array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02, -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, -4.92362781e-02, -3.89804309e-17]) >>> plt.plot(x, np.sinc(x)) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Sinc Function") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("X") <matplotlib.text.Text object at 0x...> >>> plt.show() It works in 2-D as well: >>> x = np.linspace(-4, 4, 401) >>> xx = np.outer(x, x) >>> plt.imshow(np.sinc(xx)) <matplotlib.image.AxesImage object at 0x...> """ x = np.asanyarray(x) y = pi * where(x == 0, 1.0e-20, x) return sin(y)/y def msort(a): """ Return a copy of an array sorted along the first axis. Parameters ---------- a : array_like Array to be sorted. Returns ------- sorted_array : ndarray Array of the same type and shape as `a`. See Also -------- sort Notes ----- ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``. """ b = array(a, subok=True, copy=True) b.sort(0) return b def _ureduce(a, func, **kwargs): """ Internal Function. Call `func` with `a` as first argument swapping the axes to use extended axis on functions that don't support it natively. Returns result and a.shape with axis dims set to 1. Parameters ---------- a : array_like Input array or object that can be converted to an array. func : callable Reduction function capable of receiving a single axis argument. It is is called with `a` as first argument followed by `kwargs`. kwargs : keyword arguments additional keyword arguments to pass to `func`. Returns ------- result : tuple Result of func(a, **kwargs) and a.shape with axis dims set to 1 which can be used to reshape the result to the same shape a ufunc with keepdims=True would produce. """ a = np.asanyarray(a) axis = kwargs.get('axis', None) if axis is not None: keepdim = list(a.shape) nd = a.ndim axis = _nx.normalize_axis_tuple(axis, nd) for ax in axis: keepdim[ax] = 1 if len(axis) == 1: kwargs['axis'] = axis[0] else: keep = set(range(nd)) - set(axis) nkeep = len(keep) # swap axis that should not be reduced to front for i, s in enumerate(sorted(keep)): a = a.swapaxes(i, s) # merge reduced axis a = a.reshape(a.shape[:nkeep] + (-1,)) kwargs['axis'] = -1 keepdim = tuple(keepdim) else: keepdim = (1,) * a.ndim r = func(a, **kwargs) return r, keepdim def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): """ Compute the median along the specified axis. Returns the median of the array elements. Parameters ---------- a : array_like Input array or object that can be converted to an array. axis : {int, sequence of int, None}, optional Axis or axes along which the medians are computed. The default is to compute the median along a flattened version of the array. A sequence of axes is supported since version 1.9.0. 
out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. overwrite_input : bool, optional If True, then allow use of memory of input array `a` for calculations. The input array will be modified by the call to `median`. This will save memory when you do not need to preserve the contents of the input array. Treat the input as undefined, but it will probably be fully or partially sorted. Default is False. If `overwrite_input` is ``True`` and `a` is not already an `ndarray`, an error will be raised. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. .. versionadded:: 1.9.0 Returns ------- median : ndarray A new array holding the result. If the input contains integers or floats smaller than ``float64``, then the output data-type is ``np.float64``. Otherwise, the data-type of the output is the same as that of the input. If `out` is specified, that array is returned instead. See Also -------- mean, percentile Notes ----- Given a vector ``V`` of length ``N``, the median of ``V`` is the middle value of a sorted copy of ``V``, ``V_sorted`` - i e., ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the two middle values of ``V_sorted`` when ``N`` is even. Examples -------- >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], [ 3, 2, 1]]) >>> np.median(a) 3.5 >>> np.median(a, axis=0) array([ 6.5, 4.5, 2.5]) >>> np.median(a, axis=1) array([ 7., 2.]) >>> m = np.median(a, axis=0) >>> out = np.zeros_like(m) >>> np.median(a, axis=0, out=m) array([ 6.5, 4.5, 2.5]) >>> m array([ 6.5, 4.5, 2.5]) >>> b = a.copy() >>> np.median(b, axis=1, overwrite_input=True) array([ 7., 2.]) >>> assert not np.all(a==b) >>> b = a.copy() >>> np.median(b, axis=None, overwrite_input=True) 3.5 >>> assert not np.all(a==b) """ r, k = _ureduce(a, func=_median, axis=axis, out=out, overwrite_input=overwrite_input) if keepdims: return r.reshape(k) else: return r def _median(a, axis=None, out=None, overwrite_input=False): # can't be reasonably be implemented in terms of percentile as we have to # call mean to not break astropy a = np.asanyarray(a) # Set the partition indexes if axis is None: sz = a.size else: sz = a.shape[axis] if sz % 2 == 0: szh = sz // 2 kth = [szh - 1, szh] else: kth = [(sz - 1) // 2] # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): kth.append(-1) if overwrite_input: if axis is None: part = a.ravel() part.partition(kth) else: a.partition(kth, axis=axis) part = a else: part = partition(a, kth, axis=axis) if part.shape == (): # make 0-D arrays work return part.item() if axis is None: axis = 0 indexer = [slice(None)] * part.ndim index = part.shape[axis] // 2 if part.shape[axis] % 2 == 1: # index with slice to allow mean (below) to work indexer[axis] = slice(index, index+1) else: indexer[axis] = slice(index-1, index+1) # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact) and sz > 0: # warn and return nans like mean would rout = mean(part[indexer], axis=axis, out=out) return np.lib.utils._median_nancheck(part, rout, axis, out) else: # if there are no nans # Use mean in odd and even case to coerce data type # and check, use out array. 
return mean(part[indexer], axis=axis, out=out) def percentile(a, q, axis=None, out=None, overwrite_input=False, interpolation='linear', keepdims=False): """ Compute the qth percentile of the data along the specified axis. Returns the qth percentile(s) of the array elements. Parameters ---------- a : array_like Input array or object that can be converted to an array. q : float in range of [0,100] (or sequence of floats) Percentile to compute, which must be between 0 and 100 inclusive. axis : {int, sequence of int, None}, optional Axis or axes along which the percentiles are computed. The default is to compute the percentile(s) along a flattened version of the array. A sequence of axes is supported since version 1.9.0. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. overwrite_input : bool, optional If True, then allow use of memory of input array `a` calculations. The input array will be modified by the call to `percentile`. This will save memory when you do not need to preserve the contents of the input array. In this case you should not make any assumptions about the contents of the input `a` after this function completes -- treat it as undefined. Default is False. If `a` is not already an array, this parameter will have no effect as `a` will be converted to an array internally regardless of the value of this parameter. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points ``i < j``: * linear: ``i + (j - i) * fraction``, where ``fraction`` is the fractional part of the index surrounded by ``i`` and ``j``. * lower: ``i``. * higher: ``j``. * nearest: ``i`` or ``j``, whichever is nearest. * midpoint: ``(i + j) / 2``. .. versionadded:: 1.9.0 keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original array `a`. .. versionadded:: 1.9.0 Returns ------- percentile : scalar or ndarray If `q` is a single percentile and `axis=None`, then the result is a scalar. If multiple percentiles are given, first axis of the result corresponds to the percentiles. The other axes are the axes that remain after the reduction of `a`. If the input contains integers or floats smaller than ``float64``, the output data-type is ``float64``. Otherwise, the output data-type is the same as that of the input. If `out` is specified, that array is returned instead. See Also -------- mean, median, nanpercentile Notes ----- Given a vector ``V`` of length ``N``, the ``q``-th percentile of ``V`` is the value ``q/100`` of the way from the minimum to the maximum in a sorted copy of ``V``. The values and distances of the two nearest neighbors as well as the `interpolation` parameter will determine the percentile if the normalized ranking does not match the location of ``q`` exactly. This function is the same as the median if ``q=50``, the same as the minimum if ``q=0`` and the same as the maximum if ``q=100``. 
Examples -------- >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], [ 3, 2, 1]]) >>> np.percentile(a, 50) 3.5 >>> np.percentile(a, 50, axis=0) array([[ 6.5, 4.5, 2.5]]) >>> np.percentile(a, 50, axis=1) array([ 7., 2.]) >>> np.percentile(a, 50, axis=1, keepdims=True) array([[ 7.], [ 2.]]) >>> m = np.percentile(a, 50, axis=0) >>> out = np.zeros_like(m) >>> np.percentile(a, 50, axis=0, out=out) array([[ 6.5, 4.5, 2.5]]) >>> m array([[ 6.5, 4.5, 2.5]]) >>> b = a.copy() >>> np.percentile(b, 50, axis=1, overwrite_input=True) array([ 7., 2.]) >>> assert not np.all(a == b) """ q = array(q, dtype=np.float64, copy=True) r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out, overwrite_input=overwrite_input, interpolation=interpolation) if keepdims: return r.reshape(q.shape + k) else: return r def _percentile(a, q, axis=None, out=None, overwrite_input=False, interpolation='linear', keepdims=False): a = asarray(a) if q.ndim == 0: # Do not allow 0-d arrays because following code fails for scalar zerod = True q = q[None] else: zerod = False # avoid expensive reductions, relevant for arrays with < O(1000) elements if q.size < 10: for i in range(q.size): if q[i] < 0. or q[i] > 100.: raise ValueError("Percentiles must be in the range [0,100]") q[i] /= 100. else: # faster than any() if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.): raise ValueError("Percentiles must be in the range [0,100]") q /= 100. # prepare a for partioning if overwrite_input: if axis is None: ap = a.ravel() else: ap = a else: if axis is None: ap = a.flatten() else: ap = a.copy() if axis is None: axis = 0 Nx = ap.shape[axis] indices = q * (Nx - 1) # round fractional indices according to interpolation method if interpolation == 'lower': indices = floor(indices).astype(intp) elif interpolation == 'higher': indices = ceil(indices).astype(intp) elif interpolation == 'midpoint': indices = 0.5 * (floor(indices) + ceil(indices)) elif interpolation == 'nearest': indices = around(indices).astype(intp) elif interpolation == 'linear': pass # keep index as fraction and interpolate else: raise ValueError( "interpolation can only be 'linear', 'lower' 'higher', " "'midpoint', or 'nearest'") n = np.array(False, dtype=bool) # check for nan's flag if indices.dtype == intp: # take the points along axis # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices = concatenate((indices, [-1])) ap.partition(indices, axis=axis) # ensure axis with qth is first ap = np.moveaxis(ap, axis, 0) axis = 0 # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices = indices[:-1] n = np.isnan(ap[-1:, ...]) if zerod: indices = indices[0] r = take(ap, indices, axis=axis, out=out) else: # weight the points above and below the indices indices_below = floor(indices).astype(intp) indices_above = indices_below + 1 indices_above[indices_above > Nx - 1] = Nx - 1 # Check if the array contains any nan's if np.issubdtype(a.dtype, np.inexact): indices_above = concatenate((indices_above, [-1])) weights_above = indices - indices_below weights_below = 1.0 - weights_above weights_shape = [1, ] * ap.ndim weights_shape[axis] = len(indices) weights_below.shape = weights_shape weights_above.shape = weights_shape ap.partition(concatenate((indices_below, indices_above)), axis=axis) # ensure axis with qth is first ap = np.moveaxis(ap, axis, 0) weights_below = np.moveaxis(weights_below, axis, 0) weights_above = np.moveaxis(weights_above, axis, 0) axis = 0 # Check if the array contains any nan's if 
np.issubdtype(a.dtype, np.inexact): indices_above = indices_above[:-1] n = np.isnan(ap[-1:, ...]) x1 = take(ap, indices_below, axis=axis) * weights_below x2 = take(ap, indices_above, axis=axis) * weights_above # ensure axis with qth is first x1 = np.moveaxis(x1, axis, 0) x2 = np.moveaxis(x2, axis, 0) if zerod: x1 = x1.squeeze(0) x2 = x2.squeeze(0) if out is not None: r = add(x1, x2, out=out) else: r = add(x1, x2) if np.any(n): warnings.warn("Invalid value encountered in percentile", RuntimeWarning, stacklevel=3) if zerod: if ap.ndim == 1: if out is not None: out[...] = a.dtype.type(np.nan) r = out else: r = a.dtype.type(np.nan) else: r[..., n.squeeze(0)] = a.dtype.type(np.nan) else: if r.ndim == 1: r[:] = a.dtype.type(np.nan) else: r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan) return r def trapz(y, x=None, dx=1.0, axis=-1): """ Integrate along the given axis using the composite trapezoidal rule. Integrate `y` (`x`) along given axis. Parameters ---------- y : array_like Input array to integrate. x : array_like, optional The sample points corresponding to the `y` values. If `x` is None, the sample points are assumed to be evenly spaced `dx` apart. The default is None. dx : scalar, optional The spacing between sample points when `x` is None. The default is 1. axis : int, optional The axis along which to integrate. Returns ------- trapz : float Definite integral as approximated by trapezoidal rule. See Also -------- sum, cumsum Notes ----- Image [2]_ illustrates trapezoidal rule -- y-axis locations of points will be taken from `y` array, by default x-axis distances between points will be 1.0, alternatively they can be provided with `x` array or with `dx` scalar. Return value will be equal to combined area under the red lines. References ---------- .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule .. [2] Illustration image: http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png Examples -------- >>> np.trapz([1,2,3]) 4.0 >>> np.trapz([1,2,3], x=[4,6,8]) 8.0 >>> np.trapz([1,2,3], dx=2) 8.0 >>> a = np.arange(6).reshape(2, 3) >>> a array([[0, 1, 2], [3, 4, 5]]) >>> np.trapz(a, axis=0) array([ 1.5, 2.5, 3.5]) >>> np.trapz(a, axis=1) array([ 2., 8.]) """ y = asanyarray(y) if x is None: d = dx else: x = asanyarray(x) if x.ndim == 1: d = diff(x) # reshape to correct shape shape = [1]*y.ndim shape[axis] = d.shape[0] d = d.reshape(shape) else: d = diff(x, axis=axis) nd = y.ndim slice1 = [slice(None)]*nd slice2 = [slice(None)]*nd slice1[axis] = slice(1, None) slice2[axis] = slice(None, -1) try: ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis) except ValueError: # Operations didn't work, cast to ndarray d = np.asarray(d) y = np.asarray(y) ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis) return ret #always succeed def add_newdoc(place, obj, doc): """ Adds documentation to obj which is in module place. If doc is a string add it to obj as a docstring If doc is a tuple, then the first element is interpreted as an attribute of obj and the second as the docstring (method, docstring) If doc is a list, then each element of the list should be a sequence of length two --> [(method1, docstring1), (method2, docstring2), ...] This routine never raises an error. This routine cannot modify read-only docstrings, as appear in new-style classes or built-in functions. Because this routine never raises an error the caller must check manually that the docstrings were changed. 
""" try: new = getattr(__import__(place, globals(), {}, [obj]), obj) if isinstance(doc, str): add_docstring(new, doc.strip()) elif isinstance(doc, tuple): add_docstring(getattr(new, doc[0]), doc[1].strip()) elif isinstance(doc, list): for val in doc: add_docstring(getattr(new, val[0]), val[1].strip()) except Exception: pass # Based on scitools meshgrid def meshgrid(*xi, **kwargs): """ Return coordinate matrices from coordinate vectors. Make N-D coordinate arrays for vectorized evaluations of N-D scalar/vector fields over N-D grids, given one-dimensional coordinate arrays x1, x2,..., xn. .. versionchanged:: 1.9 1-D and 0-D cases are allowed. Parameters ---------- x1, x2,..., xn : array_like 1-D arrays representing the coordinates of a grid. indexing : {'xy', 'ij'}, optional Cartesian ('xy', default) or matrix ('ij') indexing of output. See Notes for more details. .. versionadded:: 1.7.0 sparse : bool, optional If True a sparse grid is returned in order to conserve memory. Default is False. .. versionadded:: 1.7.0 copy : bool, optional If False, a view into the original arrays are returned in order to conserve memory. Default is True. Please note that ``sparse=False, copy=False`` will likely return non-contiguous arrays. Furthermore, more than one element of a broadcast array may refer to a single memory location. If you need to write to the arrays, make copies first. .. versionadded:: 1.7.0 Returns ------- X1, X2,..., XN : ndarray For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` , return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij' or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy' with the elements of `xi` repeated to fill the matrix along the first dimension for `x1`, the second for `x2` and so on. Notes ----- This function supports both indexing conventions through the indexing keyword argument. Giving the string 'ij' returns a meshgrid with matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing. In the 2-D case with inputs of length M and N, the outputs are of shape (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case with inputs of length M, N and P, outputs are of shape (N, M, P) for 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is illustrated by the following code snippet:: xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij') for i in range(nx): for j in range(ny): # treat xv[i,j], yv[i,j] xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy') for i in range(nx): for j in range(ny): # treat xv[j,i], yv[j,i] In the 1-D and 0-D case, the indexing and sparse keywords have no effect. See Also -------- index_tricks.mgrid : Construct a multi-dimensional "meshgrid" using indexing notation. index_tricks.ogrid : Construct an open multi-dimensional "meshgrid" using indexing notation. Examples -------- >>> nx, ny = (3, 2) >>> x = np.linspace(0, 1, nx) >>> y = np.linspace(0, 1, ny) >>> xv, yv = np.meshgrid(x, y) >>> xv array([[ 0. , 0.5, 1. ], [ 0. , 0.5, 1. ]]) >>> yv array([[ 0., 0., 0.], [ 1., 1., 1.]]) >>> xv, yv = np.meshgrid(x, y, sparse=True) # make sparse output arrays >>> xv array([[ 0. , 0.5, 1. ]]) >>> yv array([[ 0.], [ 1.]]) `meshgrid` is very useful to evaluate functions on a grid. 
>>> x = np.arange(-5, 5, 0.1) >>> y = np.arange(-5, 5, 0.1) >>> xx, yy = np.meshgrid(x, y, sparse=True) >>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2) >>> h = plt.contourf(x,y,z) """ ndim = len(xi) copy_ = kwargs.pop('copy', True) sparse = kwargs.pop('sparse', False) indexing = kwargs.pop('indexing', 'xy') if kwargs: raise TypeError("meshgrid() got an unexpected keyword argument '%s'" % (list(kwargs)[0],)) if indexing not in ['xy', 'ij']: raise ValueError( "Valid values for `indexing` are 'xy' and 'ij'.") s0 = (1,) * ndim output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:]) for i, x in enumerate(xi)] if indexing == 'xy' and ndim > 1: # switch first and second axis output[0].shape = (1, -1) + s0[2:] output[1].shape = (-1, 1) + s0[2:] if not sparse: # Return the full N-D matrix (not only the 1-D vector) output = np.broadcast_arrays(*output, subok=True) if copy_: output = [x.copy() for x in output] return output def delete(arr, obj, axis=None): """ Return a new array with sub-arrays along an axis deleted. For a one dimensional array, this returns those entries not returned by `arr[obj]`. Parameters ---------- arr : array_like Input array. obj : slice, int or array of ints Indicate which sub-arrays to remove. axis : int, optional The axis along which to delete the subarray defined by `obj`. If `axis` is None, `obj` is applied to the flattened array. Returns ------- out : ndarray A copy of `arr` with the elements specified by `obj` removed. Note that `delete` does not occur in-place. If `axis` is None, `out` is a flattened array. See Also -------- insert : Insert elements into an array. append : Append elements at the end of an array. Notes ----- Often it is preferable to use a boolean mask. For example: >>> mask = np.ones(len(arr), dtype=bool) >>> mask[[0,2,4]] = False >>> result = arr[mask,...] Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further use of `mask`. 
Examples -------- >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) >>> arr array([[ 1, 2, 3, 4], [ 5, 6, 7, 8], [ 9, 10, 11, 12]]) >>> np.delete(arr, 1, 0) array([[ 1, 2, 3, 4], [ 9, 10, 11, 12]]) >>> np.delete(arr, np.s_[::2], 1) array([[ 2, 4], [ 6, 8], [10, 12]]) >>> np.delete(arr, [1,3,5], None) array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) """ wrap = None if type(arr) is not ndarray: try: wrap = arr.__array_wrap__ except AttributeError: pass arr = asarray(arr) ndim = arr.ndim arrorder = 'F' if arr.flags.fnc else 'C' if axis is None: if ndim != 1: arr = arr.ravel() ndim = arr.ndim axis = -1 if ndim == 0: # 2013-09-24, 1.9 warnings.warn( "in the future the special handling of scalars will be removed " "from delete and raise an error", DeprecationWarning, stacklevel=2) if wrap: return wrap(arr) else: return arr.copy(order=arrorder) axis = normalize_axis_index(axis, ndim) slobj = [slice(None)]*ndim N = arr.shape[axis] newshape = list(arr.shape) if isinstance(obj, slice): start, stop, step = obj.indices(N) xr = range(start, stop, step) numtodel = len(xr) if numtodel <= 0: if wrap: return wrap(arr.copy(order=arrorder)) else: return arr.copy(order=arrorder) # Invert if step is negative: if step < 0: step = -step start = xr[-1] stop = xr[0] + 1 newshape[axis] -= numtodel new = empty(newshape, arr.dtype, arrorder) # copy initial chunk if start == 0: pass else: slobj[axis] = slice(None, start) new[slobj] = arr[slobj] # copy end chunck if stop == N: pass else: slobj[axis] = slice(stop-numtodel, None) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(stop, None) new[slobj] = arr[slobj2] # copy middle pieces if step == 1: pass else: # use array indexing. keep = ones(stop-start, dtype=bool) keep[:stop-start:step] = False slobj[axis] = slice(start, stop-numtodel) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(start, stop) arr = arr[slobj2] slobj2[axis] = keep new[slobj] = arr[slobj2] if wrap: return wrap(new) else: return new _obj = obj obj = np.asarray(obj) # After removing the special handling of booleans and out of # bounds values, the conversion to the array can be removed. if obj.dtype == bool: warnings.warn("in the future insert will treat boolean arrays and " "array-likes as boolean index instead of casting it " "to integer", FutureWarning, stacklevel=2) obj = obj.astype(intp) if isinstance(_obj, (int, long, integer)): # optimization for a single value obj = obj.item() if (obj < -N or obj >= N): raise IndexError( "index %i is out of bounds for axis %i with " "size %i" % (obj, axis, N)) if (obj < 0): obj += N newshape[axis] -= 1 new = empty(newshape, arr.dtype, arrorder) slobj[axis] = slice(None, obj) new[slobj] = arr[slobj] slobj[axis] = slice(obj, None) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(obj+1, None) new[slobj] = arr[slobj2] else: if obj.size == 0 and not isinstance(_obj, np.ndarray): obj = obj.astype(intp) if not np.can_cast(obj, intp, 'same_kind'): # obj.size = 1 special case always failed and would just # give superfluous warnings. 
# 2013-09-24, 1.9 warnings.warn( "using a non-integer array as obj in delete will result in an " "error in the future", DeprecationWarning, stacklevel=2) obj = obj.astype(intp) keep = ones(N, dtype=bool) # Test if there are out of bound indices, this is deprecated inside_bounds = (obj < N) & (obj >= -N) if not inside_bounds.all(): # 2013-09-24, 1.9 warnings.warn( "in the future out of bounds indices will raise an error " "instead of being ignored by `numpy.delete`.", DeprecationWarning, stacklevel=2) obj = obj[inside_bounds] positive_indices = obj >= 0 if not positive_indices.all(): warnings.warn( "in the future negative indices will not be ignored by " "`numpy.delete`.", FutureWarning, stacklevel=2) obj = obj[positive_indices] keep[obj, ] = False slobj[axis] = keep new = arr[slobj] if wrap: return wrap(new) else: return new def insert(arr, obj, values, axis=None): """ Insert values along the given axis before the given indices. Parameters ---------- arr : array_like Input array. obj : int, slice or sequence of ints Object that defines the index or indices before which `values` is inserted. .. versionadded:: 1.8.0 Support for multiple insertions when `obj` is a single scalar or a sequence with one element (similar to calling insert multiple times). values : array_like Values to insert into `arr`. If the type of `values` is different from that of `arr`, `values` is converted to the type of `arr`. `values` should be shaped so that ``arr[...,obj,...] = values`` is legal. axis : int, optional Axis along which to insert `values`. If `axis` is None then `arr` is flattened first. Returns ------- out : ndarray A copy of `arr` with `values` inserted. Note that `insert` does not occur in-place: a new array is returned. If `axis` is None, `out` is a flattened array. See Also -------- append : Append elements at the end of an array. concatenate : Join a sequence of arrays along an existing axis. delete : Delete elements from an array. Notes ----- Note that for higher dimensional inserts `obj=0` behaves very different from `obj=[0]` just like `arr[:,0,:] = values` is different from `arr[:,[0],:] = values`. Examples -------- >>> a = np.array([[1, 1], [2, 2], [3, 3]]) >>> a array([[1, 1], [2, 2], [3, 3]]) >>> np.insert(a, 1, 5) array([1, 5, 1, 2, 2, 3, 3]) >>> np.insert(a, 1, 5, axis=1) array([[1, 5, 1], [2, 5, 2], [3, 5, 3]]) Difference between sequence and scalars: >>> np.insert(a, [1], [[1],[2],[3]], axis=1) array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]) >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1), ... np.insert(a, [1], [[1],[2],[3]], axis=1)) True >>> b = a.flatten() >>> b array([1, 1, 2, 2, 3, 3]) >>> np.insert(b, [2, 2], [5, 6]) array([1, 1, 5, 6, 2, 2, 3, 3]) >>> np.insert(b, slice(2, 4), [5, 6]) array([1, 1, 5, 2, 6, 2, 3, 3]) >>> np.insert(b, [2, 2], [7.13, False]) # type casting array([1, 1, 7, 0, 2, 2, 3, 3]) >>> x = np.arange(8).reshape(2, 4) >>> idx = (1, 3) >>> np.insert(x, idx, 999, axis=1) array([[ 0, 999, 1, 2, 999, 3], [ 4, 999, 5, 6, 999, 7]]) """ wrap = None if type(arr) is not ndarray: try: wrap = arr.__array_wrap__ except AttributeError: pass arr = asarray(arr) ndim = arr.ndim arrorder = 'F' if arr.flags.fnc else 'C' if axis is None: if ndim != 1: arr = arr.ravel() ndim = arr.ndim axis = ndim - 1 elif ndim == 0: # 2013-09-24, 1.9 warnings.warn( "in the future the special handling of scalars will be removed " "from insert and raise an error", DeprecationWarning, stacklevel=2) arr = arr.copy(order=arrorder) arr[...] 
= values if wrap: return wrap(arr) else: return arr else: axis = normalize_axis_index(axis, ndim) slobj = [slice(None)]*ndim N = arr.shape[axis] newshape = list(arr.shape) if isinstance(obj, slice): # turn it into a range object indices = arange(*obj.indices(N), **{'dtype': intp}) else: # need to copy obj, because indices will be changed in-place indices = np.array(obj) if indices.dtype == bool: # See also delete warnings.warn( "in the future insert will treat boolean arrays and " "array-likes as a boolean index instead of casting it to " "integer", FutureWarning, stacklevel=2) indices = indices.astype(intp) # Code after warning period: #if obj.ndim != 1: # raise ValueError('boolean array argument obj to insert ' # 'must be one dimensional') #indices = np.flatnonzero(obj) elif indices.ndim > 1: raise ValueError( "index array argument obj to insert must be one dimensional " "or scalar") if indices.size == 1: index = indices.item() if index < -N or index > N: raise IndexError( "index %i is out of bounds for axis %i with " "size %i" % (obj, axis, N)) if (index < 0): index += N # There are some object array corner cases here, but we cannot avoid # that: values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype) if indices.ndim == 0: # broadcasting is very different here, since a[:,0,:] = ... behaves # very different from a[:,[0],:] = ...! This changes values so that # it works likes the second case. (here a[:,0:1,:]) values = np.moveaxis(values, 0, axis) numnew = values.shape[axis] newshape[axis] += numnew new = empty(newshape, arr.dtype, arrorder) slobj[axis] = slice(None, index) new[slobj] = arr[slobj] slobj[axis] = slice(index, index+numnew) new[slobj] = values slobj[axis] = slice(index+numnew, None) slobj2 = [slice(None)] * ndim slobj2[axis] = slice(index, None) new[slobj] = arr[slobj2] if wrap: return wrap(new) return new elif indices.size == 0 and not isinstance(obj, np.ndarray): # Can safely cast the empty list to intp indices = indices.astype(intp) if not np.can_cast(indices, intp, 'same_kind'): # 2013-09-24, 1.9 warnings.warn( "using a non-integer array as obj in insert will result in an " "error in the future", DeprecationWarning, stacklevel=2) indices = indices.astype(intp) indices[indices < 0] += N numnew = len(indices) order = indices.argsort(kind='mergesort') # stable sort indices[order] += np.arange(numnew) newshape[axis] += numnew old_mask = ones(newshape[axis], dtype=bool) old_mask[indices] = False new = empty(newshape, arr.dtype, arrorder) slobj2 = [slice(None)]*ndim slobj[axis] = indices slobj2[axis] = old_mask new[slobj] = values new[slobj2] = arr if wrap: return wrap(new) return new def append(arr, values, axis=None): """ Append values to the end of an array. Parameters ---------- arr : array_like Values are appended to a copy of this array. values : array_like These values are appended to a copy of `arr`. It must be of the correct shape (the same shape as `arr`, excluding `axis`). If `axis` is not specified, `values` can be any shape and will be flattened before use. axis : int, optional The axis along which `values` are appended. If `axis` is not given, both `arr` and `values` are flattened before use. Returns ------- append : ndarray A copy of `arr` with `values` appended to `axis`. Note that `append` does not occur in-place: a new array is allocated and filled. If `axis` is None, `out` is a flattened array. See Also -------- insert : Insert elements into an array. delete : Delete elements from an array. 
Examples -------- >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) array([1, 2, 3, 4, 5, 6, 7, 8, 9]) When `axis` is specified, `values` must have the correct shape. >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0) array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) Traceback (most recent call last): ... ValueError: arrays must have same number of dimensions """ arr = asanyarray(arr) if axis is None: if arr.ndim != 1: arr = arr.ravel() values = ravel(values) axis = arr.ndim-1 return concatenate((arr, values), axis=axis)
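A brief usage sketch follows, separate from the library source above; it relies only on the public numpy entry points documented in that file (np.hamming, np.percentile), and the numeric comments are worked by hand from the formulas in the docstrings.

import numpy as np

# Window functions return a taper whose maximum value is normalized to one
# (the maximum appears only when the number of samples is odd).
w = np.hamming(5)
print(w)    # -> approximately [0.08, 0.54, 1.0, 0.54, 0.08]

# The default 'linear' interpolation in percentile blends the two nearest
# order statistics: for q=25 on four points the fractional index is
# 0.25 * (4 - 1) = 0.75, giving 1 + 0.75 * (2 - 1) = 1.75.
data = np.array([1.0, 2.0, 3.0, 4.0])
print(np.percentile(data, 25))                           # -> 1.75
print(np.percentile(data, 25, interpolation='lower'))    # -> 1.0
print(np.percentile(data, 25, interpolation='higher'))   # -> 2.0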
bsd-3-clause
gojira/tensorflow
tensorflow/contrib/training/python/training/feeding_queue_runner_test.py
76
5052
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests `FeedingQueueRunner` using arrays and `DataFrames`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.client import session from tensorflow.python.estimator.inputs.queues.feeding_functions import _enqueue_data as enqueue_data from tensorflow.python.framework import ops from tensorflow.python.platform import test from tensorflow.python.training import coordinator from tensorflow.python.training import queue_runner_impl # pylint: disable=g-import-not-at-top try: import pandas as pd HAS_PANDAS = True except ImportError: HAS_PANDAS = False def get_rows(array, row_indices): rows = [array[i] for i in row_indices] return np.vstack(rows) class FeedingQueueRunnerTestCase(test.TestCase): """Tests for `FeedingQueueRunner`.""" def testArrayFeeding(self): with ops.Graph().as_default(): array = np.arange(32).reshape([16, 2]) q = enqueue_data(array, capacity=100) batch_size = 3 dq_op = q.dequeue_many(batch_size) with session.Session() as sess: coord = coordinator.Coordinator() threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord) for i in range(100): indices = [ j % array.shape[0] for j in range(batch_size * i, batch_size * (i + 1)) ] expected_dq = get_rows(array, indices) dq = sess.run(dq_op) np.testing.assert_array_equal(indices, dq[0]) np.testing.assert_array_equal(expected_dq, dq[1]) coord.request_stop() coord.join(threads) def testArrayFeedingMultiThread(self): with ops.Graph().as_default(): array = np.arange(256).reshape([128, 2]) q = enqueue_data(array, capacity=128, num_threads=8, shuffle=True) batch_size = 3 dq_op = q.dequeue_many(batch_size) with session.Session() as sess: coord = coordinator.Coordinator() threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord) for _ in range(100): dq = sess.run(dq_op) indices = dq[0] expected_dq = get_rows(array, indices) np.testing.assert_array_equal(expected_dq, dq[1]) coord.request_stop() coord.join(threads) def testPandasFeeding(self): if not HAS_PANDAS: return with ops.Graph().as_default(): array1 = np.arange(32) array2 = np.arange(32, 64) df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96)) q = enqueue_data(df, capacity=100) batch_size = 5 dq_op = q.dequeue_many(5) with session.Session() as sess: coord = coordinator.Coordinator() threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord) for i in range(100): indices = [ j % array1.shape[0] for j in range(batch_size * i, batch_size * (i + 1)) ] expected_df_indices = df.index[indices] expected_rows = df.iloc[indices] dq = sess.run(dq_op) np.testing.assert_array_equal(expected_df_indices, dq[0]) for col_num, col in enumerate(df.columns): np.testing.assert_array_equal(expected_rows[col].values, dq[col_num + 1]) coord.request_stop() coord.join(threads) def 
testPandasFeedingMultiThread(self): if not HAS_PANDAS: return with ops.Graph().as_default(): array1 = np.arange(128, 256) array2 = 2 * array1 df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128)) q = enqueue_data(df, capacity=128, num_threads=8, shuffle=True) batch_size = 5 dq_op = q.dequeue_many(batch_size) with session.Session() as sess: coord = coordinator.Coordinator() threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord) for _ in range(100): dq = sess.run(dq_op) indices = dq[0] expected_rows = df.iloc[indices] for col_num, col in enumerate(df.columns): np.testing.assert_array_equal(expected_rows[col].values, dq[col_num + 1]) coord.request_stop() coord.join(threads) if __name__ == "__main__": test.main()
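The single-threaded tests above compute their expected batches by wrapping row indices modulo the array length; the sketch below reproduces that arithmetic in plain numpy, without a TensorFlow session. The helper name expected_batch is hypothetical and only for illustration.

import numpy as np

array = np.arange(32).reshape([16, 2])
batch_size = 3

def expected_batch(i):
    # Batch i holds rows batch_size*i .. batch_size*(i+1)-1, wrapped
    # modulo the number of rows, exactly as testArrayFeeding expects.
    indices = [j % array.shape[0]
               for j in range(batch_size * i, batch_size * (i + 1))]
    return indices, np.vstack([array[k] for k in indices])

indices, batch = expected_batch(6)
print(indices)      # -> [2, 3, 4]  (18, 19, 20 wrapped modulo 16)
print(batch.shape)  # -> (3, 2)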
apache-2.0
tomlof/scikit-learn
sklearn/datasets/tests/test_20news.py
75
3266
"""Test the 20news downloader, if the data is available.""" import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import SkipTest from sklearn import datasets def test_20news(): try: data = datasets.fetch_20newsgroups( subset='all', download_if_missing=False, shuffle=False) except IOError: raise SkipTest("Download 20 newsgroups to run this test") # Extract a reduced dataset data2cats = datasets.fetch_20newsgroups( subset='all', categories=data.target_names[-1:-3:-1], shuffle=False) # Check that the ordering of the target_names is the same # as the ordering in the full dataset assert_equal(data2cats.target_names, data.target_names[-2:]) # Assert that we have only 0 and 1 as labels assert_equal(np.unique(data2cats.target).tolist(), [0, 1]) # Check that the number of filenames is consistent with data/target assert_equal(len(data2cats.filenames), len(data2cats.target)) assert_equal(len(data2cats.filenames), len(data2cats.data)) # Check that the first entry of the reduced dataset corresponds to # the first entry of the corresponding category in the full dataset entry1 = data2cats.data[0] category = data2cats.target_names[data2cats.target[0]] label = data.target_names.index(category) entry2 = data.data[np.where(data.target == label)[0][0]] assert_equal(entry1, entry2) def test_20news_length_consistency(): """Checks the length consistencies within the bunch This is a non-regression test for a bug present in 0.16.1. """ try: data = datasets.fetch_20newsgroups( subset='all', download_if_missing=False, shuffle=False) except IOError: raise SkipTest("Download 20 newsgroups to run this test") # Extract the full dataset data = datasets.fetch_20newsgroups(subset='all') assert_equal(len(data['data']), len(data.data)) assert_equal(len(data['target']), len(data.target)) assert_equal(len(data['filenames']), len(data.filenames)) def test_20news_vectorized(): try: datasets.fetch_20newsgroups(subset='all', download_if_missing=False) except IOError: raise SkipTest("Download 20 newsgroups to run this test") # test subset = train bunch = datasets.fetch_20newsgroups_vectorized(subset="train") assert_true(sp.isspmatrix_csr(bunch.data)) assert_equal(bunch.data.shape, (11314, 130107)) assert_equal(bunch.target.shape[0], 11314) assert_equal(bunch.data.dtype, np.float64) # test subset = test bunch = datasets.fetch_20newsgroups_vectorized(subset="test") assert_true(sp.isspmatrix_csr(bunch.data)) assert_equal(bunch.data.shape, (7532, 130107)) assert_equal(bunch.target.shape[0], 7532) assert_equal(bunch.data.dtype, np.float64) # test subset = all bunch = datasets.fetch_20newsgroups_vectorized(subset='all') assert_true(sp.isspmatrix_csr(bunch.data)) assert_equal(bunch.data.shape, (11314 + 7532, 130107)) assert_equal(bunch.target.shape[0], 11314 + 7532) assert_equal(bunch.data.dtype, np.float64)
bsd-3-clause
lehai0609/ThinkStats2
code/hinc.py
67
1494
"""This file contains code used in "Think Stats", by Allen B. Downey, available from greenteapress.com Copyright 2014 Allen B. Downey License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html """ from __future__ import print_function import numpy as np import pandas import thinkplot import thinkstats2 def Clean(s): """Converts dollar amounts to integers.""" try: return int(s.lstrip('$').replace(',', '')) except ValueError: if s == 'Under': return 0 elif s == 'over': return np.inf return None def ReadData(filename='hinc06.csv'): """Reads filename and returns populations in thousands filename: string returns: pandas Series of populations in thousands """ data = pandas.read_csv(filename, header=None, skiprows=9) cols = data[[0, 1]] res = [] for _, row in cols.iterrows(): label, freq = row.values freq = int(freq.replace(',', '')) t = label.split() low, high = Clean(t[0]), Clean(t[-1]) res.append((high, freq)) df = pandas.DataFrame(res) # correct the first range df[0][0] -= 1 # compute the cumulative sum of the freqs df[2] = df[1].cumsum() # normalize the cumulative freqs total = df[2][41] df[3] = df[2] / total # add column names df.columns = ['income', 'freq', 'cumsum', 'ps'] return df def main(): df = ReadData() print(df) if __name__ == "__main__": main()
gpl-3.0
aleksandr-bakanov/astropy
astropy/convolution/kernels.py
6
33186
# Licensed under a 3-clause BSD style license - see LICENSE.rst import math import numpy as np from .core import Kernel1D, Kernel2D, Kernel from .utils import has_even_axis, raise_even_kernel_exception from astropy.modeling import models from astropy.modeling.core import Fittable1DModel, Fittable2DModel from astropy.utils.decorators import deprecated __all__ = ['Gaussian1DKernel', 'Gaussian2DKernel', 'CustomKernel', 'Box1DKernel', 'Box2DKernel', 'Tophat2DKernel', 'Trapezoid1DKernel', 'RickerWavelet1DKernel', 'RickerWavelet2DKernel', 'AiryDisk2DKernel', 'Moffat2DKernel', 'Model1DKernel', 'Model2DKernel', 'TrapezoidDisk2DKernel', 'Ring2DKernel'] def _round_up_to_odd_integer(value): i = math.ceil(value) if i % 2 == 0: return i + 1 else: return i class Gaussian1DKernel(Kernel1D): """ 1D Gaussian filter kernel. The Gaussian filter is a filter with great smoothing properties. It is isotropic and does not produce artifacts. Parameters ---------- stddev : number Standard deviation of the Gaussian kernel. x_size : odd int, optional Size of the kernel array. Default = 8 * stddev mode : str, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by linearly interpolating between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. Very slow. factor : number, optional Factor of oversampling. Default factor = 10. If the factor is too large, evaluation can be very slow. See Also -------- Box1DKernel, Trapezoid1DKernel, RickerWavelet1DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Gaussian1DKernel gauss_1D_kernel = Gaussian1DKernel(10) plt.plot(gauss_1D_kernel, drawstyle='steps') plt.xlabel('x [pixels]') plt.ylabel('value') plt.show() """ _separable = True _is_bool = False def __init__(self, stddev, **kwargs): self._model = models.Gaussian1D(1. / (np.sqrt(2 * np.pi) * stddev), 0, stddev) self._default_size = _round_up_to_odd_integer(8 * stddev) super().__init__(**kwargs) self._truncation = np.abs(1. - self._array.sum()) class Gaussian2DKernel(Kernel2D): """ 2D Gaussian filter kernel. The Gaussian filter is a filter with great smoothing properties. It is isotropic and does not produce artifacts. Parameters ---------- x_stddev : float Standard deviation of the Gaussian in x before rotating by theta. y_stddev : float Standard deviation of the Gaussian in y before rotating by theta. theta : float or :class:`~astropy.units.Quantity` Rotation angle. If passed as a float, it is assumed to be in radians. The rotation angle increases counterclockwise. x_size : odd int, optional Size in x direction of the kernel array. Default = 8 * stddev. y_size : odd int, optional Size in y direction of the kernel array. Default = 8 * stddev. mode : str, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. 
See Also -------- Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Gaussian2DKernel gaussian_2D_kernel = Gaussian2DKernel(10) plt.imshow(gaussian_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _separable = True _is_bool = False def __init__(self, x_stddev, y_stddev=None, theta=0.0, **kwargs): if y_stddev is None: y_stddev = x_stddev self._model = models.Gaussian2D(1. / (2 * np.pi * x_stddev * y_stddev), 0, 0, x_stddev=x_stddev, y_stddev=y_stddev, theta=theta) self._default_size = _round_up_to_odd_integer( 8 * np.max([x_stddev, y_stddev])) super().__init__(**kwargs) self._truncation = np.abs(1. - self._array.sum()) class Box1DKernel(Kernel1D): """ 1D Box filter kernel. The Box filter or running mean is a smoothing filter. It is not isotropic and can produce artifacts, when applied repeatedly to the same data. By default the Box kernel uses the ``linear_interp`` discretization mode, which allows non-shifting, even-sized kernels. This is achieved by weighting the edge pixels with 1/2. E.g a Box kernel with an effective smoothing of 4 pixel would have the following array: [0.5, 1, 1, 1, 0.5]. Parameters ---------- width : number Width of the filter kernel. mode : str, optional One of the following discretization modes: * 'center' Discretize model by taking the value at the center of the bin. * 'linear_interp' (default) Discretize model by linearly interpolating between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian1DKernel, Trapezoid1DKernel, RickerWavelet1DKernel Examples -------- Kernel response function: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Box1DKernel box_1D_kernel = Box1DKernel(9) plt.plot(box_1D_kernel, drawstyle='steps') plt.xlim(-1, 9) plt.xlabel('x [pixels]') plt.ylabel('value') plt.show() """ _separable = True _is_bool = True def __init__(self, width, **kwargs): self._model = models.Box1D(1. / width, 0, width) self._default_size = _round_up_to_odd_integer(width) kwargs['mode'] = 'linear_interp' super().__init__(**kwargs) self._truncation = 0 self.normalize() class Box2DKernel(Kernel2D): """ 2D Box filter kernel. The Box filter or running mean is a smoothing filter. It is not isotropic and can produce artifact, when applied repeatedly to the same data. By default the Box kernel uses the ``linear_interp`` discretization mode, which allows non-shifting, even-sized kernels. This is achieved by weighting the edge pixels with 1/2. Parameters ---------- width : number Width of the filter kernel. mode : str, optional One of the following discretization modes: * 'center' Discretize model by taking the value at the center of the bin. * 'linear_interp' (default) Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. 
See Also -------- Gaussian2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Box2DKernel box_2D_kernel = Box2DKernel(9) plt.imshow(box_2D_kernel, interpolation='none', origin='lower', vmin=0.0, vmax=0.015) plt.xlim(-1, 9) plt.ylim(-1, 9) plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _separable = True _is_bool = True def __init__(self, width, **kwargs): self._model = models.Box2D(1. / width ** 2, 0, 0, width, width) self._default_size = _round_up_to_odd_integer(width) kwargs['mode'] = 'linear_interp' super().__init__(**kwargs) self._truncation = 0 self.normalize() class Tophat2DKernel(Kernel2D): """ 2D Tophat filter kernel. The Tophat filter is an isotropic smoothing filter. It can produce artifacts when applied repeatedly on the same data. Parameters ---------- radius : int Radius of the filter kernel. mode : str, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Box2DKernel, RickerWavelet2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Tophat2DKernel tophat_2D_kernel = Tophat2DKernel(40) plt.imshow(tophat_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ def __init__(self, radius, **kwargs): self._model = models.Disk2D(1. / (np.pi * radius ** 2), 0, 0, radius) self._default_size = _round_up_to_odd_integer(2 * radius) super().__init__(**kwargs) self._truncation = 0 class Ring2DKernel(Kernel2D): """ 2D Ring filter kernel. The Ring filter kernel is the difference between two Tophat kernels of different width. This kernel is useful for, e.g., background estimation. Parameters ---------- radius_in : number Inner radius of the ring kernel. width : number Width of the ring kernel. mode : str, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. 
plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Ring2DKernel ring_2D_kernel = Ring2DKernel(9, 8) plt.imshow(ring_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ def __init__(self, radius_in, width, **kwargs): radius_out = radius_in + width self._model = models.Ring2D(1. / (np.pi * (radius_out ** 2 - radius_in ** 2)), 0, 0, radius_in, width) self._default_size = _round_up_to_odd_integer(2 * radius_out) super().__init__(**kwargs) self._truncation = 0 class Trapezoid1DKernel(Kernel1D): """ 1D trapezoid kernel. Parameters ---------- width : number Width of the filter kernel, defined as the width of the constant part, before it begins to slope down. slope : number Slope of the filter kernel's tails mode : str, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by linearly interpolating between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Box1DKernel, Gaussian1DKernel, RickerWavelet1DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Trapezoid1DKernel trapezoid_1D_kernel = Trapezoid1DKernel(17, slope=0.2) plt.plot(trapezoid_1D_kernel, drawstyle='steps') plt.xlabel('x [pixels]') plt.ylabel('amplitude') plt.xlim(-1, 28) plt.show() """ _is_bool = False def __init__(self, width, slope=1., **kwargs): self._model = models.Trapezoid1D(1, 0, width, slope) self._default_size = _round_up_to_odd_integer(width + 2. / slope) super().__init__(**kwargs) self._truncation = 0 self.normalize() class TrapezoidDisk2DKernel(Kernel2D): """ 2D trapezoid kernel. Parameters ---------- radius : number Width of the filter kernel, defined as the width of the constant part, before it begins to slope down. slope : number Slope of the filter kernel's tails mode : str, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import TrapezoidDisk2DKernel trapezoid_2D_kernel = TrapezoidDisk2DKernel(20, slope=0.2) plt.imshow(trapezoid_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _is_bool = False def __init__(self, radius, slope=1., **kwargs): self._model = models.TrapezoidDisk2D(1, 0, 0, radius, slope) self._default_size = _round_up_to_odd_integer(2 * radius + 2. 
/ slope) super().__init__(**kwargs) self._truncation = 0 self.normalize() class RickerWavelet1DKernel(Kernel1D): """ 1D Ricker wavelet filter kernel (sometimes known as a "Mexican Hat" kernel). The Ricker wavelet, or inverted Gaussian-Laplace filter, is a bandpass filter. It smooths the data and removes slowly varying or constant structures (e.g. Background). It is useful for peak or multi-scale detection. This kernel is derived from a normalized Gaussian function, by computing the second derivative. This results in an amplitude at the kernels center of 1. / (sqrt(2 * pi) * width ** 3). The normalization is the same as for `scipy.ndimage.gaussian_laplace`, except for a minus sign. .. note:: See https://github.com/astropy/astropy/pull/9445 for discussions related to renaming of this kernel. Parameters ---------- width : number Width of the filter kernel, defined as the standard deviation of the Gaussian function from which it is derived. x_size : odd int, optional Size in x direction of the kernel array. Default = 8 * width. mode : str, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by linearly interpolating between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Box1DKernel, Gaussian1DKernel, Trapezoid1DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import RickerWavelet1DKernel ricker_1d_kernel = RickerWavelet1DKernel(10) plt.plot(ricker_1d_kernel, drawstyle='steps') plt.xlabel('x [pixels]') plt.ylabel('value') plt.show() """ _is_bool = True def __init__(self, width, **kwargs): amplitude = 1.0 / (np.sqrt(2 * np.pi) * width ** 3) self._model = models.RickerWavelet1D(amplitude, 0, width) self._default_size = _round_up_to_odd_integer(8 * width) super().__init__(**kwargs) self._truncation = np.abs(self._array.sum() / self._array.size) class RickerWavelet2DKernel(Kernel2D): """ 2D Ricker wavelet filter kernel (sometimes known as a "Mexican Hat" kernel). The Ricker wavelet, or inverted Gaussian-Laplace filter, is a bandpass filter. It smooths the data and removes slowly varying or constant structures (e.g. Background). It is useful for peak or multi-scale detection. This kernel is derived from a normalized Gaussian function, by computing the second derivative. This results in an amplitude at the kernels center of 1. / (pi * width ** 4). The normalization is the same as for `scipy.ndimage.gaussian_laplace`, except for a minus sign. .. note:: See https://github.com/astropy/astropy/pull/9445 for discussions related to renaming of this kernel. Parameters ---------- width : number Width of the filter kernel, defined as the standard deviation of the Gaussian function from which it is derived. x_size : odd int, optional Size in x direction of the kernel array. Default = 8 * width. y_size : odd int, optional Size in y direction of the kernel array. Default = 8 * width. mode : str, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. 
* 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Box2DKernel, Tophat2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import RickerWavelet2DKernel ricker_2d_kernel = RickerWavelet2DKernel(10) plt.imshow(ricker_2d_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _is_bool = False def __init__(self, width, **kwargs): amplitude = 1.0 / (np.pi * width ** 4) self._model = models.RickerWavelet2D(amplitude, 0, 0, width) self._default_size = _round_up_to_odd_integer(8 * width) super().__init__(**kwargs) self._truncation = np.abs(self._array.sum() / self._array.size) class AiryDisk2DKernel(Kernel2D): """ 2D Airy disk kernel. This kernel models the diffraction pattern of a circular aperture. This kernel is normalized to a peak value of 1. Parameters ---------- radius : float The radius of the Airy disk kernel (radius of the first zero). x_size : odd int, optional Size in x direction of the kernel array. Default = 8 * radius. y_size : odd int, optional Size in y direction of the kernel array. Default = 8 * radius. mode : str, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import AiryDisk2DKernel airydisk_2D_kernel = AiryDisk2DKernel(10) plt.imshow(airydisk_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _is_bool = False def __init__(self, radius, **kwargs): self._model = models.AiryDisk2D(1, 0, 0, radius) self._default_size = _round_up_to_odd_integer(8 * radius) super().__init__(**kwargs) self.normalize() self._truncation = None class Moffat2DKernel(Kernel2D): """ 2D Moffat kernel. This kernel is a typical model for a seeing limited PSF. Parameters ---------- gamma : float Core width of the Moffat model. alpha : float Power index of the Moffat model. x_size : odd int, optional Size in x direction of the kernel array. Default = 8 * radius. y_size : odd int, optional Size in y direction of the kernel array. Default = 8 * radius. mode : str, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. 
See Also -------- Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Moffat2DKernel moffat_2D_kernel = Moffat2DKernel(3, 2) plt.imshow(moffat_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _is_bool = False def __init__(self, gamma, alpha, **kwargs): # Compute amplitude, from # https://en.wikipedia.org/wiki/Moffat_distribution amplitude = (alpha - 1.0) / (np.pi * gamma * gamma) self._model = models.Moffat2D(amplitude, 0, 0, gamma, alpha) self._default_size = _round_up_to_odd_integer(4.0 * self._model.fwhm) super().__init__(**kwargs) self.normalize() self._truncation = None class Model1DKernel(Kernel1D): """ Create kernel from 1D model. The model has to be centered on x = 0. Parameters ---------- model : `~astropy.modeling.Fittable1DModel` Kernel response function model x_size : odd int, optional Size in x direction of the kernel array. Default = 8 * width. mode : str, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by linearly interpolating between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. Raises ------ TypeError If model is not an instance of `~astropy.modeling.Fittable1DModel` See also -------- Model2DKernel : Create kernel from `~astropy.modeling.Fittable2DModel` CustomKernel : Create kernel from list or array Examples -------- Define a Gaussian1D model: >>> from astropy.modeling.models import Gaussian1D >>> from astropy.convolution.kernels import Model1DKernel >>> gauss = Gaussian1D(1, 0, 2) And create a custom one dimensional kernel from it: >>> gauss_kernel = Model1DKernel(gauss, x_size=9) This kernel can now be used like a usual Astropy kernel. """ _separable = False _is_bool = False def __init__(self, model, **kwargs): if isinstance(model, Fittable1DModel): self._model = model else: raise TypeError("Must be Fittable1DModel") super().__init__(**kwargs) class Model2DKernel(Kernel2D): """ Create kernel from 2D model. The model has to be centered on x = 0 and y = 0. Parameters ---------- model : `~astropy.modeling.Fittable2DModel` Kernel response function model x_size : odd int, optional Size in x direction of the kernel array. Default = 8 * width. y_size : odd int, optional Size in y direction of the kernel array. Default = 8 * width. mode : str, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. 
Raises ------ TypeError If model is not an instance of `~astropy.modeling.Fittable2DModel` See also -------- Model1DKernel : Create kernel from `~astropy.modeling.Fittable1DModel` CustomKernel : Create kernel from list or array Examples -------- Define a Gaussian2D model: >>> from astropy.modeling.models import Gaussian2D >>> from astropy.convolution.kernels import Model2DKernel >>> gauss = Gaussian2D(1, 0, 0, 2, 2) And create a custom two dimensional kernel from it: >>> gauss_kernel = Model2DKernel(gauss, x_size=9) This kernel can now be used like a usual astropy kernel. """ _is_bool = False _separable = False def __init__(self, model, **kwargs): self._separable = False if isinstance(model, Fittable2DModel): self._model = model else: raise TypeError("Must be Fittable2DModel") super().__init__(**kwargs) class PSFKernel(Kernel2D): """ Initialize filter kernel from astropy PSF instance. """ _separable = False def __init__(self): raise NotImplementedError('Not yet implemented') class CustomKernel(Kernel): """ Create filter kernel from list or array. Parameters ---------- array : list or array Filter kernel array. Size must be odd. Raises ------ TypeError If array is not a list or array. KernelSizeError If array size is even. See also -------- Model2DKernel, Model1DKernel Examples -------- Define one dimensional array: >>> from astropy.convolution.kernels import CustomKernel >>> import numpy as np >>> array = np.array([1, 2, 3, 2, 1]) >>> kernel = CustomKernel(array) >>> kernel.dimension 1 Define two dimensional array: >>> array = np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]]) >>> kernel = CustomKernel(array) >>> kernel.dimension 2 """ def __init__(self, array): self.array = array super().__init__(self._array) @property def array(self): """ Filter kernel array. """ return self._array @array.setter def array(self, array): """ Filter kernel array setter """ if isinstance(array, np.ndarray): self._array = array.astype(np.float64) elif isinstance(array, list): self._array = np.array(array, dtype=np.float64) else: raise TypeError("Must be list or array.") # Check if array is odd in all axes if has_even_axis(self): raise_even_kernel_exception() # Check if array is bool ones = self._array == 1. zeros = self._array == 0 self._is_bool = bool(np.all(np.logical_or(ones, zeros))) self._truncation = 0.0 @deprecated('4.0', alternative='RickerWavelet1DKernel') class MexicanHat1DKernel(RickerWavelet1DKernel): pass @deprecated('4.0', alternative='RickerWavelet2DKernel') class MexicanHat2DKernel(RickerWavelet2DKernel): pass
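A brief usage sketch of the kernels defined in this module: they are typically passed to astropy.convolution.convolve (or convolve_fft) to smooth an array. The image here is synthetic random data, used only for illustration.

import numpy as np
from astropy.convolution import Gaussian2DKernel, convolve

rng = np.random.default_rng(0)
image = rng.normal(size=(128, 128))      # noisy test image
image[64, 64] = 50.0                     # one bright pixel to be smeared out

kernel = Gaussian2DKernel(x_stddev=3)    # 2D Gaussian, stddev = 3 pixels
smoothed = convolve(image, kernel, boundary='extend')
print(smoothed.shape)                    # (128, 128)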
bsd-3-clause
lakshayg/tensorflow
tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py
52
69800
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for DNNLinearCombinedEstimators.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import json import tempfile import numpy as np from tensorflow.contrib.layers.python.layers import feature_column from tensorflow.contrib.learn.python.learn import experiment from tensorflow.contrib.learn.python.learn.datasets import base from tensorflow.contrib.learn.python.learn.estimators import _sklearn from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils from tensorflow.contrib.learn.python.learn.estimators import head as head_lib from tensorflow.contrib.learn.python.learn.estimators import model_fn from tensorflow.contrib.learn.python.learn.estimators import run_config from tensorflow.contrib.learn.python.learn.estimators import test_data from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec from tensorflow.contrib.metrics.python.ops import metric_ops from tensorflow.python.feature_column import feature_column as fc_core from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.losses import losses from tensorflow.python.platform import test from tensorflow.python.training import adagrad from tensorflow.python.training import ftrl from tensorflow.python.training import input as input_lib from tensorflow.python.training import learning_rate_decay from tensorflow.python.training import monitored_session from tensorflow.python.training import server_lib from tensorflow.python.training import session_run_hook from tensorflow.python.training import sync_replicas_optimizer from tensorflow.python.training import training_util def _assert_metrics_in_range(keys, metrics): epsilon = 0.00001 # Added for floating point edge cases. 
for key in keys: estimator_test_utils.assert_in_range(0.0 - epsilon, 1.0 + epsilon, key, metrics) class _CheckCallsHead(head_lib.Head): """Head that checks whether head_ops is called.""" def __init__(self): self._head_ops_called_times = 0 @property def logits_dimension(self): return 1 def create_model_fn_ops( self, mode, features, labels=None, train_op_fn=None, logits=None, logits_input=None, scope=None): """See `_Head`.""" self._head_ops_called_times += 1 loss = losses.mean_squared_error(labels, logits) return model_fn.ModelFnOps( mode, predictions={'loss': loss}, loss=loss, train_op=train_op_fn(loss), eval_metric_ops={'loss': loss}) @property def head_ops_called_times(self): return self._head_ops_called_times class _StepCounterHook(session_run_hook.SessionRunHook): """Counts the number of training steps.""" def __init__(self): self._steps = 0 def after_run(self, run_context, run_values): del run_context, run_values self._steps += 1 @property def steps(self): return self._steps class EmbeddingMultiplierTest(test.TestCase): """dnn_model_fn tests.""" def testRaisesNonEmbeddingColumn(self): one_hot_language = feature_column.one_hot_column( feature_column.sparse_column_with_hash_bucket('language', 10)) params = { 'dnn_feature_columns': [one_hot_language], 'head': head_lib.multi_class_head(2), 'dnn_hidden_units': [1], # Set lr mult to 0. to keep embeddings constant. 'embedding_lr_multipliers': { one_hot_language: 0.0 }, 'dnn_optimizer': 'Adagrad', } features = { 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [1, 0], [2, 0]], dense_shape=[3, 1]), } labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32) with self.assertRaisesRegexp(ValueError, 'can only be defined for embedding columns'): dnn_linear_combined._dnn_linear_combined_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params) def testMultipliesGradient(self): embedding_language = feature_column.embedding_column( feature_column.sparse_column_with_hash_bucket('language', 10), dimension=1, initializer=init_ops.constant_initializer(0.1)) embedding_wire = feature_column.embedding_column( feature_column.sparse_column_with_hash_bucket('wire', 10), dimension=1, initializer=init_ops.constant_initializer(0.1)) params = { 'dnn_feature_columns': [embedding_language, embedding_wire], 'head': head_lib.multi_class_head(2), 'dnn_hidden_units': [1], # Set lr mult to 0. to keep language embeddings constant, whereas wire # embeddings will be trained. 'embedding_lr_multipliers': { embedding_language: 0.0 }, 'dnn_optimizer': 'Adagrad', } with ops.Graph().as_default(): features = { 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [1, 0], [2, 0]], dense_shape=[3, 1]), 'wire': sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], indices=[[0, 0], [1, 0], [2, 0]], dense_shape=[3, 1]), } labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32) training_util.create_global_step() model_ops = dnn_linear_combined._dnn_linear_combined_model_fn( features, labels, model_fn.ModeKeys.TRAIN, params) with monitored_session.MonitoredSession() as sess: language_var = dnn_linear_combined._get_embedding_variable( embedding_language, 'dnn', 'dnn/input_from_feature_columns') language_initial_value = sess.run(language_var) for _ in range(2): _, language_value = sess.run([model_ops.train_op, language_var]) self.assertAllClose(language_value, language_initial_value) # We could also test that wire_value changed, but that test would be flaky. 
class DNNLinearCombinedEstimatorTest(test.TestCase): def testEstimatorContract(self): estimator_test_utils.assert_estimator_contract( self, dnn_linear_combined.DNNLinearCombinedEstimator) def testNoFeatureColumns(self): with self.assertRaisesRegexp( ValueError, 'Either linear_feature_columns or dnn_feature_columns must be defined'): dnn_linear_combined.DNNLinearCombinedEstimator( head=_CheckCallsHead(), linear_feature_columns=None, dnn_feature_columns=None, dnn_hidden_units=[3, 3]) def testCheckCallsHead(self): """Tests binary classification using matrix data as input.""" head = _CheckCallsHead() iris = test_data.prepare_iris_data_for_logistic_regression() cont_features = [ feature_column.real_valued_column('feature', dimension=4)] bucketized_feature = [feature_column.bucketized_column( cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))] estimator = dnn_linear_combined.DNNLinearCombinedEstimator( head, linear_feature_columns=bucketized_feature, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3]) estimator.fit(input_fn=test_data.iris_input_multiclass_fn, steps=10) self.assertEqual(1, head.head_ops_called_times) estimator.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=10) self.assertEqual(2, head.head_ops_called_times) estimator.predict(input_fn=test_data.iris_input_multiclass_fn) self.assertEqual(3, head.head_ops_called_times) class DNNLinearCombinedClassifierTest(test.TestCase): def testEstimatorContract(self): estimator_test_utils.assert_estimator_contract( self, dnn_linear_combined.DNNLinearCombinedClassifier) def testExperimentIntegration(self): cont_features = [feature_column.real_valued_column('feature', dimension=4)] exp = experiment.Experiment( estimator=dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=cont_features, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3]), train_input_fn=test_data.iris_input_logistic_fn, eval_input_fn=test_data.iris_input_logistic_fn) exp.test() def testNoFeatureColumns(self): with self.assertRaisesRegexp( ValueError, 'Either linear_feature_columns or dnn_feature_columns must be defined'): dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=None, dnn_feature_columns=None, dnn_hidden_units=[3, 3]) def testNoDnnHiddenUnits(self): def _input_fn(): return { 'age': constant_op.constant([1]), 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column.sparse_column_with_hash_bucket('language', 100) age = feature_column.real_valued_column('age') with self.assertRaisesRegexp( ValueError, 'dnn_hidden_units must be defined when dnn_feature_columns is ' 'specified'): classifier = dnn_linear_combined.DNNLinearCombinedClassifier( dnn_feature_columns=[age, language]) classifier.fit(input_fn=_input_fn, steps=2) def testSyncReplicasOptimizerUnsupported(self): cont_features = [feature_column.real_valued_column('feature', dimension=4)] sync_optimizer = sync_replicas_optimizer.SyncReplicasOptimizer( opt=adagrad.AdagradOptimizer(learning_rate=0.1), replicas_to_aggregate=1, total_num_replicas=1) sync_hook = sync_optimizer.make_session_run_hook(is_chief=True) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3], dnn_optimizer=sync_optimizer) with self.assertRaisesRegexp( ValueError, 'SyncReplicasOptimizer is not supported in DNNLinearCombined model'): classifier.fit( input_fn=test_data.iris_input_multiclass_fn, 
steps=100, monitors=[sync_hook]) def testEmbeddingMultiplier(self): embedding_language = feature_column.embedding_column( feature_column.sparse_column_with_hash_bucket('language', 10), dimension=1, initializer=init_ops.constant_initializer(0.1)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( dnn_feature_columns=[embedding_language], dnn_hidden_units=[3, 3], embedding_lr_multipliers={embedding_language: 0.8}) self.assertEqual({ embedding_language: 0.8 }, classifier.params['embedding_lr_multipliers']) def testInputPartitionSize(self): def _input_fn_float_label(num_epochs=None): features = { 'language': sparse_tensor.SparseTensor( values=input_lib.limit_epochs( ['en', 'fr', 'zh'], num_epochs=num_epochs), indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32) return features, labels language_column = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20) feature_columns = [ feature_column.embedding_column(language_column, dimension=1), ] # Set num_ps_replica to be 10 and the min slice size to be extremely small, # so as to ensure that there'll be 10 partititions produced. config = run_config.RunConfig(tf_random_seed=1) config._num_ps_replicas = 10 classifier = dnn_linear_combined.DNNLinearCombinedClassifier( n_classes=2, dnn_feature_columns=feature_columns, dnn_hidden_units=[3, 3], dnn_optimizer='Adagrad', config=config, input_layer_min_slice_size=1) # Ensure the param is passed in. self.assertTrue(callable(classifier.params['input_layer_partitioner'])) # Ensure the partition count is 10. classifier.fit(input_fn=_input_fn_float_label, steps=50) partition_count = 0 for name in classifier.get_variable_names(): if 'language_embedding' in name and 'Adagrad' in name: partition_count += 1 self.assertEqual(10, partition_count) def testLogisticRegression_MatrixData(self): """Tests binary classification using matrix data as input.""" iris = test_data.prepare_iris_data_for_logistic_regression() cont_features = [feature_column.real_valued_column('feature', dimension=4)] bucketized_feature = [ feature_column.bucketized_column( cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10)) ] classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=bucketized_feature, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3]) classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100) scores = classifier.evaluate( input_fn=test_data.iris_input_logistic_fn, steps=100) _assert_metrics_in_range(('accuracy', 'auc'), scores) def testLogisticRegression_TensorData(self): """Tests binary classification using Tensor data as input.""" def _input_fn(): iris = test_data.prepare_iris_data_for_logistic_regression() features = {} for i in range(4): # The following shows how to provide the Tensor data for # RealValuedColumns. features.update({ str(i): array_ops.reshape( constant_op.constant( iris.data[:, i], dtype=dtypes.float32), [-1, 1]) }) # The following shows how to provide the SparseTensor data for # a SparseColumn. 
features['dummy_sparse_column'] = sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [60, 0]], dense_shape=[len(iris.target), 2]) labels = array_ops.reshape( constant_op.constant( iris.target, dtype=dtypes.int32), [-1, 1]) return features, labels iris = test_data.prepare_iris_data_for_logistic_regression() cont_features = [ feature_column.real_valued_column(str(i)) for i in range(4) ] linear_features = [ feature_column.bucketized_column(cont_features[i], test_data.get_quantile_based_buckets( iris.data[:, i], 10)) for i in range(4) ] linear_features.append( feature_column.sparse_column_with_hash_bucket( 'dummy_sparse_column', hash_bucket_size=100)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=linear_features, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3]) classifier.fit(input_fn=_input_fn, steps=100) scores = classifier.evaluate(input_fn=_input_fn, steps=100) _assert_metrics_in_range(('accuracy', 'auc'), scores) def testEstimatorWithCoreFeatureColumns(self): """Tests binary classification using Tensor data as input.""" def _input_fn(): iris = test_data.prepare_iris_data_for_logistic_regression() features = {} for i in range(4): # The following shows how to provide the Tensor data for # RealValuedColumns. features.update({ str(i): array_ops.reshape( constant_op.constant(iris.data[:, i], dtype=dtypes.float32), [-1, 1]) }) # The following shows how to provide the SparseTensor data for # a SparseColumn. features['dummy_sparse_column'] = sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [60, 0]], dense_shape=[len(iris.target), 2]) labels = array_ops.reshape( constant_op.constant(iris.target, dtype=dtypes.int32), [-1, 1]) return features, labels iris = test_data.prepare_iris_data_for_logistic_regression() cont_features = [fc_core.numeric_column(str(i)) for i in range(4)] linear_features = [ fc_core.bucketized_column( cont_features[i], sorted(set(test_data.get_quantile_based_buckets( iris.data[:, i], 10)))) for i in range(4) ] linear_features.append( fc_core.categorical_column_with_hash_bucket( 'dummy_sparse_column', hash_bucket_size=100)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=linear_features, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3]) classifier.fit(input_fn=_input_fn, steps=100) scores = classifier.evaluate(input_fn=_input_fn, steps=100) _assert_metrics_in_range(('accuracy', 'auc'), scores) def testTrainWithPartitionedVariables(self): """Tests training with partitioned variables.""" def _input_fn(): features = { 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } labels = constant_op.constant([[1], [0], [0]]) return features, labels sparse_features = [ # The given hash_bucket_size results in variables larger than the # default min_slice_size attribute, so the variables are partitioned. feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=2e7) ] embedding_features = [ feature_column.embedding_column( sparse_features[0], dimension=1) ] tf_config = { 'cluster': { run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1'] } } with test.mock.patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}): config = run_config.RunConfig() # Because we did not start a distributed cluster, we need to pass an # empty ClusterSpec, otherwise the device_setter will look for # distributed jobs, such as "/job:ps" which are not present. 
config._cluster_spec = server_lib.ClusterSpec({}) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=sparse_features, dnn_feature_columns=embedding_features, dnn_hidden_units=[3, 3], config=config) classifier.fit(input_fn=_input_fn, steps=100) scores = classifier.evaluate(input_fn=_input_fn, steps=1) _assert_metrics_in_range(('accuracy', 'auc'), scores) def testMultiClass(self): """Tests multi-class classification using matrix data as input. Please see testLogisticRegression_TensorData() for how to use Tensor data as input instead. """ iris = base.load_iris() cont_features = [feature_column.real_valued_column('feature', dimension=4)] bucketized_features = [ feature_column.bucketized_column( cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10)) ] classifier = dnn_linear_combined.DNNLinearCombinedClassifier( n_classes=3, linear_feature_columns=bucketized_features, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3]) classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100) scores = classifier.evaluate( input_fn=test_data.iris_input_multiclass_fn, steps=100) _assert_metrics_in_range(('accuracy',), scores) def testMultiClassLabelKeys(self): """Tests n_classes > 2 with label_keys vocabulary for labels.""" # Byte literals needed for python3 test to pass. label_keys = [b'label0', b'label1', b'label2'] def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[.8], [0.2], [.1]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=input_lib.limit_epochs( ['en', 'fr', 'zh'], num_epochs=num_epochs), indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } labels = constant_op.constant( [[label_keys[1]], [label_keys[0]], [label_keys[0]]], dtype=dtypes.string) return features, labels language_column = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( n_classes=3, linear_feature_columns=[language_column], dnn_feature_columns=[ feature_column.embedding_column( language_column, dimension=1), feature_column.real_valued_column('age') ], dnn_hidden_units=[3, 3], label_keys=label_keys) classifier.fit(input_fn=_input_fn, steps=50) scores = classifier.evaluate(input_fn=_input_fn, steps=1) _assert_metrics_in_range(('accuracy',), scores) self.assertIn('loss', scores) predict_input_fn = functools.partial(_input_fn, num_epochs=1) predicted_classes = list( classifier.predict_classes( input_fn=predict_input_fn, as_iterable=True)) self.assertEqual(3, len(predicted_classes)) for pred in predicted_classes: self.assertIn(pred, label_keys) predictions = list( classifier.predict(input_fn=predict_input_fn, as_iterable=True)) self.assertAllEqual(predicted_classes, predictions) def testLoss(self): """Tests loss calculation.""" def _input_fn_train(): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) # The logistic prediction should be (y = 0.25). 
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),} labels = constant_op.constant([[1], [0], [0], [0]]) return features, labels classifier = dnn_linear_combined.DNNLinearCombinedClassifier( n_classes=2, linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) classifier.fit(input_fn=_input_fn_train, steps=100) scores = classifier.evaluate(input_fn=_input_fn_train, steps=1) # Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562 self.assertAlmostEqual(0.562, scores['loss'], delta=0.1) def testLossWithWeights(self): """Tests loss calculation with weights.""" def _input_fn_train(): # 4 rows with equal weight, one of them (y = x), three of them (y=Not(x)) # The logistic prediction should be (y = 0.25). features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[1.], [1.], [1.], [1.]]) } labels = constant_op.constant([[1.], [0.], [0.], [0.]]) return features, labels def _input_fn_eval(): # 4 rows, with different weights. features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[7.], [1.], [1.], [1.]]) } labels = constant_op.constant([[1.], [0.], [0.], [0.]]) return features, labels classifier = dnn_linear_combined.DNNLinearCombinedClassifier( weight_column_name='w', n_classes=2, linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) classifier.fit(input_fn=_input_fn_train, steps=100) scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1) # Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06 self.assertAlmostEqual(1.06, scores['loss'], delta=0.1) def testTrainWithWeights(self): """Tests training with given weight column.""" def _input_fn_train(): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) # First row has more weight than others. Model should fit (y=x) better # than (y=Not(x)) due to the relative higher weight of the first row. labels = constant_op.constant([[1], [0], [0], [0]]) features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[100.], [3.], [2.], [2.]]) } return features, labels def _input_fn_eval(): # Create 4 rows (y = x). 
labels = constant_op.constant([[1], [1], [1], [1]]) features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[1.], [1.], [1.], [1.]]) } return features, labels classifier = dnn_linear_combined.DNNLinearCombinedClassifier( weight_column_name='w', linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) classifier.fit(input_fn=_input_fn_train, steps=100) scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1) _assert_metrics_in_range(('accuracy',), scores) def testCustomOptimizerByObject(self): """Tests binary classification using matrix data as input.""" iris = test_data.prepare_iris_data_for_logistic_regression() cont_features = [feature_column.real_valued_column('feature', dimension=4)] bucketized_features = [ feature_column.bucketized_column( cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10)) ] classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=bucketized_features, linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1), dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3], dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1)) classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100) scores = classifier.evaluate( input_fn=test_data.iris_input_logistic_fn, steps=100) _assert_metrics_in_range(('accuracy',), scores) def testCustomOptimizerByString(self): """Tests binary classification using matrix data as input.""" iris = test_data.prepare_iris_data_for_logistic_regression() cont_features = [feature_column.real_valued_column('feature', dimension=4)] bucketized_features = [ feature_column.bucketized_column( cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10)) ] classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=bucketized_features, linear_optimizer='Ftrl', dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3], dnn_optimizer='Adagrad') classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100) scores = classifier.evaluate( input_fn=test_data.iris_input_logistic_fn, steps=100) _assert_metrics_in_range(('accuracy',), scores) def testCustomOptimizerByFunction(self): """Tests binary classification using matrix data as input.""" iris = test_data.prepare_iris_data_for_logistic_regression() cont_features = [feature_column.real_valued_column('feature', dimension=4)] bucketized_features = [ feature_column.bucketized_column( cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10)) ] def _optimizer_exp_decay(): global_step = training_util.get_global_step() learning_rate = learning_rate_decay.exponential_decay( learning_rate=0.1, global_step=global_step, decay_steps=100, decay_rate=0.001) return adagrad.AdagradOptimizer(learning_rate=learning_rate) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=bucketized_features, linear_optimizer=_optimizer_exp_decay, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3], dnn_optimizer=_optimizer_exp_decay) classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100) scores = classifier.evaluate( input_fn=test_data.iris_input_logistic_fn, steps=100) _assert_metrics_in_range(('accuracy',), scores) def testPredict(self): """Tests weight column in evaluation.""" def _input_fn_train(): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) labels = constant_op.constant([[1], 
[0], [0], [0]]) features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32)} return features, labels def _input_fn_predict(): y = input_lib.limit_epochs( array_ops.ones( shape=[4, 1], dtype=dtypes.float32), num_epochs=1) features = {'x': y} return features classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3]) classifier.fit(input_fn=_input_fn_train, steps=100) probs = list(classifier.predict_proba(input_fn=_input_fn_predict)) self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05) classes = list(classifier.predict_classes(input_fn=_input_fn_predict)) self.assertListEqual([0] * 4, classes) def testCustomMetrics(self): """Tests custom evaluation metrics.""" def _input_fn(num_epochs=None): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) labels = constant_op.constant([[1], [0], [0], [0]]) features = { 'x': input_lib.limit_epochs( array_ops.ones( shape=[4, 1], dtype=dtypes.float32), num_epochs=num_epochs) } return features, labels def _my_metric_op(predictions, labels): # For the case of binary classification, the 2nd column of "predictions" # denotes the model predictions. labels = math_ops.to_float(labels) predictions = array_ops.strided_slice( predictions, [0, 1], [-1, 2], end_mask=1) return math_ops.reduce_sum(math_ops.multiply(predictions, labels)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3]) classifier.fit(input_fn=_input_fn, steps=100) scores = classifier.evaluate( input_fn=_input_fn, steps=100, metrics={ 'my_accuracy': MetricSpec( metric_fn=metric_ops.streaming_accuracy, prediction_key='classes'), 'my_precision': MetricSpec( metric_fn=metric_ops.streaming_precision, prediction_key='classes'), 'my_metric': MetricSpec( metric_fn=_my_metric_op, prediction_key='probabilities') }) self.assertTrue( set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset( set(scores.keys()))) predict_input_fn = functools.partial(_input_fn, num_epochs=1) predictions = np.array(list(classifier.predict_classes( input_fn=predict_input_fn))) self.assertEqual( _sklearn.accuracy_score([1, 0, 0, 0], predictions), scores['my_accuracy']) # Test the case where the 2nd element of the key is neither "classes" nor # "probabilities". with self.assertRaisesRegexp(KeyError, 'bad_type'): classifier.evaluate( input_fn=_input_fn, steps=100, metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc}) # Test the case where the tuple of the key doesn't have 2 elements. with self.assertRaises(ValueError): classifier.evaluate( input_fn=_input_fn, steps=100, metrics={ ('bad_length_name', 'classes', 'bad_length'): metric_ops.streaming_accuracy }) # Test the case where the prediction_key is neither "classes" nor # "probabilities". 
with self.assertRaisesRegexp(KeyError, 'bad_type'): classifier.evaluate( input_fn=_input_fn, steps=100, metrics={ 'bad_name': MetricSpec( metric_fn=metric_ops.streaming_auc, prediction_key='bad_type') }) def testVariableQuery(self): """Tests get_variable_names and get_variable_value.""" def _input_fn_train(): # Create 4 rows, three (y = x), one (y=Not(x)) labels = constant_op.constant([[1], [1], [1], [0]]) features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),} return features, labels classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3]) classifier.fit(input_fn=_input_fn_train, steps=500) var_names = classifier.get_variable_names() self.assertGreater(len(var_names), 3) for name in var_names: classifier.get_variable_value(name) def testExport(self): """Tests export model for servo.""" def input_fn(): return { 'age': constant_op.constant([1]), 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column.sparse_column_with_hash_bucket('language', 100) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[ feature_column.real_valued_column('age'), language, ], dnn_feature_columns=[ feature_column.embedding_column( language, dimension=1), ], dnn_hidden_units=[3, 3]) classifier.fit(input_fn=input_fn, steps=100) export_dir = tempfile.mkdtemp() input_feature_key = 'examples' def serving_input_fn(): features, targets = input_fn() features[input_feature_key] = array_ops.placeholder(dtypes.string) return features, targets classifier.export( export_dir, serving_input_fn, input_feature_key, use_deprecated_input_fn=False) def testCenteredBias(self): """Tests bias is centered or not.""" def _input_fn_train(): # Create 4 rows, three (y = x), one (y=Not(x)) labels = constant_op.constant([[1], [1], [1], [0]]) features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),} return features, labels classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], enable_centered_bias=True) classifier.fit(input_fn=_input_fn_train, steps=1000) self.assertIn('binary_logistic_head/centered_bias_weight', classifier.get_variable_names()) # logodds(0.75) = 1.09861228867 self.assertAlmostEqual( 1.0986, float(classifier.get_variable_value( 'binary_logistic_head/centered_bias_weight')[0]), places=2) def testDisableCenteredBias(self): """Tests bias is centered or not.""" def _input_fn_train(): # Create 4 rows, three (y = x), one (y=Not(x)) labels = constant_op.constant([[1], [1], [1], [0]]) features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),} return features, labels classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], enable_centered_bias=False) classifier.fit(input_fn=_input_fn_train, steps=500) self.assertNotIn('centered_bias_weight', classifier.get_variable_names()) def testGlobalStepLinearOnly(self): """Tests global step update for linear-only model.""" def input_fn(): return { 'age': constant_op.constant([1]), 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], 
dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column.sparse_column_with_hash_bucket('language', 10) age = feature_column.real_valued_column('age') step_counter = _StepCounterHook() classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[age, language]) classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter]) self.assertEqual(100, step_counter.steps) def testGlobalStepDNNOnly(self): """Tests global step update for dnn-only model.""" def input_fn(): return { 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column.sparse_column_with_hash_bucket('language', 10) step_counter = _StepCounterHook() classifier = dnn_linear_combined.DNNLinearCombinedClassifier( dnn_feature_columns=[ feature_column.embedding_column(language, dimension=1)], dnn_hidden_units=[3, 3]) classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter]) self.assertEqual(100, step_counter.steps) def testGlobalStepDNNLinearCombinedBug(self): """Tests global step update for dnn-linear combined model.""" def input_fn(): return { 'age': constant_op.constant([1]), 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column.sparse_column_with_hash_bucket('language', 10) age = feature_column.real_valued_column('age') step_counter = _StepCounterHook() classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[age, language], dnn_feature_columns=[ feature_column.embedding_column(language, dimension=1)], dnn_hidden_units=[3, 3], fix_global_step_increment_bug=False) classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter]) global_step = classifier.get_variable_value('global_step') if global_step == 100: # Expected is 100, but because of the global step increment bug, is 50. self.assertEqual(50, step_counter.steps) else: # Occasionally, training stops when global_step == 101, due to a race # condition. 
self.assertEqual(51, step_counter.steps) def testGlobalStepDNNLinearCombinedBugFixed(self): """Tests global step update for dnn-linear combined model.""" def input_fn(): return { 'age': constant_op.constant([1]), 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column.sparse_column_with_hash_bucket('language', 10) age = feature_column.real_valued_column('age') step_counter = _StepCounterHook() classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[age, language], dnn_feature_columns=[ feature_column.embedding_column(language, dimension=1)], dnn_hidden_units=[3, 3], fix_global_step_increment_bug=True) classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter]) self.assertEqual(100, step_counter.steps) def testLinearOnly(self): """Tests that linear-only instantiation works.""" def input_fn(): return { 'age': constant_op.constant([1]), 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column.sparse_column_with_hash_bucket('language', 100) age = feature_column.real_valued_column('age') classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[age, language]) classifier.fit(input_fn=input_fn, steps=100) loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss'] classifier.fit(input_fn=input_fn, steps=200) loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss'] self.assertLess(loss2, loss1) variable_names = classifier.get_variable_names() self.assertNotIn('dnn/logits/biases', variable_names) self.assertNotIn('dnn/logits/weights', variable_names) self.assertIn('linear/bias_weight', variable_names) self.assertIn('linear/age/weight', variable_names) self.assertIn('linear/language/weights', variable_names) self.assertEquals( 1, len(classifier.get_variable_value('linear/age/weight'))) self.assertEquals( 100, len(classifier.get_variable_value('linear/language/weights'))) def testLinearOnlyOneFeature(self): """Tests that linear-only instantiation works for one feature only.""" def input_fn(): return { 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column.sparse_column_with_hash_bucket('language', 99) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[language]) classifier.fit(input_fn=input_fn, steps=100) loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss'] classifier.fit(input_fn=input_fn, steps=200) loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss'] self.assertLess(loss2, loss1) variable_names = classifier.get_variable_names() self.assertNotIn('dnn/logits/biases', variable_names) self.assertNotIn('dnn/logits/weights', variable_names) self.assertIn('linear/bias_weight', variable_names) self.assertIn('linear/language/weights', variable_names) self.assertEquals( 1, len(classifier.get_variable_value('linear/bias_weight'))) self.assertEquals( 99, len(classifier.get_variable_value('linear/language/weights'))) def testDNNOnly(self): """Tests that DNN-only instantiation works.""" cont_features = [feature_column.real_valued_column('feature', dimension=4)] classifier = dnn_linear_combined.DNNLinearCombinedClassifier( n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3]) classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000) 
classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100) variable_names = classifier.get_variable_names() self.assertIn('dnn/hiddenlayer_0/weights', variable_names) self.assertIn('dnn/hiddenlayer_0/biases', variable_names) self.assertIn('dnn/hiddenlayer_1/weights', variable_names) self.assertIn('dnn/hiddenlayer_1/biases', variable_names) self.assertIn('dnn/logits/weights', variable_names) self.assertIn('dnn/logits/biases', variable_names) self.assertNotIn('linear/bias_weight', variable_names) self.assertNotIn('linear/feature_BUCKETIZED/weight', variable_names) def testDNNWeightsBiasesNames(self): """Tests the names of DNN weights and biases in the checkpoints.""" def _input_fn_train(): # Create 4 rows, three (y = x), one (y=Not(x)) labels = constant_op.constant([[1], [1], [1], [0]]) features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),} return features, labels classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3]) classifier.fit(input_fn=_input_fn_train, steps=5) variable_names = classifier.get_variable_names() self.assertIn('dnn/hiddenlayer_0/weights', variable_names) self.assertIn('dnn/hiddenlayer_0/biases', variable_names) self.assertIn('dnn/hiddenlayer_1/weights', variable_names) self.assertIn('dnn/hiddenlayer_1/biases', variable_names) self.assertIn('dnn/logits/weights', variable_names) self.assertIn('dnn/logits/biases', variable_names) class DNNLinearCombinedRegressorTest(test.TestCase): def testExperimentIntegration(self): cont_features = [feature_column.real_valued_column('feature', dimension=4)] exp = experiment.Experiment( estimator=dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=cont_features, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3]), train_input_fn=test_data.iris_input_logistic_fn, eval_input_fn=test_data.iris_input_logistic_fn) exp.test() def testEstimatorContract(self): estimator_test_utils.assert_estimator_contract( self, dnn_linear_combined.DNNLinearCombinedRegressor) def testRegression_MatrixData(self): """Tests regression using matrix data as input.""" cont_features = [feature_column.real_valued_column('feature', dimension=4)] regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=cont_features, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10) scores = regressor.evaluate( input_fn=test_data.iris_input_logistic_fn, steps=1) self.assertIn('loss', scores.keys()) def testRegression_TensorData(self): """Tests regression using tensor data as input.""" def _input_fn(): # Create 4 rows of (y = x) labels = constant_op.constant([[100.], [3.], [2.], [2.]]) features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])} return features, labels classifier = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) classifier.fit(input_fn=_input_fn, steps=10) classifier.evaluate(input_fn=_input_fn, steps=1) def testLoss(self): """Tests loss calculation.""" def _input_fn_train(): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) # The algorithm should learn (y = 0.25). 
labels = constant_op.constant([[1.], [0.], [0.], [0.]]) features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),} return features, labels regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn_train, steps=100) scores = regressor.evaluate(input_fn=_input_fn_train, steps=1) # Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875 self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1) def testLossWithWeights(self): """Tests loss calculation with weights.""" def _input_fn_train(): # 4 rows with equal weight, one of them (y = x), three of them (y=Not(x)) # The algorithm should learn (y = 0.25). labels = constant_op.constant([[1.], [0.], [0.], [0.]]) features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[1.], [1.], [1.], [1.]]) } return features, labels def _input_fn_eval(): # 4 rows, with different weights. labels = constant_op.constant([[1.], [0.], [0.], [0.]]) features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[7.], [1.], [1.], [1.]]) } return features, labels regressor = dnn_linear_combined.DNNLinearCombinedRegressor( weight_column_name='w', linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn_train, steps=100) scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1) # Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125 self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1) def testTrainWithWeights(self): """Tests training with given weight column.""" def _input_fn_train(): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) # First row has more weight than others. Model should fit (y=x) better # than (y=Not(x)) due to the relative higher weight of the first row. labels = constant_op.constant([[1.], [0.], [0.], [0.]]) features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[100.], [3.], [2.], [2.]]) } return features, labels def _input_fn_eval(): # Create 4 rows (y = x) labels = constant_op.constant([[1.], [1.], [1.], [1.]]) features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[1.], [1.], [1.], [1.]]) } return features, labels regressor = dnn_linear_combined.DNNLinearCombinedRegressor( weight_column_name='w', linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn_train, steps=100) scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1) # The model should learn (y = x) because of the weights, so the loss should # be close to zero. 
self.assertLess(scores['loss'], 0.2) def testPredict_AsIterableFalse(self): """Tests predict method with as_iterable=False.""" labels = [1., 0., 0.2] def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } return features, constant_op.constant(labels, dtype=dtypes.float32) language_column = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20) regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[ language_column, feature_column.real_valued_column('age') ], dnn_feature_columns=[ feature_column.embedding_column( language_column, dimension=1), feature_column.real_valued_column('age') ], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=10) scores = regressor.evaluate(input_fn=_input_fn, steps=1) self.assertIn('loss', scores.keys()) regressor.predict_scores(input_fn=_input_fn, as_iterable=False) def testPredict_AsIterable(self): """Tests predict method with as_iterable=True.""" labels = [1., 0., 0.2] def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } return features, constant_op.constant(labels, dtype=dtypes.float32) language_column = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20) regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[ language_column, feature_column.real_valued_column('age') ], dnn_feature_columns=[ feature_column.embedding_column( language_column, dimension=1), feature_column.real_valued_column('age') ], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=10) scores = regressor.evaluate(input_fn=_input_fn, steps=1) self.assertIn('loss', scores.keys()) predict_input_fn = functools.partial(_input_fn, num_epochs=1) regressor.predict_scores(input_fn=predict_input_fn, as_iterable=True) def testCustomMetrics(self): """Tests custom evaluation metrics.""" def _input_fn(num_epochs=None): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) labels = constant_op.constant([[1.], [0.], [0.], [0.]]) features = { 'x': input_lib.limit_epochs( array_ops.ones( shape=[4, 1], dtype=dtypes.float32), num_epochs=num_epochs) } return features, labels def _my_metric_op(predictions, labels): return math_ops.reduce_sum(math_ops.multiply(predictions, labels)) regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=10) scores = regressor.evaluate( input_fn=_input_fn, steps=1, metrics={ 'my_error': metric_ops.streaming_mean_squared_error, ('my_metric', 'scores'): _my_metric_op }) self.assertIn('loss', set(scores.keys())) self.assertIn('my_error', set(scores.keys())) self.assertIn('my_metric', set(scores.keys())) predict_input_fn = functools.partial(_input_fn, num_epochs=1) predictions = np.array(list(regressor.predict_scores( input_fn=predict_input_fn))) self.assertAlmostEqual( 
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions), scores['my_error']) # Tests the case that the 2nd element of the key is not "scores". with self.assertRaises(KeyError): regressor.evaluate( input_fn=_input_fn, steps=1, metrics={ ('my_error', 'predictions'): metric_ops.streaming_mean_squared_error }) # Tests the case where the tuple of the key doesn't have 2 elements. with self.assertRaises(ValueError): regressor.evaluate( input_fn=_input_fn, steps=1, metrics={ ('bad_length_name', 'scores', 'bad_length'): metric_ops.streaming_mean_squared_error }) def testCustomMetricsWithMetricSpec(self): """Tests custom evaluation metrics.""" def _input_fn(num_epochs=None): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) labels = constant_op.constant([[1.], [0.], [0.], [0.]]) features = { 'x': input_lib.limit_epochs( array_ops.ones( shape=[4, 1], dtype=dtypes.float32), num_epochs=num_epochs) } return features, labels def _my_metric_op(predictions, labels): return math_ops.reduce_sum(math_ops.multiply(predictions, labels)) regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=5) scores = regressor.evaluate( input_fn=_input_fn, steps=1, metrics={ 'my_error': MetricSpec( metric_fn=metric_ops.streaming_mean_squared_error, prediction_key='scores'), 'my_metric': MetricSpec( metric_fn=_my_metric_op, prediction_key='scores') }) self.assertIn('loss', set(scores.keys())) self.assertIn('my_error', set(scores.keys())) self.assertIn('my_metric', set(scores.keys())) predict_input_fn = functools.partial(_input_fn, num_epochs=1) predictions = np.array(list(regressor.predict_scores( input_fn=predict_input_fn))) self.assertAlmostEqual( _sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions), scores['my_error']) # Tests the case where the prediction_key is not "scores". 
with self.assertRaisesRegexp(KeyError, 'bad_type'): regressor.evaluate( input_fn=_input_fn, steps=1, metrics={ 'bad_name': MetricSpec( metric_fn=metric_ops.streaming_auc, prediction_key='bad_type') }) def testExport(self): """Tests export model for servo.""" labels = [1., 0., 0.2] def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } return features, constant_op.constant(labels, dtype=dtypes.float32) language_column = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20) regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[ language_column, feature_column.real_valued_column('age') ], dnn_feature_columns=[ feature_column.embedding_column( language_column, dimension=1), ], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=10) export_dir = tempfile.mkdtemp() input_feature_key = 'examples' def serving_input_fn(): features, targets = _input_fn() features[input_feature_key] = array_ops.placeholder(dtypes.string) return features, targets regressor.export( export_dir, serving_input_fn, input_feature_key, use_deprecated_input_fn=False) def testTrainSaveLoad(self): """Tests regression with restarting training / evaluate.""" def _input_fn(num_epochs=None): # Create 4 rows of (y = x) labels = constant_op.constant([[100.], [3.], [2.], [2.]]) features = { 'x': input_lib.limit_epochs( constant_op.constant([[100.], [3.], [2.], [2.]]), num_epochs=num_epochs) } return features, labels model_dir = tempfile.mkdtemp() # pylint: disable=g-long-lambda new_regressor = lambda: dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], model_dir=model_dir, config=run_config.RunConfig(tf_random_seed=1)) predict_input_fn = functools.partial(_input_fn, num_epochs=1) regressor = new_regressor() regressor.fit(input_fn=_input_fn, steps=10) predictions = list(regressor.predict_scores(input_fn=predict_input_fn)) del regressor regressor = new_regressor() predictions2 = list(regressor.predict_scores(input_fn=predict_input_fn)) self.assertAllClose(predictions, predictions2) def testTrainWithPartitionedVariables(self): """Tests training with partitioned variables.""" def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32) # The given hash_bucket_size results in variables larger than the # default min_slice_size attribute, so the variables are partitioned. language_column = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=2e7) tf_config = { 'cluster': { run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1'] } } with test.mock.patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}): config = run_config.RunConfig(tf_random_seed=1) # Because we did not start a distributed cluster, we need to pass an # empty ClusterSpec, otherwise the device_setter will look for # distributed jobs, such as "/job:ps" which are not present. 
config._cluster_spec = server_lib.ClusterSpec({}) regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[ language_column, feature_column.real_valued_column('age') ], dnn_feature_columns=[ feature_column.embedding_column( language_column, dimension=1), feature_column.real_valued_column('age') ], dnn_hidden_units=[3, 3], config=config) regressor.fit(input_fn=_input_fn, steps=100) scores = regressor.evaluate(input_fn=_input_fn, steps=1) self.assertIn('loss', scores.keys()) def testDisableCenteredBias(self): """Tests that we can disable centered bias.""" def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32) language_column = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20) regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[ language_column, feature_column.real_valued_column('age') ], dnn_feature_columns=[ feature_column.embedding_column( language_column, dimension=1), feature_column.real_valued_column('age') ], dnn_hidden_units=[3, 3], enable_centered_bias=False, config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=100) scores = regressor.evaluate(input_fn=_input_fn, steps=1) self.assertIn('loss', scores.keys()) def testLinearOnly(self): """Tests linear-only instantiation and training.""" def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32) language_column = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20) regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[ language_column, feature_column.real_valued_column('age') ], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=100) scores = regressor.evaluate(input_fn=_input_fn, steps=1) self.assertIn('loss', scores.keys()) def testDNNOnly(self): """Tests DNN-only instantiation and training.""" def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32) language_column = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20) regressor = dnn_linear_combined.DNNLinearCombinedRegressor( dnn_feature_columns=[ feature_column.embedding_column( language_column, dimension=1), feature_column.real_valued_column('age') ], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=100) scores = regressor.evaluate(input_fn=_input_fn, steps=1) self.assertIn('loss', scores.keys()) class FeatureEngineeringFunctionTest(test.TestCase): """Tests feature_engineering_fn.""" def testNoneFeatureEngineeringFn(self): def input_fn(): # Create 4 rows of (y = x) labels = constant_op.constant([[100.], [3.], [2.], [2.]]) features = {'x': 
constant_op.constant([[100.], [3.], [2.], [2.]])} return features, labels def feature_engineering_fn(features, labels): _, _ = features, labels labels = constant_op.constant([[1000.], [30.], [20.], [20.]]) features = {'x': constant_op.constant([[1000.], [30.], [20.], [20.]])} return features, labels estimator_with_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1), feature_engineering_fn=feature_engineering_fn) estimator_with_fe_fn.fit(input_fn=input_fn, steps=110) estimator_without_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) estimator_without_fe_fn.fit(input_fn=input_fn, steps=110) # predictions = y prediction_with_fe_fn = next( estimator_with_fe_fn.predict_scores( input_fn=input_fn, as_iterable=True)) self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0) prediction_without_fe_fn = next( estimator_without_fe_fn.predict_scores( input_fn=input_fn, as_iterable=True)) self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0) if __name__ == '__main__': test.main()
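The loss and centered-bias tests above hard-code expected values that follow directly from the label distributions used in their input functions. As a minimal sanity-check sketch, not part of the original dnn_linear_combined_test.py, the same arithmetic can be reproduced in plain Python; the only constants used are the ones already quoted in the test comments (0.1875, 0.4125 and logodds(0.75) ~= 1.0986).

# Illustrative sketch only -- not part of the original test file.
# It re-derives the expected values asserted in testLoss, testLossWithWeights
# and testCenteredBias.
import math

# testLoss / testLossWithWeights: labels [1, 0, 0, 0], learned prediction 0.25.
prediction = 0.25
labels = [1., 0., 0., 0.]

# Unweighted average square loss: (0.75^2 + 3 * 0.25^2) / 4 = 0.1875
avg_sq_loss = sum((y - prediction) ** 2 for y in labels) / len(labels)
assert abs(avg_sq_loss - 0.1875) < 1e-9

# Evaluation weights [7, 1, 1, 1]: (7 * 0.75^2 + 3 * 0.25^2) / 10 = 0.4125
weights = [7., 1., 1., 1.]
weighted_loss = (sum(w * (y - prediction) ** 2 for w, y in zip(weights, labels))
                 / sum(weights))
assert abs(weighted_loss - 0.4125) < 1e-9

# testCenteredBias: labels [1, 1, 1, 0], so the centered bias converges to
# logodds(0.75) = ln(0.75 / 0.25) ~= 1.0986
assert abs(math.log(0.75 / 0.25) - 1.0986) < 1e-3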
apache-2.0
btabibian/scikit-learn
sklearn/tree/tree.py
11
50091
""" This module gathers tree-based methods, including decision, regression and randomized trees. Single and multi-output problems are both handled. """ # Authors: Gilles Louppe <g.louppe@gmail.com> # Peter Prettenhofer <peter.prettenhofer@gmail.com> # Brian Holt <bdholt1@gmail.com> # Noel Dawe <noel@dawe.me> # Satrajit Gosh <satrajit.ghosh@gmail.com> # Joly Arnaud <arnaud.v.joly@gmail.com> # Fares Hedayati <fares.hedayati@gmail.com> # Nelson Liu <nelson@nelsonliu.me> # # License: BSD 3 clause from __future__ import division import numbers import warnings from abc import ABCMeta from abc import abstractmethod from math import ceil import numpy as np from scipy.sparse import issparse from ..base import BaseEstimator from ..base import ClassifierMixin from ..base import RegressorMixin from ..externals import six from ..utils import check_array from ..utils import check_random_state from ..utils import compute_sample_weight from ..utils.multiclass import check_classification_targets from ..utils.validation import check_is_fitted from ..exceptions import NotFittedError from ._criterion import Criterion from ._splitter import Splitter from ._tree import DepthFirstTreeBuilder from ._tree import BestFirstTreeBuilder from ._tree import Tree from . import _tree, _splitter, _criterion __all__ = ["DecisionTreeClassifier", "DecisionTreeRegressor", "ExtraTreeClassifier", "ExtraTreeRegressor"] # ============================================================================= # Types and constants # ============================================================================= DTYPE = _tree.DTYPE DOUBLE = _tree.DOUBLE CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy} CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE, "mae": _criterion.MAE} DENSE_SPLITTERS = {"best": _splitter.BestSplitter, "random": _splitter.RandomSplitter} SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter, "random": _splitter.RandomSparseSplitter} # ============================================================================= # Base decision tree # ============================================================================= class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator)): """Base class for decision trees. Warning: This class should not be used directly. Use derived classes instead. 
""" @abstractmethod def __init__(self, criterion, splitter, max_depth, min_samples_split, min_samples_leaf, min_weight_fraction_leaf, max_features, max_leaf_nodes, random_state, min_impurity_decrease, min_impurity_split, class_weight=None, presort=False): self.criterion = criterion self.splitter = splitter self.max_depth = max_depth self.min_samples_split = min_samples_split self.min_samples_leaf = min_samples_leaf self.min_weight_fraction_leaf = min_weight_fraction_leaf self.max_features = max_features self.random_state = random_state self.max_leaf_nodes = max_leaf_nodes self.min_impurity_decrease = min_impurity_decrease self.min_impurity_split = min_impurity_split self.class_weight = class_weight self.presort = presort def fit(self, X, y, sample_weight=None, check_input=True, X_idx_sorted=None): random_state = check_random_state(self.random_state) if check_input: X = check_array(X, dtype=DTYPE, accept_sparse="csc") y = check_array(y, ensure_2d=False, dtype=None) if issparse(X): X.sort_indices() if X.indices.dtype != np.intc or X.indptr.dtype != np.intc: raise ValueError("No support for np.int64 index based " "sparse matrices") # Determine output settings n_samples, self.n_features_ = X.shape is_classification = isinstance(self, ClassifierMixin) y = np.atleast_1d(y) expanded_class_weight = None if y.ndim == 1: # reshape is necessary to preserve the data contiguity against vs # [:, np.newaxis] that does not. y = np.reshape(y, (-1, 1)) self.n_outputs_ = y.shape[1] if is_classification: check_classification_targets(y) y = np.copy(y) self.classes_ = [] self.n_classes_ = [] if self.class_weight is not None: y_original = np.copy(y) y_encoded = np.zeros(y.shape, dtype=np.int) for k in range(self.n_outputs_): classes_k, y_encoded[:, k] = np.unique(y[:, k], return_inverse=True) self.classes_.append(classes_k) self.n_classes_.append(classes_k.shape[0]) y = y_encoded if self.class_weight is not None: expanded_class_weight = compute_sample_weight( self.class_weight, y_original) else: self.classes_ = [None] * self.n_outputs_ self.n_classes_ = [1] * self.n_outputs_ self.n_classes_ = np.array(self.n_classes_, dtype=np.intp) if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous: y = np.ascontiguousarray(y, dtype=DOUBLE) # Check parameters max_depth = ((2 ** 31) - 1 if self.max_depth is None else self.max_depth) max_leaf_nodes = (-1 if self.max_leaf_nodes is None else self.max_leaf_nodes) if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)): if not 1 <= self.min_samples_leaf: raise ValueError("min_samples_leaf must be at least 1 " "or in (0, 0.5], got %s" % self.min_samples_leaf) min_samples_leaf = self.min_samples_leaf else: # float if not 0. < self.min_samples_leaf <= 0.5: raise ValueError("min_samples_leaf must be at least 1 " "or in (0, 0.5], got %s" % self.min_samples_leaf) min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples)) if isinstance(self.min_samples_split, (numbers.Integral, np.integer)): if not 2 <= self.min_samples_split: raise ValueError("min_samples_split must be an integer " "greater than 1 or a float in (0.0, 1.0]; " "got the integer %s" % self.min_samples_split) min_samples_split = self.min_samples_split else: # float if not 0. 
< self.min_samples_split <= 1.: raise ValueError("min_samples_split must be an integer " "greater than 1 or a float in (0.0, 1.0]; " "got the float %s" % self.min_samples_split) min_samples_split = int(ceil(self.min_samples_split * n_samples)) min_samples_split = max(2, min_samples_split) min_samples_split = max(min_samples_split, 2 * min_samples_leaf) if isinstance(self.max_features, six.string_types): if self.max_features == "auto": if is_classification: max_features = max(1, int(np.sqrt(self.n_features_))) else: max_features = self.n_features_ elif self.max_features == "sqrt": max_features = max(1, int(np.sqrt(self.n_features_))) elif self.max_features == "log2": max_features = max(1, int(np.log2(self.n_features_))) else: raise ValueError( 'Invalid value for max_features. Allowed string ' 'values are "auto", "sqrt" or "log2".') elif self.max_features is None: max_features = self.n_features_ elif isinstance(self.max_features, (numbers.Integral, np.integer)): max_features = self.max_features else: # float if self.max_features > 0.0: max_features = max(1, int(self.max_features * self.n_features_)) else: max_features = 0 self.max_features_ = max_features if len(y) != n_samples: raise ValueError("Number of labels=%d does not match " "number of samples=%d" % (len(y), n_samples)) if not 0 <= self.min_weight_fraction_leaf <= 0.5: raise ValueError("min_weight_fraction_leaf must in [0, 0.5]") if max_depth <= 0: raise ValueError("max_depth must be greater than zero. ") if not (0 < max_features <= self.n_features_): raise ValueError("max_features must be in (0, n_features]") if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)): raise ValueError("max_leaf_nodes must be integral number but was " "%r" % max_leaf_nodes) if -1 < max_leaf_nodes < 2: raise ValueError(("max_leaf_nodes {0} must be either None " "or larger than 1").format(max_leaf_nodes)) if sample_weight is not None: if (getattr(sample_weight, "dtype", None) != DOUBLE or not sample_weight.flags.contiguous): sample_weight = np.ascontiguousarray( sample_weight, dtype=DOUBLE) if len(sample_weight.shape) > 1: raise ValueError("Sample weights array has more " "than one dimension: %d" % len(sample_weight.shape)) if len(sample_weight) != n_samples: raise ValueError("Number of weights=%d does not match " "number of samples=%d" % (len(sample_weight), n_samples)) if expanded_class_weight is not None: if sample_weight is not None: sample_weight = sample_weight * expanded_class_weight else: sample_weight = expanded_class_weight # Set min_weight_leaf from min_weight_fraction_leaf if sample_weight is None: min_weight_leaf = (self.min_weight_fraction_leaf * n_samples) else: min_weight_leaf = (self.min_weight_fraction_leaf * np.sum(sample_weight)) if self.min_impurity_split is not None: warnings.warn("The min_impurity_split parameter is deprecated and" " will be removed in version 0.21. " "Use the min_impurity_decrease parameter instead.", DeprecationWarning) min_impurity_split = self.min_impurity_split else: min_impurity_split = 1e-7 if min_impurity_split < 0.: raise ValueError("min_impurity_split must be greater than " "or equal to 0") if self.min_impurity_decrease < 0.: raise ValueError("min_impurity_decrease must be greater than " "or equal to 0") presort = self.presort # Allow presort to be 'auto', which means True if the dataset is dense, # otherwise it will be False. 
if self.presort == 'auto' and issparse(X): presort = False elif self.presort == 'auto': presort = True if presort is True and issparse(X): raise ValueError("Presorting is not supported for sparse " "matrices.") # If multiple trees are built on the same dataset, we only want to # presort once. Splitters now can accept presorted indices if desired, # but do not handle any presorting themselves. Ensemble algorithms # which desire presorting must do presorting themselves and pass that # matrix into each tree. if X_idx_sorted is None and presort: X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0), dtype=np.int32) if presort and X_idx_sorted.shape != X.shape: raise ValueError("The shape of X (X.shape = {}) doesn't match " "the shape of X_idx_sorted (X_idx_sorted" ".shape = {})".format(X.shape, X_idx_sorted.shape)) # Build tree criterion = self.criterion if not isinstance(criterion, Criterion): if is_classification: criterion = CRITERIA_CLF[self.criterion](self.n_outputs_, self.n_classes_) else: criterion = CRITERIA_REG[self.criterion](self.n_outputs_, n_samples) SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS splitter = self.splitter if not isinstance(self.splitter, Splitter): splitter = SPLITTERS[self.splitter](criterion, self.max_features_, min_samples_leaf, min_weight_leaf, random_state, self.presort) self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_) # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise if max_leaf_nodes < 0: builder = DepthFirstTreeBuilder(splitter, min_samples_split, min_samples_leaf, min_weight_leaf, max_depth, self.min_impurity_decrease, min_impurity_split) else: builder = BestFirstTreeBuilder(splitter, min_samples_split, min_samples_leaf, min_weight_leaf, max_depth, max_leaf_nodes, self.min_impurity_decrease, min_impurity_split) builder.build(self.tree_, X, y, sample_weight, X_idx_sorted) if self.n_outputs_ == 1: self.n_classes_ = self.n_classes_[0] self.classes_ = self.classes_[0] return self def _validate_X_predict(self, X, check_input): """Validate X whenever one tries to predict, apply, predict_proba""" if check_input: X = check_array(X, dtype=DTYPE, accept_sparse="csr") if issparse(X) and (X.indices.dtype != np.intc or X.indptr.dtype != np.intc): raise ValueError("No support for np.int64 index based " "sparse matrices") n_features = X.shape[1] if self.n_features_ != n_features: raise ValueError("Number of features of the model must " "match the input. Model n_features is %s and " "input n_features is %s " % (self.n_features_, n_features)) return X def predict(self, X, check_input=True): """Predict class or regression value for X. For a classification model, the predicted class for each sample in X is returned. For a regression model, the predicted value based on X is returned. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. Returns ------- y : array of shape = [n_samples] or [n_samples, n_outputs] The predicted classes, or the predict values. 
""" check_is_fitted(self, 'tree_') X = self._validate_X_predict(X, check_input) proba = self.tree_.predict(X) n_samples = X.shape[0] # Classification if isinstance(self, ClassifierMixin): if self.n_outputs_ == 1: return self.classes_.take(np.argmax(proba, axis=1), axis=0) else: predictions = np.zeros((n_samples, self.n_outputs_)) for k in range(self.n_outputs_): predictions[:, k] = self.classes_[k].take( np.argmax(proba[:, k], axis=1), axis=0) return predictions # Regression else: if self.n_outputs_ == 1: return proba[:, 0] else: return proba[:, :, 0] def apply(self, X, check_input=True): """ Returns the index of the leaf that each sample is predicted as. .. versionadded:: 0.17 Parameters ---------- X : array_like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. Returns ------- X_leaves : array_like, shape = [n_samples,] For each datapoint x in X, return the index of the leaf x ends up in. Leaves are numbered within ``[0; self.tree_.node_count)``, possibly with gaps in the numbering. """ check_is_fitted(self, 'tree_') X = self._validate_X_predict(X, check_input) return self.tree_.apply(X) def decision_path(self, X, check_input=True): """Return the decision path in the tree .. versionadded:: 0.18 Parameters ---------- X : array_like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. Returns ------- indicator : sparse csr array, shape = [n_samples, n_nodes] Return a node indicator matrix where non zero elements indicates that the samples goes through the nodes. """ X = self._validate_X_predict(X, check_input) return self.tree_.decision_path(X) @property def feature_importances_(self): """Return the feature importances. The importance of a feature is computed as the (normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance. Returns ------- feature_importances_ : array, shape = [n_features] """ check_is_fitted(self, 'tree_') return self.tree_.compute_feature_importances() # ============================================================================= # Public estimators # ============================================================================= class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin): """A decision tree classifier. Read more in the :ref:`User Guide <tree>`. Parameters ---------- criterion : string, optional (default="gini") The function to measure the quality of a split. Supported criteria are "gini" for the Gini impurity and "entropy" for the information gain. splitter : string, optional (default="best") The strategy used to choose the split at each node. Supported strategies are "best" to choose the best split and "random" to choose the best random split. max_features : int, float, string or None, optional (default=None) The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a percentage and `int(max_features * n_features)` features are considered at each split. 
- If "auto", then `max_features=sqrt(n_features)`. - If "sqrt", then `max_features=sqrt(n_features)`. - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. max_depth : int or None, optional (default=None) The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples. min_samples_split : int, float, optional (default=2) The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number. - If float, then `min_samples_split` is a percentage and `ceil(min_samples_split * n_samples)` are the minimum number of samples for each split. .. versionchanged:: 0.18 Added float values for percentages. min_samples_leaf : int, float, optional (default=1) The minimum number of samples required to be at a leaf node: - If int, then consider `min_samples_leaf` as the minimum number. - If float, then `min_samples_leaf` is a percentage and `ceil(min_samples_leaf * n_samples)` are the minimum number of samples for each node. .. versionchanged:: 0.18 Added float values for percentages. min_weight_fraction_leaf : float, optional (default=0.) The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided. max_leaf_nodes : int or None, optional (default=None) Grow a tree with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. class_weight : dict, list of dicts, "balanced" or None, optional (default=None) Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. For multi-output problems, a list of dicts can be provided in the same order as the columns of y. Note that for multioutput (including multilabel) weights should be defined for each class of every column in its own dict. For example, for four-class multilabel classification weights should be [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of [{1:1}, {2:5}, {3:1}, {4:1}]. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` For multi-output, the weights of each column of y will be multiplied. Note that these weights will be multiplied with sample_weight (passed through the fit method) if sample_weight is specified. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. min_impurity_split : float, Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf. .. deprecated:: 0.19 ``min_impurity_split`` has been deprecated in favor of ``min_impurity_decrease`` in 0.19 and will be removed in 0.21. Use ``min_impurity_decrease`` instead. min_impurity_decrease : float, optional (default=0.) 
A node will be split if this split induces a decrease of the impurity greater than or equal to this value. The weighted impurity decrease equation is the following:: N_t / N * (impurity - N_t_R / N_t * right_impurity - N_t_L / N_t * left_impurity) where ``N`` is the total number of samples, ``N_t`` is the number of samples at the current node, ``N_t_L`` is the number of samples in the left child, and ``N_t_R`` is the number of samples in the right child. ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, if ``sample_weight`` is passed. .. versionadded:: 0.19 presort : bool, optional (default=False) Whether to presort the data to speed up the finding of best splits in fitting. For the default settings of a decision tree on large datasets, setting this to true may slow down the training process. When using either a smaller dataset or a restricted depth, this may speed up the training. Attributes ---------- classes_ : array of shape = [n_classes] or a list of such arrays The classes labels (single output problem), or a list of arrays of class labels (multi-output problem). feature_importances_ : array of shape = [n_features] The feature importances. The higher, the more important the feature. The importance of a feature is computed as the (normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance [4]_. max_features_ : int, The inferred value of max_features. n_classes_ : int or list The number of classes (for single output problems), or a list containing the number of classes for each output (for multi-output problems). n_features_ : int The number of features when ``fit`` is performed. n_outputs_ : int The number of outputs when ``fit`` is performed. tree_ : Tree object The underlying Tree object. Notes ----- The default values for the parameters controlling the size of the trees (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and unpruned trees which can potentially be very large on some data sets. To reduce memory consumption, the complexity and size of the trees should be controlled by setting those parameter values. The features are always randomly permuted at each split. Therefore, the best found split may vary, even with the same training data and ``max_features=n_features``, if the improvement of the criterion is identical for several splits enumerated during the search of the best split. To obtain a deterministic behaviour during fitting, ``random_state`` has to be fixed. See also -------- DecisionTreeRegressor References ---------- .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification and Regression Trees", Wadsworth, Belmont, CA, 1984. .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical Learning", Springer, 2009. .. [4] L. Breiman, and A. Cutler, "Random Forests", http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm Examples -------- >>> from sklearn.datasets import load_iris >>> from sklearn.model_selection import cross_val_score >>> from sklearn.tree import DecisionTreeClassifier >>> clf = DecisionTreeClassifier(random_state=0) >>> iris = load_iris() >>> cross_val_score(clf, iris.data, iris.target, cv=10) ... # doctest: +SKIP ... array([ 1. , 0.93..., 0.86..., 0.93..., 0.93..., 0.93..., 0.93..., 1. , 0.93..., 1. 
]) """ def __init__(self, criterion="gini", splitter="best", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_features=None, random_state=None, max_leaf_nodes=None, min_impurity_decrease=0., min_impurity_split=None, class_weight=None, presort=False): super(DecisionTreeClassifier, self).__init__( criterion=criterion, splitter=splitter, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_features=max_features, max_leaf_nodes=max_leaf_nodes, class_weight=class_weight, random_state=random_state, min_impurity_decrease=min_impurity_decrease, min_impurity_split=min_impurity_split, presort=presort) def fit(self, X, y, sample_weight=None, check_input=True, X_idx_sorted=None): """Build a decision tree classifier from the training set (X, y). Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The training input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csc_matrix``. y : array-like, shape = [n_samples] or [n_samples, n_outputs] The target values (class labels) as integers or strings. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. Splits are also ignored if they would result in any single class carrying a negative weight in either child node. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. X_idx_sorted : array-like, shape = [n_samples, n_features], optional The indexes of the sorted training input samples. If many tree are grown on the same dataset, this allows the ordering to be cached between trees. If None, the data will be sorted here. Don't use this parameter unless you know what to do. Returns ------- self : object Returns self. """ super(DecisionTreeClassifier, self).fit( X, y, sample_weight=sample_weight, check_input=check_input, X_idx_sorted=X_idx_sorted) return self def predict_proba(self, X, check_input=True): """Predict class probabilities of the input samples X. The predicted class probability is the fraction of samples of the same class in a leaf. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- p : array of shape = [n_samples, n_classes], or a list of n_outputs such arrays if n_outputs > 1. The class probabilities of the input samples. The order of the classes corresponds to that in the attribute `classes_`. 
""" check_is_fitted(self, 'tree_') X = self._validate_X_predict(X, check_input) proba = self.tree_.predict(X) if self.n_outputs_ == 1: proba = proba[:, :self.n_classes_] normalizer = proba.sum(axis=1)[:, np.newaxis] normalizer[normalizer == 0.0] = 1.0 proba /= normalizer return proba else: all_proba = [] for k in range(self.n_outputs_): proba_k = proba[:, k, :self.n_classes_[k]] normalizer = proba_k.sum(axis=1)[:, np.newaxis] normalizer[normalizer == 0.0] = 1.0 proba_k /= normalizer all_proba.append(proba_k) return all_proba def predict_log_proba(self, X): """Predict class log-probabilities of the input samples X. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- p : array of shape = [n_samples, n_classes], or a list of n_outputs such arrays if n_outputs > 1. The class log-probabilities of the input samples. The order of the classes corresponds to that in the attribute `classes_`. """ proba = self.predict_proba(X) if self.n_outputs_ == 1: return np.log(proba) else: for k in range(self.n_outputs_): proba[k] = np.log(proba[k]) return proba class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin): """A decision tree regressor. Read more in the :ref:`User Guide <tree>`. Parameters ---------- criterion : string, optional (default="mse") The function to measure the quality of a split. Supported criteria are "mse" for the mean squared error, which is equal to variance reduction as feature selection criterion, and "mae" for the mean absolute error. .. versionadded:: 0.18 Mean Absolute Error (MAE) criterion. splitter : string, optional (default="best") The strategy used to choose the split at each node. Supported strategies are "best" to choose the best split and "random" to choose the best random split. max_features : int, float, string or None, optional (default=None) The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a percentage and `int(max_features * n_features)` features are considered at each split. - If "auto", then `max_features=n_features`. - If "sqrt", then `max_features=sqrt(n_features)`. - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. max_depth : int or None, optional (default=None) The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples. min_samples_split : int, float, optional (default=2) The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number. - If float, then `min_samples_split` is a percentage and `ceil(min_samples_split * n_samples)` are the minimum number of samples for each split. .. versionchanged:: 0.18 Added float values for percentages. min_samples_leaf : int, float, optional (default=1) The minimum number of samples required to be at a leaf node: - If int, then consider `min_samples_leaf` as the minimum number. - If float, then `min_samples_leaf` is a percentage and `ceil(min_samples_leaf * n_samples)` are the minimum number of samples for each node. .. 
versionchanged:: 0.18 Added float values for percentages. min_weight_fraction_leaf : float, optional (default=0.) The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided. max_leaf_nodes : int or None, optional (default=None) Grow a tree with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. min_impurity_split : float, Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf. .. deprecated:: 0.19 ``min_impurity_split`` has been deprecated in favor of ``min_impurity_decrease`` in 0.19 and will be removed in 0.21. Use ``min_impurity_decrease`` instead. min_impurity_decrease : float, optional (default=0.) A node will be split if this split induces a decrease of the impurity greater than or equal to this value. The weighted impurity decrease equation is the following:: N_t / N * (impurity - N_t_R / N_t * right_impurity - N_t_L / N_t * left_impurity) where ``N`` is the total number of samples, ``N_t`` is the number of samples at the current node, ``N_t_L`` is the number of samples in the left child, and ``N_t_R`` is the number of samples in the right child. ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, if ``sample_weight`` is passed. .. versionadded:: 0.19 presort : bool, optional (default=False) Whether to presort the data to speed up the finding of best splits in fitting. For the default settings of a decision tree on large datasets, setting this to true may slow down the training process. When using either a smaller dataset or a restricted depth, this may speed up the training. Attributes ---------- feature_importances_ : array of shape = [n_features] The feature importances. The higher, the more important the feature. The importance of a feature is computed as the (normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance [4]_. max_features_ : int, The inferred value of max_features. n_features_ : int The number of features when ``fit`` is performed. n_outputs_ : int The number of outputs when ``fit`` is performed. tree_ : Tree object The underlying Tree object. Notes ----- The default values for the parameters controlling the size of the trees (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and unpruned trees which can potentially be very large on some data sets. To reduce memory consumption, the complexity and size of the trees should be controlled by setting those parameter values. The features are always randomly permuted at each split. Therefore, the best found split may vary, even with the same training data and ``max_features=n_features``, if the improvement of the criterion is identical for several splits enumerated during the search of the best split. To obtain a deterministic behaviour during fitting, ``random_state`` has to be fixed. See also -------- DecisionTreeClassifier References ---------- .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning .. [2] L. Breiman, J. Friedman, R. Olshen, and C. 
Stone, "Classification and Regression Trees", Wadsworth, Belmont, CA, 1984. .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical Learning", Springer, 2009. .. [4] L. Breiman, and A. Cutler, "Random Forests", http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm Examples -------- >>> from sklearn.datasets import load_boston >>> from sklearn.model_selection import cross_val_score >>> from sklearn.tree import DecisionTreeRegressor >>> boston = load_boston() >>> regressor = DecisionTreeRegressor(random_state=0) >>> cross_val_score(regressor, boston.data, boston.target, cv=10) ... # doctest: +SKIP ... array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75..., 0.07..., 0.29..., 0.33..., -1.42..., -1.77...]) """ def __init__(self, criterion="mse", splitter="best", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_features=None, random_state=None, max_leaf_nodes=None, min_impurity_decrease=0., min_impurity_split=None, presort=False): super(DecisionTreeRegressor, self).__init__( criterion=criterion, splitter=splitter, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_features=max_features, max_leaf_nodes=max_leaf_nodes, random_state=random_state, min_impurity_decrease=min_impurity_decrease, min_impurity_split=min_impurity_split, presort=presort) def fit(self, X, y, sample_weight=None, check_input=True, X_idx_sorted=None): """Build a decision tree regressor from the training set (X, y). Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The training input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csc_matrix``. y : array-like, shape = [n_samples] or [n_samples, n_outputs] The target values (real numbers). Use ``dtype=np.float64`` and ``order='C'`` for maximum efficiency. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. X_idx_sorted : array-like, shape = [n_samples, n_features], optional The indexes of the sorted training input samples. If many tree are grown on the same dataset, this allows the ordering to be cached between trees. If None, the data will be sorted here. Don't use this parameter unless you know what to do. Returns ------- self : object Returns self. """ super(DecisionTreeRegressor, self).fit( X, y, sample_weight=sample_weight, check_input=check_input, X_idx_sorted=X_idx_sorted) return self class ExtraTreeClassifier(DecisionTreeClassifier): """An extremely randomized tree classifier. Extra-trees differ from classic decision trees in the way they are built. When looking for the best split to separate the samples of a node into two groups, random splits are drawn for each of the `max_features` randomly selected features and the best split among those is chosen. When `max_features` is set 1, this amounts to building a totally random decision tree. Warning: Extra-trees should only be used within ensemble methods. Read more in the :ref:`User Guide <tree>`. 
See also -------- ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor Notes ----- The default values for the parameters controlling the size of the trees (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and unpruned trees which can potentially be very large on some data sets. To reduce memory consumption, the complexity and size of the trees should be controlled by setting those parameter values. References ---------- .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees", Machine Learning, 63(1), 3-42, 2006. """ def __init__(self, criterion="gini", splitter="random", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_features="auto", random_state=None, max_leaf_nodes=None, min_impurity_decrease=0., min_impurity_split=None, class_weight=None): super(ExtraTreeClassifier, self).__init__( criterion=criterion, splitter=splitter, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_features=max_features, max_leaf_nodes=max_leaf_nodes, class_weight=class_weight, min_impurity_decrease=min_impurity_decrease, min_impurity_split=min_impurity_split, random_state=random_state) class ExtraTreeRegressor(DecisionTreeRegressor): """An extremely randomized tree regressor. Extra-trees differ from classic decision trees in the way they are built. When looking for the best split to separate the samples of a node into two groups, random splits are drawn for each of the `max_features` randomly selected features and the best split among those is chosen. When `max_features` is set 1, this amounts to building a totally random decision tree. Warning: Extra-trees should only be used within ensemble methods. Read more in the :ref:`User Guide <tree>`. See also -------- ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor Notes ----- The default values for the parameters controlling the size of the trees (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and unpruned trees which can potentially be very large on some data sets. To reduce memory consumption, the complexity and size of the trees should be controlled by setting those parameter values. References ---------- .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees", Machine Learning, 63(1), 3-42, 2006. """ def __init__(self, criterion="mse", splitter="random", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_features="auto", random_state=None, min_impurity_decrease=0., min_impurity_split=None, max_leaf_nodes=None): super(ExtraTreeRegressor, self).__init__( criterion=criterion, splitter=splitter, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_features=max_features, max_leaf_nodes=max_leaf_nodes, min_impurity_decrease=min_impurity_decrease, min_impurity_split=min_impurity_split, random_state=random_state)
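The Extra-Tree docstrings above warn twice that extra-trees should only be used within ensemble methods, and note that setting max_features to 1 yields a totally random tree. Below is a minimal usage sketch along those lines; the choice of BaggingRegressor, n_estimators=50, and the Boston data are illustrative assumptions, not part of this module.

# Illustrative sketch: bag many totally random ExtraTreeRegressor instances.
from sklearn.datasets import load_boston
from sklearn.ensemble import BaggingRegressor
from sklearn.model_selection import cross_val_score
from sklearn.tree import ExtraTreeRegressor

boston = load_boston()
# A single totally random tree (max_features=1) is a weak, high-variance model;
# averaging many of them in a bagging ensemble is the intended way to use it.
ensemble = BaggingRegressor(
    base_estimator=ExtraTreeRegressor(max_features=1, random_state=0),
    n_estimators=50, random_state=0)
print(cross_val_score(ensemble, boston.data, boston.target, cv=10).mean())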
bsd-3-clause
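For reference, the min_impurity_decrease criterion quoted in the decision-tree docstrings of the record above can be written out directly. The helper below is a hypothetical restatement of that equation for illustration, not code from the library; all counts are weighted sums when sample_weight is passed.

# Hypothetical helper restating the documented weighted impurity decrease.
def weighted_impurity_decrease(N, N_t, N_t_L, N_t_R,
                               impurity, left_impurity, right_impurity):
    return (float(N_t) / N) * (impurity
                               - (float(N_t_R) / N_t) * right_impurity
                               - (float(N_t_L) / N_t) * left_impurity)

# e.g. a node holding 40 of 100 samples, split into a left child of 25 samples
# (impurity 0.3) and a right child of 15 samples (impurity 0.2) from a parent
# impurity of 0.5: 0.4 * (0.5 - 0.075 - 0.1875) = 0.095, so the node is split
# only if min_impurity_decrease <= 0.095.
print(weighted_impurity_decrease(100, 40, 25, 15, 0.5, 0.3, 0.2))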
wlamond/scikit-learn
sklearn/linear_model/sag.py
30
12959
"""Solvers for Ridge and LogisticRegression using SAG algorithm""" # Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org> # # License: BSD 3 clause import warnings import numpy as np from .base import make_dataset from .sag_fast import sag from ..exceptions import ConvergenceWarning from ..utils import check_array from ..utils.extmath import row_norms def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept, n_samples=None, is_saga=False): """Compute automatic step size for SAG solver The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is the max sum of squares for over all samples. Parameters ---------- max_squared_sum : float Maximum squared sum of X over samples. alpha_scaled : float Constant that multiplies the regularization term, scaled by 1. / n_samples, the number of samples. loss : string, in {"log", "squared"} The loss function used in SAG solver. fit_intercept : bool Specifies if a constant (a.k.a. bias or intercept) will be added to the decision function. n_samples : int, optional Number of rows in X. Useful if is_saga=True. is_saga : boolean, optional Whether to return step size for the SAGA algorithm or the SAG algorithm. Returns ------- step_size : float Step size used in SAG solver. References ---------- Schmidt, M., Roux, N. L., & Bach, F. (2013). Minimizing finite sums with the stochastic average gradient https://hal.inria.fr/hal-00860051/document Defazio, A., Bach F. & Lacoste-Julien S. (2014). SAGA: A Fast Incremental Gradient Method With Support for Non-Strongly Convex Composite Objectives https://arxiv.org/abs/1407.0202 """ if loss in ('log', 'multinomial'): L = (0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled) elif loss == 'squared': # inverse Lipschitz constant for squared loss L = max_squared_sum + int(fit_intercept) + alpha_scaled else: raise ValueError("Unknown loss function for SAG solver, got %s " "instead of 'log' or 'squared'" % loss) if is_saga: # SAGA theoretical step size is 1/3L or 1 / (2 * (L + mu n)) # See Defazio et al. 2014 mun = min(2 * n_samples * alpha_scaled, L) step = 1. / (2 * L + mun) else: # SAG theoretical step size is 1/16L but it is recommended to use 1 / L # see http://www.birs.ca//workshops//2014/14w5003/files/schmidt.pdf, # slide 65 step = 1. / L return step def sag_solver(X, y, sample_weight=None, loss='log', alpha=1., beta=0., max_iter=1000, tol=0.001, verbose=0, random_state=None, check_input=True, max_squared_sum=None, warm_start_mem=None, is_saga=False): """SAG solver for Ridge and LogisticRegression SAG stands for Stochastic Average Gradient: the gradient of the loss is estimated each sample at a time and the model is updated along the way with a constant learning rate. IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the same scale. You can normalize the data by using sklearn.preprocessing.StandardScaler on your data before passing it to the fit method. This implementation works with data represented as dense numpy arrays or sparse scipy arrays of floating point values for the features. It will fit the data according to squared loss or log loss. The regularizer is a penalty added to the loss function that shrinks model parameters towards the zero vector using the squared euclidean norm L2. .. versionadded:: 0.17 Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data y : numpy array, shape (n_samples,) Target values. With loss='multinomial', y must be label encoded (see preprocessing.LabelEncoder). 
sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). loss : 'log' | 'squared' | 'multinomial' Loss function that will be optimized: -'log' is the binary logistic loss, as used in LogisticRegression. -'squared' is the squared loss, as used in Ridge. -'multinomial' is the multinomial logistic loss, as used in LogisticRegression. .. versionadded:: 0.18 *loss='multinomial'* alpha : float, optional Constant that multiplies the regularization term. Defaults to 1. max_iter : int, optional The max number of passes over the training data if the stopping criteria is not reached. Defaults to 1000. tol : double, optional The stopping criteria for the weights. The iterations will stop when max(change in weights) / max(weights) < tol. Defaults to .001 verbose : integer, optional The verbosity level. random_state : int, RandomState instance or None, optional, default None The seed of the pseudo random number generator to use when shuffling the data. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. check_input : bool, default True If False, the input arrays X and y will not be checked. max_squared_sum : float, default None Maximum squared sum of X over samples. If None, it will be computed, going through all the samples. The value should be precomputed to speed up cross validation. warm_start_mem : dict, optional The initialization parameters used for warm starting. Warm starting is currently used in LogisticRegression but not in Ridge. It contains: - 'coef': the weight vector, with the intercept in last line if the intercept is fitted. - 'gradient_memory': the scalar gradient for all seen samples. - 'sum_gradient': the sum of gradient over all seen samples, for each feature. - 'intercept_sum_gradient': the sum of gradient over all seen samples, for the intercept. - 'seen': array of boolean describing the seen samples. - 'num_seen': the number of seen samples. is_saga : boolean, optional Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves better in the first epochs, and allow for l1 regularisation. Returns ------- coef_ : array, shape (n_features) Weight vector. n_iter_ : int The number of full pass on all samples. warm_start_mem : dict Contains a 'coef' key with the fitted result, and possibly the fitted intercept at the end of the array. Contains also other keys used for warm starting. Examples -------- >>> import numpy as np >>> from sklearn import linear_model >>> n_samples, n_features = 10, 5 >>> np.random.seed(0) >>> X = np.random.randn(n_samples, n_features) >>> y = np.random.randn(n_samples) >>> clf = linear_model.Ridge(solver='sag') >>> clf.fit(X, y) ... #doctest: +NORMALIZE_WHITESPACE Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None, normalize=False, random_state=None, solver='sag', tol=0.001) >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) >>> y = np.array([1, 1, 2, 2]) >>> clf = linear_model.LogisticRegression(solver='sag') >>> clf.fit(X, y) ... #doctest: +NORMALIZE_WHITESPACE LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solver='sag', tol=0.0001, verbose=0, warm_start=False) References ---------- Schmidt, M., Roux, N. L., & Bach, F. (2013). 
Minimizing finite sums with the stochastic average gradient https://hal.inria.fr/hal-00860051/document Defazio, A., Bach F. & Lacoste-Julien S. (2014). SAGA: A Fast Incremental Gradient Method With Support for Non-Strongly Convex Composite Objectives https://arxiv.org/abs/1407.0202 See also -------- Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and LogisticRegression, SGDClassifier, LinearSVC, Perceptron """ if warm_start_mem is None: warm_start_mem = {} # Ridge default max_iter is None if max_iter is None: max_iter = 1000 if check_input: X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C') y = check_array(y, dtype=np.float64, ensure_2d=False, order='C') n_samples, n_features = X.shape[0], X.shape[1] # As in SGD, the alpha is scaled by n_samples. alpha_scaled = float(alpha) / n_samples beta_scaled = float(beta) / n_samples # if loss == 'multinomial', y should be label encoded. n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1 # initialization if sample_weight is None: sample_weight = np.ones(n_samples, dtype=np.float64, order='C') if 'coef' in warm_start_mem.keys(): coef_init = warm_start_mem['coef'] else: # assume fit_intercept is False coef_init = np.zeros((n_features, n_classes), dtype=np.float64, order='C') # coef_init contains possibly the intercept_init at the end. # Note that Ridge centers the data before fitting, so fit_intercept=False. fit_intercept = coef_init.shape[0] == (n_features + 1) if fit_intercept: intercept_init = coef_init[-1, :] coef_init = coef_init[:-1, :] else: intercept_init = np.zeros(n_classes, dtype=np.float64) if 'intercept_sum_gradient' in warm_start_mem.keys(): intercept_sum_gradient = warm_start_mem['intercept_sum_gradient'] else: intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64) if 'gradient_memory' in warm_start_mem.keys(): gradient_memory_init = warm_start_mem['gradient_memory'] else: gradient_memory_init = np.zeros((n_samples, n_classes), dtype=np.float64, order='C') if 'sum_gradient' in warm_start_mem.keys(): sum_gradient_init = warm_start_mem['sum_gradient'] else: sum_gradient_init = np.zeros((n_features, n_classes), dtype=np.float64, order='C') if 'seen' in warm_start_mem.keys(): seen_init = warm_start_mem['seen'] else: seen_init = np.zeros(n_samples, dtype=np.int32, order='C') if 'num_seen' in warm_start_mem.keys(): num_seen_init = warm_start_mem['num_seen'] else: num_seen_init = 0 dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state) if max_squared_sum is None: max_squared_sum = row_norms(X, squared=True).max() step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept, n_samples=n_samples, is_saga=is_saga) if step_size * alpha_scaled == 1: raise ZeroDivisionError("Current sag implementation does not handle " "the case step_size * alpha_scaled == 1") num_seen, n_iter_ = sag(dataset, coef_init, intercept_init, n_samples, n_features, n_classes, tol, max_iter, loss, step_size, alpha_scaled, beta_scaled, sum_gradient_init, gradient_memory_init, seen_init, num_seen_init, fit_intercept, intercept_sum_gradient, intercept_decay, is_saga, verbose) if n_iter_ == max_iter: warnings.warn("The max_iter was reached which means " "the coef_ did not converge", ConvergenceWarning) if fit_intercept: coef_init = np.vstack((coef_init, intercept_init)) warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init, 'intercept_sum_gradient': intercept_sum_gradient, 'gradient_memory': gradient_memory_init, 'seen': seen_init, 'num_seen': num_seen} if loss == 'multinomial': coef_ = 
coef_init.T else: coef_ = coef_init[:, 0] return coef_, n_iter_, warm_start_mem
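get_auto_step_size above documents its formulas: for log loss L = 0.25 * (max_squared_sum + fit_intercept) + alpha_scaled, SAG uses 1 / L, and SAGA uses 1 / (2L + min(2 * n_samples * alpha_scaled, L)). Here is a small sketch that simply calls the function with its documented signature; the random data and alpha = 1.0 are arbitrary illustration choices.

# Illustrative sketch: inspect the automatic SAG/SAGA step sizes.
import numpy as np
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.utils.extmath import row_norms

X = np.random.RandomState(0).randn(200, 5)
n_samples = X.shape[0]
alpha_scaled = 1.0 / n_samples                      # alpha=1.0 scaled by n_samples, as in sag_solver
max_squared_sum = row_norms(X, squared=True).max()  # max squared row norm over the samples

sag_step = get_auto_step_size(max_squared_sum, alpha_scaled, 'log',
                              fit_intercept=True)
saga_step = get_auto_step_size(max_squared_sum, alpha_scaled, 'log',
                               fit_intercept=True, n_samples=n_samples,
                               is_saga=True)
print(sag_step, saga_step)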
bsd-3-clause
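The warm_start_mem dict documented in sag_solver above is both returned by the solver and accepted by it, so a later call can resume from an earlier one. The rough sketch below assumes the same module layout; the toy data, squared loss, and iteration counts are arbitrary, and the docstring notes that warm starting is only exercised by LogisticRegression in practice.

# Rough sketch: feed the returned warm_start_mem back into a second call.
import numpy as np
from sklearn.linear_model.sag import sag_solver

rng = np.random.RandomState(0)
X = rng.randn(50, 4)
y = rng.randn(50)

# First pass: a few epochs only; keep the returned memory
# ('coef', 'sum_gradient', 'gradient_memory', 'seen', 'num_seen', ...).
coef, n_iter, mem = sag_solver(X, y, loss='squared', alpha=1.,
                               max_iter=5, tol=1e-3)
# Second pass: start from that memory instead of from zeros.
coef, n_iter, mem = sag_solver(X, y, loss='squared', alpha=1.,
                               max_iter=1000, tol=1e-3, warm_start_mem=mem)
print(n_iter, coef)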
fatadama/estimation
challenge_problem/trials_bifurcation/ukf_trials.py
1
6860
"""@package ukf_trials
loads data, passes through UKF
"""

import numpy as np
import math
import matplotlib.pyplot as plt
import sys
import time
import scipy.stats as stats

sys.path.append('../')
import cp_dynamics
sys.path.append('../../filters/python/ukf')
import ukf
sys.path.append('../sim_data')
import generate_data
sys.path.append('../trials')
import trials_processing

def eqom_ukf(x,t,u,v):
    return cp_dynamics.eqom_stoch(x,t,v)

def measurement_ukf(x,t,n):
    return np.array([[ x[0] + n[0] ]])

#@param[out] xf [nSteps x 2] estimate history output
#@param[out] Pf [nSteps x 2 x 2] covariance history output
#@param[out] trials_processing.simOutput() object with flags if the sim failed
def ukf_test(dt,tf,mux0,P0,YK,Qk,Rk):
    UKF = ukf.ukf(2,0,1,eqom_ukf,Qk)
    nSteps = int(tf/dt)+1
    ts = 0.0
    # initialize UKF
    UKF.init_P(mux0,P0,ts)
    # initialize performance object
    simOut = trials_processing.simOutput()
    xf = np.zeros((nSteps,2))
    Pf = np.zeros((nSteps,2,2))
    tk = np.arange(0.0,tf,dt)
    xf[0,:] = UKF.xhat.copy()
    Pf[0,:,:] = UKF.Pk.copy()
    t1 = time.time()
    for k in range(1,nSteps):
        # get the new measurement
        ym = np.array([YK[k]])
        ts = ts + dt
        # sync the UKF, with continuous-time integration
        try:
            UKF.sync(dt,ym,measurement_ukf,Rk,True)
        except np.linalg.linalg.LinAlgError:
            print("Singular covariance at t = %f" % (ts))
            simOut.fail_singular_covariance(k)
            return(xf,Pf,simOut)
        # check that the eigenvalues are reasonably bounded
        w = np.linalg.eigvalsh(UKF.Pk.copy())
        for jj in range(len(w)):
            if math.fabs(w[jj]) > 1.0e6:
                simOut.fail_singular_covariance(k)
                print("Covariance eigenvalue too large, t = %f" % (ts))
                return(xf,Pf,simOut)
        # copy
        #if k < nSteps-1:
        xf[k,:] = UKF.xhat.copy()
        Pf[k,:,:] = UKF.Pk.copy()
    t2 = time.time()
    print("Elapsed time: %f sec" % (t2-t1))
    simOut.complete(nSteps)
    return(xf,Pf,simOut)

def main():
    P0 = np.array([[2.0,0.0],[0.0,1.0]])
    Ns = 100
    Ts = 3.5
    (tsim,XK,YK,mu0,dt,tf) = generate_data.execute_sim(cp_dynamics.eqom_stoch_cluster,Ts,30*Ts,Ns,P0,cluster=True,informative=True)
    Qk = np.array([[0.01]])
    Rk = np.array([[0.01]])
    # number of steps in each simulation
    nSteps = len(tsim)

    nees_history = np.zeros((nSteps,Ns))
    e_sims = np.zeros((Ns*nSteps,2))
    count_good = 0
    count_singular_covariance = 0
    count_large_errors = 0
    for counter in range(Ns):
        xk = XK[:,(2*counter):(2*counter+2)]
        yk = YK[:,counter]

        (xf,Pf,simOut) = ukf_test(dt,tf,mu0,P0,yk,Qk,Rk)
        if simOut.singular_covariance:
            print("Simulation exited with singular covariance at index %d" % (simOut.last_index))
            count_singular_covariance = count_singular_covariance + 1
            continue

        # compute the unit variance transformation of the error
        e1 = np.zeros((nSteps,2))
        chi2 = np.zeros(nSteps)
        (e1[0:simOut.last_index,:],chi2[0:simOut.last_index]) = trials_processing.computeErrors(xf[0:simOut.last_index,:],Pf[0:simOut.last_index,:,:],xk[0:simOut.last_index,:])
        nees_history[:,counter] = chi2.copy()
        mean_nees = np.sum(chi2)/float(nSteps)
        print(mean_nees)
        # mean NEES
        mse = np.sum(np.power(e1,2.0),axis=0)/float(nSteps)
        e_sims[(counter*nSteps):(counter*nSteps+nSteps),:] = e1.copy()
        if (mse[0] > 1.0) or (mse[1] > 1.0):
            count_large_errors = count_large_errors + 1
            continue
        count_good = count_good + 1
        print("MSE: %f,%f" % (mse[0],mse[1]))

    # chi-square test statistics
    # (alpha) probability of being less than the returned value: stats.chi2.ppf(alpha,df=Nsims)
    if Ns < 2:
        trials_processing.printSingleSim(tsim,xf,Pf,xk,name='ukf',save_flag=None,history_lines=True,draw_snapshots=False)
    #trials_processing.errorParsing(e_sims,nees_history,'ukf','sims_01_bifurcation')

    # write to file
    # Ns, count_good, count_singular_covariance, count_large_errors, Qk[0,0], Ts
    fname = 'ukf_data.txt'
    FID = open(fname,'a')
    FID.write("%d,%d,%d,%d,%g,%g\n" % (Ns,count_good,count_singular_covariance,count_large_errors,Qk[0,0],Ts))
    FID.close()
    """
    if Ns < 2:
        fig1 = plt.figure()
        ax = []
        for k in range(4):
            if k < 2:
                nam = 'x' + str(k+1)
            else:
                nam = 'e' + str(k-1)
            ax.append(fig1.add_subplot(2,2,k+1,ylabel=nam))
            if k < 2:
                ax[k].plot(tsim,xk[:,k],'b-')
                ax[k].plot(tsim,xf[:,k],'m--')
                if k == 0:
                    ax[k].plot(tsim,yk,'r--')
            else:
                ax[k].plot(tsim,xk[:,k-2]-xf[:,k-2])
                ax[k].plot(tsim,3.0*np.sqrt(Pf[:,3*(k-2)]),'r--')
                ax[k].plot(tsim,-3.0*np.sqrt(Pf[:,3*(k-2)]),'r--')
            ax[k].grid()
        fig1.show()

        fig2 = plt.figure()
        ax = []
        ax.append(fig2.add_subplot(111,ylabel = 'nees metric'))
        ax[0].plot(tsim,chi2)
        ax[0].grid()
        fig2.show()

    trials_processing.errorParsing(e_sims,nees_history,'ukf',nameNow)

    mse_tot = np.mean(np.power(e_sims,2.0),axis=0)
    print("mse_tot: %f,%f" % (mse_tot[0],mse_tot[1]))

    # get the mean NEES value versus simulation time across all sims
    nees_mean = np.sum(nees_history,axis=1)/Ns
    # get 95% confidence bounds for chi-squared... the df is the number of sims times the dimension of the state
    chiUpper = stats.chi2.ppf(.975,2.0*Ns)/float(Ns)
    chiLower = stats.chi2.ppf(.025,2.0*Ns)/float(Ns)

    # plot the mean NEES with the 95% confidence bounds
    fig2 = plt.figure(figsize=(6.0,3.37)) #figsize tuple is width, height
    tilt = "UKF, Ts = %.2f, %d sims, " % (dt, Ns)
    if nameBit == 0:
        tilt = tilt + 'unforced'
    if nameBit == 1:
        #white-noise only
        tilt = tilt + 'white-noise forcing'
    if nameBit == 2:
        tilt = tilt + 'cosine forcing'
    if nameBit == 3:
        #white-noise and cosine forcing
        tilt = tilt + 'white-noise and cosine forcing'
    ax = fig2.add_subplot(111,ylabel='mean NEES',title=tilt)
    ax.plot(tsim,chiUpper*np.ones(nSteps),'r--')
    ax.plot(tsim,chiLower*np.ones(nSteps),'r--')
    ax.plot(tsim,nees_mean,'b-')
    ax.grid()
    fig2.show()
    # save the figure
    fig2.savefig('nees_ukf_' + nameNow + '.png')
    # find fraction of inliers
    l1 = (nees_mean < chiUpper).nonzero()[0]
    l2 = (nees_mean > chiLower).nonzero()[0]
    # get number of inliers
    len_in = len(set(l1).intersection(l2))
    # get number of super (above) liers (sic)
    len_super = len((nees_mean > chiUpper).nonzero()[0])
    # get number of sub-liers (below)
    len_sub = len((nees_mean < chiLower).nonzero()[0])
    print("Conservative (below 95%% bounds): %f" % (float(len_sub)/float(nSteps)))
    print("Optimistic (above 95%% bounds): %f" % (float(len_super)/float(nSteps)))
    # save metrics
    FID = open('metrics_ukf_' + nameNow + '.txt','w')
    FID.write("mse1,mse2,nees_below95,nees_above95\n")
    FID.write("%f,%f,%f,%f\n" % (mse_tot[0],mse_tot[1],float(len_sub)/float(nSteps),float(len_super)/float(nSteps)))
    FID.close()

    # plot all NEES
    fig = plt.figure(figsize=(6.0,3.37))
    ax = fig.add_subplot(111,ylabel='NEES')
    ax.plot(tsim,nees_history,'b-')
    ax.grid()
    fig.show()

    raw_input("Return to quit")
    """
    print("Leaving ukf_trials")
    return

if __name__ == "__main__":
    main()
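The commented-out analysis block in main above computes 95% chi-squared bounds on the mean NEES across runs. Pulled out as a standalone helper with the same formulas; the function name is hypothetical, and state_dim=2 matches the two-state model used in these trials.

# Hypothetical helper restating the mean-NEES consistency check from the block above.
import numpy as np
import scipy.stats as stats

def mean_nees_bounds(nees_history, Ns, state_dim=2, alpha=0.05):
    # nees_history is (nSteps, Ns): one NEES column per Monte Carlo run.
    nees_mean = np.sum(nees_history, axis=1) / float(Ns)
    # degrees of freedom = state dimension times number of runs
    df = state_dim * Ns
    lower = stats.chi2.ppf(alpha / 2.0, df) / float(Ns)
    upper = stats.chi2.ppf(1.0 - alpha / 2.0, df) / float(Ns)
    return nees_mean, lower, upper

# A consistent filter keeps the mean NEES between the bounds most of the time;
# time steps above the upper bound indicate optimism, below the lower bound conservatism.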
gpl-2.0