repo_name
stringlengths
9
55
path
stringlengths
7
120
copies
stringlengths
1
3
size
stringlengths
4
6
content
stringlengths
1.02k
169k
license
stringclasses
12 values
miaecle/deepchem
contrib/mpnn/donkey.py
7
3029
# 2017 DeepCrystal Technologies - Patrick Hop
#
# Data loading a splitting file
#
# MIT License - have fun!!
# ===========================================================

import os
import random
from collections import OrderedDict

import deepchem as dc
from deepchem.utils import ScaffoldGenerator
from deepchem.utils.save import log
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from sklearn import preprocessing
from sklearn.decomposition import TruncatedSVD
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem

# Fixed seeds so the scaffold split is reproducible across runs.
random.seed(2)
np.random.seed(2)
torch.manual_seed(2)


def generate_scaffold(smiles, include_chirality=False):
    """Compute the Bemis-Murcko scaffold for a SMILES string."""
    mol = Chem.MolFromSmiles(smiles)
    engine = ScaffoldGenerator(include_chirality=include_chirality)
    scaffold = engine.get_scaffold(mol)
    return scaffold


def split(dataset, frac_train=.80, frac_valid=.10, frac_test=.10,
          log_every_n=1000):
    """Split compounds into train/validation/test index lists by scaffold.

    Compounds sharing a Bemis-Murcko scaffold are always assigned to the
    same subset, so structurally similar molecules never leak between
    train, validation and test.

    Parameters
    ----------
    dataset : sequence of str
        SMILES strings.
    frac_train, frac_valid, frac_test : float
        Fractions per subset; must sum to 1.
    log_every_n : int
        Progress-logging interval while generating scaffolds.

    Returns
    -------
    (train_inds, valid_inds, test_inds)
        Three lists of integer indices into ``dataset``.
    """
    np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.)
    scaffolds = {}
    log("About to generate scaffolds", True)
    data_len = len(dataset)
    for ind, smiles in enumerate(dataset):
        if ind % log_every_n == 0:
            log("Generating scaffold %d/%d" % (ind, data_len), True)
        scaffold = generate_scaffold(smiles)
        if scaffold not in scaffolds:
            scaffolds[scaffold] = [ind]
        else:
            scaffolds[scaffold].append(ind)
    # Order scaffold groups from largest to smallest (ties broken by the
    # smallest index) so big groups are placed in the training set first.
    scaffolds = {key: sorted(value) for key, value in scaffolds.items()}
    scaffold_sets = [
        scaffold_set for (scaffold, scaffold_set) in sorted(
            scaffolds.items(), key=lambda x: (len(x[1]), x[1][0]), reverse=True)
    ]
    train_cutoff = frac_train * len(dataset)
    valid_cutoff = (frac_train + frac_valid) * len(dataset)
    train_inds, valid_inds, test_inds = [], [], []
    log("About to sort in scaffold sets", True)
    for scaffold_set in scaffold_sets:
        if len(train_inds) + len(scaffold_set) > train_cutoff:
            if len(train_inds) + len(valid_inds) + len(scaffold_set) > valid_cutoff:
                test_inds += scaffold_set
            else:
                valid_inds += scaffold_set
        else:
            train_inds += scaffold_set
    return train_inds, valid_inds, test_inds


def load_dataset(filename, whiten=False):
    """Load a CSV of molecules and return scaffold-split train/val arrays.

    Expects a header row; in every following row the last column is the
    SMILES string and the second-to-last column is a float label.

    Parameters
    ----------
    filename : str
        Path to the CSV file.
    whiten : bool
        Unused; kept for interface compatibility.

    Returns
    -------
    (train_features, train_labels, val_features, val_labels)
        Numpy arrays; features are SMILES strings, labels are float32.
    """
    features = []
    labels = []
    # Context manager closes the file even on error (the original version
    # leaked the handle); enumerate replaces the manual header tracer.
    with open(filename, 'r') as f:
        for row_num, line in enumerate(f):
            if row_num == 0:  # skip header row
                continue
            splits = line[:-1].split(',')
            features.append(splits[-1])
            labels.append(float(splits[-2]))
    features = np.array(features)
    labels = np.array(labels, dtype='float32').reshape(-1, 1)
    # Test indices are computed but intentionally not returned, matching
    # the original interface.
    train_ind, val_ind, test_ind = split(features)
    train_features = np.take(features, train_ind)
    train_labels = np.take(labels, train_ind)
    val_features = np.take(features, val_ind)
    val_labels = np.take(labels, val_ind)
    return train_features, train_labels, val_features, val_labels
mit
RecipeML/Recipe
recipe/classifiers/randomLogistic.py
1
2397
# -*- coding: utf-8 -*-
"""
Copyright 2016 Walter José and Alex de Sá

This file is part of the RECIPE Algorithm.

The RECIPE is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.

RECIPE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
See http://www.gnu.org/licenses/.
"""

from sklearn.linear_model import RandomizedLogisticRegression


def randomLogistic(args):
    """Build scikit-learn's RandomizedLogisticRegression from a grammar
    argument list.

    The estimator works by subsampling the training data and fitting an
    L1-penalized LogisticRegression model where the penalty of a random
    subset of coefficients has been scaled.

    Parameters
    ----------
    args : sequence of str
        Positional hyper-parameters produced by the grammar (args[0] unused):
        args[1] C : float — regularization parameter C of LogisticRegression.
        args[2] scaling : float — factor used to randomly scale the penalty
            of different features.
        args[3] sample_fraction : float — fraction of samples used in each
            randomized design, in (0, 1]; 1 uses all samples.
        args[4] n_resampling : int — number of randomized models.
        args[5] selection_threshold : float — score above which features
            should be selected.
        args[6] normalize : "True"/"False" — normalize regressors X before
            regression.
        args[7] fit_intercept : "True"/"False" — whether to calculate the
            intercept (False when data is already centered).
        args[8] tol : float — stopping tolerance of LogisticRegression.

    Returns
    -------
    RandomizedLogisticRegression
        Unfitted estimator configured with the parsed hyper-parameters.
    """
    c = float(args[1])
    scal = float(args[2])
    s_frac = float(args[3])
    n_res = int(args[4])
    st = float(args[5])
    # Idiomatic substring tests replace the old `.find("True") != -1` checks;
    # behavior is identical (True iff "True" occurs in the argument string).
    norm = "True" in args[6]
    fi = "True" in args[7]
    t = float(args[8])

    # NOTE(review): RandomizedLogisticRegression was deprecated in
    # scikit-learn 0.19 and removed in 0.21 — pin sklearn < 0.21 or migrate
    # (e.g. to SelectFromModel with a LogisticRegression estimator).
    return RandomizedLogisticRegression(C=c, scaling=scal,
                                        sample_fraction=s_frac,
                                        n_resampling=n_res,
                                        selection_threshold=st, tol=t,
                                        fit_intercept=fi, verbose=False,
                                        normalize=norm, random_state=42,
                                        n_jobs=1, pre_dispatch='1*n_jobs')
gpl-3.0
CompPhysics/MachineLearning
doc/src/LectureNotes/_build/jupyter_execute/chapter8.py
1
48694
# Dimensionality Reduction ## Reducing the number of degrees of freedom, overarching view Many Machine Learning problems involve thousands or even millions of features for each training instance. Not only does this make training extremely slow, it can also make it much harder to find a good solution, as we will see. This problem is often referred to as the curse of dimensionality. Fortunately, in real-world problems, it is often possible to reduce the number of features considerably, turning an intractable problem into a tractable one. Here we will discuss some of the most popular dimensionality reduction techniques: the principal component analysis (PCA), Kernel PCA, and Locally Linear Embedding (LLE). Furthermore, we will start by looking at some simple preprocessing of the data which allow us to rescale the data. Principal component analysis and its various variants deal with the problem of fitting a low-dimensional [affine subspace](https://en.wikipedia.org/wiki/Affine_space) to a set of of data points in a high-dimensional space. With its family of methods it is one of the most used tools in data modeling, compression and visualization. ## Preprocessing our data Before we proceed however, we will discuss how to preprocess our data. Till now and in connection with our previous examples we have not met so many cases where we are too sensitive to the scaling of our data. Normally the data may need a rescaling and/or may be sensitive to extreme values. Scaling the data renders our inputs much more suitable for the algorithms we want to employ. **Scikit-Learn** has several functions which allow us to rescale the data, normally resulting in much better results in terms of various accuracy scores. The **StandardScaler** function in **Scikit-Learn** ensures that for each feature/predictor we study the mean value is zero and the variance is one (every column in the design/feature matrix). 
This scaling has the drawback that it does not ensure that we have a particular maximum or minimum in our data set. Another function included in **Scikit-Learn** is the **MinMaxScaler** which ensures that all features are exactly between $0$ and $1$. ## More preprocessing The **Normalizer** scales each data point such that the feature vector has a Euclidean length of one. In other words, it projects a data point on the circle (or sphere in the case of higher dimensions) with a radius of 1. This means every data point is scaled by a different number (by the inverse of its length). This normalization is often used when only the direction (or angle) of the data matters, not the length of the feature vector. The **RobustScaler** works similarly to the StandardScaler in that it ensures statistical properties for each feature that guarantee that they are on the same scale. However, the RobustScaler uses the median and quartiles, instead of mean and variance. This makes the RobustScaler ignore data points that are very different from the rest (like measurement errors). These odd data points are also called outliers, and might often lead to trouble for other scaling techniques. 
## Simple preprocessing examples, Franke function and regression %matplotlib inline # Common imports import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import sklearn.linear_model as skl from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split from sklearn.preprocessing import MinMaxScaler, StandardScaler, Normalizer from sklearn.svm import SVR # Where to save the figures and data files PROJECT_ROOT_DIR = "Results" FIGURE_ID = "Results/FigureFiles" DATA_ID = "DataFiles/" if not os.path.exists(PROJECT_ROOT_DIR): os.mkdir(PROJECT_ROOT_DIR) if not os.path.exists(FIGURE_ID): os.makedirs(FIGURE_ID) if not os.path.exists(DATA_ID): os.makedirs(DATA_ID) def image_path(fig_id): return os.path.join(FIGURE_ID, fig_id) def data_path(dat_id): return os.path.join(DATA_ID, dat_id) def save_fig(fig_id): plt.savefig(image_path(fig_id) + ".png", format='png') def FrankeFunction(x,y): term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2)) term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1)) term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2)) term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2) return term1 + term2 + term3 + term4 def create_X(x, y, n ): if len(x.shape) > 1: x = np.ravel(x) y = np.ravel(y) N = len(x) l = int((n+1)*(n+2)/2) # Number of elements in beta X = np.ones((N,l)) for i in range(1,n+1): q = int((i)*(i+1)/2) for k in range(i+1): X[:,q+k] = (x**(i-k))*(y**k) return X # Making meshgrid of datapoints and compute Franke's function n = 5 N = 1000 x = np.sort(np.random.uniform(0, 1, N)) y = np.sort(np.random.uniform(0, 1, N)) z = FrankeFunction(x, y) X = create_X(x, y, n=n) # split in training and test data X_train, X_test, y_train, y_test = train_test_split(X,z,test_size=0.2) svm = SVR(gamma='auto',C=10.0) svm.fit(X_train, y_train) # The mean squared error and R2 score print("MSE before scaling: {:.2f}".format(mean_squared_error(svm.predict(X_test), y_test))) print("R2 score before scaling 
{:.2f}".format(svm.score(X_test,y_test))) scaler = StandardScaler() scaler.fit(X_train) X_train_scaled = scaler.transform(X_train) X_test_scaled = scaler.transform(X_test) print("Feature min values before scaling:\n {}".format(X_train.min(axis=0))) print("Feature max values before scaling:\n {}".format(X_train.max(axis=0))) print("Feature min values after scaling:\n {}".format(X_train_scaled.min(axis=0))) print("Feature max values after scaling:\n {}".format(X_train_scaled.max(axis=0))) svm = SVR(gamma='auto',C=10.0) svm.fit(X_train_scaled, y_train) print("MSE after scaling: {:.2f}".format(mean_squared_error(svm.predict(X_test_scaled), y_test))) print("R2 score for scaled data: {:.2f}".format(svm.score(X_test_scaled,y_test))) ## Simple preprocessing examples, breast cancer data and classification, Support Vector Machines We show here how we can use a simple regression case on the breast cancer data using support vector machines (SVM) as algorithm for classification. import matplotlib.pyplot as plt import numpy as np from sklearn.model_selection import train_test_split from sklearn.datasets import load_breast_cancer from sklearn.svm import SVC cancer = load_breast_cancer() X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0) print(X_train.shape) print(X_test.shape) svm = SVC(C=100) svm.fit(X_train, y_train) print("Test set accuracy: {:.2f}".format(svm.score(X_test,y_test))) from sklearn.preprocessing import MinMaxScaler, StandardScaler scaler = MinMaxScaler() scaler.fit(X_train) X_train_scaled = scaler.transform(X_train) X_test_scaled = scaler.transform(X_test) print("Feature min values before scaling:\n {}".format(X_train.min(axis=0))) print("Feature max values before scaling:\n {}".format(X_train.max(axis=0))) print("Feature min values before scaling:\n {}".format(X_train_scaled.min(axis=0))) print("Feature max values before scaling:\n {}".format(X_train_scaled.max(axis=0))) svm.fit(X_train_scaled, y_train) print("Test set 
accuracy scaled data with Min-Max scaling: {:.2f}".format(svm.score(X_test_scaled,y_test))) scaler = StandardScaler() scaler.fit(X_train) X_train_scaled = scaler.transform(X_train) X_test_scaled = scaler.transform(X_test) svm.fit(X_train_scaled, y_train) print("Test set accuracy scaled data with Standar Scaler: {:.2f}".format(svm.score(X_test_scaled,y_test))) ## More on Cancer Data, now with Logistic Regression import matplotlib.pyplot as plt import numpy as np from sklearn.model_selection import train_test_split from sklearn.datasets import load_breast_cancer from sklearn.linear_model import LogisticRegression cancer = load_breast_cancer() # Set up training data X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0) logreg = LogisticRegression() logreg.fit(X_train, y_train) print("Test set accuracy: {:.2f}".format(logreg.score(X_test,y_test))) # Scale data from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X_train) X_train_scaled = scaler.transform(X_train) X_test_scaled = scaler.transform(X_test) logreg.fit(X_train_scaled, y_train) print("Test set accuracy scaled data: {:.2f}".format(logreg.score(X_test_scaled,y_test))) ## Why should we think of reducing the dimensionality In addition to the plot of the features, we study now also the covariance (and the correlation matrix). We use also **Pandas** to compute the correlation matrix. 
import matplotlib.pyplot as plt import numpy as np from sklearn.model_selection import train_test_split from sklearn.datasets import load_breast_cancer from sklearn.linear_model import LogisticRegression cancer = load_breast_cancer() import pandas as pd # Making a data frame cancerpd = pd.DataFrame(cancer.data, columns=cancer.feature_names) fig, axes = plt.subplots(15,2,figsize=(10,20)) malignant = cancer.data[cancer.target == 0] benign = cancer.data[cancer.target == 1] ax = axes.ravel() for i in range(30): _, bins = np.histogram(cancer.data[:,i], bins =50) ax[i].hist(malignant[:,i], bins = bins, alpha = 0.5) ax[i].hist(benign[:,i], bins = bins, alpha = 0.5) ax[i].set_title(cancer.feature_names[i]) ax[i].set_yticks(()) ax[0].set_xlabel("Feature magnitude") ax[0].set_ylabel("Frequency") ax[0].legend(["Malignant", "Benign"], loc ="best") fig.tight_layout() plt.show() import seaborn as sns correlation_matrix = cancerpd.corr().round(1) # use the heatmap function from seaborn to plot the correlation matrix # annot = True to print the values inside the square sns.heatmap(data=correlation_matrix, annot=True) plt.show() #print eigvalues of correlation matrix EigValues, EigVectors = np.linalg.eig(correlation_matrix) print(EigValues) In the above example we note two things. In the first plot we display the overlap of benign and malignant tumors as functions of the various features in the Wisconsing breast cancer data set. We see that for some of the features we can distinguish clearly the benign and malignant cases while for other features we cannot. This can point to us which features may be of greater interest when we wish to classify a benign or not benign tumour. In the second figure we have computed the so-called correlation matrix, which in our case with thirty features becomes a $30\times 30$ matrix. 
We constructed this matrix using **pandas** via the statements cancerpd = pd.DataFrame(cancer.data, columns=cancer.feature_names) and then correlation_matrix = cancerpd.corr().round(1) Diagonalizing this matrix we can in turn say something about which features are of relevance and which are not. But before we proceed we need to define covariance and correlation matrices. This leads us to the classical Principal Component Analysis (PCA) theorem with applications. ## Basic ideas of the Principal Component Analysis (PCA) The principal component analysis deals with the problem of fitting a low-dimensional affine subspace $S$ of dimension $d$ much smaller than the total dimension $D$ of the problem at hand (our data set). Mathematically it can be formulated as a statistical problem or a geometric problem. In our discussion of the theorem for the classical PCA, we will stay with a statistical approach. This is also what set the scene historically for the PCA. We have a data set defined by a design/feature matrix $\boldsymbol{X}$ (see below for its definition) * Each data point is determined by $p$ extrinsic (measurement) variables * We may want to ask the following question: Are there fewer intrinsic variables (say $d << p$) that still approximately describe the data? * If so, these intrinsic variables may tell us something important and finding these intrinsic variables is what dimension reduction methods do. ## Introducing the Covariance and Correlation functions Before we discuss the PCA theorem, we need to remind ourselves about the definition of the covariance and the correlation function. These are quantities we will need repeatedly in what follows. Suppose we have defined two vectors $\hat{x}$ and $\hat{y}$ with $n$ elements each. 
The covariance matrix $\boldsymbol{C}$ is defined as $$ \boldsymbol{C}[\boldsymbol{x},\boldsymbol{y}] = \begin{bmatrix} \mathrm{cov}[\boldsymbol{x},\boldsymbol{x}] & \mathrm{cov}[\boldsymbol{x},\boldsymbol{y}] \\ \mathrm{cov}[\boldsymbol{y},\boldsymbol{x}] & \mathrm{cov}[\boldsymbol{y},\boldsymbol{y}] \\ \end{bmatrix}, $$ where for example $$ \mathrm{cov}[\boldsymbol{x},\boldsymbol{y}] =\frac{1}{n} \sum_{i=0}^{n-1}(x_i- \overline{x})(y_i- \overline{y}). $$ With this definition and recalling that the variance is defined as $$ \mathrm{var}[\boldsymbol{x}]=\frac{1}{n} \sum_{i=0}^{n-1}(x_i- \overline{x})^2, $$ we can rewrite the covariance matrix as $$ \boldsymbol{C}[\boldsymbol{x},\boldsymbol{y}] = \begin{bmatrix} \mathrm{var}[\boldsymbol{x}] & \mathrm{cov}[\boldsymbol{x},\boldsymbol{y}] \\ \mathrm{cov}[\boldsymbol{x},\boldsymbol{y}] & \mathrm{var}[\boldsymbol{y}] \\ \end{bmatrix}. $$ The covariance takes values between zero and infinity and may thus lead to problems with loss of numerical precision for particularly large values. It is common to scale the covariance matrix by introducing instead the correlation matrix defined via the so-called correlation function $$ \mathrm{corr}[\boldsymbol{x},\boldsymbol{y}]=\frac{\mathrm{cov}[\boldsymbol{x},\boldsymbol{y}]}{\sqrt{\mathrm{var}[\boldsymbol{x}] \mathrm{var}[\boldsymbol{y}]}}. $$ The correlation function is then given by values $\mathrm{corr}[\boldsymbol{x},\boldsymbol{y}] \in [-1,1]$. This avoids eventual problems with too large values. We can then define the correlation matrix for the two vectors $\boldsymbol{x}$ and $\boldsymbol{y}$ as $$ \boldsymbol{K}[\boldsymbol{x},\boldsymbol{y}] = \begin{bmatrix} 1 & \mathrm{corr}[\boldsymbol{x},\boldsymbol{y}] \\ \mathrm{corr}[\boldsymbol{y},\boldsymbol{x}] & 1 \\ \end{bmatrix}, $$ In the above example this is the function we constructed using **pandas**. 
## Correlation Function and Design/Feature Matrix In our derivation of the various regression algorithms like **Ordinary Least Squares** or **Ridge regression** we defined the design/feature matrix $\boldsymbol{X}$ as $$ \boldsymbol{X}=\begin{bmatrix} x_{0,0} & x_{0,1} & x_{0,2}& \dots & \dots x_{0,p-1}\\ x_{1,0} & x_{1,1} & x_{1,2}& \dots & \dots x_{1,p-1}\\ x_{2,0} & x_{2,1} & x_{2,2}& \dots & \dots x_{2,p-1}\\ \dots & \dots & \dots & \dots \dots & \dots \\ x_{n-2,0} & x_{n-2,1} & x_{n-2,2}& \dots & \dots x_{n-2,p-1}\\ x_{n-1,0} & x_{n-1,1} & x_{n-1,2}& \dots & \dots x_{n-1,p-1}\\ \end{bmatrix}, $$ with $\boldsymbol{X}\in {\mathbb{R}}^{n\times p}$, with the predictors/features $p$ refering to the column numbers and the entries $n$ being the row elements. We can rewrite the design/feature matrix in terms of its column vectors as $$ \boldsymbol{X}=\begin{bmatrix} \boldsymbol{x}_0 & \boldsymbol{x}_1 & \boldsymbol{x}_2 & \dots & \dots & \boldsymbol{x}_{p-1}\end{bmatrix}, $$ with a given vector $$ \boldsymbol{x}_i^T = \begin{bmatrix}x_{0,i} & x_{1,i} & x_{2,i}& \dots & \dots x_{n-1,i}\end{bmatrix}. $$ With these definitions, we can now rewrite our $2\times 2$ correaltion/covariance matrix in terms of a moe general design/feature matrix $\boldsymbol{X}\in {\mathbb{R}}^{n\times p}$. 
This leads to a $p\times p$ covariance matrix for the vectors $\boldsymbol{x}_i$ with $i=0,1,\dots,p-1$ $$ \boldsymbol{C}[\boldsymbol{x}] = \begin{bmatrix} \mathrm{var}[\boldsymbol{x}_0] & \mathrm{cov}[\boldsymbol{x}_0,\boldsymbol{x}_1] & \mathrm{cov}[\boldsymbol{x}_0,\boldsymbol{x}_2] & \dots & \dots & \mathrm{cov}[\boldsymbol{x}_0,\boldsymbol{x}_{p-1}]\\ \mathrm{cov}[\boldsymbol{x}_1,\boldsymbol{x}_0] & \mathrm{var}[\boldsymbol{x}_1] & \mathrm{cov}[\boldsymbol{x}_1,\boldsymbol{x}_2] & \dots & \dots & \mathrm{cov}[\boldsymbol{x}_1,\boldsymbol{x}_{p-1}]\\ \mathrm{cov}[\boldsymbol{x}_2,\boldsymbol{x}_0] & \mathrm{cov}[\boldsymbol{x}_2,\boldsymbol{x}_1] & \mathrm{var}[\boldsymbol{x}_2] & \dots & \dots & \mathrm{cov}[\boldsymbol{x}_2,\boldsymbol{x}_{p-1}]\\ \dots & \dots & \dots & \dots & \dots & \dots \\ \dots & \dots & \dots & \dots & \dots & \dots \\ \mathrm{cov}[\boldsymbol{x}_{p-1},\boldsymbol{x}_0] & \mathrm{cov}[\boldsymbol{x}_{p-1},\boldsymbol{x}_1] & \mathrm{cov}[\boldsymbol{x}_{p-1},\boldsymbol{x}_{2}] & \dots & \dots & \mathrm{var}[\boldsymbol{x}_{p-1}]\\ \end{bmatrix}, $$ and the correlation matrix $$ \boldsymbol{K}[\boldsymbol{x}] = \begin{bmatrix} 1 & \mathrm{corr}[\boldsymbol{x}_0,\boldsymbol{x}_1] & \mathrm{corr}[\boldsymbol{x}_0,\boldsymbol{x}_2] & \dots & \dots & \mathrm{corr}[\boldsymbol{x}_0,\boldsymbol{x}_{p-1}]\\ \mathrm{corr}[\boldsymbol{x}_1,\boldsymbol{x}_0] & 1 & \mathrm{corr}[\boldsymbol{x}_1,\boldsymbol{x}_2] & \dots & \dots & \mathrm{corr}[\boldsymbol{x}_1,\boldsymbol{x}_{p-1}]\\ \mathrm{corr}[\boldsymbol{x}_2,\boldsymbol{x}_0] & \mathrm{corr}[\boldsymbol{x}_2,\boldsymbol{x}_1] & 1 & \dots & \dots & \mathrm{corr}[\boldsymbol{x}_2,\boldsymbol{x}_{p-1}]\\ \dots & \dots & \dots & \dots & \dots & \dots \\ \dots & \dots & \dots & \dots & \dots & \dots \\ \mathrm{corr}[\boldsymbol{x}_{p-1},\boldsymbol{x}_0] & \mathrm{corr}[\boldsymbol{x}_{p-1},\boldsymbol{x}_1] & \mathrm{corr}[\boldsymbol{x}_{p-1},\boldsymbol{x}_{2}] & \dots & \dots & 1\\ 
\end{bmatrix}, $$ ## Covariance Matrix Examples The Numpy function **np.cov** calculates the covariance elements using the factor $1/(n-1)$ instead of $1/n$ since it assumes we do not have the exact mean values. The following simple function uses the **np.vstack** function which takes each vector of dimension $1\times n$ and produces a $2\times n$ matrix $\boldsymbol{W}$ $$ \boldsymbol{W} = \begin{bmatrix} x_0 & y_0 \\ x_1 & y_1 \\ x_2 & y_2\\ \dots & \dots \\ x_{n-2} & y_{n-2}\\ x_{n-1} & y_{n-1} & \end{bmatrix}, $$ which in turn is converted into into the $2\times 2$ covariance matrix $\boldsymbol{C}$ via the Numpy function **np.cov()**. We note that we can also calculate the mean value of each set of samples $\boldsymbol{x}$ etc using the Numpy function **np.mean(x)**. We can also extract the eigenvalues of the covariance matrix through the **np.linalg.eig()** function. # Importing various packages import numpy as np n = 100 x = np.random.normal(size=n) print(np.mean(x)) y = 4+3*x+np.random.normal(size=n) print(np.mean(y)) W = np.vstack((x, y)) C = np.cov(W) print(C) ## Correlation Matrix The previous example can be converted into the correlation matrix by simply scaling the matrix elements with the variances. We should also subtract the mean values for each column. This leads to the following code which sets up the correlations matrix for the previous example in a more brute force way. Here we scale the mean values for each column of the design matrix, calculate the relevant mean values and variances and then finally set up the $2\times 2$ correlation matrix (since we have only two vectors). 
import numpy as np n = 100 # define two vectors x = np.random.random(size=n) y = 4+3*x+np.random.normal(size=n) #scaling the x and y vectors x = x - np.mean(x) y = y - np.mean(y) variance_x = np.sum(x@x)/n variance_y = np.sum(y@y)/n print(variance_x) print(variance_y) cov_xy = np.sum(x@y)/n cov_xx = np.sum(x@x)/n cov_yy = np.sum(y@y)/n C = np.zeros((2,2)) C[0,0]= cov_xx/variance_x C[1,1]= cov_yy/variance_y C[0,1]= cov_xy/np.sqrt(variance_y*variance_x) C[1,0]= C[0,1] print(C) We see that the matrix elements along the diagonal are one as they should be and that the matrix is symmetric. Furthermore, diagonalizing this matrix we easily see that it is a positive definite matrix. The above procedure with **numpy** can be made more compact if we use **pandas**. ## Correlation Matrix with Pandas We show here how we can set up the correlation matrix using **pandas**, as done in this simple code import numpy as np import pandas as pd n = 10 x = np.random.normal(size=n) x = x - np.mean(x) y = 4+3*x+np.random.normal(size=n) y = y - np.mean(y) X = (np.vstack((x, y))).T print(X) Xpd = pd.DataFrame(X) print(Xpd) correlation_matrix = Xpd.corr() print(correlation_matrix) We expand this model to the Franke function discussed above. 
## Correlation Matrix with Pandas and the Franke function # Common imports import numpy as np import pandas as pd def FrankeFunction(x,y): term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2)) term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1)) term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2)) term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2) return term1 + term2 + term3 + term4 def create_X(x, y, n ): if len(x.shape) > 1: x = np.ravel(x) y = np.ravel(y) N = len(x) l = int((n+1)*(n+2)/2) # Number of elements in beta X = np.ones((N,l)) for i in range(1,n+1): q = int((i)*(i+1)/2) for k in range(i+1): X[:,q+k] = (x**(i-k))*(y**k) return X # Making meshgrid of datapoints and compute Franke's function n = 4 N = 100 x = np.sort(np.random.uniform(0, 1, N)) y = np.sort(np.random.uniform(0, 1, N)) z = FrankeFunction(x, y) X = create_X(x, y, n=n) Xpd = pd.DataFrame(X) # subtract the mean values and set up the covariance matrix Xpd = Xpd - Xpd.mean() covariance_matrix = Xpd.cov() print(covariance_matrix) We note here that the covariance is zero for the first rows and columns since all matrix elements in the design matrix were set to one (we are fitting the function in terms of a polynomial of degree $n$). This means that the variance for these elements will be zero and will cause problems when we set up the correlation matrix. We can simply drop these elements and construct a correlation matrix without these elements. ## Rewriting the Covariance and/or Correlation Matrix We can rewrite the covariance matrix in a more compact form in terms of the design/feature matrix $\boldsymbol{X}$ as $$ \boldsymbol{C}[\boldsymbol{x}] = \frac{1}{n}\boldsymbol{X}\boldsymbol{X}^T= \mathbb{E}[\boldsymbol{X}\boldsymbol{X}^T]. $$ To see this let us simply look at a design matrix $\boldsymbol{X}\in {\mathbb{R}}^{2\times 2}$ $$ \boldsymbol{X}=\begin{bmatrix} x_{00} & x_{01}\\ x_{10} & x_{11}\\ \end{bmatrix}=\begin{bmatrix} \boldsymbol{x}_{0} & \boldsymbol{x}_{1}\\ \end{bmatrix}. 
$$ If we then compute the expectation value $$ \mathbb{E}[\boldsymbol{X}\boldsymbol{X}^T] = \frac{1}{n}\boldsymbol{X}\boldsymbol{X}^T=\begin{bmatrix} x_{00}^2+x_{01}^2 & x_{00}x_{10}+x_{01}x_{11}\\ x_{10}x_{00}+x_{11}x_{01} & x_{10}^2+x_{11}^2\\ \end{bmatrix}, $$ which is just $$ \boldsymbol{C}[\boldsymbol{x}_0,\boldsymbol{x}_1] = \boldsymbol{C}[\boldsymbol{x}]=\begin{bmatrix} \mathrm{var}[\boldsymbol{x}_0] & \mathrm{cov}[\boldsymbol{x}_0,\boldsymbol{x}_1] \\ \mathrm{cov}[\boldsymbol{x}_1,\boldsymbol{x}_0] & \mathrm{var}[\boldsymbol{x}_1] \\ \end{bmatrix}, $$ where we wrote $$\boldsymbol{C}[\boldsymbol{x}_0,\boldsymbol{x}_1] = \boldsymbol{C}[\boldsymbol{x}]$$ to indicate that this the covariance of the vectors $\boldsymbol{x}$ of the design/feature matrix $\boldsymbol{X}$. It is easy to generalize this to a matrix $\boldsymbol{X}\in {\mathbb{R}}^{n\times p}$. ## Towards the PCA theorem We have that the covariance matrix (the correlation matrix involves a simple rescaling) is given as $$ \boldsymbol{C}[\boldsymbol{x}] = \frac{1}{n}\boldsymbol{X}\boldsymbol{X}^T= \mathbb{E}[\boldsymbol{X}\boldsymbol{X}^T]. $$ Let us now assume that we can perform a series of orthogonal transformations where we employ some orthogonal matrices $\boldsymbol{S}$. These matrices are defined as $\boldsymbol{S}\in {\mathbb{R}}^{p\times p}$ and obey the orthogonality requirements $\boldsymbol{S}\boldsymbol{S}^T=\boldsymbol{S}^T\boldsymbol{S}=\boldsymbol{I}$. The matrix can be written out in terms of the column vectors $\boldsymbol{s}_i$ as $\boldsymbol{S}=[\boldsymbol{s}_0,\boldsymbol{s}_1,\dots,\boldsymbol{s}_{p-1}]$ and $\boldsymbol{s}_i \in {\mathbb{R}}^{p}$. Assume also that there is a transformation $\boldsymbol{S}\boldsymbol{C}[\boldsymbol{x}]\boldsymbol{S}^T=\boldsymbol{C}[\boldsymbol{y}]$ such that the new matrix $\boldsymbol{C}[\boldsymbol{y}]$ is diagonal with elements $[\lambda_0,\lambda_1,\lambda_2,\dots,\lambda_{p-1}]$. 
That is we have $$ \boldsymbol{C}[\boldsymbol{y}] = \mathbb{E}[\boldsymbol{S}\boldsymbol{X}\boldsymbol{X}^T\boldsymbol{S}^T]=\boldsymbol{S}\boldsymbol{C}[\boldsymbol{x}]\boldsymbol{S}^T, $$ since the matrix $\boldsymbol{S}$ is not a data dependent matrix. Multiplying with $\boldsymbol{S}^T$ from the left we have $$ \boldsymbol{S}^T\boldsymbol{C}[\boldsymbol{y}] = \boldsymbol{C}[\boldsymbol{x}]\boldsymbol{S}^T, $$ and since $\boldsymbol{C}[\boldsymbol{y}]$ is diagonal we have for a given eigenvalue $i$ of the covariance matrix that $$ \boldsymbol{S}^T_i\lambda_i = \boldsymbol{C}[\boldsymbol{x}]\boldsymbol{S}^T_i. $$ In the derivation of the PCA theorem we will assume that the eigenvalues are ordered in descending order, that is $\lambda_0 > \lambda_1 > \dots > \lambda_{p-1}$. The eigenvalues tell us then how much we need to stretch the corresponding eigenvectors. Dimensions with large eigenvalues have thus large variations (large variance) and define therefore useful dimensions. The data points are more spread out in the direction of these eigenvectors. Smaller eigenvalues mean on the other hand that the corresponding eigenvectors are shrunk accordingly and the data points are tightly bunched together and there is not much variation in these specific directions. Hopefully then we could leave it out dimensions where the eigenvalues are very small. If $p$ is very large, we could then aim at reducing $p$ to $l << p$ and handle only $l$ features/predictors. ## The Algorithm before theorem Here's how we would proceed in setting up the algorithm for the PCA, see also discussion below here. * Set up the datapoints for the design/feature matrix $\boldsymbol{X}$ with $\boldsymbol{X}\in {\mathbb{R}}^{n\times p}$, with the predictors/features $p$ referring to the column numbers and the entries $n$ being the row elements. 
$$ \boldsymbol{X}=\begin{bmatrix} x_{0,0} & x_{0,1} & x_{0,2}& \dots & \dots x_{0,p-1}\\ x_{1,0} & x_{1,1} & x_{1,2}& \dots & \dots x_{1,p-1}\\ x_{2,0} & x_{2,1} & x_{2,2}& \dots & \dots x_{2,p-1}\\ \dots & \dots & \dots & \dots \dots & \dots \\ x_{n-2,0} & x_{n-2,1} & x_{n-2,2}& \dots & \dots x_{n-2,p-1}\\ x_{n-1,0} & x_{n-1,1} & x_{n-1,2}& \dots & \dots x_{n-1,p-1}\\ \end{bmatrix}, $$ * Center the data by subtracting the mean value for each column. This leads to a new matrix $\boldsymbol{X}\rightarrow \overline{\boldsymbol{X}}$. * Compute then the covariance/correlation matrix $\mathbb{E}[\overline{\boldsymbol{X}}\overline{\boldsymbol{X}}^T]$. * Find the eigenpairs of $\boldsymbol{C}$ with eigenvalues $[\lambda_0,\lambda_1,\dots,\lambda_{p-1}]$ and eigenvectors $[\boldsymbol{s}_0,\boldsymbol{s}_1,\dots,\boldsymbol{s}_{p-1}]$. * Order the eigenvalue (and the eigenvectors accordingly) in order of decreasing eigenvalues. * Keep only those $l$ eigenvalues larger than a selected threshold value, discarding thus $p-l$ features since we expect small variations in the data here. ## Writing our own PCA code We will use a simple example first with two-dimensional data drawn from a multivariate normal distribution with the following mean and covariance matrix: $$ \mu = (-1,2) \qquad \Sigma = \begin{bmatrix} 4 & 2 \\ 2 & 2 \end{bmatrix} $$ Note that the mean refers to each column of data. We will generate $n = 1000$ points $X = \{ x_1, \ldots, x_N \}$ from this distribution, and store them in the $1000 \times 2$ matrix $\boldsymbol{X}$. The following Python code aids in setting up the data and writing out the design matrix. Note that the function **multivariate** returns also the covariance discussed above and that it is defined by dividing by $n-1$ instead of $n$. 
import numpy as np import pandas as pd import matplotlib.pyplot as plt from IPython.display import display n = 10000 mean = (-1, 2) cov = [[4, 2], [2, 2]] X = np.random.multivariate_normal(mean, cov, n) Now we are going to implement the PCA algorithm. We will break it down into various substeps. ### Compute the sample mean and center the data The first step of PCA is to compute the sample mean of the data and use it to center the data. Recall that the sample mean is $$ \mu_n = \frac{1}{n} \sum_{i=1}^n x_i $$ and the mean-centered data $\bar{X} = \{ \bar{x}_1, \ldots, \bar{x}_n \}$ takes the form $$ \bar{x}_i = x_i - \mu_n. $$ When you are done with these steps, print out $\mu_n$ to verify it is close to $\mu$ and plot your mean centered data to verify it is centered at the origin! Compare your code with the functionality from **Scikit-Learn** discussed above. The following code elements perform these operations using **pandas** or using our own functionality for doing so. The latter, using **numpy** is rather simple through the **mean()** function. df = pd.DataFrame(X) # Pandas does the centering for us df = df -df.mean() # we center it ourselves X_centered = X - X.mean(axis=0) Alternatively, we could use the functions we discussed earlier for scaling the data set. That is, we could have used the **StandardScaler** function in **Scikit-Learn**, a function which ensures that for each feature/predictor we study the mean value is zero and the variance is one (every column in the design/feature matrix). You would then not get the same results, since we divide by the variance. The diagonal covariance matrix elements will then be one, while the non-diagonal ones need to be divided by $2\sqrt{2}$ for our specific case. 
### Compute the sample covariance Now we are going to use the mean centered data to compute the sample covariance of the data by using the following equation $$ \Sigma_n = \frac{1}{n-1} \sum_{i=1}^n \bar{x}_i^T \bar{x}_i = \frac{1}{n-1} \sum_{i=1}^n (x_i - \mu_n)^T (x_i - \mu_n) $$ where the data points $x_i \in \mathbb{R}^p$ (here in this example $p = 2$) are column vectors and $x^T$ is the transpose of $x$. We can write our own code or simply use either the functionaly of **numpy** or that of **pandas**, as follows print(df.cov()) print(np.cov(X_centered.T)) Note that the way we define the covariance matrix here has a factor $n-1$ instead of $n$. This is included in the **cov()** function by **numpy** and **pandas**. Our own code here is not very elegant and asks for obvious improvements. It is tailored to this specific $2\times 2$ covariance matrix. # extract the relevant columns from the centered design matrix of dim n x 2 x = X_centered[:,0] y = X_centered[:,1] Cov = np.zeros((2,2)) Cov[0,1] = np.sum(x.T@y)/(n-1.0) Cov[0,0] = np.sum(x.T@x)/(n-1.0) Cov[1,1] = np.sum(y.T@y)/(n-1.0) Cov[1,0]= Cov[0,1] print("Centered covariance using own code") print(Cov) plt.plot(x, y, 'x') plt.axis('equal') plt.show() Depending on the number of points $n$, we will get results that are close to the covariance values defined above. The plot shows how the data are clustered around a line with slope close to one. Is this expected? ### Diagonalize the sample covariance matrix to obtain the principal components Now we are ready to solve for the principal components! To do so we diagonalize the sample covariance matrix $\Sigma$. We can use the function **np.linalg.eig** to do so. It will return the eigenvalues and eigenvectors of $\Sigma$. 
Once we have these we can perform the following tasks: * We compute the percentage of the total variance captured by the first principal component * We plot the mean centered data and lines along the first and second principal components * Then we project the mean centered data onto the first and second principal components, and plot the projected data. * Finally, we approximate the data as $$ x_i \approx \tilde{x}_i = \mu_n + \langle x_i, v_0 \rangle v_0 $$ where $v_0$ is the first principal component. Collecting all these steps we can write our own PCA function and compare this with the functionality included in **Scikit-Learn**. The code here outlines some of the elements we could include in the analysis. Feel free to extend upon this in order to address the above questions. # diagonalize and obtain eigenvalues, not necessarily sorted EigValues, EigVectors = np.linalg.eig(Cov) # sort eigenvectors and eigenvalues #permute = EigValues.argsort() #EigValues = EigValues[permute] #EigVectors = EigVectors[:,permute] print("Eigenvalues of Covariance matrix") for i in range(2): print(EigValues[i]) FirstEigvector = EigVectors[:,0] SecondEigvector = EigVectors[:,1] print("First eigenvector") print(FirstEigvector) print("Second eigenvector") print(SecondEigvector) #thereafter we do a PCA with Scikit-learn from sklearn.decomposition import PCA pca = PCA(n_components = 2) X2Dsl = pca.fit_transform(X) print("Eigenvector of largest eigenvalue") print(pca.components_.T[:, 0]) This code does not contain all the above elements, but it shows how we can use **Scikit-Learn** to extract the eigenvector which corresponds to the largest eigenvalue. Try to address the questions we pose before the above code. Try also to change the values of the covariance matrix by making one of the diagonal elements much larger than the other. What do you observe then? ## Classical PCA Theorem We assume now that we have a design matrix $\boldsymbol{X}$ which has been centered as discussed above. 
For the sake of simplicity we skip the overline symbol. The matrix is defined in terms of the various column vectors $[\boldsymbol{x}_0,\boldsymbol{x}_1,\dots, \boldsymbol{x}_{p-1}]$ each with dimension $\boldsymbol{x}\in {\mathbb{R}}^{n}$. We assume also that we have an orthogonal transformation $\boldsymbol{W}\in {\mathbb{R}}^{p\times p}$. We define the reconstruction error (which is similar to the mean squared error we have seen before) as $$ J(\boldsymbol{W},\boldsymbol{Z}) = \frac{1}{n}\sum_i (\boldsymbol{x}_i - \overline{\boldsymbol{x}}_i)^2, $$ with $\overline{\boldsymbol{x}}_i = \boldsymbol{W}\boldsymbol{z}_i$, where $\boldsymbol{z}_i$ is a row vector with dimension ${\mathbb{R}}^{n}$ of the matrix $\boldsymbol{Z}\in{\mathbb{R}}^{p\times n}$. When doing PCA we want to reduce this dimensionality. The PCA theorem states that minimizing the above reconstruction error corresponds to setting $\boldsymbol{W}=\boldsymbol{S}$, the orthogonal matrix which diagonalizes the empirical covariance(correlation) matrix. The optimal low-dimensional encoding of the data is then given by a set of vectors $\boldsymbol{z}_i$ with at most $l$ vectors, with $l << p$, defined by the orthogonal projection of the data onto the columns spanned by the eigenvectors of the covariance(correlations matrix). The proof which follows will be updated by mid January 2020. ## Proof of the PCA Theorem To show the PCA theorem let us start with the assumption that there is one vector $\boldsymbol{w}_0$ which corresponds to a solution which minimized the reconstruction error $J$. This is an orthogonal vector. 
It means that we now approximate the reconstruction error in terms of $\boldsymbol{w}_0$ and $\boldsymbol{z}_0$ as $$ J(\boldsymbol{w}_0,\boldsymbol{z}_0)= \frac{1}{n}\sum_i (\boldsymbol{x}_i - z_{i0}\boldsymbol{w}_0)^2=\frac{1}{n}\sum_i (\boldsymbol{x}_i^T\boldsymbol{x}_i - 2z_{i0}\boldsymbol{w}_0^T\boldsymbol{x}_i+z_{i0}^2\boldsymbol{w}_0^T\boldsymbol{w}_0), $$ which we can rewrite, using the normalization $\boldsymbol{w}_0^T\boldsymbol{w}_0=1$, as $$ J(\boldsymbol{w}_0,\boldsymbol{z}_0)=\frac{1}{n}\sum_i (\boldsymbol{x}_i^T\boldsymbol{x}_i - 2z_{i0}\boldsymbol{w}_0^T\boldsymbol{x}_i+z_{i0}^2). $$ Minimizing $J$ with respect to the unknown parameters $z_{i0}$ we obtain that $$ z_{i0}=\boldsymbol{w}_0^T\boldsymbol{x}_i, $$ where the vectors on the rhs are known. ## PCA Proof continued We have now found the unknown parameters $z_{i0}$. These correspond to the projected coordinates and we can write $$ J(\boldsymbol{w}_0)= \frac{1}{n}\sum_i (\boldsymbol{x}_i^T\boldsymbol{x}_i - z_{i0}^2)=\mathrm{const}-\frac{1}{n}\sum_i z_{i0}^2. $$ We can show that the variance of the projected coordinates defined by $\boldsymbol{w}_0^T\boldsymbol{x}_i$ are given by $$ \mathrm{var}[\boldsymbol{w}_0^T\boldsymbol{x}_i] = \frac{1}{n}\sum_i z_{i0}^2, $$ since the expectation value of $$ \mathbb{E}[\boldsymbol{w}_0^T\boldsymbol{x}_i] = \mathbb{E}[z_{i0}]= \boldsymbol{w}_0^T\mathbb{E}[\boldsymbol{x}_i]=0, $$ where we have used the fact that our data are centered. Recalling our definition of the covariance as $$ \boldsymbol{C}[\boldsymbol{x}] = \frac{1}{n}\boldsymbol{X}\boldsymbol{X}^T=\mathbb{E}[\boldsymbol{X}\boldsymbol{X}^T], $$ we have thus that $$ \mathrm{var}[\boldsymbol{w}_0^T\boldsymbol{x}_i] = \frac{1}{n}\sum_i z_{i0}^2=\boldsymbol{w}_0^T\boldsymbol{C}[\boldsymbol{x}]\boldsymbol{w}_0. $$ We are almost there, we have obtained a relation between minimizing the reconstruction error and the variance and the covariance matrix. 
Minimizing the error is equivalent to maximizing the variance of the projected data. ## The final step We could trivially maximize the variance of the projection (and thereby minimize the error in the reconstruction function) by letting the norm-2 of $\boldsymbol{w}_0$ go to infinity. However, since we want the matrix $\boldsymbol{W}$ to be an orthogonal matrix, this norm is constrained by $\vert\vert \boldsymbol{w}_0 \vert\vert_2^2=1$. Imposing this condition via a Lagrange multiplier we can then in turn maximize $$ J(\boldsymbol{w}_0)= \boldsymbol{w}_0^T\boldsymbol{C}[\boldsymbol{x}]\boldsymbol{w}_0+\lambda_0(1-\boldsymbol{w}_0^T\boldsymbol{w}_0). $$ Taking the derivative with respect to $\boldsymbol{w}_0$ we obtain $$ \frac{\partial J(\boldsymbol{w}_0)}{\partial \boldsymbol{w}_0}= 2\boldsymbol{C}[\boldsymbol{x}]\boldsymbol{w}_0-2\lambda_0\boldsymbol{w}_0=0, $$ meaning that $$ \boldsymbol{C}[\boldsymbol{x}]\boldsymbol{w}_0=\lambda_0\boldsymbol{w}_0. $$ **The direction that maximizes the variance (or minimizes the reconstruction error) is an eigenvector of the covariance matrix**! If we left multiply with $\boldsymbol{w}_0^T$ we have the variance of the projected data is $$ \boldsymbol{w}_0^T\boldsymbol{C}[\boldsymbol{x}]\boldsymbol{w}_0=\lambda_0. $$ If we want to maximize the variance (minimize the reconstruction error) we simply pick the eigenvector of the covariance matrix with the largest eigenvalue. This establishes the link between the minimization of the reconstruction function $J$ in terms of an orthogonal matrix and the maximization of the variance and thereby the covariance of our observations encoded in the design/feature matrix $\boldsymbol{X}$. The proof for the other eigenvectors $\boldsymbol{w}_1,\boldsymbol{w}_2,\dots$ can be established by applying the above arguments and using the fact that our basis of eigenvectors is orthogonal, see [Murphy chapter 12.2](https://mitpress.mit.edu/books/machine-learning-1). 
The discussion in chapter 12.2 of Murphy's text has also a nice link with the Singular Value Decomposition theorem. For categorical data, see chapter 12.4 and discussion therein. Additional part of the proof for the other eigenvectors will be added by mid January 2020. ## Geometric Interpretation and link with Singular Value Decomposition This material will be added by mid January 2020. ## Principal Component Analysis Principal Component Analysis (PCA) is by far the most popular dimensionality reduction algorithm. First it identifies the hyperplane that lies closest to the data, and then it projects the data onto it. The following Python code uses NumPy’s **svd()** function to obtain all the principal components of the training set, then extracts the first two principal components. First we center the data using either **pandas** or our own code import numpy as np import pandas as pd from IPython.display import display np.random.seed(100) # setting up a 10 x 5 vanilla matrix rows = 10 cols = 5 X = np.random.randn(rows,cols) df = pd.DataFrame(X) # Pandas does the centering for us df = df -df.mean() display(df) # we center it ourselves X_centered = X - X.mean(axis=0) # Then check the difference between pandas and our own set up print(X_centered-df) #Now we do an SVD U, s, V = np.linalg.svd(X_centered) c1 = V.T[:, 0] c2 = V.T[:, 1] W2 = V.T[:, :2] X2D = X_centered.dot(W2) print(X2D) PCA assumes that the dataset is centered around the origin. Scikit-Learn’s PCA classes take care of centering the data for you. However, if you implement PCA yourself (as in the preceding example), or if you use other libraries, don’t forget to center the data first. Once you have identified all the principal components, you can reduce the dimensionality of the dataset down to $d$ dimensions by projecting it onto the hyperplane defined by the first $d$ principal components. Selecting this hyperplane ensures that the projection will preserve as much variance as possible. 
W2 = V.T[:, :2] X2D = X_centered.dot(W2) ## PCA and scikit-learn Scikit-Learn’s PCA class implements PCA using SVD decomposition just like we did before. The following code applies PCA to reduce the dimensionality of the dataset down to two dimensions (note that it automatically takes care of centering the data): #thereafter we do a PCA with Scikit-learn from sklearn.decomposition import PCA pca = PCA(n_components = 2) X2D = pca.fit_transform(X) print(X2D) After fitting the PCA transformer to the dataset, you can access the principal components using the `components_` attribute (note that it contains the PCs as horizontal vectors, so, for example, the first principal component is equal to pca.components_.T[:, 0]). Another very useful piece of information is the explained variance ratio of each principal component, available via the `explained_variance_ratio_` attribute. It indicates the proportion of the dataset’s variance that lies along the axis of each principal component. ## Back to the Cancer Data We can now repeat the above but applied to real data, in this case our breast cancer data. Here we compute performance scores on the training data using logistic regression. 
import matplotlib.pyplot as plt import numpy as np from sklearn.model_selection import train_test_split from sklearn.datasets import load_breast_cancer from sklearn.linear_model import LogisticRegression cancer = load_breast_cancer() X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0) logreg = LogisticRegression() logreg.fit(X_train, y_train) print("Train set accuracy from Logistic Regression: {:.2f}".format(logreg.score(X_train,y_train))) # We scale the data from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(X_train) X_train_scaled = scaler.transform(X_train) X_test_scaled = scaler.transform(X_test) # Then perform again a log reg fit logreg.fit(X_train_scaled, y_train) print("Train set accuracy scaled data: {:.2f}".format(logreg.score(X_train_scaled,y_train))) #thereafter we do a PCA with Scikit-learn from sklearn.decomposition import PCA pca = PCA(n_components = 2) X2D_train = pca.fit_transform(X_train_scaled) # and finally compute the log reg fit and the score on the training data logreg.fit(X2D_train,y_train) print("Train set accuracy scaled and PCA data: {:.2f}".format(logreg.score(X2D_train,y_train))) We see that our training data after the PCA decomposition has a performance similar to the non-scaled data. ## More on the PCA Instead of arbitrarily choosing the number of dimensions to reduce down to, it is generally preferable to choose the number of dimensions that add up to a sufficiently large portion of the variance (e.g., 95%). Unless, of course, you are reducing dimensionality for data visualization — in that case you will generally want to reduce the dimensionality down to 2 or 3. 
The following code computes PCA without reducing dimensionality, then computes the minimum number of dimensions required to preserve 95% of the training set’s variance: pca = PCA() pca.fit(X) cumsum = np.cumsum(pca.explained_variance_ratio_) d = np.argmax(cumsum >= 0.95) + 1 You could then set $n\_components=d$ and run PCA again. However, there is a much better option: instead of specifying the number of principal components you want to preserve, you can set $n\_components$ to be a float between 0.0 and 1.0, indicating the ratio of variance you wish to preserve: pca = PCA(n_components=0.95) X_reduced = pca.fit_transform(X) ## Incremental PCA One problem with the preceding implementation of PCA is that it requires the whole training set to fit in memory in order for the SVD algorithm to run. Fortunately, Incremental PCA (IPCA) algorithms have been developed: you can split the training set into mini-batches and feed an IPCA algorithm one minibatch at a time. This is useful for large training sets, and also to apply PCA online (i.e., on the fly, as new instances arrive). ## Randomized PCA Scikit-Learn offers yet another option to perform PCA, called Randomized PCA. This is a stochastic algorithm that quickly finds an approximation of the first d principal components. Its computational complexity is $O(m \times d^2)+O(d^3)$, instead of $O(m \times n^2) + O(n^3)$, so it is dramatically faster than the previous algorithms when $d$ is much smaller than $n$. ## Kernel PCA The kernel trick is a mathematical technique that implicitly maps instances into a very high-dimensional space (called the feature space), enabling nonlinear classification and regression with Support Vector Machines. Recall that a linear decision boundary in the high-dimensional feature space corresponds to a complex nonlinear decision boundary in the original space. 
It turns out that the same trick can be applied to PCA, making it possible to perform complex nonlinear projections for dimensionality reduction. This is called Kernel PCA (kPCA). It is often good at preserving clusters of instances after projection, or sometimes even unrolling datasets that lie close to a twisted manifold. For example, the following code uses Scikit-Learn’s KernelPCA class to perform kPCA with an RBF (Gaussian) kernel: from sklearn.decomposition import KernelPCA rbf_pca = KernelPCA(n_components = 2, kernel="rbf", gamma=0.04) X_reduced = rbf_pca.fit_transform(X) ## LLE Locally Linear Embedding (LLE) is another very powerful nonlinear dimensionality reduction (NLDR) technique. It is a Manifold Learning technique that does not rely on projections like the previous algorithms. In a nutshell, LLE works by first measuring how each training instance linearly relates to its closest neighbors (c.n.), and then looking for a low-dimensional representation of the training set where these local relationships are best preserved (more details shortly). ## Other techniques There are many other dimensionality reduction techniques, several of which are available in Scikit-Learn. Here are some of the most popular: * **Multidimensional Scaling (MDS)** reduces dimensionality while trying to preserve the distances between the instances. * **Isomap** creates a graph by connecting each instance to its nearest neighbors, then reduces dimensionality while trying to preserve the geodesic distances between the instances. * **t-Distributed Stochastic Neighbor Embedding** (t-SNE) reduces dimensionality while trying to keep similar instances close and dissimilar instances apart. It is mostly used for visualization, in particular to visualize clusters of instances in high-dimensional space (e.g., to visualize the MNIST images in 2D). 
* Linear Discriminant Analysis (LDA) is actually a classification algorithm, but during training it learns the most discriminative axes between the classes, and these axes can then be used to define a hyperplane onto which to project the data. The benefit is that the projection will keep classes as far apart as possible, so LDA is a good technique to reduce dimensionality before running another classification algorithm such as a Support Vector Machine (SVM) classifier discussed in the SVM lectures.
cc0-1.0
weixuanfu/tpot
tpot/config/regressor_sparse.py
1
3127
# -*- coding: utf-8 -*-

"""This file is part of the TPOT library.

TPOT was primarily developed at the University of Pennsylvania by:
    - Randal S. Olson (rso@randalolson.com)
    - Weixuan Fu (weixuanf@upenn.edu)
    - Daniel Angell (dpa34@drexel.edu)
    - and many more generous open source contributors

TPOT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.

TPOT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with TPOT. If not, see <http://www.gnu.org/licenses/>.

"""

import numpy as np

# Hyperparameter search space for TPOT regression pipelines operating on
# sparse input data. Keys are fully qualified estimator/transformer class
# names; each value maps a hyperparameter name to the grid of candidate
# values TPOT may draw from during pipeline optimization. A nested dict as
# a "value" (see `score_func` / `estimator` below) denotes a sub-component
# with its own hyperparameter grid.
regressor_config_sparse = {

    'tpot.builtins.OneHotEncoder': {
        'minimum_fraction': [0.05, 0.1, 0.15, 0.2, 0.25]
    },

    'sklearn.neighbors.KNeighborsRegressor': {
        'n_neighbors': range(1, 101),
        'weights': ["uniform", "distance"],
        # p=1 -> Manhattan distance, p=2 -> Euclidean distance
        'p': [1, 2]
    },

    'sklearn.ensemble.RandomForestRegressor': {
        'n_estimators': [100],
        'max_features': np.arange(0.05, 1.01, 0.05),
        'min_samples_split': range(2, 21),
        'min_samples_leaf': range(1, 21),
        'bootstrap': [True, False]
    },

    # Feature selectors: each one scores features with f_regression.
    'sklearn.feature_selection.SelectFwe': {
        'alpha': np.arange(0, 0.05, 0.001),
        'score_func': {
            'sklearn.feature_selection.f_regression': None
        }
    },

    'sklearn.feature_selection.SelectPercentile': {
        'percentile': range(1, 100),
        'score_func': {
            'sklearn.feature_selection.f_regression': None
        }
    },

    'sklearn.feature_selection.VarianceThreshold': {
        'threshold': np.arange(0.05, 1.01, 0.05)
    },

    'sklearn.feature_selection.SelectFromModel': {
        'threshold': np.arange(0, 1.01, 0.05),
        # Feature importances are taken from an ExtraTreesRegressor
        # fitted as the selector's internal estimator.
        'estimator': {
            'sklearn.ensemble.ExtraTreesRegressor': {
                'n_estimators': [100],
                'max_features': np.arange(0.05, 1.01, 0.05)
            }
        }
    },

    'sklearn.linear_model.ElasticNetCV': {
        'l1_ratio': np.arange(0.0, 1.01, 0.05),
        'tol': [1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
    },

    # Empty grid: RidgeCV is used with its scikit-learn defaults only.
    'sklearn.linear_model.RidgeCV': {
    },

    'sklearn.svm.LinearSVR': {
        'loss': ["epsilon_insensitive", "squared_epsilon_insensitive"],
        'dual': [True, False],
        'tol': [1e-5, 1e-4, 1e-3, 1e-2, 1e-1],
        'C': [1e-4, 1e-3, 1e-2, 1e-1, 0.5, 1., 5., 10., 15., 20., 25.],
        'epsilon': [1e-4, 1e-3, 1e-2, 1e-1, 1.]
    },

    'xgboost.XGBRegressor': {
        'n_estimators': [100],
        'max_depth': range(1, 11),
        'learning_rate': [1e-3, 1e-2, 1e-1, 0.5, 1.],
        'subsample': np.arange(0.05, 1.01, 0.05),
        'min_child_weight': range(1, 21),
        # single-threaded, silent runs inside TPOT's own parallelism
        'n_jobs': [1],
        'verbosity': [0],
        'objective': ['reg:squarederror']
    }

}
lgpl-3.0
Nyker510/scikit-learn
sklearn/covariance/robust_covariance.py
198
29735
"""
Robust location and covariance estimators.

Here are implemented estimators that are resistant to outliers.

"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause

import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2

from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array


# Minimum Covariance Determinant
#   Implementing of an algorithm by Rousseeuw & Van Driessen described in
#   (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
#   1999, American Statistical Association and the American Society
#   for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
           verbose=False, cov_computation_method=empirical_covariance,
           random_state=None):
    """C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Data set in which we look for the n_support observations whose
        scatter matrix has minimum determinant.

    n_support : int, > n_samples / 2
        Number of observations to compute the robust estimates of location
        and covariance from.

    remaining_iterations : int, optional
        Number of iterations to perform.
        According to [Rouseeuw1999]_, two iterations are sufficient to get
        close to the minimum, and we never need more than 30 to reach
        convergence.

    initial_estimates : 2-tuple, optional
        Initial estimates of location and shape from which to run the c_step
        procedure:
        - initial_estimates[0]: an initial location estimate
        - initial_estimates[1]: an initial covariance estimate

    verbose : boolean, optional
        Verbose mode.

    random_state : integer or numpy.RandomState, optional
        The random generator used. If an integer is given, it fixes the
        seed. Defaults to the global numpy random number generator.

    cov_computation_method : callable, default empirical_covariance
        The function which will be used to compute the covariance.
        Must return shape (n_features, n_features)

    Returns
    -------
    location : array-like, shape (n_features,)
        Robust location estimates.

    covariance : array-like, shape (n_features, n_features)
        Robust covariance estimates.

    support : array-like, shape (n_samples,)
        A mask for the `n_support` observations whose scatter matrix has
        minimum determinant.

    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS

    """
    # Thin public wrapper: normalize inputs and delegate to _c_step.
    X = np.asarray(X)
    random_state = check_random_state(random_state)
    return _c_step(X, n_support, remaining_iterations=remaining_iterations,
                   initial_estimates=initial_estimates, verbose=verbose,
                   cov_computation_method=cov_computation_method,
                   random_state=random_state)


def _c_step(X, n_support, random_state, remaining_iterations=30,
            initial_estimates=None, verbose=False,
            cov_computation_method=empirical_covariance):
    # Core C-step iteration. Returns (location, covariance, det, support,
    # dist) — the robust estimates, the log-determinant of the covariance,
    # the boolean support mask and the Mahalanobis distances of all samples.
    n_samples, n_features = X.shape

    # Initialisation
    support = np.zeros(n_samples, dtype=bool)
    if initial_estimates is None:
        # compute initial robust estimates from a random subset
        support[random_state.permutation(n_samples)[:n_support]] = True
    else:
        # get initial robust estimates from the function parameters
        location = initial_estimates[0]
        covariance = initial_estimates[1]
        # run a special iteration for that case (to get an initial support)
        precision = pinvh(covariance)
        X_centered = X - location
        dist = (np.dot(X_centered, precision) * X_centered).sum(1)
        # compute new estimates
        support[np.argsort(dist)[:n_support]] = True

    X_support = X[support]
    location = X_support.mean(0)
    covariance = cov_computation_method(X_support)

    # Iterative procedure for Minimum Covariance Determinant computation
    # Each pass keeps the n_support samples with the smallest Mahalanobis
    # distances; per [Rouseeuw1999]_ this cannot increase the determinant.
    det = fast_logdet(covariance)
    previous_det = np.inf
    while (det < previous_det) and (remaining_iterations > 0):
        # save old estimates values
        previous_location = location
        previous_covariance = covariance
        previous_det = det
        previous_support = support
        # compute a new support from the full data set mahalanobis distances
        precision = pinvh(covariance)
        X_centered = X - location
        dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
        # compute new estimates
        support = np.zeros(n_samples, dtype=bool)
        support[np.argsort(dist)[:n_support]] = True
        X_support = X[support]
        location = X_support.mean(axis=0)
        covariance = cov_computation_method(X_support)
        det = fast_logdet(covariance)
        # update remaining iterations for early stopping
        remaining_iterations -= 1

    previous_dist = dist
    # NOTE(review): `precision` is the inverse of the covariance from the
    # last completed loop pass, while `location` is the newest estimate —
    # confirm this mix is intentional before changing.
    dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
    # Catch computation errors
    if np.isinf(det):
        raise ValueError(
            "Singular covariance matrix. "
            "Please check that the covariance matrix corresponding "
            "to the dataset is full rank and that MinCovDet is used with "
            "Gaussian-distributed data (or at least data drawn from a "
            "unimodal, symmetric distribution.")
    # Check convergence
    if np.allclose(det, previous_det):
        # c_step procedure converged
        if verbose:
            print("Optimal couple (location, covariance) found before"
                  " ending iterations (%d left)" % (remaining_iterations))
        results = location, covariance, det, support, dist
    elif det > previous_det:
        # determinant has increased (should not happen)
        warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
                      % (det, previous_det), RuntimeWarning)
        # fall back to the last estimates for which the determinant decreased
        results = previous_location, previous_covariance, \
            previous_det, previous_support, previous_dist

    # Check early stopping
    if remaining_iterations == 0:
        if verbose:
            print('Maximum number of iterations reached')
        results = location, covariance, det, support, dist

    return results


def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
                      verbose=False,
                      cov_computation_method=empirical_covariance,
                      random_state=None):
    """Finds the best pure subset of observations to compute MCD from it.

    The purpose of this function is to find the best sets of n_support
    observations with respect to a minimization of their covariance
    matrix determinant. Equivalently, it removes n_samples-n_support
    observations to construct what we call a pure data set (i.e. not
    containing outliers). The list of the observations of the pure
    data set is referred to as the `support`.

    Starting from a random support, the pure data set is found by the
    c_step procedure introduced by Rousseeuw and Van Driessen in
    [Rouseeuw1999]_.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Data (sub)set in which we look for the n_support purest observations.

    n_support : int, [(n + p + 1)/2] < n_support < n
        The number of samples the pure data set must contain.

    select : int, int > 0
        Number of best candidates results to return.

    n_trials : int, nb_trials > 0 or 2-tuple
        Number of different initial sets of observations from which to
        run the algorithm.
        Instead of giving a number of trials to perform, one can provide a
        list of initial estimates that will be used to iteratively run
        c_step procedures. In this case:
        - n_trials[0]: array-like, shape (n_trials, n_features)
          is the list of `n_trials` initial location estimates
        - n_trials[1]: array-like, shape (n_trials, n_features, n_features)
          is the list of `n_trials` initial covariances estimates

    n_iter : int, nb_iter > 0
        Maximum number of iterations for the c_step procedure.
        (2 is enough to be close to the final solution. "Never" exceeds 20).

    random_state : integer or numpy.RandomState, default None
        The random generator used. If an integer is given, it fixes the
        seed. Defaults to the global numpy random number generator.

    cov_computation_method : callable, default empirical_covariance
        The function which will be used to compute the covariance.
        Must return shape (n_features, n_features)

    verbose : boolean, default False
        Control the output verbosity.

    See Also
    ---------
    c_step

    Returns
    -------
    best_locations : array-like, shape (select, n_features)
        The `select` location estimates computed from the `select` best
        supports found in the data set (`X`).

    best_covariances : array-like, shape (select, n_features, n_features)
        The `select` covariance estimates computed from the `select` best
        supports found in the data set (`X`).

    best_supports : array-like, shape (select, n_samples)
        The `select` best supports found in the data set (`X`).

    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS

    """
    random_state = check_random_state(random_state)

    n_samples, n_features = X.shape

    # `n_trials` may be an int (number of random restarts) or a 2-tuple
    # of precomputed (locations, covariances) initial estimates.
    if isinstance(n_trials, numbers.Integral):
        run_from_estimates = False
    elif isinstance(n_trials, tuple):
        run_from_estimates = True
        estimates_list = n_trials
        n_trials = estimates_list[0].shape[0]
    else:
        raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
                        " integer, got %s (%s)" % (n_trials, type(n_trials)))

    # compute `n_trials` location and shape estimates candidates in the subset
    all_estimates = []
    if not run_from_estimates:
        # perform `n_trials` computations from random initial supports
        for j in range(n_trials):
            all_estimates.append(
                _c_step(
                    X, n_support, remaining_iterations=n_iter, verbose=verbose,
                    cov_computation_method=cov_computation_method,
                    random_state=random_state))
    else:
        # perform computations from every given initial estimates
        for j in range(n_trials):
            initial_estimates = (estimates_list[0][j], estimates_list[1][j])
            all_estimates.append(_c_step(
                X, n_support, remaining_iterations=n_iter,
                initial_estimates=initial_estimates, verbose=verbose,
                cov_computation_method=cov_computation_method,
                random_state=random_state))
    all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
        zip(*all_estimates)
    # find the `n_best` best results among the `n_trials` ones
    # (smallest determinants first)
    index_best = np.argsort(all_dets_sub)[:select]
    best_locations = np.asarray(all_locs_sub)[index_best]
    best_covariances = np.asarray(all_covs_sub)[index_best]
    best_supports = np.asarray(all_supports_sub)[index_best]
    best_ds = np.asarray(all_ds_sub)[index_best]

    return best_locations, best_covariances, best_supports, best_ds


def fast_mcd(X, support_fraction=None,
             cov_computation_method=empirical_covariance,
             random_state=None):
    """Estimates the Minimum Covariance Determinant matrix.

    Read more in the :ref:`User Guide <robust_covariance>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The data matrix, with p features and n samples.

    support_fraction : float, 0 < support_fraction < 1
        The proportion of points to be included in the support of the raw
        MCD estimate. Default is None, which implies that the minimum
        value of support_fraction will be used within the algorithm:
        `[n_sample + n_features + 1] / 2`.

    random_state : integer or numpy.RandomState, optional
        The generator used to randomly subsample. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    cov_computation_method : callable, default empirical_covariance
        The function which will be used to compute the covariance.
        Must return shape (n_features, n_features)

    Notes
    -----
    The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
    in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
    1999, American Statistical Association and the American Society
    for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates and random subsets before
    pooling them into a larger subsets, and finally into the full data set.
    Depending on the size of the initial sample, we have one, two or three
    such computation levels.

    Note that only raw estimates are returned. If one is interested in
    the correction and reweighting steps described in [Rouseeuw1999]_,
    see the MinCovDet object.

    References
    ----------

    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
        Determinant Estimator, 1999, American Statistical Association
        and the American Society for Quality, TECHNOMETRICS

    .. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
        Asymptotics For The Minimum Covariance Determinant Estimator,
        The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400

    Returns
    -------
    location : array-like, shape (n_features,)
        Robust location of the data.

    covariance : array-like, shape (n_features, n_features)
        Robust covariance of the features.

    support : array-like, type boolean, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust location and covariance estimates of the data set.

    """
    random_state = check_random_state(random_state)

    X = np.asarray(X)
    if X.ndim == 1:
        X = np.reshape(X, (1, -1))
        warnings.warn("Only one sample available. "
                      "You may want to reshape your data array")
    n_samples, n_features = X.shape

    # minimum breakdown value
    if support_fraction is None:
        n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
    else:
        n_support = int(support_fraction * n_samples)

    # 1-dimensional case quick computation
    # (Rousseeuw, P. J. and Leroy, A. M.
(2005) References, in Robust # Regression and Outlier Detection, John Wiley & Sons, chapter 4) if n_features == 1: if n_support < n_samples: # find the sample shortest halves X_sorted = np.sort(np.ravel(X)) diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)] halves_start = np.where(diff == np.min(diff))[0] # take the middle points' mean to get the robust location estimate location = 0.5 * (X_sorted[n_support + halves_start] + X_sorted[halves_start]).mean() support = np.zeros(n_samples, dtype=bool) X_centered = X - location support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True covariance = np.asarray([[np.var(X[support])]]) location = np.array([location]) # get precision matrix in an optimized way precision = pinvh(covariance) dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1) else: support = np.ones(n_samples, dtype=bool) covariance = np.asarray([[np.var(X)]]) location = np.asarray([np.mean(X)]) X_centered = X - location # get precision matrix in an optimized way precision = pinvh(covariance) dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1) # Starting FastMCD algorithm for p-dimensional case if (n_samples > 500) and (n_features > 1): # 1. Find candidate supports on subsets # a. split the set in subsets of size ~ 300 n_subsets = n_samples // 300 n_samples_subsets = n_samples // n_subsets samples_shuffle = random_state.permutation(n_samples) h_subset = int(np.ceil(n_samples_subsets * (n_support / float(n_samples)))) # b. perform a total of 500 trials n_trials_tot = 500 # c. select 10 best (location, covariance) for each subset n_best_sub = 10 n_trials = max(10, n_trials_tot // n_subsets) n_best_tot = n_subsets * n_best_sub all_best_locations = np.zeros((n_best_tot, n_features)) try: all_best_covariances = np.zeros((n_best_tot, n_features, n_features)) except MemoryError: # The above is too big. 
Let's try with something much small # (and less optimal) all_best_covariances = np.zeros((n_best_tot, n_features, n_features)) n_best_tot = 10 n_best_sub = 2 for i in range(n_subsets): low_bound = i * n_samples_subsets high_bound = low_bound + n_samples_subsets current_subset = X[samples_shuffle[low_bound:high_bound]] best_locations_sub, best_covariances_sub, _, _ = select_candidates( current_subset, h_subset, n_trials, select=n_best_sub, n_iter=2, cov_computation_method=cov_computation_method, random_state=random_state) subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub) all_best_locations[subset_slice] = best_locations_sub all_best_covariances[subset_slice] = best_covariances_sub # 2. Pool the candidate supports into a merged set # (possibly the full dataset) n_samples_merged = min(1500, n_samples) h_merged = int(np.ceil(n_samples_merged * (n_support / float(n_samples)))) if n_samples > 1500: n_best_merged = 10 else: n_best_merged = 1 # find the best couples (location, covariance) on the merged set selection = random_state.permutation(n_samples)[:n_samples_merged] locations_merged, covariances_merged, supports_merged, d = \ select_candidates( X[selection], h_merged, n_trials=(all_best_locations, all_best_covariances), select=n_best_merged, cov_computation_method=cov_computation_method, random_state=random_state) # 3. 
Finally get the overall best (locations, covariance) couple if n_samples < 1500: # directly get the best couple (location, covariance) location = locations_merged[0] covariance = covariances_merged[0] support = np.zeros(n_samples, dtype=bool) dist = np.zeros(n_samples) support[selection] = supports_merged[0] dist[selection] = d[0] else: # select the best couple on the full dataset locations_full, covariances_full, supports_full, d = \ select_candidates( X, n_support, n_trials=(locations_merged, covariances_merged), select=1, cov_computation_method=cov_computation_method, random_state=random_state) location = locations_full[0] covariance = covariances_full[0] support = supports_full[0] dist = d[0] elif n_features > 1: # 1. Find the 10 best couples (location, covariance) # considering two iterations n_trials = 30 n_best = 10 locations_best, covariances_best, _, _ = select_candidates( X, n_support, n_trials=n_trials, select=n_best, n_iter=2, cov_computation_method=cov_computation_method, random_state=random_state) # 2. Select the best couple on the full dataset amongst the 10 locations_full, covariances_full, supports_full, d = select_candidates( X, n_support, n_trials=(locations_best, covariances_best), select=1, cov_computation_method=cov_computation_method, random_state=random_state) location = locations_full[0] covariance = covariances_full[0] support = supports_full[0] dist = d[0] return location, covariance, support, dist class MinCovDet(EmpiricalCovariance): """Minimum Covariance Determinant (MCD): robust estimator of covariance. The Minimum Covariance Determinant covariance estimator is to be applied on Gaussian-distributed data, but could still be relevant on data drawn from a unimodal, symmetric distribution. It is not meant to be used with multi-modal data (the algorithm used to fit a MinCovDet object is likely to fail in such a case). One should consider projection pursuit methods to deal with multi-modal datasets. 
Read more in the :ref:`User Guide <robust_covariance>`. Parameters ---------- store_precision : bool Specify if the estimated precision is stored. assume_centered : Boolean If True, the support of the robust location and the covariance estimates is computed, and a covariance estimate is recomputed from it, without centering the data. Useful to work with data whose mean is significantly equal to zero but is not exactly zero. If False, the robust location and covariance are directly computed with the FastMCD algorithm without additional treatment. support_fraction : float, 0 < support_fraction < 1 The proportion of points to be included in the support of the raw MCD estimate. Default is None, which implies that the minimum value of support_fraction will be used within the algorithm: [n_sample + n_features + 1] / 2 random_state : integer or numpy.RandomState, optional The random generator used. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. Attributes ---------- raw_location_ : array-like, shape (n_features,) The raw robust estimated location before correction and re-weighting. raw_covariance_ : array-like, shape (n_features, n_features) The raw robust estimated covariance before correction and re-weighting. raw_support_ : array-like, shape (n_samples,) A mask of the observations that have been used to compute the raw robust estimates of location and shape, before correction and re-weighting. location_ : array-like, shape (n_features,) Estimated robust location covariance_ : array-like, shape (n_features, n_features) Estimated robust covariance matrix precision_ : array-like, shape (n_features, n_features) Estimated pseudo inverse matrix. (stored only if store_precision is True) support_ : array-like, shape (n_samples,) A mask of the observations that have been used to compute the robust estimates of location and shape. 
dist_ : array-like, shape (n_samples,) Mahalanobis distances of the training set (on which `fit` is called) observations. References ---------- .. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression. J. Am Stat Ass, 79:871, 1984.` .. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant Estimator, 1999, American Statistical Association and the American Society for Quality, TECHNOMETRICS` .. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun, Asymptotics For The Minimum Covariance Determinant Estimator, The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400` """ _nonrobust_covariance = staticmethod(empirical_covariance) def __init__(self, store_precision=True, assume_centered=False, support_fraction=None, random_state=None): self.store_precision = store_precision self.assume_centered = assume_centered self.support_fraction = support_fraction self.random_state = random_state def fit(self, X, y=None): """Fits a Minimum Covariance Determinant with the FastMCD algorithm. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data, where n_samples is the number of samples and n_features is the number of features. y : not used, present for API consistence purpose. Returns ------- self : object Returns self. 
""" X = check_array(X) random_state = check_random_state(self.random_state) n_samples, n_features = X.shape # check that the empirical covariance is full rank if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features: warnings.warn("The covariance matrix associated to your dataset " "is not full rank") # compute and store raw estimates raw_location, raw_covariance, raw_support, raw_dist = fast_mcd( X, support_fraction=self.support_fraction, cov_computation_method=self._nonrobust_covariance, random_state=random_state) if self.assume_centered: raw_location = np.zeros(n_features) raw_covariance = self._nonrobust_covariance(X[raw_support], assume_centered=True) # get precision matrix in an optimized way precision = pinvh(raw_covariance) raw_dist = np.sum(np.dot(X, precision) * X, 1) self.raw_location_ = raw_location self.raw_covariance_ = raw_covariance self.raw_support_ = raw_support self.location_ = raw_location self.support_ = raw_support self.dist_ = raw_dist # obtain consistency at normal models self.correct_covariance(X) # re-weight estimator self.reweight_covariance(X) return self def correct_covariance(self, data): """Apply a correction to raw Minimum Covariance Determinant estimates. Correction using the empirical correction factor suggested by Rousseeuw and Van Driessen in [Rouseeuw1984]_. Parameters ---------- data : array-like, shape (n_samples, n_features) The data matrix, with p features and n samples. The data set must be the one which was used to compute the raw estimates. Returns ------- covariance_corrected : array-like, shape (n_features, n_features) Corrected robust covariance estimate. """ correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5) covariance_corrected = self.raw_covariance_ * correction self.dist_ /= correction return covariance_corrected def reweight_covariance(self, data): """Re-weight raw Minimum Covariance Determinant estimates. 
Re-weight observations using Rousseeuw's method (equivalent to deleting outlying observations from the data set before computing location and covariance estimates). [Rouseeuw1984]_ Parameters ---------- data : array-like, shape (n_samples, n_features) The data matrix, with p features and n samples. The data set must be the one which was used to compute the raw estimates. Returns ------- location_reweighted : array-like, shape (n_features, ) Re-weighted robust location estimate. covariance_reweighted : array-like, shape (n_features, n_features) Re-weighted robust covariance estimate. support_reweighted : array-like, type boolean, shape (n_samples,) A mask of the observations that have been used to compute the re-weighted robust location and covariance estimates. """ n_samples, n_features = data.shape mask = self.dist_ < chi2(n_features).isf(0.025) if self.assume_centered: location_reweighted = np.zeros(n_features) else: location_reweighted = data[mask].mean(0) covariance_reweighted = self._nonrobust_covariance( data[mask], assume_centered=self.assume_centered) support_reweighted = np.zeros(n_samples, dtype=bool) support_reweighted[mask] = True self._set_covariance(covariance_reweighted) self.location_ = location_reweighted self.support_ = support_reweighted X_centered = data - self.location_ self.dist_ = np.sum( np.dot(X_centered, self.get_precision()) * X_centered, 1) return location_reweighted, covariance_reweighted, support_reweighted
bsd-3-clause
briehl/narrative
src/biokbase/narrative/tests/test_viewers.py
1
5395
import unittest import biokbase.auth from . import util """ Tests for the viewer module """ __author__ = "James Jeffryes <jjeffryes@mcs.anl.gov>" @unittest.skip("Skipping clustergrammer-based tests") class ViewersTestCase(unittest.TestCase): @classmethod def setUpClass(cls): cls.attribute_set_ref = "36095/73/1" cls.generic_ref = "36095/74/1" cls.expression_matrix_ref = "28852/11/1" config = util.TestConfig() cls.user_id = config.get("users", "test_user") cls.user_token = util.read_token_file( config.get_path("token_files", "test_user", from_root=True) ) if cls.user_token: biokbase.auth.set_environ_token(cls.user_token) else: raise unittest.SkipTest("No Token") def test_bad_view_as_clustergrammer_params(self): from biokbase.narrative import viewers with self.assertRaises(AssertionError): viewers.view_as_clustergrammer(self.generic_ref, col_categories="Time") with self.assertRaises(AssertionError): viewers.view_as_clustergrammer(self.generic_ref, row_categories="Time") with self.assertRaises(AssertionError): viewers.view_as_clustergrammer(self.generic_ref, normalize_on="Time") with self.assertRaisesRegex(ValueError, "not a compatible data type"): viewers.view_as_clustergrammer(self.attribute_set_ref) def test__get_categories(self): import pandas as pd from biokbase.narrative import viewers ids = ["WRI_RS00010_CDS_1", "WRI_RS00015_CDS_1", "WRI_RS00025_CDS_1"] mapping = { "WRI_RS00010_CDS_1": "test_row_instance_1", "WRI_RS00015_CDS_1": "test_row_instance_2", "WRI_RS00025_CDS_1": "test_row_instance_3", } index = [ ( "WRI_RS00010_CDS_1", "test_attribute_1: 1", "test_attribute_2: 4", "test_attribute_3: 7", ), ( "WRI_RS00015_CDS_1", "test_attribute_1: 2", "test_attribute_2: 5", "test_attribute_3: 8", ), ( "WRI_RS00025_CDS_1", "test_attribute_1: 3", "test_attribute_2: 6", "test_attribute_3: 9", ), ] filtered_index = [ ("WRI_RS00010_CDS_1", "test_attribute_1: 1"), ("WRI_RS00015_CDS_1", "test_attribute_1: 2"), ("WRI_RS00025_CDS_1", "test_attribute_1: 3"), ] multi_index = 
pd.MultiIndex( levels=[ ["WRI_RS00010_CDS_1", "WRI_RS00015_CDS_1", "WRI_RS00025_CDS_1"], ["1", "2", "3"], ], labels=[[0, 1, 2], [0, 1, 2]], names=["ID", "test_attribute_1"], ) self.assertEqual(ids, viewers._get_categories(ids, self.generic_ref)) with self.assertRaisesRegex(ValueError, "not in the provided mapping"): viewers._get_categories( ["boo"], self.generic_ref, self.attribute_set_ref, mapping ) with self.assertRaisesRegex(ValueError, "has no attribute"): viewers._get_categories(["boo"], self.generic_ref, self.attribute_set_ref) self.assertEqual( index, viewers._get_categories( ids, self.generic_ref, self.attribute_set_ref, mapping, clustergrammer=True, ), ) pd.testing.assert_index_equal( multi_index, viewers._get_categories( ids, self.generic_ref, self.attribute_set_ref, mapping, {"test_attribute_1"}, ), ) self.assertEqual( filtered_index, viewers._get_categories( ids, self.generic_ref, self.attribute_set_ref, mapping, {"test_attribute_1"}, clustergrammer=True, ), ) def test_get_df(self): import pandas as pd from biokbase.narrative import viewers res = viewers.get_df(self.generic_ref) self.assertIsInstance(res, pd.DataFrame) self.assertEqual(res.shape, (3, 4)) self.assertIsInstance(res.index, pd.MultiIndex) res = viewers.get_df(self.generic_ref, None, None) self.assertIsInstance(res, pd.DataFrame) self.assertEqual(res.shape, (3, 4)) self.assertIsInstance(res.index, pd.Index) res = viewers.get_df(self.generic_ref, clustergrammer=True) self.assertIsInstance(res, pd.DataFrame) self.assertEqual(res.shape, (3, 4)) self.assertIsInstance(res.index, pd.Index) res = viewers.get_df(self.expression_matrix_ref) self.assertIsInstance(res, pd.DataFrame) self.assertEqual(res.shape, (4297, 16)) self.assertIsInstance(res.index, pd.Index) def test_view_as_clustergrammer(self): from biokbase.narrative import viewers self.assertEqual( str(type(viewers.view_as_clustergrammer(self.generic_ref))), "<class 'clustergrammer_widget.example.clustergrammer_widget'>", )
mit
danuker/trading-with-python
nautilus/nautilus.py
77
5403
''' Created on 26 dec. 2011 Copyright: Jev Kuznetsov License: BSD ''' from PyQt4.QtCore import * from PyQt4.QtGui import * from ib.ext.Contract import Contract from ib.opt import ibConnection from ib.ext.Order import Order import tradingWithPython.lib.logger as logger from tradingWithPython.lib.eventSystem import Sender, ExampleListener import tradingWithPython.lib.qtpandas as qtpandas import numpy as np import pandas priceTicks = {1:'bid',2:'ask',4:'last',6:'high',7:'low',9:'close', 14:'open'} class PriceListener(qtpandas.DataFrameModel): def __init__(self): super(PriceListener,self).__init__() self._header = ['position','bid','ask','last'] def addSymbol(self,symbol): data = dict(zip(self._header,[0,np.nan,np.nan,np.nan])) row = pandas.DataFrame(data, index = pandas.Index([symbol])) self.df = self.df.append(row[self._header]) # append data and set correct column order def priceHandler(self,sender,event,msg=None): if msg['symbol'] not in self.df.index: self.addSymbol(msg['symbol']) if msg['type'] in self._header: self.df.ix[msg['symbol'],msg['type']] = msg['price'] self.signalUpdate() #print self.df class Broker(Sender): def __init__(self, name = "broker"): super(Broker,self).__init__() self.name = name self.log = logger.getLogger(self.name) self.log.debug('Initializing broker. 
Pandas version={0}'.format(pandas.__version__)) self.contracts = {} # a dict to keep track of subscribed contracts self._id2symbol = {} # id-> symbol dict self.tws = None self._nextId = 1 # tws subscription id self.nextValidOrderId = None def connect(self): """ connect to tws """ self.tws = ibConnection() # tws interface self.tws.registerAll(self._defaultHandler) self.tws.register(self._nextValidIdHandler,'NextValidId') self.log.debug('Connecting to tws') self.tws.connect() self.tws.reqAccountUpdates(True,'') self.tws.register(self._priceHandler,'TickPrice') def subscribeStk(self,symbol, secType='STK', exchange='SMART',currency='USD'): ''' subscribe to stock data ''' self.log.debug('Subscribing to '+symbol) c = Contract() c.m_symbol = symbol c.m_secType = secType c.m_exchange = exchange c.m_currency = currency subId = self._nextId self._nextId += 1 self.tws.reqMktData(subId,c,'',False) self._id2symbol[subId] = c.m_symbol self.contracts[symbol]=c def disconnect(self): self.tws.disconnect() #------event handlers-------------------- def _defaultHandler(self,msg): ''' default message handler ''' #print msg.typeName if msg.typeName == 'Error': self.log.error(msg) def _nextValidIdHandler(self,msg): self.nextValidOrderId = msg.orderId self.log.debug( 'Next valid order id:{0}'.format(self.nextValidOrderId)) def _priceHandler(self,msg): #translate to meaningful messages message = {'symbol':self._id2symbol[msg.tickerId], 'price':msg.price, 'type':priceTicks[msg.field]} self.dispatch('price',message) #-----------------GUI elements------------------------- class TableView(QTableView): """ extended table view """ def __init__(self,name='TableView1', parent=None): super(TableView,self).__init__(parent) self.name = name self.setSelectionBehavior(QAbstractItemView.SelectRows) def contextMenuEvent(self, event): menu = QMenu(self) Action = menu.addAction("print selected rows") Action.triggered.connect(self.printName) menu.exec_(event.globalPos()) def printName(self): print "Action 
triggered from " + self.name print 'Selected :' for idx in self.selectionModel().selectedRows(): print self.model().df.ix[idx.row(),:] class Form(QDialog): def __init__(self,parent=None): super(Form,self).__init__(parent) self.broker = Broker() self.price = PriceListener() self.broker.connect() symbols = ['SPY','XLE','QQQ','VXX','XIV'] for symbol in symbols: self.broker.subscribeStk(symbol) self.broker.register(self.price.priceHandler, 'price') widget = TableView(parent=self) widget.setModel(self.price) widget.horizontalHeader().setResizeMode(QHeaderView.Stretch) layout = QVBoxLayout() layout.addWidget(widget) self.setLayout(layout) def __del__(self): print 'Disconnecting.' self.broker.disconnect() if __name__=="__main__": print "Running nautilus" import sys app = QApplication(sys.argv) form = Form() form.show() app.exec_() print "All done."
bsd-3-clause
mfjb/scikit-learn
examples/linear_model/plot_polynomial_interpolation.py
251
1895
#!/usr/bin/env python """ ======================== Polynomial interpolation ======================== This example demonstrates how to approximate a function with a polynomial of degree n_degree by using ridge regression. Concretely, from n_samples 1d points, it suffices to build the Vandermonde matrix, which is n_samples x n_degree+1 and has the following form: [[1, x_1, x_1 ** 2, x_1 ** 3, ...], [1, x_2, x_2 ** 2, x_2 ** 3, ...], ...] Intuitively, this matrix can be interpreted as a matrix of pseudo features (the points raised to some power). The matrix is akin to (but different from) the matrix induced by a polynomial kernel. This example shows that you can do non-linear regression with a linear model, using a pipeline to add non-linear features. Kernel methods extend this idea and can induce very high (even infinite) dimensional feature spaces. """ print(__doc__) # Author: Mathieu Blondel # Jake Vanderplas # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import Ridge from sklearn.preprocessing import PolynomialFeatures from sklearn.pipeline import make_pipeline def f(x): """ function to approximate by polynomial interpolation""" return x * np.sin(x) # generate points used to plot x_plot = np.linspace(0, 10, 100) # generate points and keep a subset of them x = np.linspace(0, 10, 100) rng = np.random.RandomState(0) rng.shuffle(x) x = np.sort(x[:20]) y = f(x) # create matrix versions of these arrays X = x[:, np.newaxis] X_plot = x_plot[:, np.newaxis] plt.plot(x_plot, f(x_plot), label="ground truth") plt.scatter(x, y, label="training points") for degree in [3, 4, 5]: model = make_pipeline(PolynomialFeatures(degree), Ridge()) model.fit(X, y) y_plot = model.predict(X_plot) plt.plot(x_plot, y_plot, label="degree %d" % degree) plt.legend(loc='lower left') plt.show()
bsd-3-clause
bartslinger/paparazzi
sw/misc/attitude_reference/test_att_ref.py
49
3485
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2014 Antoine Drouin # # This file is part of paparazzi. # # paparazzi is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # paparazzi is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with paparazzi; see the file COPYING. If not, write to # the Free Software Foundation, 59 Temple Place - Suite 330, # Boston, MA 02111-1307, USA. # import math import numpy as np import scipy.signal import matplotlib.pyplot as plt import pat.utils as pu import pat.algebra as pa import control as ctl def random_setpoint(time, dt_step=2): tf = time[0] sp = np.zeros((len(time), 3)) sp_i = [0, 0, 0] for i in range(0, len(time)): if time[i] >= tf: ui = np.random.rand(3) - [0.5, 0.5, 0.5]; ai = np.random.rand(1) n = np.linalg.norm(ui) if n > 0: ui /= n sp_i = pa.euler_of_quat(pa.quat_of_axis_angle(ui, ai)) tf += dt_step sp[i] = sp_i return sp def test_ref(r, time, setpoint): ref = np.zeros((len(time), 9)) for i in range(1, time.size): sp_quat = pa.quat_of_euler(setpoint[i]) r.update_quat(sp_quat, time[i] - time[i - 1]) euler = pa.euler_of_quat(r.quat) ref[i] = np.concatenate((euler, r.vel, r.accel)) return ref def plot_ref(time, xref=None, sp=None, figure=None): margins = (0.05, 0.05, 0.98, 0.96, 0.20, 0.34) figure = pu.prepare_fig(figure, window_title='Reference', figsize=(20.48, 10.24), margins=margins) plots = [("$\phi$", "deg"), ("$\\theta$", "deg"), ("$\\psi$", "deg"), ("$p$", "deg/s"), ("$q$", "deg/s"), ("$r$", "deg/s"), ("$\dot{p}$", "deg/s2"), ("$\dot{q}$", "deg/s2"), ("$\dot{r}$", "deg/s2")] for i, 
(title, ylab) in enumerate(plots): ax = plt.subplot(3, 3, i + 1) if xref is not None: plt.plot(time, pu.deg_of_rad(xref[:, i])) pu.decorate(ax, title=title, ylab=ylab) if sp is not None and i < 3: plt.plot(time, pu.deg_of_rad(sp[:, i])) return figure dt = 1. / 512. time = np.arange(0., 4, dt) sp = np.zeros((len(time), 3)) sp[:, 0] = pu.rad_of_deg(45.) * scipy.signal.square(math.pi / 2 * time + math.pi) # sp[:, 1] = pu.rad_of_deg(5.)*scipy.signal.square(math.pi/2*time) # sp[:, 2] = pu.rad_of_deg(45.) # sp = random_setpoint(time) # rs = [ctl.att_ref_analytic_disc(axis=0), ctl.att_ref_analytic_cont(axis=0), ctl.att_ref_default()] args = {'omega': 10., 'xi': 0.7, 'sat_vel': pu.rad_of_deg(150.), 'sat_accel': pu.rad_of_deg(1800), 'sat_jerk': pu.rad_of_deg(27000)} rs = [ctl.att_ref_sat_naive(**args), ctl.att_ref_sat_nested(**args), ctl.att_ref_sat_nested2(**args)] # rs.append(ctl.AttRefIntNative(**args)) rs.append(ctl.AttRefFloatNative(**args)) xrs = [test_ref(r, time, sp) for r in rs] figure = None for xr in xrs: figure = plot_ref(time, xr, None, figure) figure = plot_ref(time, None, sp, figure) legends = [r.name for r in rs] + ['Setpoint'] plt.subplot(3, 3, 3) plt.legend(legends) plt.show()
gpl-2.0
vlukes/sfepy
examples/large_deformation/compare_elastic_materials.py
5
6888
#!/usr/bin/env python """ Compare various elastic materials w.r.t. uniaxial tension/compression test. Requires Matplotlib. """ from __future__ import absolute_import from argparse import ArgumentParser, RawDescriptionHelpFormatter import sys import six sys.path.append('.') import numpy as nm def define(): """Define the problem to solve.""" from sfepy.discrete.fem.meshio import UserMeshIO from sfepy.mesh.mesh_generators import gen_block_mesh from sfepy.mechanics.matcoefs import stiffness_from_lame def mesh_hook(mesh, mode): """ Generate the block mesh. """ if mode == 'read': mesh = gen_block_mesh([2, 2, 3], [2, 2, 4], [0, 0, 1.5], name='el3', verbose=False) return mesh elif mode == 'write': pass filename_mesh = UserMeshIO(mesh_hook) options = { 'nls' : 'newton', 'ls' : 'ls', 'ts' : 'ts', 'save_times' : 'all', } functions = { 'linear_tension' : (linear_tension,), 'linear_compression' : (linear_compression,), 'empty' : (lambda ts, coor, mode, region, ig: None,), } fields = { 'displacement' : ('real', 3, 'Omega', 1), } # Coefficients are chosen so that the tangent stiffness is the same for all # material for zero strains. 
# Young modulus = 10 kPa, Poisson's ratio = 0.3 materials = { 'solid' : ({ 'K' : 8.333, # bulk modulus 'mu_nh' : 3.846, # shear modulus of neoHookean term 'mu_mr' : 1.923, # shear modulus of Mooney-Rivlin term 'kappa' : 1.923, # second modulus of Mooney-Rivlin term # elasticity for LE term 'D' : stiffness_from_lame(dim=3, lam=5.769, mu=3.846), },), 'load' : 'empty', } variables = { 'u' : ('unknown field', 'displacement', 0), 'v' : ('test field', 'displacement', 'u'), } regions = { 'Omega' : 'all', 'Bottom' : ('vertices in (z < 0.1)', 'facet'), 'Top' : ('vertices in (z > 2.9)', 'facet'), } ebcs = { 'fixb' : ('Bottom', {'u.all' : 0.0}), 'fixt' : ('Top', {'u.[0,1]' : 0.0}), } integrals = { 'i' : 1, 'isurf' : 2, } equations = { 'linear' : """dw_lin_elastic.i.Omega(solid.D, v, u) = dw_surface_ltr.isurf.Top(load.val, v)""", 'neo-Hookean' : """dw_tl_he_neohook.i.Omega(solid.mu_nh, v, u) + dw_tl_bulk_penalty.i.Omega(solid.K, v, u) = dw_surface_ltr.isurf.Top(load.val, v)""", 'Mooney-Rivlin' : """dw_tl_he_neohook.i.Omega(solid.mu_mr, v, u) + dw_tl_he_mooney_rivlin.i.Omega(solid.kappa, v, u) + dw_tl_bulk_penalty.i.Omega(solid.K, v, u) = dw_surface_ltr.isurf.Top(load.val, v)""", } solvers = { 'ls' : ('ls.scipy_direct', {}), 'newton' : ('nls.newton', { 'i_max' : 5, 'eps_a' : 1e-10, 'eps_r' : 1.0, }), 'ts' : ('ts.simple', { 't0' : 0, 't1' : 1, 'dt' : None, 'n_step' : 101, # has precedence over dt! 'verbose' : 1, }), } return locals() ## # Pressure tractions. def linear_tension(ts, coor, mode=None, **kwargs): if mode == 'qp': val = nm.tile(0.1 * ts.step, (coor.shape[0], 1, 1)) return {'val' : val} def linear_compression(ts, coor, mode=None, **kwargs): if mode == 'qp': val = nm.tile(-0.1 * ts.step, (coor.shape[0], 1, 1)) return {'val' : val} def store_top_u(displacements): """Function _store() will be called at the end of each loading step. 
Top displacements will be stored into `displacements`.""" def _store(problem, ts, state): top = problem.domain.regions['Top'] top_u = problem.get_variables()['u'].get_state_in_region(top) displacements.append(nm.mean(top_u[:,-1])) return _store def solve_branch(problem, branch_function): displacements = {} for key, eq in six.iteritems(problem.conf.equations): problem.set_equations({key : eq}) load = problem.get_materials()['load'] load.set_function(branch_function) out = [] problem.solve(save_results=False, step_hook=store_top_u(out)) displacements[key] = nm.array(out, dtype=nm.float64) return displacements helps = { 'no_plot' : 'do not show plot window', } def main(): from sfepy.base.base import output from sfepy.base.conf import ProblemConf, get_standard_keywords from sfepy.discrete import Problem from sfepy.base.plotutils import plt parser = ArgumentParser(description=__doc__, formatter_class=RawDescriptionHelpFormatter) parser.add_argument('--version', action='version', version='%(prog)s') parser.add_argument('-n', '--no-plot', action="store_true", dest='no_plot', default=False, help=helps['no_plot']) options = parser.parse_args() required, other = get_standard_keywords() # Use this file as the input file. conf = ProblemConf.from_file(__file__, required, other) # Create problem instance, but do not set equations. problem = Problem.from_conf(conf, init_equations=False) # Solve the problem. Output is ignored, results stored by using the # step_hook. u_t = solve_branch(problem, linear_tension) u_c = solve_branch(problem, linear_compression) # Get pressure load by calling linear_*() for each time step. ts = problem.get_timestepper() load_t = nm.array([linear_tension(ts, nm.array([[0.0]]), 'qp')['val'] for aux in ts.iter_from(0)], dtype=nm.float64).squeeze() load_c = nm.array([linear_compression(ts, nm.array([[0.0]]), 'qp')['val'] for aux in ts.iter_from(0)], dtype=nm.float64).squeeze() # Join the branches. 
displacements = {} for key in u_t.keys(): displacements[key] = nm.r_[u_c[key][::-1], u_t[key]] load = nm.r_[load_c[::-1], load_t] if plt is None: output('matplotlib cannot be imported, printing raw data!') output(displacements) output(load) else: legend = [] for key, val in six.iteritems(displacements): plt.plot(load, val) legend.append(key) plt.legend(legend, loc = 2) plt.xlabel('tension [kPa]') plt.ylabel('displacement [mm]') plt.grid(True) plt.gcf().savefig('pressure_displacement.png') if not options.no_plot: plt.show() if __name__ == '__main__': main()
bsd-3-clause
bgris/ODL_bgris
lib/python3.5/site-packages/matplotlib/backends/backend_webagg.py
10
12190
""" Displays Agg images in the browser, with interactivity """ from __future__ import (absolute_import, division, print_function, unicode_literals) # The WebAgg backend is divided into two modules: # # - `backend_webagg_core.py` contains code necessary to embed a WebAgg # plot inside of a web application, and communicate in an abstract # way over a web socket. # # - `backend_webagg.py` contains a concrete implementation of a basic # application, implemented with tornado. import six import datetime import errno import json import os import random import sys import socket import threading try: import tornado except ImportError: raise RuntimeError("The WebAgg backend requires Tornado.") import tornado.web import tornado.ioloop import tornado.websocket import matplotlib from matplotlib import rcParams from matplotlib import backend_bases from matplotlib.figure import Figure from matplotlib._pylab_helpers import Gcf from . import backend_webagg_core as core from .backend_webagg_core import TimerTornado def new_figure_manager(num, *args, **kwargs): """ Create a new figure manager instance """ FigureClass = kwargs.pop('FigureClass', Figure) thisFig = FigureClass(*args, **kwargs) return new_figure_manager_given_figure(num, thisFig) def new_figure_manager_given_figure(num, figure): """ Create a new figure manager instance for the given figure. 
""" canvas = FigureCanvasWebAgg(figure) manager = core.FigureManagerWebAgg(canvas, num) return manager def draw_if_interactive(): """ Is called after every pylab drawing command """ if matplotlib.is_interactive(): figManager = Gcf.get_active() if figManager is not None: figManager.canvas.draw_idle() class Show(backend_bases.ShowBase): def mainloop(self): WebAggApplication.initialize() url = "http://127.0.0.1:{port}{prefix}".format( port=WebAggApplication.port, prefix=WebAggApplication.url_prefix) if rcParams['webagg.open_in_browser']: import webbrowser webbrowser.open(url) else: print("To view figure, visit {0}".format(url)) WebAggApplication.start() show = Show().mainloop class ServerThread(threading.Thread): def run(self): tornado.ioloop.IOLoop.instance().start() webagg_server_thread = ServerThread() class FigureCanvasWebAgg(core.FigureCanvasWebAggCore): def show(self): # show the figure window show() def new_timer(self, *args, **kwargs): return TimerTornado(*args, **kwargs) def start_event_loop(self, timeout): backend_bases.FigureCanvasBase.start_event_loop_default( self, timeout) start_event_loop.__doc__ = \ backend_bases.FigureCanvasBase.start_event_loop_default.__doc__ def stop_event_loop(self): backend_bases.FigureCanvasBase.stop_event_loop_default(self) stop_event_loop.__doc__ = \ backend_bases.FigureCanvasBase.stop_event_loop_default.__doc__ class WebAggApplication(tornado.web.Application): initialized = False started = False class FavIcon(tornado.web.RequestHandler): def get(self): image_path = os.path.join( os.path.dirname(os.path.dirname(__file__)), 'mpl-data', 'images') self.set_header('Content-Type', 'image/png') with open(os.path.join(image_path, 'matplotlib.png'), 'rb') as fd: self.write(fd.read()) class SingleFigurePage(tornado.web.RequestHandler): def __init__(self, application, request, **kwargs): self.url_prefix = kwargs.pop('url_prefix', '') return tornado.web.RequestHandler.__init__(self, application, request, **kwargs) def get(self, fignum): 
fignum = int(fignum) manager = Gcf.get_fig_manager(fignum) ws_uri = 'ws://{req.host}{prefix}/'.format(req=self.request, prefix=self.url_prefix) self.render( "single_figure.html", prefix=self.url_prefix, ws_uri=ws_uri, fig_id=fignum, toolitems=core.NavigationToolbar2WebAgg.toolitems, canvas=manager.canvas) class AllFiguresPage(tornado.web.RequestHandler): def __init__(self, application, request, **kwargs): self.url_prefix = kwargs.pop('url_prefix', '') return tornado.web.RequestHandler.__init__(self, application, request, **kwargs) def get(self): ws_uri = 'ws://{req.host}{prefix}/'.format(req=self.request, prefix=self.url_prefix) self.render( "all_figures.html", prefix=self.url_prefix, ws_uri=ws_uri, figures=sorted( list(Gcf.figs.items()), key=lambda item: item[0]), toolitems=core.NavigationToolbar2WebAgg.toolitems) class MplJs(tornado.web.RequestHandler): def get(self): self.set_header('Content-Type', 'application/javascript') js_content = core.FigureManagerWebAgg.get_javascript() self.write(js_content) class Download(tornado.web.RequestHandler): def get(self, fignum, fmt): fignum = int(fignum) manager = Gcf.get_fig_manager(fignum) # TODO: Move this to a central location mimetypes = { 'ps': 'application/postscript', 'eps': 'application/postscript', 'pdf': 'application/pdf', 'svg': 'image/svg+xml', 'png': 'image/png', 'jpeg': 'image/jpeg', 'tif': 'image/tiff', 'emf': 'application/emf' } self.set_header('Content-Type', mimetypes.get(fmt, 'binary')) buff = six.BytesIO() manager.canvas.print_figure(buff, format=fmt) self.write(buff.getvalue()) class WebSocket(tornado.websocket.WebSocketHandler): supports_binary = True def open(self, fignum): self.fignum = int(fignum) self.manager = Gcf.get_fig_manager(self.fignum) self.manager.add_web_socket(self) if hasattr(self, 'set_nodelay'): self.set_nodelay(True) def on_close(self): self.manager.remove_web_socket(self) def on_message(self, message): message = json.loads(message) # The 'supports_binary' message is on a 
client-by-client # basis. The others affect the (shared) canvas as a # whole. if message['type'] == 'supports_binary': self.supports_binary = message['value'] else: manager = Gcf.get_fig_manager(self.fignum) # It is possible for a figure to be closed, # but a stale figure UI is still sending messages # from the browser. if manager is not None: manager.handle_json(message) def send_json(self, content): self.write_message(json.dumps(content)) def send_binary(self, blob): if self.supports_binary: self.write_message(blob, binary=True) else: data_uri = "data:image/png;base64,{0}".format( blob.encode('base64').replace('\n', '')) self.write_message(data_uri) def __init__(self, url_prefix=''): if url_prefix: assert url_prefix[0] == '/' and url_prefix[-1] != '/', \ 'url_prefix must start with a "/" and not end with one.' super(WebAggApplication, self).__init__( [ # Static files for the CSS and JS (url_prefix + r'/_static/(.*)', tornado.web.StaticFileHandler, {'path': core.FigureManagerWebAgg.get_static_file_path()}), # An MPL favicon (url_prefix + r'/favicon.ico', self.FavIcon), # The page that contains all of the pieces (url_prefix + r'/([0-9]+)', self.SingleFigurePage, {'url_prefix': url_prefix}), # The page that contains all of the figures (url_prefix + r'/?', self.AllFiguresPage, {'url_prefix': url_prefix}), (url_prefix + r'/mpl.js', self.MplJs), # Sends images and events to the browser, and receives # events from the browser (url_prefix + r'/([0-9]+)/ws', self.WebSocket), # Handles the downloading (i.e., saving) of static images (url_prefix + r'/([0-9]+)/download.([a-z0-9.]+)', self.Download), ], template_path=core.FigureManagerWebAgg.get_static_file_path()) @classmethod def initialize(cls, url_prefix='', port=None): if cls.initialized: return # Create the class instance app = cls(url_prefix=url_prefix) cls.url_prefix = url_prefix # This port selection algorithm is borrowed, more or less # verbatim, from IPython. 
def random_ports(port, n): """ Generate a list of n random ports near the given port. The first 5 ports will be sequential, and the remaining n-5 will be randomly selected in the range [port-2*n, port+2*n]. """ for i in range(min(5, n)): yield port + i for i in range(n - 5): yield port + random.randint(-2 * n, 2 * n) success = None cls.port = rcParams['webagg.port'] for port in random_ports(cls.port, rcParams['webagg.port_retries']): try: app.listen(port) except socket.error as e: if e.errno != errno.EADDRINUSE: raise else: cls.port = port success = True break if not success: raise SystemExit( "The webagg server could not be started because an available " "port could not be found") cls.initialized = True @classmethod def start(cls): if cls.started: return # Set the flag to True *before* blocking on IOLoop.instance().start() cls.started = True """ IOLoop.running() was removed as of Tornado 2.4; see for example https://groups.google.com/forum/#!topic/python-tornado/QLMzkpQBGOY Thus there is no correct way to check if the loop has already been launched. We may end up with two concurrently running loops in that unlucky case with all the expected consequences. """ print("Press Ctrl+C to stop WebAgg server") sys.stdout.flush() try: tornado.ioloop.IOLoop.instance().start() except KeyboardInterrupt: print("Server is stopped") sys.stdout.flush() finally: cls.started = False def ipython_inline_display(figure): import tornado.template WebAggApplication.initialize() if not webagg_server_thread.is_alive(): webagg_server_thread.start() with open(os.path.join( core.FigureManagerWebAgg.get_static_file_path(), 'ipython_inline_figure.html')) as fd: tpl = fd.read() fignum = figure.number t = tornado.template.Template(tpl) return t.generate( prefix=WebAggApplication.url_prefix, fig_id=fignum, toolitems=core.NavigationToolbar2WebAgg.toolitems, canvas=figure.canvas, port=WebAggApplication.port).decode('utf-8') FigureCanvas = FigureCanvasWebAgg
gpl-3.0
nrhine1/scikit-learn
sklearn/utils/tests/test_random.py
230
7344
from __future__ import division import numpy as np import scipy.sparse as sp from scipy.misc import comb as combinations from numpy.testing import assert_array_almost_equal from sklearn.utils.random import sample_without_replacement from sklearn.utils.random import random_choice_csc from sklearn.utils.testing import ( assert_raises, assert_equal, assert_true) ############################################################################### # test custom sampling without replacement algorithm ############################################################################### def test_invalid_sample_without_replacement_algorithm(): assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown") def test_sample_without_replacement_algorithms(): methods = ("auto", "tracking_selection", "reservoir_sampling", "pool") for m in methods: def sample_without_replacement_method(n_population, n_samples, random_state=None): return sample_without_replacement(n_population, n_samples, method=m, random_state=random_state) check_edge_case_of_sample_int(sample_without_replacement_method) check_sample_int(sample_without_replacement_method) check_sample_int_distribution(sample_without_replacement_method) def check_edge_case_of_sample_int(sample_without_replacement): # n_poluation < n_sample assert_raises(ValueError, sample_without_replacement, 0, 1) assert_raises(ValueError, sample_without_replacement, 1, 2) # n_population == n_samples assert_equal(sample_without_replacement(0, 0).shape, (0, )) assert_equal(sample_without_replacement(1, 1).shape, (1, )) # n_population >= n_samples assert_equal(sample_without_replacement(5, 0).shape, (0, )) assert_equal(sample_without_replacement(5, 1).shape, (1, )) # n_population < 0 or n_samples < 0 assert_raises(ValueError, sample_without_replacement, -1, 5) assert_raises(ValueError, sample_without_replacement, 5, -1) def check_sample_int(sample_without_replacement): # This test is heavily inspired from test_random.py of python-core. 
# # For the entire allowable range of 0 <= k <= N, validate that # the sample is of the correct length and contains only unique items n_population = 100 for n_samples in range(n_population + 1): s = sample_without_replacement(n_population, n_samples) assert_equal(len(s), n_samples) unique = np.unique(s) assert_equal(np.size(unique), n_samples) assert_true(np.all(unique < n_population)) # test edge case n_population == n_samples == 0 assert_equal(np.size(sample_without_replacement(0, 0)), 0) def check_sample_int_distribution(sample_without_replacement): # This test is heavily inspired from test_random.py of python-core. # # For the entire allowable range of 0 <= k <= N, validate that # sample generates all possible permutations n_population = 10 # a large number of trials prevents false negatives without slowing normal # case n_trials = 10000 for n_samples in range(n_population): # Counting the number of combinations is not as good as counting the # the number of permutations. However, it works with sampling algorithm # that does not provide a random permutation of the subset of integer. 
n_expected = combinations(n_population, n_samples, exact=True) output = {} for i in range(n_trials): output[frozenset(sample_without_replacement(n_population, n_samples))] = None if len(output) == n_expected: break else: raise AssertionError( "number of combinations != number of expected (%s != %s)" % (len(output), n_expected)) def test_random_choice_csc(n_samples=10000, random_state=24): # Explicit class probabilities classes = [np.array([0, 1]), np.array([0, 1, 2])] class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])] got = random_choice_csc(n_samples, classes, class_probabilites, random_state) assert_true(sp.issparse(got)) for k in range(len(classes)): p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples) assert_array_almost_equal(class_probabilites[k], p, decimal=1) # Implicit class probabilities classes = [[0, 1], [1, 2]] # test for array-like support class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])] got = random_choice_csc(n_samples=n_samples, classes=classes, random_state=random_state) assert_true(sp.issparse(got)) for k in range(len(classes)): p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples) assert_array_almost_equal(class_probabilites[k], p, decimal=1) # Edge case proabilites 1.0 and 0.0 classes = [np.array([0, 1]), np.array([0, 1, 2])] class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])] got = random_choice_csc(n_samples, classes, class_probabilites, random_state) assert_true(sp.issparse(got)) for k in range(len(classes)): p = np.bincount(got.getcol(k).toarray().ravel(), minlength=len(class_probabilites[k])) / n_samples assert_array_almost_equal(class_probabilites[k], p, decimal=1) # One class target data classes = [[1], [0]] # test for array-like support class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])] got = random_choice_csc(n_samples=n_samples, classes=classes, random_state=random_state) assert_true(sp.issparse(got)) for k in range(len(classes)): p = 
np.bincount(got.getcol(k).toarray().ravel()) / n_samples assert_array_almost_equal(class_probabilites[k], p, decimal=1) def test_random_choice_csc_errors(): # the length of an array in classes and class_probabilites is mismatched classes = [np.array([0, 1]), np.array([0, 1, 2, 3])] class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])] assert_raises(ValueError, random_choice_csc, 4, classes, class_probabilites, 1) # the class dtype is not supported classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])] class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])] assert_raises(ValueError, random_choice_csc, 4, classes, class_probabilites, 1) # the class dtype is not supported classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])] class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])] assert_raises(ValueError, random_choice_csc, 4, classes, class_probabilites, 1) # Given proabilites don't sum to 1 classes = [np.array([0, 1]), np.array([0, 1, 2])] class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])] assert_raises(ValueError, random_choice_csc, 4, classes, class_probabilites, 1)
bsd-3-clause
shikhardb/scikit-learn
examples/svm/plot_iris.py
62
3251
""" ================================================== Plot different SVM classifiers in the iris dataset ================================================== Comparison of different linear SVM classifiers on a 2D projection of the iris dataset. We only consider the first 2 features of this dataset: - Sepal length - Sepal width This example shows how to plot the decision surface for four SVM classifiers with different kernels. The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly different decision boundaries. This can be a consequence of the following differences: - ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the regular hinge loss. - ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass reduction while ``SVC`` uses the One-vs-One multiclass reduction. Both linear models have linear decision boundaries (intersecting hyperplanes) while the non-linear kernel models (polynomial or Gaussian RBF) have more flexible non-linear decision boundaries with shapes that depend on the kind of kernel and its parameters. .. NOTE:: while plotting the decision function of classifiers for toy 2D datasets can help get an intuitive understanding of their respective expressive power, be aware that those intuitions don't always generalize to more realistic high-dimensional problem. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets # import some data to play with iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. We could # avoid this ugly slicing by using a two-dim dataset y = iris.target h = .02 # step size in the mesh # we create an instance of SVM and fit out data. 
We do not scale our # data since we want to plot the support vectors C = 1.0 # SVM regularization parameter svc = svm.SVC(kernel='linear', C=C).fit(X, y) rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y) poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y) lin_svc = svm.LinearSVC(C=C).fit(X, y) # create a mesh to plot in x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # title for the plots titles = ['SVC with linear kernel', 'LinearSVC (linear kernel)', 'SVC with RBF kernel', 'SVC with polynomial (degree 3) kernel'] for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)): # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. plt.subplot(2, 2, i + 1) plt.subplots_adjust(wspace=0.4, hspace=0.4) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) plt.xlabel('Sepal length') plt.ylabel('Sepal width') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xticks(()) plt.yticks(()) plt.title(titles[i]) plt.show()
bsd-3-clause
0asa/scikit-learn
sklearn/ensemble/partial_dependence.py
36
14909
"""Partial dependence plots for tree ensembles. """ # Authors: Peter Prettenhofer # License: BSD 3 clause from itertools import count import numbers import numpy as np from scipy.stats.mstats import mquantiles from ..utils.extmath import cartesian from ..externals.joblib import Parallel, delayed from ..externals import six from ..externals.six.moves import map, range, zip from ..utils import check_array from ..tree._tree import DTYPE from ._gradient_boosting import _partial_dependence_tree from .gradient_boosting import BaseGradientBoosting def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100): """Generate a grid of points based on the ``percentiles of ``X``. The grid is generated by placing ``grid_resolution`` equally spaced points between the ``percentiles`` of each column of ``X``. Parameters ---------- X : ndarray The data percentiles : tuple of floats The percentiles which are used to construct the extreme values of the grid axes. grid_resolution : int The number of equally spaced points that are placed on the grid. Returns ------- grid : ndarray All data points on the grid; ``grid.shape[1] == X.shape[1]`` and ``grid.shape[0] == grid_resolution * X.shape[1]``. axes : seq of ndarray The axes with which the grid has been created. """ if len(percentiles) != 2: raise ValueError('percentile must be tuple of len 2') if not all(0. <= x <= 1. 
for x in percentiles): raise ValueError('percentile values must be in [0, 1]') axes = [] for col in range(X.shape[1]): uniques = np.unique(X[:, col]) if uniques.shape[0] < grid_resolution: # feature has low resolution use unique vals axis = uniques else: emp_percentiles = mquantiles(X, prob=percentiles, axis=0) # create axis based on percentiles and grid resolution axis = np.linspace(emp_percentiles[0, col], emp_percentiles[1, col], num=grid_resolution, endpoint=True) axes.append(axis) return cartesian(axes), axes def partial_dependence(gbrt, target_variables, grid=None, X=None, percentiles=(0.05, 0.95), grid_resolution=100): """Partial dependence of ``target_variables``. Partial dependence plots show the dependence between the joint values of the ``target_variables`` and the function represented by the ``gbrt``. Parameters ---------- gbrt : BaseGradientBoosting A fitted gradient boosting model. target_variables : array-like, dtype=int The target features for which the partial dependecy should be computed (size should be smaller than 3 for visual renderings). grid : array-like, shape=(n_points, len(target_variables)) The grid of ``target_variables`` values for which the partial dependecy should be evaluated (either ``grid`` or ``X`` must be specified). X : array-like, shape=(n_samples, n_features) The data on which ``gbrt`` was trained. It is used to generate a ``grid`` for the ``target_variables``. The ``grid`` comprises ``grid_resolution`` equally spaced points between the two ``percentiles``. percentiles : (low, high), default=(0.05, 0.95) The lower and upper percentile used create the extreme values for the ``grid``. Only if ``X`` is not None. grid_resolution : int, default=100 The number of equally spaced points on the ``grid``. Returns ------- pdp : array, shape=(n_classes, n_points) The partial dependence function evaluated on the ``grid``. For regression and binary classification ``n_classes==1``. 
axes : seq of ndarray or None The axes with which the grid has been created or None if the grid has been given. Examples -------- >>> samples = [[0, 0, 2], [1, 0, 0]] >>> labels = [0, 1] >>> from sklearn.ensemble import GradientBoostingClassifier >>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels) >>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2) >>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP (array([[-4.52..., 4.52...]]), [array([ 0., 1.])]) """ if not isinstance(gbrt, BaseGradientBoosting): raise ValueError('gbrt has to be an instance of BaseGradientBoosting') if gbrt.estimators_.shape[0] == 0: raise ValueError('Call %s.fit before partial_dependence' % gbrt.__class__.__name__) if (grid is None and X is None) or (grid is not None and X is not None): raise ValueError('Either grid or X must be specified') target_variables = np.asarray(target_variables, dtype=np.int32, order='C').ravel() if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]): raise ValueError('target_variables must be in [0, %d]' % (gbrt.n_features - 1)) if X is not None: X = check_array(X, dtype=DTYPE, order='C') grid, axes = _grid_from_X(X[:, target_variables], percentiles, grid_resolution) else: assert grid is not None # dont return axes if grid is given axes = None # grid must be 2d if grid.ndim == 1: grid = grid[:, np.newaxis] if grid.ndim != 2: raise ValueError('grid must be 2d but is %dd' % grid.ndim) grid = np.asarray(grid, dtype=DTYPE, order='C') assert grid.shape[1] == target_variables.shape[0] n_trees_per_stage = gbrt.estimators_.shape[1] n_estimators = gbrt.estimators_.shape[0] pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64, order='C') for stage in range(n_estimators): for k in range(n_trees_per_stage): tree = gbrt.estimators_[stage, k].tree_ _partial_dependence_tree(tree, grid, target_variables, gbrt.learning_rate, pdp[k]) return pdp, axes def plot_partial_dependence(gbrt, X, features, 
feature_names=None, label=None, n_cols=3, grid_resolution=100, percentiles=(0.05, 0.95), n_jobs=1, verbose=0, ax=None, line_kw=None, contour_kw=None, **fig_kw): """Partial dependence plots for ``features``. The ``len(features)`` plots are arranged in a grid with ``n_cols`` columns. Two-way partial dependence plots are plotted as contour plots. Parameters ---------- gbrt : BaseGradientBoosting A fitted gradient boosting model. X : array-like, shape=(n_samples, n_features) The data on which ``gbrt`` was trained. features : seq of tuples or ints If seq[i] is an int or a tuple with one int value, a one-way PDP is created; if seq[i] is a tuple of two ints, a two-way PDP is created. feature_names : seq of str Name of each feature; feature_names[i] holds the name of the feature with index i. label : object The class label for which the PDPs should be computed. Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``. n_cols : int The number of columns in the grid plot (default: 3). percentiles : (low, high), default=(0.05, 0.95) The lower and upper percentile used create the extreme values for the PDP axes. grid_resolution : int, default=100 The number of equally spaced points on the axes. n_jobs : int The number of CPUs to use to compute the PDs. -1 means 'all CPUs'. Defaults to 1. verbose : int Verbose output during PD computations. Defaults to 0. ax : Matplotlib axis object, default None An axis object onto which the plots will be drawn. line_kw : dict Dict with keywords passed to the ``pylab.plot`` call. For one-way partial dependence plots. contour_kw : dict Dict with keywords passed to the ``pylab.plot`` call. For two-way partial dependence plots. fig_kw : dict Dict with keywords passed to the figure() call. Note that all keywords not recognized above will be automatically included here. Returns ------- fig : figure The Matplotlib Figure object. axs : seq of Axis objects A seq of Axis objects, one for each subplot. 
Examples -------- >>> from sklearn.datasets import make_friedman1 >>> from sklearn.ensemble import GradientBoostingRegressor >>> X, y = make_friedman1() >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y) >>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP ... """ import matplotlib.pyplot as plt from matplotlib import transforms from matplotlib.ticker import MaxNLocator from matplotlib.ticker import ScalarFormatter if not isinstance(gbrt, BaseGradientBoosting): raise ValueError('gbrt has to be an instance of BaseGradientBoosting') if gbrt.estimators_.shape[0] == 0: raise ValueError('Call %s.fit before partial_dependence' % gbrt.__class__.__name__) # set label_idx for multi-class GBRT if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2: if label is None: raise ValueError('label is not given for multi-class PDP') label_idx = np.searchsorted(gbrt.classes_, label) if gbrt.classes_[label_idx] != label: raise ValueError('label %s not in ``gbrt.classes_``' % str(label)) else: # regression and binary classification label_idx = 0 X = check_array(X, dtype=DTYPE, order='C') if gbrt.n_features != X.shape[1]: raise ValueError('X.shape[1] does not match gbrt.n_features') if line_kw is None: line_kw = {'color': 'green'} if contour_kw is None: contour_kw = {} # convert feature_names to list if feature_names is None: # if not feature_names use fx indices as name feature_names = [str(i) for i in range(gbrt.n_features)] elif isinstance(feature_names, np.ndarray): feature_names = feature_names.tolist() def convert_feature(fx): if isinstance(fx, six.string_types): try: fx = feature_names.index(fx) except ValueError: raise ValueError('Feature %s not in feature_names' % fx) return fx # convert features into a seq of int tuples tmp_features = [] for fxs in features: if isinstance(fxs, (numbers.Integral,) + six.string_types): fxs = (fxs,) try: fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32) except TypeError: raise 
ValueError('features must be either int, str, or tuple ' 'of int/str') if not (1 <= np.size(fxs) <= 2): raise ValueError('target features must be either one or two') tmp_features.append(fxs) features = tmp_features names = [] try: for fxs in features: l = [] # explicit loop so "i" is bound for exception below for i in fxs: l.append(feature_names[i]) names.append(l) except IndexError: raise ValueError('features[i] must be in [0, n_features) ' 'but was %d' % i) # compute PD functions pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(partial_dependence)(gbrt, fxs, X=X, grid_resolution=grid_resolution) for fxs in features) # get global min and max values of PD grouped by plot type pdp_lim = {} for pdp, axes in pd_result: min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max() n_fx = len(axes) old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd)) min_pd = min(min_pd, old_min_pd) max_pd = max(max_pd, old_max_pd) pdp_lim[n_fx] = (min_pd, max_pd) # create contour levels for two-way plots if 2 in pdp_lim: Z_level = np.linspace(*pdp_lim[2], num=8) if ax is None: fig = plt.figure(**fig_kw) else: fig = ax.get_figure() fig.clear() n_cols = min(n_cols, len(features)) n_rows = int(np.ceil(len(features) / float(n_cols))) axs = [] for i, fx, name, (pdp, axes) in zip(count(), features, names, pd_result): ax = fig.add_subplot(n_rows, n_cols, i + 1) if len(axes) == 1: ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw) else: # make contour plot assert len(axes) == 2 XX, YY = np.meshgrid(axes[0], axes[1]) Z = pdp[label_idx].reshape(list(map(np.size, axes))).T CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5, colors='k') ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1], vmin=Z_level[0], alpha=0.75, **contour_kw) ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True) # plot data deciles + axes labels deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1)) trans = transforms.blended_transform_factory(ax.transData, ax.transAxes) 
ylim = ax.get_ylim() ax.vlines(deciles, [0], 0.05, transform=trans, color='k') ax.set_xlabel(name[0]) ax.set_ylim(ylim) # prevent x-axis ticks from overlapping ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower')) tick_formatter = ScalarFormatter() tick_formatter.set_powerlimits((-3, 4)) ax.xaxis.set_major_formatter(tick_formatter) if len(axes) > 1: # two-way PDP - y-axis deciles + labels deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1)) trans = transforms.blended_transform_factory(ax.transAxes, ax.transData) xlim = ax.get_xlim() ax.hlines(deciles, [0], 0.05, transform=trans, color='k') ax.set_ylabel(name[1]) # hline erases xlim ax.set_xlim(xlim) else: ax.set_ylabel('Partial dependence') if len(axes) == 1: ax.set_ylim(pdp_lim[1]) axs.append(ax) fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4, hspace=0.3) return fig, axs
bsd-3-clause
karstenw/nodebox-pyobjc
examples/Extended Application/matplotlib/examples/mplot3d/contour3d_3.py
1
1072
''' ======================================== Projecting contour profiles onto a graph ======================================== Demonstrates displaying a 3D surface while also projecting contour 'profiles' onto the 'walls' of the graph. See contourf3d_demo2 for the filled version. ''' from mpl_toolkits.mplot3d import axes3d import matplotlib.pyplot as plt from matplotlib import cm fig = plt.figure() ax = fig.gca(projection='3d') X, Y, Z = axes3d.get_test_data(0.05) # Plot the 3D surface ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3) # Plot projections of the contours for each dimension. By choosing offsets # that match the appropriate axes limits, the projected contours will sit on # the 'walls' of the graph cset = ax.contour(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm) cset = ax.contour(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm) cset = ax.contour(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm) ax.set_xlim(-40, 40) ax.set_ylim(-40, 40) ax.set_zlim(-100, 100) ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') plt.show()
mit
waterponey/scikit-learn
examples/applications/plot_out_of_core_classification.py
51
13651
""" ====================================================== Out-of-core classification of text documents ====================================================== This is an example showing how scikit-learn can be used for classification using an out-of-core approach: learning from data that doesn't fit into main memory. We make use of an online classifier, i.e., one that supports the partial_fit method, that will be fed with batches of examples. To guarantee that the features space remains the same over time we leverage a HashingVectorizer that will project each example into the same feature space. This is especially useful in the case of text classification where new features (words) may appear in each batch. The dataset used in this example is Reuters-21578 as provided by the UCI ML repository. It will be automatically downloaded and uncompressed on first run. The plot represents the learning curve of the classifier: the evolution of classification accuracy over the course of the mini-batches. Accuracy is measured on the first 1000 samples, held out as a validation set. To limit the memory consumption, we queue examples up to a fixed amount before feeding them to the learner. 
""" # Authors: Eustache Diemert <eustache@diemert.fr> # @FedericoV <https://github.com/FedericoV/> # License: BSD 3 clause from __future__ import print_function from glob import glob import itertools import os.path import re import tarfile import time import numpy as np import matplotlib.pyplot as plt from matplotlib import rcParams from sklearn.externals.six.moves import html_parser from sklearn.externals.six.moves import urllib from sklearn.datasets import get_data_home from sklearn.feature_extraction.text import HashingVectorizer from sklearn.linear_model import SGDClassifier from sklearn.linear_model import PassiveAggressiveClassifier from sklearn.linear_model import Perceptron from sklearn.naive_bayes import MultinomialNB def _not_in_sphinx(): # Hack to detect whether we are running by the sphinx builder return '__file__' in globals() ############################################################################### # Reuters Dataset related routines # -------------------------------- # class ReutersParser(html_parser.HTMLParser): """Utility class to parse a SGML file and yield documents one at a time.""" def __init__(self, encoding='latin-1'): html_parser.HTMLParser.__init__(self) self._reset() self.encoding = encoding def handle_starttag(self, tag, attrs): method = 'start_' + tag getattr(self, method, lambda x: None)(attrs) def handle_endtag(self, tag): method = 'end_' + tag getattr(self, method, lambda: None)() def _reset(self): self.in_title = 0 self.in_body = 0 self.in_topics = 0 self.in_topic_d = 0 self.title = "" self.body = "" self.topics = [] self.topic_d = "" def parse(self, fd): self.docs = [] for chunk in fd: self.feed(chunk.decode(self.encoding)) for doc in self.docs: yield doc self.docs = [] self.close() def handle_data(self, data): if self.in_body: self.body += data elif self.in_title: self.title += data elif self.in_topic_d: self.topic_d += data def start_reuters(self, attributes): pass def end_reuters(self): self.body = re.sub(r'\s+', r' ', 
self.body) self.docs.append({'title': self.title, 'body': self.body, 'topics': self.topics}) self._reset() def start_title(self, attributes): self.in_title = 1 def end_title(self): self.in_title = 0 def start_body(self, attributes): self.in_body = 1 def end_body(self): self.in_body = 0 def start_topics(self, attributes): self.in_topics = 1 def end_topics(self): self.in_topics = 0 def start_d(self, attributes): self.in_topic_d = 1 def end_d(self): self.in_topic_d = 0 self.topics.append(self.topic_d) self.topic_d = "" def stream_reuters_documents(data_path=None): """Iterate over documents of the Reuters dataset. The Reuters archive will automatically be downloaded and uncompressed if the `data_path` directory does not exist. Documents are represented as dictionaries with 'body' (str), 'title' (str), 'topics' (list(str)) keys. """ DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/' 'reuters21578-mld/reuters21578.tar.gz') ARCHIVE_FILENAME = 'reuters21578.tar.gz' if data_path is None: data_path = os.path.join(get_data_home(), "reuters") if not os.path.exists(data_path): """Download the dataset.""" print("downloading dataset (once and for all) into %s" % data_path) os.mkdir(data_path) def progress(blocknum, bs, size): total_sz_mb = '%.2f MB' % (size / 1e6) current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6) if _not_in_sphinx(): print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb), end='') archive_path = os.path.join(data_path, ARCHIVE_FILENAME) urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path, reporthook=progress) if _not_in_sphinx(): print('\r', end='') print("untarring Reuters dataset...") tarfile.open(archive_path, 'r:gz').extractall(data_path) print("done.") parser = ReutersParser() for filename in glob(os.path.join(data_path, "*.sgm")): for doc in parser.parse(open(filename, 'rb')): yield doc ############################################################################### # Main # ---- # # Create the vectorizer and limit 
the number of features to a reasonable # maximum vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18, non_negative=True) # Iterator over parsed Reuters SGML files. data_stream = stream_reuters_documents() # We learn a binary classification between the "acq" class and all the others. # "acq" was chosen as it is more or less evenly distributed in the Reuters # files. For other datasets, one should take care of creating a test set with # a realistic portion of positive instances. all_classes = np.array([0, 1]) positive_class = 'acq' # Here are some classifiers that support the `partial_fit` method partial_fit_classifiers = { 'SGD': SGDClassifier(), 'Perceptron': Perceptron(), 'NB Multinomial': MultinomialNB(alpha=0.01), 'Passive-Aggressive': PassiveAggressiveClassifier(), } def get_minibatch(doc_iter, size, pos_class=positive_class): """Extract a minibatch of examples, return a tuple X_text, y. Note: size is before excluding invalid docs with no topics assigned. """ data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics']) for doc in itertools.islice(doc_iter, size) if doc['topics']] if not len(data): return np.asarray([], dtype=int), np.asarray([], dtype=int) X_text, y = zip(*data) return X_text, np.asarray(y, dtype=int) def iter_minibatches(doc_iter, minibatch_size): """Generator of minibatches.""" X_text, y = get_minibatch(doc_iter, minibatch_size) while len(X_text): yield X_text, y X_text, y = get_minibatch(doc_iter, minibatch_size) # test data statistics test_stats = {'n_test': 0, 'n_test_pos': 0} # First we hold out a number of examples to estimate accuracy n_test_documents = 1000 tick = time.time() X_test_text, y_test = get_minibatch(data_stream, 1000) parsing_time = time.time() - tick tick = time.time() X_test = vectorizer.transform(X_test_text) vectorizing_time = time.time() - tick test_stats['n_test'] += len(y_test) test_stats['n_test_pos'] += sum(y_test) print("Test set is %d documents (%d positive)" % (len(y_test), 
sum(y_test))) def progress(cls_name, stats): """Report progress information, return a string.""" duration = time.time() - stats['t0'] s = "%20s classifier : \t" % cls_name s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats s += "accuracy: %(accuracy).3f " % stats s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration) return s cls_stats = {} for cls_name in partial_fit_classifiers: stats = {'n_train': 0, 'n_train_pos': 0, 'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(), 'runtime_history': [(0, 0)], 'total_fit_time': 0.0} cls_stats[cls_name] = stats get_minibatch(data_stream, n_test_documents) # Discard test set # We will feed the classifier with mini-batches of 1000 documents; this means # we have at most 1000 docs in memory at any time. The smaller the document # batch, the bigger the relative overhead of the partial fit methods. minibatch_size = 1000 # Create the data_stream that parses Reuters SGML files and iterates on # documents as a stream. 
minibatch_iterators = iter_minibatches(data_stream, minibatch_size) total_vect_time = 0.0 # Main loop : iterate on mini-batches of examples for i, (X_train_text, y_train) in enumerate(minibatch_iterators): tick = time.time() X_train = vectorizer.transform(X_train_text) total_vect_time += time.time() - tick for cls_name, cls in partial_fit_classifiers.items(): tick = time.time() # update estimator with examples in the current mini-batch cls.partial_fit(X_train, y_train, classes=all_classes) # accumulate test accuracy stats cls_stats[cls_name]['total_fit_time'] += time.time() - tick cls_stats[cls_name]['n_train'] += X_train.shape[0] cls_stats[cls_name]['n_train_pos'] += sum(y_train) tick = time.time() cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test) cls_stats[cls_name]['prediction_time'] = time.time() - tick acc_history = (cls_stats[cls_name]['accuracy'], cls_stats[cls_name]['n_train']) cls_stats[cls_name]['accuracy_history'].append(acc_history) run_history = (cls_stats[cls_name]['accuracy'], total_vect_time + cls_stats[cls_name]['total_fit_time']) cls_stats[cls_name]['runtime_history'].append(run_history) if i % 3 == 0: print(progress(cls_name, cls_stats[cls_name])) if i % 3 == 0: print('\n') ############################################################################### # Plot results # ------------ def plot_accuracy(x, y, x_legend): """Plot accuracy as a function of x.""" x = np.array(x) y = np.array(y) plt.title('Classification accuracy as a function of %s' % x_legend) plt.xlabel('%s' % x_legend) plt.ylabel('Accuracy') plt.grid(True) plt.plot(x, y) rcParams['legend.fontsize'] = 10 cls_names = list(sorted(cls_stats.keys())) # Plot accuracy evolution plt.figure() for _, stats in sorted(cls_stats.items()): # Plot accuracy evolution with #examples accuracy, n_examples = zip(*stats['accuracy_history']) plot_accuracy(n_examples, accuracy, "training examples (#)") ax = plt.gca() ax.set_ylim((0.8, 1)) plt.legend(cls_names, loc='best') plt.figure() for _, stats 
in sorted(cls_stats.items()): # Plot accuracy evolution with runtime accuracy, runtime = zip(*stats['runtime_history']) plot_accuracy(runtime, accuracy, 'runtime (s)') ax = plt.gca() ax.set_ylim((0.8, 1)) plt.legend(cls_names, loc='best') # Plot fitting times plt.figure() fig = plt.gcf() cls_runtime = [] for cls_name, stats in sorted(cls_stats.items()): cls_runtime.append(stats['total_fit_time']) cls_runtime.append(total_vect_time) cls_names.append('Vectorization') bar_colors = ['b', 'g', 'r', 'c', 'm', 'y'] ax = plt.subplot(111) rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5, color=bar_colors) ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names))) ax.set_xticklabels(cls_names, fontsize=10) ymax = max(cls_runtime) * 1.2 ax.set_ylim((0, ymax)) ax.set_ylabel('runtime (s)') ax.set_title('Training Times') def autolabel(rectangles): """attach some text vi autolabel on rectangles.""" for rect in rectangles: height = rect.get_height() ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height, '%.4f' % height, ha='center', va='bottom') autolabel(rectangles) plt.show() # Plot prediction times plt.figure() cls_runtime = [] cls_names = list(sorted(cls_stats.keys())) for cls_name, stats in sorted(cls_stats.items()): cls_runtime.append(stats['prediction_time']) cls_runtime.append(parsing_time) cls_names.append('Read/Parse\n+Feat.Extr.') cls_runtime.append(vectorizing_time) cls_names.append('Hashing\n+Vect.') ax = plt.subplot(111) rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5, color=bar_colors) ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names))) ax.set_xticklabels(cls_names, fontsize=8) plt.setp(plt.xticks()[1], rotation=30) ymax = max(cls_runtime) * 1.2 ax.set_ylim((0, ymax)) ax.set_ylabel('runtime (s)') ax.set_title('Prediction Times (%d instances)' % n_test_documents) autolabel(rectangles) plt.show()
bsd-3-clause
endolith/scikit-image
skimage/io/manage_plugins.py
17
10353
"""Handle image reading, writing and plotting plugins. To improve performance, plugins are only loaded as needed. As a result, there can be multiple states for a given plugin: available: Defined in an *ini file located in `skimage.io._plugins`. See also `skimage.io.available_plugins`. partial definition: Specified in an *ini file, but not defined in the corresponding plugin module. This will raise an error when loaded. available but not on this system: Defined in `skimage.io._plugins`, but a dependent library (e.g. Qt, PIL) is not available on your system. This will raise an error when loaded. loaded: The real availability is determined when it's explicitly loaded, either because it's one of the default plugins, or because it's loaded explicitly by the user. """ import sys if sys.version.startswith('3'): from configparser import ConfigParser # Python 3 else: from ConfigParser import ConfigParser # Python 2 import os.path from glob import glob from .collection import imread_collection_wrapper __all__ = ['use_plugin', 'call_plugin', 'plugin_info', 'plugin_order', 'reset_plugins', 'find_available_plugins', 'available_plugins'] # The plugin store will save a list of *loaded* io functions for each io type # (e.g. 'imread', 'imsave', etc.). Plugins are loaded as requested. plugin_store = None # Dictionary mapping plugin names to a list of functions they provide. plugin_provides = {} # The module names for the plugins in `skimage.io._plugins`. plugin_module_name = {} # Meta-data about plugins provided by *.ini files. plugin_meta_data = {} # For each plugin type, default to the first available plugin as defined by # the following preferences. preferred_plugins = { # Default plugins for all types (overridden by specific types below). 
'all': ['pil', 'matplotlib', 'qt', 'freeimage'], 'imshow': ['matplotlib'] } def _clear_plugins(): """Clear the plugin state to the default, i.e., where no plugins are loaded """ global plugin_store plugin_store = {'imread': [], 'imsave': [], 'imshow': [], 'imread_collection': [], '_app_show': []} _clear_plugins() def _load_preferred_plugins(): # Load preferred plugin for each io function. io_types = ['imsave', 'imshow', 'imread_collection', 'imread'] for p_type in io_types: _set_plugin(p_type, preferred_plugins['all']) plugin_types = (p for p in preferred_plugins.keys() if p != 'all') for p_type in plugin_types: _set_plugin(p_type, preferred_plugins[p_type]) def _set_plugin(plugin_type, plugin_list): for plugin in plugin_list: if plugin not in available_plugins: continue try: use_plugin(plugin, kind=plugin_type) break except (ImportError, RuntimeError, OSError): pass def reset_plugins(): _clear_plugins() _load_preferred_plugins() def _parse_config_file(filename): """Return plugin name and meta-data dict from plugin config file.""" parser = ConfigParser() parser.read(filename) name = parser.sections()[0] meta_data = {} for opt in parser.options(name): meta_data[opt] = parser.get(name, opt) return name, meta_data def _scan_plugins(): """Scan the plugins directory for .ini files and parse them to gather plugin meta-data. """ pd = os.path.dirname(__file__) config_files = glob(os.path.join(pd, '_plugins', '*.ini')) for filename in config_files: name, meta_data = _parse_config_file(filename) plugin_meta_data[name] = meta_data provides = [s.strip() for s in meta_data['provides'].split(',')] valid_provides = [p for p in provides if p in plugin_store] for p in provides: if not p in plugin_store: print("Plugin `%s` wants to provide non-existent `%s`." " Ignoring." % (name, p)) # Add plugins that provide 'imread' as provider of 'imread_collection'. 
need_to_add_collection = ('imread_collection' not in valid_provides and 'imread' in valid_provides) if need_to_add_collection: valid_provides.append('imread_collection') plugin_provides[name] = valid_provides plugin_module_name[name] = os.path.basename(filename)[:-4] _scan_plugins() def find_available_plugins(loaded=False): """List available plugins. Parameters ---------- loaded : bool If True, show only those plugins currently loaded. By default, all plugins are shown. Returns ------- p : dict Dictionary with plugin names as keys and exposed functions as values. """ active_plugins = set() for plugin_func in plugin_store.values(): for plugin, func in plugin_func: active_plugins.add(plugin) d = {} for plugin in plugin_provides: if not loaded or plugin in active_plugins: d[plugin] = [f for f in plugin_provides[plugin] if not f.startswith('_')] return d available_plugins = find_available_plugins() def call_plugin(kind, *args, **kwargs): """Find the appropriate plugin of 'kind' and execute it. Parameters ---------- kind : {'imshow', 'imsave', 'imread', 'imread_collection'} Function to look up. plugin : str, optional Plugin to load. Defaults to None, in which case the first matching plugin is used. *args, **kwargs : arguments and keyword arguments Passed to the plugin function. """ if not kind in plugin_store: raise ValueError('Invalid function (%s) requested.' % kind) plugin_funcs = plugin_store[kind] if len(plugin_funcs) == 0: msg = ("No suitable plugin registered for %s.\n\n" "You may load I/O plugins with the `skimage.io.use_plugin` " "command. A list of all available plugins can be found using " "`skimage.io.plugins()`.") raise RuntimeError(msg % kind) plugin = kwargs.pop('plugin', None) if plugin is None: _, func = plugin_funcs[0] else: _load(plugin) try: func = [f for (p, f) in plugin_funcs if p == plugin][0] except IndexError: raise RuntimeError('Could not find the plugin "%s" for %s.' 
% (plugin, kind)) return func(*args, **kwargs) def use_plugin(name, kind=None): """Set the default plugin for a specified operation. The plugin will be loaded if it hasn't been already. Parameters ---------- name : str Name of plugin. kind : {'imsave', 'imread', 'imshow', 'imread_collection'}, optional Set the plugin for this function. By default, the plugin is set for all functions. See Also -------- available_plugins : List of available plugins Examples -------- To use Matplotlib as the default image reader, you would write: >>> from skimage import io >>> io.use_plugin('matplotlib', 'imread') To see a list of available plugins run ``io.available_plugins``. Note that this lists plugins that are defined, but the full list may not be usable if your system does not have the required libraries installed. """ if kind is None: kind = plugin_store.keys() else: if not kind in plugin_provides[name]: raise RuntimeError("Plugin %s does not support `%s`." % (name, kind)) if kind == 'imshow': kind = [kind, '_app_show'] else: kind = [kind] _load(name) for k in kind: if not k in plugin_store: raise RuntimeError("'%s' is not a known plugin function." % k) funcs = plugin_store[k] # Shuffle the plugins so that the requested plugin stands first # in line funcs = [(n, f) for (n, f) in funcs if n == name] + \ [(n, f) for (n, f) in funcs if n != name] plugin_store[k] = funcs def _inject_imread_collection_if_needed(module): """Add `imread_collection` to module if not already present.""" if not hasattr(module, 'imread_collection') and hasattr(module, 'imread'): imread = getattr(module, 'imread') func = imread_collection_wrapper(imread) setattr(module, 'imread_collection', func) def _load(plugin): """Load the given plugin. Parameters ---------- plugin : str Name of plugin to load. See Also -------- plugins : List of available plugins """ if plugin in find_available_plugins(loaded=True): return if not plugin in plugin_module_name: raise ValueError("Plugin %s not found." 
% plugin) else: modname = plugin_module_name[plugin] plugin_module = __import__('skimage.io._plugins.' + modname, fromlist=[modname]) provides = plugin_provides[plugin] for p in provides: if p == 'imread_collection': _inject_imread_collection_if_needed(plugin_module) elif not hasattr(plugin_module, p): print("Plugin %s does not provide %s as advertised. Ignoring." % (plugin, p)) continue store = plugin_store[p] func = getattr(plugin_module, p) if not (plugin, func) in store: store.append((plugin, func)) def plugin_info(plugin): """Return plugin meta-data. Parameters ---------- plugin : str Name of plugin. Returns ------- m : dict Meta data as specified in plugin ``.ini``. """ try: return plugin_meta_data[plugin] except KeyError: raise ValueError('No information on plugin "%s"' % plugin) def plugin_order(): """Return the currently preferred plugin order. Returns ------- p : dict Dictionary of preferred plugin order, with function name as key and plugins (in order of preference) as value. """ p = {} for func in plugin_store: p[func] = [plugin_name for (plugin_name, f) in plugin_store[func]] return p
bsd-3-clause
moutai/scikit-learn
sklearn/datasets/tests/test_kddcup99.py
59
1336
"""Test kddcup99 loader. Only 'percent10' mode is tested, as the full data is too big to use in unit-testing. The test is skipped if the data wasn't previously fetched and saved to scikit-learn data folder. """ import errno from sklearn.datasets import fetch_kddcup99 from sklearn.utils.testing import assert_equal, SkipTest def test_percent10(): try: data = fetch_kddcup99(download_if_missing=False) except IOError as e: if e.errno == errno.ENOENT: raise SkipTest("kddcup99 dataset can not be loaded.") assert_equal(data.data.shape, (494021, 41)) assert_equal(data.target.shape, (494021,)) data_shuffled = fetch_kddcup99(shuffle=True, random_state=0) assert_equal(data.data.shape, data_shuffled.data.shape) assert_equal(data.target.shape, data_shuffled.target.shape) data = fetch_kddcup99('SA') assert_equal(data.data.shape, (100655, 41)) assert_equal(data.target.shape, (100655,)) data = fetch_kddcup99('SF') assert_equal(data.data.shape, (73237, 4)) assert_equal(data.target.shape, (73237,)) data = fetch_kddcup99('http') assert_equal(data.data.shape, (58725, 3)) assert_equal(data.target.shape, (58725,)) data = fetch_kddcup99('smtp') assert_equal(data.data.shape, (9571, 3)) assert_equal(data.target.shape, (9571,))
bsd-3-clause
shenzebang/scikit-learn
doc/datasets/mldata_fixture.py
367
1183
"""Fixture module to skip the datasets loading when offline Mock urllib2 access to mldata.org and create a temporary data folder. """ from os import makedirs from os.path import join import numpy as np import tempfile import shutil from sklearn import datasets from sklearn.utils.testing import install_mldata_mock from sklearn.utils.testing import uninstall_mldata_mock def globs(globs): # Create a temporary folder for the data fetcher global custom_data_home custom_data_home = tempfile.mkdtemp() makedirs(join(custom_data_home, 'mldata')) globs['custom_data_home'] = custom_data_home return globs def setup_module(): # setup mock urllib2 module to avoid downloading from mldata.org install_mldata_mock({ 'mnist-original': { 'data': np.empty((70000, 784)), 'label': np.repeat(np.arange(10, dtype='d'), 7000), }, 'iris': { 'data': np.empty((150, 4)), }, 'datasets-uci-iris': { 'double0': np.empty((150, 4)), 'class': np.empty((150,)), }, }) def teardown_module(): uninstall_mldata_mock() shutil.rmtree(custom_data_home)
bsd-3-clause
weaver-viii/h2o-3
h2o-py/tests/testdir_algos/glm/pyunit_link_functions_binomialGLM.py
3
1383
import sys sys.path.insert(1, "../../../") import h2o import pandas as pd import zipfile import statsmodels.api as sm def link_functions_binomial(ip,port): print("Read in prostate data.") h2o_data = h2o.import_file(path=h2o.locate("smalldata/prostate/prostate_complete.csv.zip")) h2o_data.head() sm_data = pd.read_csv(zipfile.ZipFile(h2o.locate("smalldata/prostate/prostate_complete.csv.zip")).open("prostate_complete.csv")).as_matrix() sm_data_response = sm_data[:,2] sm_data_features = sm_data[:,[1,3,4,5,6,7,8,9]] print("Testing for family: BINOMIAL") print("Set variables for h2o.") myY = "CAPSULE" myX = ["ID","AGE","RACE","GLEASON","DCAPS","PSA","VOL","DPROS"] print("Create models with canonical link: LOGIT") h2o_model = h2o.glm(x=h2o_data[myX], y=h2o_data[myY].asfactor(), family="binomial", link="logit",alpha=[0.5], Lambda=[0]) sm_model = sm.GLM(endog=sm_data_response, exog=sm_data_features, family=sm.families.Binomial(sm.families.links.logit)).fit() print("Compare model deviances for link function logit") h2o_deviance = h2o_model.residual_deviance() / h2o_model.null_deviance() sm_deviance = sm_model.deviance / sm_model.null_deviance assert h2o_deviance - sm_deviance < 0.01, "expected h2o to have an equivalent or better deviance measures" if __name__ == "__main__": h2o.run_test(sys.argv, link_functions_binomial)
apache-2.0
jaidevd/scikit-learn
sklearn/gaussian_process/tests/test_gaussian_process.py
46
7057
""" Testing for Gaussian Process module (sklearn.gaussian_process) """ # Author: Vincent Dubourg <vincent.dubourg@gmail.com> # License: BSD 3 clause import numpy as np from sklearn.gaussian_process import GaussianProcess from sklearn.gaussian_process import regression_models as regression from sklearn.gaussian_process import correlation_models as correlation from sklearn.datasets import make_regression from sklearn.utils.testing import assert_greater, assert_true, raises f = lambda x: x * np.sin(x) X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T y = f(X).ravel() def test_1d(regr=regression.constant, corr=correlation.squared_exponential, random_start=10, beta0=None): # MLE estimation of a one-dimensional Gaussian Process model. # Check random start optimization. # Test the interpolating property. gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0, theta0=1e-2, thetaL=1e-4, thetaU=1e-1, random_start=random_start, verbose=False).fit(X, y) y_pred, MSE = gp.predict(X, eval_MSE=True) y2_pred, MSE2 = gp.predict(X2, eval_MSE=True) assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.) and np.allclose(MSE2, 0., atol=10)) def test_2d(regr=regression.constant, corr=correlation.squared_exponential, random_start=10, beta0=None): # MLE estimation of a two-dimensional Gaussian Process model accounting for # anisotropy. Check random start optimization. # Test the interpolating property. b, kappa, e = 5., .5, .1 g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2. 
X = np.array([[-4.61611719, -6.00099547], [4.10469096, 5.32782448], [0.00000000, -0.50000000], [-6.17289014, -4.6984743], [1.3109306, -6.93271427], [-5.03823144, 3.10584743], [-2.87600388, 6.74310541], [5.21301203, 4.26386883]]) y = g(X).ravel() thetaL = [1e-4] * 2 thetaU = [1e-1] * 2 gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0, theta0=[1e-2] * 2, thetaL=thetaL, thetaU=thetaU, random_start=random_start, verbose=False) gp.fit(X, y) y_pred, MSE = gp.predict(X, eval_MSE=True) assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)) eps = np.finfo(gp.theta_.dtype).eps assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential, random_start=10, beta0=None): # MLE estimation of a two-dimensional Gaussian Process model accounting for # anisotropy. Check random start optimization. # Test the GP interpolation for 2D output b, kappa, e = 5., .5, .1 g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2. f = lambda x: np.vstack((g(x), g(x))).T X = np.array([[-4.61611719, -6.00099547], [4.10469096, 5.32782448], [0.00000000, -0.50000000], [-6.17289014, -4.6984743], [1.3109306, -6.93271427], [-5.03823144, 3.10584743], [-2.87600388, 6.74310541], [5.21301203, 4.26386883]]) y = f(X) gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0, theta0=[1e-2] * 2, thetaL=[1e-4] * 2, thetaU=[1e-1] * 2, random_start=random_start, verbose=False) gp.fit(X, y) y_pred, MSE = gp.predict(X, eval_MSE=True) assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)) @raises(ValueError) def test_wrong_number_of_outputs(): gp = GaussianProcess() gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3]) def test_more_builtin_correlation_models(random_start=1): # Repeat test_1d and test_2d for several built-in correlation # models specified as strings. 
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic', 'linear'] for corr in all_corr: test_1d(regr='constant', corr=corr, random_start=random_start) test_2d(regr='constant', corr=corr, random_start=random_start) test_2d_2d(regr='constant', corr=corr, random_start=random_start) def test_ordinary_kriging(): # Repeat test_1d and test_2d with given regression weights (beta0) for # different regression models (Ordinary Kriging). test_1d(regr='linear', beta0=[0., 0.5]) test_1d(regr='quadratic', beta0=[0., 0.5, 0.5]) test_2d(regr='linear', beta0=[0., 0.5, 0.5]) test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5]) test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5]) test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5]) def test_no_normalize(): gp = GaussianProcess(normalize=False).fit(X, y) y_pred = gp.predict(X) assert_true(np.allclose(y_pred, y)) def test_batch_size(): # TypeError when using batch_size on Python 3, see # https://github.com/scikit-learn/scikit-learn/issues/7329 for more # details gp = GaussianProcess() gp.fit(X, y) gp.predict(X, batch_size=1) gp.predict(X, batch_size=1, eval_MSE=True) def test_random_starts(): # Test that an increasing number of random-starts of GP fitting only # increases the reduced likelihood function of the optimal theta. n_samples, n_features = 50, 3 np.random.seed(0) rng = np.random.RandomState(0) X = rng.randn(n_samples, n_features) * 2 - 1 y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) best_likelihood = -np.inf for random_start in range(1, 5): gp = GaussianProcess(regr="constant", corr="squared_exponential", theta0=[1e-0] * n_features, thetaL=[1e-4] * n_features, thetaU=[1e+1] * n_features, random_start=random_start, random_state=0, verbose=False).fit(X, y) rlf = gp.reduced_likelihood_function()[0] assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps) best_likelihood = rlf def test_mse_solving(): # test the MSE estimate to be sane. 
# non-regression test for ignoring off-diagonals of feature covariance, # testing with nugget that renders covariance useless, only # using the mean function, with low effective rank of data gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4, thetaL=1e-12, thetaU=1e-2, nugget=1e-2, optimizer='Welch', regr="linear", random_state=0) X, y = make_regression(n_informative=3, n_features=60, noise=50, random_state=0, effective_rank=1) gp.fit(X, y) assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
bsd-3-clause
ibis-project/ibis
ibis/backends/parquet/tests/test_parquet.py
1
2747
import sys import pyarrow as pa import pyarrow.parquet as pq import pytest from pandas.util import testing as tm import ibis from ibis.backends.base.file import FileDatabase from ibis.backends.parquet import ParquetTable pytestmark = pytest.mark.skipif( sys.platform == 'win32', reason='See ibis issue #1698' ) @pytest.fixture def transformed(parquet): closes = parquet.pq.close opens = parquet.pq.open t = opens.inner_join(closes, ['time', 'ticker']) t = t[opens, closes.close] t = t.mutate(avg=(t.open + t.close) / 2) t = t[['time', 'ticker', 'avg']] return t def test_creation(parquet): # we have existing files in our dir d = parquet.client.root assert len(list(d.iterdir())) == 1 pqd = d / 'pq' assert len(list(pqd.iterdir())) == 2 assert len(pq.read_table(str(pqd / 'open.parquet'))) == 50 assert len(pq.read_table(str(pqd / 'close.parquet'))) == 50 def test_client(tmpdir, file_backends_data): # construct with a path to a file d = tmpdir / 'pq' d.mkdir() for k, v in file_backends_data.items(): f = d / "{}.parquet".format(k) table = pa.Table.from_pandas(v) pq.write_table(table, str(f)) c = ibis.parquet.connect(tmpdir) assert c.list_databases() == ['pq'] assert c.database().pq.list_tables() == ['close', 'open'] def test_navigation(parquet): # directory navigation assert isinstance(parquet, FileDatabase) result = dir(parquet) assert result == ['pq'] d = parquet.pq assert isinstance(d, FileDatabase) result = dir(d) assert result == ['close', 'open'] result = d.list_tables() assert result == ['close', 'open'] opens = d.open assert isinstance(opens.op(), ParquetTable) closes = d.close assert isinstance(closes.op(), ParquetTable) def test_read(parquet, file_backends_data): closes = parquet.pq.close assert str(closes) is not None result = closes.execute() expected = file_backends_data['close'] tm.assert_frame_equal(result, expected) result = closes.execute() tm.assert_frame_equal(result, expected) def test_write(transformed, tmpdir): t = transformed expected = t.execute() tpath 
= tmpdir / 'new_dir' tpath.mkdir() path = tpath / 'foo.parquet' assert not path.exists() t = transformed[['time', 'ticker', 'avg']] c = ibis.parquet.connect(tpath) c.insert('foo.parquet', t) t.execute() assert path.exists() # readback c = ibis.parquet.connect(str(tpath)).database() result = c.list_databases() assert result == [] result = c.foo.execute() tm.assert_frame_equal(result, expected) path = tpath / 'foo.parquet' assert path.exists()
apache-2.0
zonemercy/Kaggle
quora/solution/keras_oof.py
1
10725
from __future__ import division import pandas as pd import numpy as np import random, os, gc import config from scipy import sparse as ssp from sklearn.utils import resample,shuffle from sklearn.metrics import log_loss, roc_auc_score from sklearn.cross_validation import train_test_split from sklearn.feature_selection import SelectKBest, chi2, SelectPercentile, f_classif from sklearn import preprocessing from tqdm import tqdm from keras.models import Sequential from keras.layers.core import Dense, Activation, Dropout from keras.layers.embeddings import Embedding from keras.layers.recurrent import LSTM, GRU from keras.layers.normalization import BatchNormalization from keras.utils import np_utils from keras.engine.topology import Merge from keras.layers import TimeDistributed, Lambda from keras.layers import Convolution1D, GlobalMaxPooling1D from keras.callbacks import ModelCheckpoint from keras import backend as K from keras.layers.advanced_activations import PReLU from keras.preprocessing import sequence, text RAW_PATH=config.RAW_PATH FEAT_PATH =config.FEAT_PATH train = pd.read_csv(RAW_PATH+'train.csv') train.drop(train.index[[config.ab_dup_test]], inplace=True) train.reset_index(drop=True, inplace=True) train_y = train.is_duplicate.values feat_df = ['feat_ab.csv','feature_vect_lemmer.csv','feat_158_stpf.csv'] df = pd.read_csv(FEAT_PATH+'magic_feature.csv') del df['question1'], df['question2'], df['id'] print 'feat_mag {}'.format(df.shape) def remove_col(train): list1=['question1','question2','id','is_duplicate'] for i in list1: if i in list(train.columns): del train[i] return train for f in feat_df: df1 = pd.read_csv(FEAT_PATH+f) df1 = remove_col(df1) df = pd.concat([df, df1],axis=1) del df1 gc.collect() print f, df.shape feature_base_close_porter = pd.read_csv(FEAT_PATH+'feature_base_close_porter.csv') del feature_base_close_porter['question1'], feature_base_close_porter['question2'], feature_base_close_porter['is_duplicate'] print 'feature_base_close_porter 
{}'.format(feature_base_close_porter.shape) df = pd.concat([df, feature_base_close_porter], axis=1) print 'df: {}'.format(df.shape) del feature_base_close_porter gc.collect() del_feat = ['q1_hash','q2_hash','q_hash_pos','q_hash_pos_1','q1_change','q2_change'] del_feat.extend(['q_change_pair','q1_q2_change_max']) del_feat.extend(['euclidean_distance', 'jaccard_distance','RMSE_distance']) del_feat.extend(['freq_diff', 'q1_q2_intersect_ratio']) del_feat.extend(list(tr_corr[abs(tr_corr['is_duplicate'])<0.01].index)) print df.shape for i in list(df.columns): if i in del_feat: del df[i] # df = df[use_feat] print df.shape ########### select k best features ############# train = df[df['is_duplicate']!=-1].copy() train =train.replace([np.inf, -np.inf], np.nan).dropna() full_feat = list(train.columns) full_feat.remove('is_duplicate') min_max_scaler = preprocessing.MinMaxScaler() train[full_feat] = min_max_scaler.fit_transform(train[full_feat]) selector = SelectKBest(chi2, k=200) selector.fit(train[full_feat], train['is_duplicate']) idxs_selected = selector.get_support(indices=True) columns_selected = train[full_feat].columns[idxs_selected] print columns_selected del train gc.collect() df = df[list(columns_selected)+['is_duplicate']] def oversample(X_ot,y,p=0.173): raw_num = X_ot.shape[0] print "RAW shape: {} | Mean rate: {}".format(X_ot.shape[0], y.mean()) pos_ot = X_ot[y==1] neg_ot = X_ot[y==0] #p = 0.165 scale = ((pos_ot.shape[0]*1.0 / (pos_ot.shape[0] + neg_ot.shape[0])) / p) - 1 while scale > 1: neg_ot = np.vstack([neg_ot, neg_ot]) scale -=1 neg_ot = np.vstack([neg_ot, neg_ot[:int(scale * neg_ot.shape[0])]]) ot = np.vstack([pos_ot, neg_ot]) y=np.zeros(ot.shape[0]) y[:pos_ot.shape[0]]=1.0 print "Oversample: {} | Mean rate: {}".format(ot.shape[0],y.mean()) return ot,y test = df[df['is_duplicate']==-1].copy() del test['is_duplicate'] train = df[df['is_duplicate']!=-1].copy() del train['is_duplicate'] del df gc.collect() print train.shape, test.shape ############### drop 
absolute duplicate rows ################# train.drop(train.index[[config.ab_dup_test]], inplace=True) train.reset_index(drop=True, inplace=True) embeddings_index = {} f = open(config.RAW_PATH+'glove.840B.300d.txt') for line in tqdm(f): values = line.split() word = values[0] coefs = np.asarray(values[1:], dtype='float32') embeddings_index[word] = coefs f.close() tk = text.Tokenizer(nb_words=200000) max_len = 140 tk.fit_on_texts(list(test.question1.values) + list(test.question2.values.astype(str))) x1_test = tk.texts_to_sequences(test.question1.values) x1_test = sequence.pad_sequences(x1_test, maxlen=max_len) x2_test = tk.texts_to_sequences(test.question2.values.astype(str)) x2_test = sequence.pad_sequences(x2_test, maxlen=max_len) kf = StratifiedKFold(n_splits=5, random_state=random_seed, shuffle=True) for train_idx, valid_idx in kf.split(train, y=train_y): X_train, X_test, y_train, y_test = train.loc[train_idx,:], train.loc[valid_idx,:], train_y[train_idx],train_y[valid_idx] print X_train.shape, y_train.shape X_train,y_train = oversample(X_train,y_train,p=0.1742) X_test,y_test = oversample(X_test,y_test,p=0.1742) X_train,y_train = shuffle(X_train,y_train,random_state=42) print X_train.shape, y_train.shape tk = text.Tokenizer(nb_words=200000) max_len = 140 tk.fit_on_texts(list(train.question1.values) + list(train.question2.values.astype(str))) x1 = tk.texts_to_sequences(X_train.question1.values) x1 = sequence.pad_sequences(x1, maxlen=max_len) x2 = tk.texts_to_sequences(X_train.question2.values.astype(str)) x2 = sequence.pad_sequences(x2, maxlen=max_len) word_index = tk.word_index print('Found %s word vectors.' 
% len(embeddings_index)) embedding_matrix = np.zeros((len(word_index) + 1, 300)) for word, i in tqdm(word_index.items()): embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector max_features = 200000 filter_length = 5 nb_filter = 64 pool_length = 4 model = Sequential() print('Build model...') model1 = Sequential() model1.add(Embedding(len(word_index) + 1, 300, weights=[embedding_matrix], input_length=40, trainable=False)) model1.add(TimeDistributed(Dense(300, activation='relu'))) model1.add(Lambda(lambda x: K.sum(x, axis=1), output_shape=(300,))) model2 = Sequential() model2.add(Embedding(len(word_index) + 1, 300, weights=[embedding_matrix], input_length=40, trainable=False)) model2.add(TimeDistributed(Dense(300, activation='relu'))) model2.add(Lambda(lambda x: K.sum(x, axis=1), output_shape=(300,))) model3 = Sequential() model3.add(Embedding(len(word_index) + 1, 300, weights=[embedding_matrix], input_length=40, trainable=False)) model3.add(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', activation='relu', subsample_length=1)) model3.add(Dropout(0.2)) model3.add(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', activation='relu', subsample_length=1)) model3.add(GlobalMaxPooling1D()) model3.add(Dropout(0.2)) model3.add(Dense(300)) model3.add(Dropout(0.2)) model3.add(BatchNormalization()) model4 = Sequential() model4.add(Embedding(len(word_index) + 1, 300, weights=[embedding_matrix], input_length=40, trainable=False)) model4.add(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', activation='relu', subsample_length=1)) model4.add(Dropout(0.2)) model4.add(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', activation='relu', subsample_length=1)) model4.add(GlobalMaxPooling1D()) model4.add(Dropout(0.2)) model4.add(Dense(300)) model4.add(Dropout(0.2)) 
model4.add(BatchNormalization()) model5 = Sequential() model5.add(Embedding(len(word_index) + 1, 300, input_length=40, dropout=0.2)) model5.add(LSTM(300, dropout_W=0.2, dropout_U=0.2)) model6 = Sequential() model6.add(Embedding(len(word_index) + 1, 300, input_length=40, dropout=0.2)) model6.add(LSTM(300, dropout_W=0.2, dropout_U=0.2)) merged_model = Sequential() merged_model.add(Merge([model1, model2, model3, model4, model5, model6], mode='concat')) merged_model.add(BatchNormalization()) merged_model.add(Dense(300)) merged_model.add(PReLU()) merged_model.add(Dropout(0.2)) merged_model.add(BatchNormalization()) merged_model.add(Dense(300)) merged_model.add(PReLU()) merged_model.add(Dropout(0.2)) merged_model.add(BatchNormalization()) merged_model.add(Dense(300)) merged_model.add(PReLU()) merged_model.add(Dropout(0.2)) merged_model.add(BatchNormalization()) merged_model.add(Dense(300)) merged_model.add(PReLU()) merged_model.add(Dropout(0.2)) merged_model.add(BatchNormalization()) merged_model.add(Dense(300)) merged_model.add(PReLU()) merged_model.add(Dropout(0.2)) merged_model.add(BatchNormalization()) merged_model.add(Dense(1)) merged_model.add(Activation('sigmoid')) merged_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) checkpoint = ModelCheckpoint('weights.h5', monitor='val_acc', save_best_only=True, verbose=2) merged_model.fit( [x1_train, x2_train, x1_train, x2_train, x1_train, x2_train], y=y_train, \ validation_data=([x1_valid, x2_valid, x1_valid, x2_valid, x1_valid, x2_valid], y_labels), \ batch_size=384, nb_epoch=200, verbose=1, shuffle=True, callbacks=[checkpoint] )
mit
mbayon/TFG-MachineLearning
venv/lib/python3.6/site-packages/pandas/tests/io/parser/common.py
4
60970
# -*- coding: utf-8 -*- import csv import os import platform import codecs import re import sys from datetime import datetime import pytest import numpy as np from pandas._libs.lib import Timestamp import pandas as pd import pandas.util.testing as tm from pandas import DataFrame, Series, Index, MultiIndex from pandas import compat from pandas.compat import (StringIO, BytesIO, PY3, range, lrange, u) from pandas.errors import DtypeWarning, EmptyDataError, ParserError from pandas.io.common import URLError from pandas.io.parsers import TextFileReader, TextParser class ParserTests(object): """ Want to be able to test either C+Cython or Python+Cython parsers """ data1 = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo2,12,13,14,15 bar2,12,13,14,15 """ def test_empty_decimal_marker(self): data = """A|B|C 1|2,334|5 10|13|10. """ # Parsers support only length-1 decimals msg = 'Only length-1 decimal markers supported' with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(data), decimal='') def test_bad_stream_exception(self): # Issue 13652: # This test validates that both python engine # and C engine will raise UnicodeDecodeError instead of # c engine raising ParserError and swallowing exception # that caused read to fail. handle = open(self.csv_shiftjs, "rb") codec = codecs.lookup("utf-8") utf8 = codecs.lookup('utf-8') # stream must be binary UTF8 stream = codecs.StreamRecoder( handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter) if compat.PY3: msg = "'utf-8' codec can't decode byte" else: msg = "'utf8' codec can't decode byte" with tm.assert_raises_regex(UnicodeDecodeError, msg): self.read_csv(stream) stream.close() def test_read_csv(self): if not compat.PY3: if compat.is_platform_windows(): prefix = u("file:///") else: prefix = u("file://") fname = prefix + compat.text_type(self.csv1) self.read_csv(fname, index_col=0, parse_dates=True) def test_1000_sep(self): data = """A|B|C 1|2,334|5 10|13|10. 
""" expected = DataFrame({ 'A': [1, 10], 'B': [2334, 13], 'C': [5, 10.] }) df = self.read_csv(StringIO(data), sep='|', thousands=',') tm.assert_frame_equal(df, expected) df = self.read_table(StringIO(data), sep='|', thousands=',') tm.assert_frame_equal(df, expected) def test_squeeze(self): data = """\ a,1 b,2 c,3 """ idx = Index(['a', 'b', 'c'], name=0) expected = Series([1, 2, 3], name=1, index=idx) result = self.read_table(StringIO(data), sep=',', index_col=0, header=None, squeeze=True) assert isinstance(result, Series) tm.assert_series_equal(result, expected) def test_squeeze_no_view(self): # see gh-8217 # Series should not be a view data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13""" result = self.read_csv(StringIO(data), index_col='time', squeeze=True) assert not result._is_view def test_malformed(self): # see gh-6607 # all data = """ignore A,B,C 1,2,3 # comment 1,2,3,4,5 2,3,4 """ msg = 'Expected 3 fields in line 4, saw 5' with tm.assert_raises_regex(Exception, msg): self.read_table(StringIO(data), sep=',', header=1, comment='#') # first chunk data = """ignore A,B,C skip 1,2,3 3,5,10 # comment 1,2,3,4,5 2,3,4 """ msg = 'Expected 3 fields in line 6, saw 5' with tm.assert_raises_regex(Exception, msg): it = self.read_table(StringIO(data), sep=',', header=1, comment='#', iterator=True, chunksize=1, skiprows=[2]) it.read(5) # middle chunk data = """ignore A,B,C skip 1,2,3 3,5,10 # comment 1,2,3,4,5 2,3,4 """ msg = 'Expected 3 fields in line 6, saw 5' with tm.assert_raises_regex(Exception, msg): it = self.read_table(StringIO(data), sep=',', header=1, comment='#', iterator=True, chunksize=1, skiprows=[2]) it.read(3) # last chunk data = """ignore A,B,C skip 1,2,3 3,5,10 # comment 1,2,3,4,5 2,3,4 """ msg = 'Expected 3 fields in line 6, saw 5' with tm.assert_raises_regex(Exception, msg): it = self.read_table(StringIO(data), sep=',', header=1, comment='#', iterator=True, chunksize=1, skiprows=[2]) it.read() # skipfooter is not supported with the C parser yet if 
self.engine == 'python': # skipfooter data = """ignore A,B,C 1,2,3 # comment 1,2,3,4,5 2,3,4 footer """ msg = 'Expected 3 fields in line 4, saw 5' with tm.assert_raises_regex(Exception, msg): self.read_table(StringIO(data), sep=',', header=1, comment='#', skipfooter=1) def test_quoting(self): bad_line_small = """printer\tresult\tvariant_name Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten"" Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa pytest.raises(Exception, self.read_table, StringIO(bad_line_small), sep='\t') good_line_small = bad_line_small + '"' df = self.read_table(StringIO(good_line_small), sep='\t') assert len(df) == 3 def test_unnamed_columns(self): data = """A,B,C,, 1,2,3,4,5 6,7,8,9,10 11,12,13,14,15 """ expected = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]], dtype=np.int64) df = self.read_table(StringIO(data), sep=',') tm.assert_almost_equal(df.values, expected) tm.assert_index_equal(df.columns, Index(['A', 'B', 'C', 'Unnamed: 3', 'Unnamed: 4'])) def test_duplicate_columns(self): # TODO: add test for condition 'mangle_dupe_cols=False' # once it is actually supported (gh-12935) data = """A,A,B,B,B 1,2,3,4,5 6,7,8,9,10 11,12,13,14,15 """ for method in ('read_csv', 'read_table'): # check default behavior df = getattr(self, method)(StringIO(data), sep=',') assert list(df.columns) == ['A', 'A.1', 'B', 'B.1', 'B.2'] df = getattr(self, method)(StringIO(data), sep=',', mangle_dupe_cols=True) assert list(df.columns) == ['A', 'A.1', 'B', 'B.1', 'B.2'] def test_csv_mixed_type(self): data = """A,B,C a,1,2 b,3,4 c,4,5 """ expected = DataFrame({'A': ['a', 'b', 'c'], 'B': [1, 3, 4], 'C': [2, 4, 5]}) out = 
self.read_csv(StringIO(data)) tm.assert_frame_equal(out, expected) def test_read_csv_dataframe(self): df = self.read_csv(self.csv1, index_col=0, parse_dates=True) df2 = self.read_table(self.csv1, sep=',', index_col=0, parse_dates=True) tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D'])) assert df.index.name == 'index' assert isinstance( df.index[0], (datetime, np.datetime64, Timestamp)) assert df.values.dtype == np.float64 tm.assert_frame_equal(df, df2) def test_read_csv_no_index_name(self): df = self.read_csv(self.csv2, index_col=0, parse_dates=True) df2 = self.read_table(self.csv2, sep=',', index_col=0, parse_dates=True) tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D', 'E'])) assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp)) assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64 tm.assert_frame_equal(df, df2) def test_read_table_unicode(self): fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8')) df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None) assert isinstance(df1[0].values[0], compat.text_type) def test_read_table_wrong_num_columns(self): # too few! 
data = """A,B,C,D,E,F 1,2,3,4,5,6 6,7,8,9,10,11,12 11,12,13,14,15,16 """ pytest.raises(ValueError, self.read_csv, StringIO(data)) def test_read_duplicate_index_explicit(self): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo,12,13,14,15 bar,12,13,14,15 """ result = self.read_csv(StringIO(data), index_col=0) expected = self.read_csv(StringIO(data)).set_index( 'index', verify_integrity=False) tm.assert_frame_equal(result, expected) result = self.read_table(StringIO(data), sep=',', index_col=0) expected = self.read_table(StringIO(data), sep=',', ).set_index( 'index', verify_integrity=False) tm.assert_frame_equal(result, expected) def test_read_duplicate_index_implicit(self): data = """A,B,C,D foo,2,3,4,5 bar,7,8,9,10 baz,12,13,14,15 qux,12,13,14,15 foo,12,13,14,15 bar,12,13,14,15 """ # make sure an error isn't thrown self.read_csv(StringIO(data)) self.read_table(StringIO(data), sep=',') def test_parse_bools(self): data = """A,B True,1 False,2 True,3 """ data = self.read_csv(StringIO(data)) assert data['A'].dtype == np.bool_ data = """A,B YES,1 no,2 yes,3 No,3 Yes,3 """ data = self.read_csv(StringIO(data), true_values=['yes', 'Yes', 'YES'], false_values=['no', 'NO', 'No']) assert data['A'].dtype == np.bool_ data = """A,B TRUE,1 FALSE,2 TRUE,3 """ data = self.read_csv(StringIO(data)) assert data['A'].dtype == np.bool_ data = """A,B foo,bar bar,foo""" result = self.read_csv(StringIO(data), true_values=['foo'], false_values=['bar']) expected = DataFrame({'A': [True, False], 'B': [False, True]}) tm.assert_frame_equal(result, expected) def test_int_conversion(self): data = """A,B 1.0,1 2.0,2 3.0,3 """ data = self.read_csv(StringIO(data)) assert data['A'].dtype == np.float64 assert data['B'].dtype == np.int64 def test_read_nrows(self): expected = self.read_csv(StringIO(self.data1))[:3] df = self.read_csv(StringIO(self.data1), nrows=3) tm.assert_frame_equal(df, expected) # see gh-10476 df = self.read_csv(StringIO(self.data1), nrows=3.0) 
tm.assert_frame_equal(df, expected) msg = r"'nrows' must be an integer >=0" with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(self.data1), nrows=1.2) with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(self.data1), nrows='foo') with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(self.data1), nrows=-1) def test_read_chunksize(self): reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2) df = self.read_csv(StringIO(self.data1), index_col=0) chunks = list(reader) tm.assert_frame_equal(chunks[0], df[:2]) tm.assert_frame_equal(chunks[1], df[2:4]) tm.assert_frame_equal(chunks[2], df[4:]) # with invalid chunksize value: msg = r"'chunksize' must be an integer >=1" with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(self.data1), chunksize=1.3) with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(self.data1), chunksize='foo') with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(self.data1), chunksize=0) def test_read_chunksize_and_nrows(self): # gh-15755 # With nrows reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2, nrows=5) df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5) tm.assert_frame_equal(pd.concat(reader), df) # chunksize > nrows reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=8, nrows=5) df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5) tm.assert_frame_equal(pd.concat(reader), df) # with changing "size": reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=8, nrows=5) df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5) tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2]) tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5]) with pytest.raises(StopIteration): reader.get_chunk(size=3) def test_read_chunksize_named(self): reader = self.read_csv( StringIO(self.data1), index_col='index', chunksize=2) df = self.read_csv(StringIO(self.data1), 
index_col='index') chunks = list(reader) tm.assert_frame_equal(chunks[0], df[:2]) tm.assert_frame_equal(chunks[1], df[2:4]) tm.assert_frame_equal(chunks[2], df[4:]) def test_get_chunk_passed_chunksize(self): data = """A,B,C 1,2,3 4,5,6 7,8,9 1,2,3""" result = self.read_csv(StringIO(data), chunksize=2) piece = result.get_chunk() assert len(piece) == 2 def test_read_chunksize_generated_index(self): # GH 12185 reader = self.read_csv(StringIO(self.data1), chunksize=2) df = self.read_csv(StringIO(self.data1)) tm.assert_frame_equal(pd.concat(reader), df) reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0) df = self.read_csv(StringIO(self.data1), index_col=0) tm.assert_frame_equal(pd.concat(reader), df) def test_read_text_list(self): data = """A,B,C\nfoo,1,2,3\nbar,4,5,6""" as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar', '4', '5', '6']] df = self.read_csv(StringIO(data), index_col=0) parser = TextParser(as_list, index_col=0, chunksize=2) chunk = parser.read(None) tm.assert_frame_equal(chunk, df) def test_iterator(self): # See gh-6607 reader = self.read_csv(StringIO(self.data1), index_col=0, iterator=True) df = self.read_csv(StringIO(self.data1), index_col=0) chunk = reader.read(3) tm.assert_frame_equal(chunk, df[:3]) last_chunk = reader.read(5) tm.assert_frame_equal(last_chunk, df[3:]) # pass list lines = list(csv.reader(StringIO(self.data1))) parser = TextParser(lines, index_col=0, chunksize=2) df = self.read_csv(StringIO(self.data1), index_col=0) chunks = list(parser) tm.assert_frame_equal(chunks[0], df[:2]) tm.assert_frame_equal(chunks[1], df[2:4]) tm.assert_frame_equal(chunks[2], df[4:]) # pass skiprows parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1]) chunks = list(parser) tm.assert_frame_equal(chunks[0], df[1:3]) treader = self.read_table(StringIO(self.data1), sep=',', index_col=0, iterator=True) assert isinstance(treader, TextFileReader) # gh-3967: stopping iteration when chunksize is specified data = """A,B,C 
foo,1,2,3 bar,4,5,6 baz,7,8,9 """ reader = self.read_csv(StringIO(data), iterator=True) result = list(reader) expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[ 3, 6, 9]), index=['foo', 'bar', 'baz']) tm.assert_frame_equal(result[0], expected) # chunksize = 1 reader = self.read_csv(StringIO(data), chunksize=1) result = list(reader) expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[ 3, 6, 9]), index=['foo', 'bar', 'baz']) assert len(result) == 3 tm.assert_frame_equal(pd.concat(result), expected) # skipfooter is not supported with the C parser yet if self.engine == 'python': # test bad parameter (skipfooter) reader = self.read_csv(StringIO(self.data1), index_col=0, iterator=True, skipfooter=1) pytest.raises(ValueError, reader.read, 3) def test_pass_names_with_index(self): lines = self.data1.split('\n') no_header = '\n'.join(lines[1:]) # regular index names = ['index', 'A', 'B', 'C', 'D'] df = self.read_csv(StringIO(no_header), index_col=0, names=names) expected = self.read_csv(StringIO(self.data1), index_col=0) tm.assert_frame_equal(df, expected) # multi index data = """index1,index2,A,B,C,D foo,one,2,3,4,5 foo,two,7,8,9,10 foo,three,12,13,14,15 bar,one,12,13,14,15 bar,two,12,13,14,15 """ lines = data.split('\n') no_header = '\n'.join(lines[1:]) names = ['index1', 'index2', 'A', 'B', 'C', 'D'] df = self.read_csv(StringIO(no_header), index_col=[0, 1], names=names) expected = self.read_csv(StringIO(data), index_col=[0, 1]) tm.assert_frame_equal(df, expected) df = self.read_csv(StringIO(data), index_col=['index1', 'index2']) tm.assert_frame_equal(df, expected) def test_multi_index_no_level_names(self): data = """index1,index2,A,B,C,D foo,one,2,3,4,5 foo,two,7,8,9,10 foo,three,12,13,14,15 bar,one,12,13,14,15 bar,two,12,13,14,15 """ data2 = """A,B,C,D foo,one,2,3,4,5 foo,two,7,8,9,10 foo,three,12,13,14,15 bar,one,12,13,14,15 bar,two,12,13,14,15 """ lines = data.split('\n') no_header = '\n'.join(lines[1:]) names = ['A', 'B', 'C', 'D'] df = 
self.read_csv(StringIO(no_header), index_col=[0, 1], header=None, names=names) expected = self.read_csv(StringIO(data), index_col=[0, 1]) tm.assert_frame_equal(df, expected, check_names=False) # 2 implicit first cols df2 = self.read_csv(StringIO(data2)) tm.assert_frame_equal(df2, df) # reverse order of index df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names, header=None) expected = self.read_csv(StringIO(data), index_col=[1, 0]) tm.assert_frame_equal(df, expected, check_names=False) def test_multi_index_blank_df(self): # GH 14545 data = """a,b """ df = self.read_csv(StringIO(data), header=[0]) expected = DataFrame(columns=['a', 'b']) tm.assert_frame_equal(df, expected) round_trip = self.read_csv(StringIO( expected.to_csv(index=False)), header=[0]) tm.assert_frame_equal(round_trip, expected) data_multiline = """a,b c,d """ df2 = self.read_csv(StringIO(data_multiline), header=[0, 1]) cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')]) expected2 = DataFrame(columns=cols) tm.assert_frame_equal(df2, expected2) round_trip = self.read_csv(StringIO( expected2.to_csv(index=False)), header=[0, 1]) tm.assert_frame_equal(round_trip, expected2) def test_no_unnamed_index(self): data = """ id c0 c1 c2 0 1 0 a b 1 2 0 c d 2 2 2 e f """ df = self.read_table(StringIO(data), sep=' ') assert df.index.name is None def test_read_csv_parse_simple_list(self): text = """foo bar baz qux foo foo bar""" df = self.read_csv(StringIO(text), header=None) expected = DataFrame({0: ['foo', 'bar baz', 'qux foo', 'foo', 'bar']}) tm.assert_frame_equal(df, expected) @tm.network def test_url(self): # HTTP(S) url = ('https://raw.github.com/pandas-dev/pandas/master/' 'pandas/tests/io/parser/data/salaries.csv') url_table = self.read_table(url) dirpath = tm.get_data_path() localtable = os.path.join(dirpath, 'salaries.csv') local_table = self.read_table(localtable) tm.assert_frame_equal(url_table, local_table) # TODO: ftp testing @tm.slow def test_file(self): dirpath = 
tm.get_data_path() localtable = os.path.join(dirpath, 'salaries.csv') local_table = self.read_table(localtable) try: url_table = self.read_table('file://localhost/' + localtable) except URLError: # fails on some systems pytest.skip("failing on %s" % ' '.join(platform.uname()).strip()) tm.assert_frame_equal(url_table, local_table) def test_path_pathlib(self): df = tm.makeDataFrame() result = tm.round_trip_pathlib(df.to_csv, lambda p: self.read_csv(p, index_col=0)) tm.assert_frame_equal(df, result) def test_path_localpath(self): df = tm.makeDataFrame() result = tm.round_trip_localpath( df.to_csv, lambda p: self.read_csv(p, index_col=0)) tm.assert_frame_equal(df, result) def test_nonexistent_path(self): # gh-2428: pls no segfault # gh-14086: raise more helpful FileNotFoundError path = '%s.csv' % tm.rands(10) pytest.raises(compat.FileNotFoundError, self.read_csv, path) def test_missing_trailing_delimiters(self): data = """A,B,C,D 1,2,3,4 1,3,3, 1,4,5""" result = self.read_csv(StringIO(data)) assert result['D'].isnull()[1:].all() def test_skipinitialspace(self): s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, ' '1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, ' '314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, ' '70.06056, 344.98370, 1, 1, -0.689265, -0.692787, ' '0.212036, 14.7674, 41.605, -9999.0, -9999.0, ' '-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128') sfile = StringIO(s) # it's 33 columns result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'], header=None, skipinitialspace=True) assert pd.isnull(result.iloc[0, 29]) def test_utf16_bom_skiprows(self): # #2298 data = u("""skip this skip this too A\tB\tC 1\t2\t3 4\t5\t6""") data2 = u("""skip this skip this too A,B,C 1,2,3 4,5,6""") path = '__%s__.csv' % tm.rands(10) with tm.ensure_clean(path) as path: for sep, dat in [('\t', data), (',', data2)]: for enc in ['utf-16', 'utf-16le', 'utf-16be']: bytes = dat.encode(enc) with open(path, 'wb') as f: f.write(bytes) s = 
BytesIO(dat.encode('utf-8')) if compat.PY3: # somewhat False since the code never sees bytes from io import TextIOWrapper s = TextIOWrapper(s, encoding='utf-8') result = self.read_csv(path, encoding=enc, skiprows=2, sep=sep) expected = self.read_csv(s, encoding='utf-8', skiprows=2, sep=sep) s.close() tm.assert_frame_equal(result, expected) def test_utf16_example(self): path = tm.get_data_path('utf16_ex.txt') # it works! and is the right length result = self.read_table(path, encoding='utf-16') assert len(result) == 50 if not compat.PY3: buf = BytesIO(open(path, 'rb').read()) result = self.read_table(buf, encoding='utf-16') assert len(result) == 50 def test_unicode_encoding(self): pth = tm.get_data_path('unicode_series.csv') result = self.read_csv(pth, header=None, encoding='latin-1') result = result.set_index(0) got = result[1][1632] expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)') assert got == expected def test_trailing_delimiters(self): # #2442. grumble grumble data = """A,B,C 1,2,3, 4,5,6, 7,8,9,""" result = self.read_csv(StringIO(data), index_col=False) expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8], 'C': [3, 6, 9]}) tm.assert_frame_equal(result, expected) def test_escapechar(self): # http://stackoverflow.com/questions/13824840/feature-request-for- # pandas-read-csv data = '''SEARCH_TERM,ACTUAL_URL "bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord" "tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord" "SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa result = self.read_csv(StringIO(data), escapechar='\\', quotechar='"', encoding='utf-8') assert result['SEARCH_TERM'][2] == ('SLAGBORD, "Bergslagen", ' 'IKEA:s 1700-tals 
serie') tm.assert_index_equal(result.columns, Index(['SEARCH_TERM', 'ACTUAL_URL'])) def test_int64_min_issues(self): # #2599 data = 'A,B\n0,0\n0,' result = self.read_csv(StringIO(data)) expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]}) tm.assert_frame_equal(result, expected) def test_parse_integers_above_fp_precision(self): data = """Numbers 17007000002000191 17007000002000191 17007000002000191 17007000002000191 17007000002000192 17007000002000192 17007000002000192 17007000002000192 17007000002000192 17007000002000194""" result = self.read_csv(StringIO(data)) expected = DataFrame({'Numbers': [17007000002000191, 17007000002000191, 17007000002000191, 17007000002000191, 17007000002000192, 17007000002000192, 17007000002000192, 17007000002000192, 17007000002000192, 17007000002000194]}) assert np.array_equal(result['Numbers'], expected['Numbers']) def test_chunks_have_consistent_numerical_type(self): integers = [str(i) for i in range(499999)] data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers) with tm.assert_produces_warning(False): df = self.read_csv(StringIO(data)) # Assert that types were coerced. assert type(df.a[0]) is np.float64 assert df.a.dtype == np.float def test_warn_if_chunks_have_mismatched_type(self): warning_type = False integers = [str(i) for i in range(499999)] data = "a\n" + "\n".join(integers + ['a', 'b'] + integers) # see gh-3866: if chunks are different types and can't # be coerced using numerical types, then issue warning. 
if self.engine == 'c' and self.low_memory: warning_type = DtypeWarning with tm.assert_produces_warning(warning_type): df = self.read_csv(StringIO(data)) assert df.a.dtype == np.object def test_integer_overflow_bug(self): # see gh-2601 data = "65248E10 11\n55555E55 22\n" result = self.read_csv(StringIO(data), header=None, sep=' ') assert result[0].dtype == np.float64 result = self.read_csv(StringIO(data), header=None, sep=r'\s+') assert result[0].dtype == np.float64 def test_catch_too_many_names(self): # see gh-5156 data = """\ 1,2,3 4,,6 7,8,9 10,11,12\n""" pytest.raises(ValueError, self.read_csv, StringIO(data), header=0, names=['a', 'b', 'c', 'd']) def test_ignore_leading_whitespace(self): # see gh-3374, gh-6607 data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9' result = self.read_table(StringIO(data), sep=r'\s+') expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]}) tm.assert_frame_equal(result, expected) def test_chunk_begins_with_newline_whitespace(self): # see gh-10022 data = '\n hello\nworld\n' result = self.read_csv(StringIO(data), header=None) assert len(result) == 2 # see gh-9735: this issue is C parser-specific (bug when # parsing whitespace and characters at chunk boundary) if self.engine == 'c': chunk1 = 'a' * (1024 * 256 - 2) + '\na' chunk2 = '\n a' result = self.read_csv(StringIO(chunk1 + chunk2), header=None) expected = DataFrame(['a' * (1024 * 256 - 2), 'a', ' a']) tm.assert_frame_equal(result, expected) def test_empty_with_index(self): # see gh-10184 data = 'x,y' result = self.read_csv(StringIO(data), index_col=0) expected = DataFrame([], columns=['y'], index=Index([], name='x')) tm.assert_frame_equal(result, expected) def test_empty_with_multiindex(self): # see gh-10467 data = 'x,y,z' result = self.read_csv(StringIO(data), index_col=['x', 'y']) expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays( [[]] * 2, names=['x', 'y'])) tm.assert_frame_equal(result, expected, check_index_type=False) def 
test_empty_with_reversed_multiindex(self): data = 'x,y,z' result = self.read_csv(StringIO(data), index_col=[1, 0]) expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays( [[]] * 2, names=['y', 'x'])) tm.assert_frame_equal(result, expected, check_index_type=False) def test_float_parser(self): # see gh-9565 data = '45e-1,4.5,45.,inf,-inf' result = self.read_csv(StringIO(data), header=None) expected = DataFrame([[float(s) for s in data.split(',')]]) tm.assert_frame_equal(result, expected) def test_scientific_no_exponent(self): # see gh-12215 df = DataFrame.from_items([('w', ['2e']), ('x', ['3E']), ('y', ['42e']), ('z', ['632E'])]) data = df.to_csv(index=False) for prec in self.float_precision_choices: df_roundtrip = self.read_csv( StringIO(data), float_precision=prec) tm.assert_frame_equal(df_roundtrip, df) def test_int64_overflow(self): data = """ID 00013007854817840016671868 00013007854817840016749251 00013007854817840016754630 00013007854817840016781876 00013007854817840017028824 00013007854817840017963235 00013007854817840018860166""" # 13007854817840016671868 > UINT64_MAX, so this # will overflow and return object as the dtype. result = self.read_csv(StringIO(data)) assert result['ID'].dtype == object # 13007854817840016671868 > UINT64_MAX, so attempts # to cast to either int64 or uint64 will result in # an OverflowError being raised. for conv in (np.int64, np.uint64): pytest.raises(OverflowError, self.read_csv, StringIO(data), converters={'ID': conv}) # These numbers fall right inside the int64-uint64 range, # so they should be parsed as string. ui_max = np.iinfo(np.uint64).max i_max = np.iinfo(np.int64).max i_min = np.iinfo(np.int64).min for x in [i_max, i_min, ui_max]: result = self.read_csv(StringIO(str(x)), header=None) expected = DataFrame([x]) tm.assert_frame_equal(result, expected) # These numbers fall just outside the int64-uint64 range, # so they should be parsed as string. 
too_big = ui_max + 1 too_small = i_min - 1 for x in [too_big, too_small]: result = self.read_csv(StringIO(str(x)), header=None) expected = DataFrame([str(x)]) tm.assert_frame_equal(result, expected) # No numerical dtype can hold both negative and uint64 values, # so they should be cast as string. data = '-1\n' + str(2**63) expected = DataFrame([str(-1), str(2**63)]) result = self.read_csv(StringIO(data), header=None) tm.assert_frame_equal(result, expected) data = str(2**63) + '\n-1' expected = DataFrame([str(2**63), str(-1)]) result = self.read_csv(StringIO(data), header=None) tm.assert_frame_equal(result, expected) def test_empty_with_nrows_chunksize(self): # see gh-9535 expected = DataFrame([], columns=['foo', 'bar']) result = self.read_csv(StringIO('foo,bar\n'), nrows=10) tm.assert_frame_equal(result, expected) result = next(iter(self.read_csv( StringIO('foo,bar\n'), chunksize=10))) tm.assert_frame_equal(result, expected) with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): result = self.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True) result = DataFrame(result[2], columns=result[1], index=result[0]) tm.assert_frame_equal(DataFrame.from_records( result), expected, check_index_type=False) with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): result = next(iter(self.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True))) result = DataFrame(result[2], columns=result[1], index=result[0]) tm.assert_frame_equal(DataFrame.from_records(result), expected, check_index_type=False) def test_eof_states(self): # see gh-10728, gh-10548 # With skip_blank_lines = True expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c']) # gh-10728: WHITESPACE_LINE data = 'a,b,c\n4,5,6\n ' result = self.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) # gh-10548: EAT_LINE_COMMENT data = 'a,b,c\n4,5,6\n#comment' result = self.read_csv(StringIO(data), comment='#') tm.assert_frame_equal(result, expected) # EAT_CRNL_NOP 
data = 'a,b,c\n4,5,6\n\r' result = self.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) # EAT_COMMENT data = 'a,b,c\n4,5,6#comment' result = self.read_csv(StringIO(data), comment='#') tm.assert_frame_equal(result, expected) # SKIP_LINE data = 'a,b,c\n4,5,6\nskipme' result = self.read_csv(StringIO(data), skiprows=[2]) tm.assert_frame_equal(result, expected) # With skip_blank_lines = False # EAT_LINE_COMMENT data = 'a,b,c\n4,5,6\n#comment' result = self.read_csv( StringIO(data), comment='#', skip_blank_lines=False) expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c']) tm.assert_frame_equal(result, expected) # IN_FIELD data = 'a,b,c\n4,5,6\n ' result = self.read_csv(StringIO(data), skip_blank_lines=False) expected = DataFrame( [['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c']) tm.assert_frame_equal(result, expected) # EAT_CRNL data = 'a,b,c\n4,5,6\n\r' result = self.read_csv(StringIO(data), skip_blank_lines=False) expected = DataFrame( [[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c']) tm.assert_frame_equal(result, expected) # Should produce exceptions # ESCAPED_CHAR data = "a,b,c\n4,5,6\n\\" pytest.raises(Exception, self.read_csv, StringIO(data), escapechar='\\') # ESCAPE_IN_QUOTED_FIELD data = 'a,b,c\n4,5,6\n"\\' pytest.raises(Exception, self.read_csv, StringIO(data), escapechar='\\') # IN_QUOTED_FIELD data = 'a,b,c\n4,5,6\n"' pytest.raises(Exception, self.read_csv, StringIO(data), escapechar='\\') def test_uneven_lines_with_usecols(self): # See gh-12203 csv = r"""a,b,c 0,1,2 3,4,5,6,7 8,9,10 """ # make sure that an error is still thrown # when the 'usecols' parameter is not provided msg = r"Expected \d+ fields in line \d+, saw \d+" with tm.assert_raises_regex(ValueError, msg): df = self.read_csv(StringIO(csv)) expected = DataFrame({ 'a': [0, 3, 8], 'b': [1, 4, 9] }) usecols = [0, 1] df = self.read_csv(StringIO(csv), usecols=usecols) tm.assert_frame_equal(df, expected) usecols = ['a', 'b'] df = self.read_csv(StringIO(csv), 
usecols=usecols) tm.assert_frame_equal(df, expected) def test_read_empty_with_usecols(self): # See gh-12493 names = ['Dummy', 'X', 'Dummy_2'] usecols = names[1:2] # ['X'] # first, check to see that the response of # parser when faced with no provided columns # throws the correct error, with or without usecols errmsg = "No columns to parse from file" with tm.assert_raises_regex(EmptyDataError, errmsg): self.read_csv(StringIO('')) with tm.assert_raises_regex(EmptyDataError, errmsg): self.read_csv(StringIO(''), usecols=usecols) expected = DataFrame(columns=usecols, index=[0], dtype=np.float64) df = self.read_csv(StringIO(',,'), names=names, usecols=usecols) tm.assert_frame_equal(df, expected) expected = DataFrame(columns=usecols) df = self.read_csv(StringIO(''), names=names, usecols=usecols) tm.assert_frame_equal(df, expected) def test_trailing_spaces(self): data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa expected = DataFrame([[1., 2., 4.], [5.1, np.nan, 10.]]) # gh-8661, gh-8679: this should ignore six lines including # lines with trailing whitespace and blank lines df = self.read_csv(StringIO(data.replace(',', ' ')), header=None, delim_whitespace=True, skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True) tm.assert_frame_equal(df, expected) df = self.read_table(StringIO(data.replace(',', ' ')), header=None, delim_whitespace=True, skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True) tm.assert_frame_equal(df, expected) # gh-8983: test skipping set of rows after a row with trailing spaces expected = DataFrame({"A": [1., 5.1], "B": [2., np.nan], "C": [4., 10]}) df = self.read_table(StringIO(data.replace(',', ' ')), delim_whitespace=True, skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True) tm.assert_frame_equal(df, expected) def test_raise_on_sep_with_delim_whitespace(self): # see gh-6607 data = 'a b c\n1 2 3' with tm.assert_raises_regex(ValueError, 'you can only specify one'): 
self.read_table(StringIO(data), sep=r'\s', delim_whitespace=True) def test_single_char_leading_whitespace(self): # see gh-9710 data = """\ MyColumn a b a b\n""" expected = DataFrame({'MyColumn': list('abab')}) result = self.read_csv(StringIO(data), delim_whitespace=True, skipinitialspace=True) tm.assert_frame_equal(result, expected) result = self.read_csv(StringIO(data), skipinitialspace=True) tm.assert_frame_equal(result, expected) def test_empty_lines(self): data = """\ A,B,C 1,2.,4. 5.,NaN,10.0 -70,.4,1 """ expected = np.array([[1., 2., 4.], [5., np.nan, 10.], [-70., .4, 1.]]) df = self.read_csv(StringIO(data)) tm.assert_numpy_array_equal(df.values, expected) df = self.read_csv(StringIO(data.replace(',', ' ')), sep=r'\s+') tm.assert_numpy_array_equal(df.values, expected) expected = np.array([[1., 2., 4.], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], [5., np.nan, 10.], [np.nan, np.nan, np.nan], [-70., .4, 1.]]) df = self.read_csv(StringIO(data), skip_blank_lines=False) tm.assert_numpy_array_equal(df.values, expected) def test_whitespace_lines(self): data = """ \t \t\t \t A,B,C \t 1,2.,4. 5.,NaN,10.0 """ expected = np.array([[1, 2., 4.], [5., np.nan, 10.]]) df = self.read_csv(StringIO(data)) tm.assert_numpy_array_equal(df.values, expected) def test_regex_separator(self): # see gh-6607 data = """ A B C D a 1 2 3 4 b 1 2 3 4 c 1 2 3 4 """ df = self.read_table(StringIO(data), sep=r'\s+') expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)), index_col=0) assert expected.index.name is None tm.assert_frame_equal(df, expected) data = ' a b c\n1 2 3 \n4 5 6\n 7 8 9' result = self.read_table(StringIO(data), sep=r'\s+') expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=['a', 'b', 'c']) tm.assert_frame_equal(result, expected) @tm.capture_stdout def test_verbose_import(self): text = """a,b,c,d one,1,2,3 one,1,2,3 ,1,2,3 one,1,2,3 ,1,2,3 ,1,2,3 one,1,2,3 two,1,2,3""" # Engines are verbose in different ways. 
self.read_csv(StringIO(text), verbose=True) output = sys.stdout.getvalue() if self.engine == 'c': assert 'Tokenization took:' in output assert 'Parser memory cleanup took:' in output else: # Python engine assert output == 'Filled 3 NA values in column a\n' # Reset the stdout buffer. sys.stdout = StringIO() text = """a,b,c,d one,1,2,3 two,1,2,3 three,1,2,3 four,1,2,3 five,1,2,3 ,1,2,3 seven,1,2,3 eight,1,2,3""" self.read_csv(StringIO(text), verbose=True, index_col=0) output = sys.stdout.getvalue() # Engines are verbose in different ways. if self.engine == 'c': assert 'Tokenization took:' in output assert 'Parser memory cleanup took:' in output else: # Python engine assert output == 'Filled 1 NA values in column a\n' def test_iteration_open_handle(self): if PY3: pytest.skip( "won't work in Python 3 {0}".format(sys.version_info)) with tm.ensure_clean() as path: with open(path, 'wb') as f: f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG') with open(path, 'rb') as f: for line in f: if 'CCC' in line: break if self.engine == 'c': pytest.raises(Exception, self.read_table, f, squeeze=True, header=None) else: result = self.read_table(f, squeeze=True, header=None) expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0) tm.assert_series_equal(result, expected) def test_1000_sep_with_decimal(self): data = """A|B|C 1|2,334.01|5 10|13|10. """ expected = DataFrame({ 'A': [1, 10], 'B': [2334.01, 13], 'C': [5, 10.] 
}) assert expected.A.dtype == 'int64' assert expected.B.dtype == 'float' assert expected.C.dtype == 'float' df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.') tm.assert_frame_equal(df, expected) df = self.read_table(StringIO(data), sep='|', thousands=',', decimal='.') tm.assert_frame_equal(df, expected) data_with_odd_sep = """A|B|C 1|2.334,01|5 10|13|10, """ df = self.read_csv(StringIO(data_with_odd_sep), sep='|', thousands='.', decimal=',') tm.assert_frame_equal(df, expected) df = self.read_table(StringIO(data_with_odd_sep), sep='|', thousands='.', decimal=',') tm.assert_frame_equal(df, expected) def test_euro_decimal_format(self): data = """Id;Number1;Number2;Text1;Text2;Number3 1;1521,1541;187101,9543;ABC;poi;4,738797819 2;121,12;14897,76;DEF;uyt;0,377320872 3;878,158;108013,434;GHI;rez;2,735694704""" df2 = self.read_csv(StringIO(data), sep=';', decimal=',') assert df2['Number1'].dtype == float assert df2['Number2'].dtype == float assert df2['Number3'].dtype == float def test_read_duplicate_names(self): # See gh-7160 data = "a,b,a\n0,1,2\n3,4,5" df = self.read_csv(StringIO(data)) expected = DataFrame([[0, 1, 2], [3, 4, 5]], columns=['a', 'b', 'a.1']) tm.assert_frame_equal(df, expected) data = "0,1,2\n3,4,5" df = self.read_csv(StringIO(data), names=["a", "b", "a"]) expected = DataFrame([[0, 1, 2], [3, 4, 5]], columns=['a', 'b', 'a.1']) tm.assert_frame_equal(df, expected) def test_inf_parsing(self): data = """\ ,A a,inf b,-inf c,+Inf d,-Inf e,INF f,-INF g,+INf h,-INf i,inF j,-inF""" inf = float('inf') expected = Series([inf, -inf] * 5) df = self.read_csv(StringIO(data), index_col=0) tm.assert_almost_equal(df['A'].values, expected.values) df = self.read_csv(StringIO(data), index_col=0, na_filter=False) tm.assert_almost_equal(df['A'].values, expected.values) def test_raise_on_no_columns(self): # single newline data = "\n" pytest.raises(EmptyDataError, self.read_csv, StringIO(data)) # test with more than a single newline data = "\n\n\n" 
pytest.raises(EmptyDataError, self.read_csv, StringIO(data)) def test_compact_ints_use_unsigned(self): # see gh-13323 data = 'a,b,c\n1,9,258' # sanity check expected = DataFrame({ 'a': np.array([1], dtype=np.int64), 'b': np.array([9], dtype=np.int64), 'c': np.array([258], dtype=np.int64), }) out = self.read_csv(StringIO(data)) tm.assert_frame_equal(out, expected) expected = DataFrame({ 'a': np.array([1], dtype=np.int8), 'b': np.array([9], dtype=np.int8), 'c': np.array([258], dtype=np.int16), }) # default behaviour for 'use_unsigned' with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): out = self.read_csv(StringIO(data), compact_ints=True) tm.assert_frame_equal(out, expected) with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): out = self.read_csv(StringIO(data), compact_ints=True, use_unsigned=False) tm.assert_frame_equal(out, expected) expected = DataFrame({ 'a': np.array([1], dtype=np.uint8), 'b': np.array([9], dtype=np.uint8), 'c': np.array([258], dtype=np.uint16), }) with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): out = self.read_csv(StringIO(data), compact_ints=True, use_unsigned=True) tm.assert_frame_equal(out, expected) def test_compact_ints_as_recarray(self): data = ('0,1,0,0\n' '1,1,0,0\n' '0,1,0,1') with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): result = self.read_csv(StringIO(data), delimiter=',', header=None, compact_ints=True, as_recarray=True) ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)]) assert result.dtype == ex_dtype with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): result = self.read_csv(StringIO(data), delimiter=',', header=None, as_recarray=True, compact_ints=True, use_unsigned=True) ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)]) assert result.dtype == ex_dtype def test_as_recarray(self): # basic test with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): data = 'a,b\n1,a\n2,b' expected = 
np.array([(1, 'a'), (2, 'b')], dtype=[('a', '=i8'), ('b', 'O')]) out = self.read_csv(StringIO(data), as_recarray=True) tm.assert_numpy_array_equal(out, expected) # index_col ignored with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): data = 'a,b\n1,a\n2,b' expected = np.array([(1, 'a'), (2, 'b')], dtype=[('a', '=i8'), ('b', 'O')]) out = self.read_csv(StringIO(data), as_recarray=True, index_col=0) tm.assert_numpy_array_equal(out, expected) # respects names with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): data = '1,a\n2,b' expected = np.array([(1, 'a'), (2, 'b')], dtype=[('a', '=i8'), ('b', 'O')]) out = self.read_csv(StringIO(data), names=['a', 'b'], header=None, as_recarray=True) tm.assert_numpy_array_equal(out, expected) # header order is respected even though it conflicts # with the natural ordering of the column names with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): data = 'b,a\n1,a\n2,b' expected = np.array([(1, 'a'), (2, 'b')], dtype=[('b', '=i8'), ('a', 'O')]) out = self.read_csv(StringIO(data), as_recarray=True) tm.assert_numpy_array_equal(out, expected) # overrides the squeeze parameter with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): data = 'a\n1' expected = np.array([(1,)], dtype=[('a', '=i8')]) out = self.read_csv(StringIO(data), as_recarray=True, squeeze=True) tm.assert_numpy_array_equal(out, expected) # does data conversions before doing recarray conversion with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): data = 'a,b\n1,a\n2,b' conv = lambda x: int(x) + 1 expected = np.array([(2, 'a'), (3, 'b')], dtype=[('a', '=i8'), ('b', 'O')]) out = self.read_csv(StringIO(data), as_recarray=True, converters={'a': conv}) tm.assert_numpy_array_equal(out, expected) # filters by usecols before doing recarray conversion with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): data = 'a,b\n1,a\n2,b' expected = np.array([(1,), (2,)], 
dtype=[('a', '=i8')]) out = self.read_csv(StringIO(data), as_recarray=True, usecols=['a']) tm.assert_numpy_array_equal(out, expected) def test_memory_map(self): mmap_file = os.path.join(self.dirpath, 'test_mmap.csv') expected = DataFrame({ 'a': [1, 2, 3], 'b': ['one', 'two', 'three'], 'c': ['I', 'II', 'III'] }) out = self.read_csv(mmap_file, memory_map=True) tm.assert_frame_equal(out, expected) def test_null_byte_char(self): # see gh-2741 data = '\x00,foo' cols = ['a', 'b'] expected = DataFrame([[np.nan, 'foo']], columns=cols) if self.engine == 'c': out = self.read_csv(StringIO(data), names=cols) tm.assert_frame_equal(out, expected) else: msg = "NULL byte detected" with tm.assert_raises_regex(ParserError, msg): self.read_csv(StringIO(data), names=cols) def test_utf8_bom(self): # see gh-4793 bom = u('\ufeff') utf8 = 'utf-8' def _encode_data_with_bom(_data): bom_data = (bom + _data).encode(utf8) return BytesIO(bom_data) # basic test data = 'a\n1' expected = DataFrame({'a': [1]}) out = self.read_csv(_encode_data_with_bom(data), encoding=utf8) tm.assert_frame_equal(out, expected) # test with "regular" quoting data = '"a"\n1' expected = DataFrame({'a': [1]}) out = self.read_csv(_encode_data_with_bom(data), encoding=utf8, quotechar='"') tm.assert_frame_equal(out, expected) # test in a data row instead of header data = 'b\n1' expected = DataFrame({'a': ['b', '1']}) out = self.read_csv(_encode_data_with_bom(data), encoding=utf8, names=['a']) tm.assert_frame_equal(out, expected) # test in empty data row with skipping data = '\n1' expected = DataFrame({'a': [1]}) out = self.read_csv(_encode_data_with_bom(data), encoding=utf8, names=['a'], skip_blank_lines=True) tm.assert_frame_equal(out, expected) # test in empty data row without skipping data = '\n1' expected = DataFrame({'a': [np.nan, 1.0]}) out = self.read_csv(_encode_data_with_bom(data), encoding=utf8, names=['a'], skip_blank_lines=False) tm.assert_frame_equal(out, expected) def test_temporary_file(self): # see gh-13398 
data1 = "0 0" from tempfile import TemporaryFile new_file = TemporaryFile("w+") new_file.write(data1) new_file.flush() new_file.seek(0) result = self.read_csv(new_file, sep=r'\s+', header=None) new_file.close() expected = DataFrame([[0, 0]]) tm.assert_frame_equal(result, expected) def test_read_csv_utf_aliases(self): # see gh issue 13549 expected = pd.DataFrame({'mb_num': [4.8], 'multibyte': ['test']}) for byte in [8, 16]: for fmt in ['utf-{0}', 'utf_{0}', 'UTF-{0}', 'UTF_{0}']: encoding = fmt.format(byte) data = 'mb_num,multibyte\n4.8,test'.encode(encoding) result = self.read_csv(BytesIO(data), encoding=encoding) tm.assert_frame_equal(result, expected) def test_internal_eof_byte(self): # see gh-5500 data = "a,b\n1\x1a,2" expected = pd.DataFrame([["1\x1a", 2]], columns=['a', 'b']) result = self.read_csv(StringIO(data)) tm.assert_frame_equal(result, expected) def test_internal_eof_byte_to_file(self): # see gh-16559 data = b'c1,c2\r\n"test \x1a test", test\r\n' expected = pd.DataFrame([["test \x1a test", " test"]], columns=["c1", "c2"]) path = '__%s__.csv' % tm.rands(10) with tm.ensure_clean(path) as path: with open(path, "wb") as f: f.write(data) result = self.read_csv(path) tm.assert_frame_equal(result, expected) def test_file_handles(self): # GH 14418 - don't close user provided file handles fh = StringIO('a,b\n1,2') self.read_csv(fh) assert not fh.closed with open(self.csv1, 'r') as f: self.read_csv(f) assert not f.closed # mmap not working with python engine if self.engine != 'python': import mmap with open(self.csv1, 'r') as f: m = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) self.read_csv(m) # closed attribute new in python 3.2 if PY3: assert not m.closed m.close() def test_invalid_file_buffer(self): # see gh-15337 class InvalidBuffer(object): pass msg = "Invalid file path or buffer object type" with tm.assert_raises_regex(ValueError, msg): self.read_csv(InvalidBuffer()) # gh-16135: we want to ensure that "tell" and "seek" # aren't actually being used 
when we call `read_csv` # # Thus, while the object may look "invalid" (these # methods are attributes of the `StringIO` class), # it is still a valid file-object for our purposes. class NoSeekTellBuffer(StringIO): def tell(self): raise AttributeError("No tell method") def seek(self, pos, whence=0): raise AttributeError("No seek method") data = "a\n1" expected = pd.DataFrame({"a": [1]}) result = self.read_csv(NoSeekTellBuffer(data)) tm.assert_frame_equal(result, expected) if PY3: from unittest import mock with tm.assert_raises_regex(ValueError, msg): self.read_csv(mock.Mock()) @tm.capture_stderr def test_skip_bad_lines(self): # see gh-15925 data = 'a\n1\n1,2,3\n4\n5,6,7' with pytest.raises(ParserError): self.read_csv(StringIO(data)) with pytest.raises(ParserError): self.read_csv(StringIO(data), error_bad_lines=True) expected = DataFrame({'a': [1, 4]}) out = self.read_csv(StringIO(data), error_bad_lines=False, warn_bad_lines=False) tm.assert_frame_equal(out, expected) val = sys.stderr.getvalue() assert val == '' # Reset the stderr buffer. sys.stderr = StringIO() out = self.read_csv(StringIO(data), error_bad_lines=False, warn_bad_lines=True) tm.assert_frame_equal(out, expected) val = sys.stderr.getvalue() assert 'Skipping line 3' in val assert 'Skipping line 5' in val
mit
loli/sklearn-ensembletrees
examples/decomposition/plot_ica_vs_pca.py
43
3343
""" ========================== FastICA on 2D point clouds ========================== This example illustrates visually in the feature space a comparison by results using two different component analysis techniques. :ref:`ICA` vs :ref:`PCA`. Representing ICA in the feature space gives the view of 'geometric ICA': ICA is an algorithm that finds directions in the feature space corresponding to projections with high non-Gaussianity. These directions need not be orthogonal in the original feature space, but they are orthogonal in the whitened feature space, in which all directions correspond to the same variance. PCA, on the other hand, finds orthogonal directions in the raw feature space that correspond to directions accounting for maximum variance. Here we simulate independent sources using a highly non-Gaussian process, 2 student T with a low number of degrees of freedom (top left figure). We mix them to create observations (top right figure). In this raw observation space, directions identified by PCA are represented by orange vectors. We represent the signal in the PCA space, after whitening by the variance corresponding to the PCA vectors (lower left). Running ICA corresponds to finding a rotation in this space to identify the directions of largest non-Gaussianity (lower right). """ print(__doc__) # Authors: Alexandre Gramfort, Gael Varoquaux # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.decomposition import PCA, FastICA ############################################################################### # Generate sample data rng = np.random.RandomState(42) S = rng.standard_t(1.5, size=(20000, 2)) S[:, 0] *= 2. 
# Mix data A = np.array([[1, 1], [0, 2]]) # Mixing matrix X = np.dot(S, A.T) # Generate observations pca = PCA() S_pca_ = pca.fit(X).transform(X) ica = FastICA(random_state=rng) S_ica_ = ica.fit(X).transform(X) # Estimate the sources S_ica_ /= S_ica_.std(axis=0) ############################################################################### # Plot results def plot_samples(S, axis_list=None): plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', linewidths=0, zorder=10, color='steelblue', alpha=0.5) if axis_list is not None: colors = ['orange', 'red'] for color, axis in zip(colors, axis_list): axis /= axis.std() x_axis, y_axis = axis # Trick to get legend to work plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color) plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6, color=color) plt.hlines(0, -3, 3) plt.vlines(0, -3, 3) plt.xlim(-3, 3) plt.ylim(-3, 3) plt.xlabel('x') plt.ylabel('y') plt.figure() plt.subplot(2, 2, 1) plot_samples(S / S.std()) plt.title('True Independent Sources') axis_list = [pca.components_.T, ica.mixing_] plt.subplot(2, 2, 2) plot_samples(X / np.std(X), axis_list=axis_list) legend = plt.legend(['PCA', 'ICA'], loc='upper right') legend.set_zorder(100) plt.title('Observations') plt.subplot(2, 2, 3) plot_samples(S_pca_ / np.std(S_pca_, axis=0)) plt.title('PCA recovered signals') plt.subplot(2, 2, 4) plot_samples(S_ica_ / np.std(S_ica_)) plt.title('ICA recovered signals') plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36) plt.show()
bsd-3-clause
q1ang/scikit-learn
doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py
254
2005
"""Build a language detector model The goal of this exercise is to train a linear classifier on text features that represent sequences of up to 3 consecutive characters so as to be recognize natural languages by using the frequencies of short character sequences as 'fingerprints'. """ # Author: Olivier Grisel <olivier.grisel@ensta.org> # License: Simplified BSD import sys from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import Perceptron from sklearn.pipeline import Pipeline from sklearn.datasets import load_files from sklearn.cross_validation import train_test_split from sklearn import metrics # The training data folder must be passed as first argument languages_data_folder = sys.argv[1] dataset = load_files(languages_data_folder) # Split the dataset in training and test set: docs_train, docs_test, y_train, y_test = train_test_split( dataset.data, dataset.target, test_size=0.5) # TASK: Build a an vectorizer that splits strings into sequence of 1 to 3 # characters instead of word tokens # TASK: Build a vectorizer / classifier pipeline using the previous analyzer # the pipeline instance should stored in a variable named clf # TASK: Fit the pipeline on the training set # TASK: Predict the outcome on the testing set in a variable named y_predicted # Print the classification report print(metrics.classification_report(y_test, y_predicted, target_names=dataset.target_names)) # Plot the confusion matrix cm = metrics.confusion_matrix(y_test, y_predicted) print(cm) #import pylab as pl #pl.matshow(cm, cmap=pl.cm.jet) #pl.show() # Predict the result on some short new sentences: sentences = [ u'This is a language detection test.', u'Ceci est un test de d\xe9tection de la langue.', u'Dies ist ein Test, um die Sprache zu erkennen.', ] predicted = clf.predict(sentences) for s, p in zip(sentences, predicted): print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
bsd-3-clause
apbard/scipy
scipy/special/basic.py
1
61138
# # Author: Travis Oliphant, 2002 # from __future__ import division, print_function, absolute_import import warnings import numpy as np import math from scipy._lib.six import xrange from numpy import (pi, asarray, floor, isscalar, iscomplex, real, imag, sqrt, where, mgrid, sin, place, issubdtype, extract, less, inexact, nan, zeros, sinc) from . import _ufuncs as ufuncs from ._ufuncs import (ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma, psi, _zeta, hankel1, hankel2, yv, kv, ndtri, poch, binom, hyp0f1) from . import specfun from . import orthogonal from ._comb import _comb_int __all__ = ['ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros', 'ber_zeros', 'bernoulli', 'berp_zeros', 'bessel_diff_formula', 'bi_zeros', 'clpmn', 'comb', 'digamma', 'diric', 'ellipk', 'erf_zeros', 'erfcinv', 'erfinv', 'euler', 'factorial', 'factorialk', 'factorial2', 'fresnel_zeros', 'fresnelc_zeros', 'fresnels_zeros', 'gamma', 'h1vp', 'h2vp', 'hankel1', 'hankel2', 'hyp0f1', 'iv', 'ivp', 'jn_zeros', 'jnjnp_zeros', 'jnp_zeros', 'jnyn_zeros', 'jv', 'jvp', 'kei_zeros', 'keip_zeros', 'kelvin_zeros', 'ker_zeros', 'kerp_zeros', 'kv', 'kvp', 'lmbda', 'lpmn', 'lpn', 'lqmn', 'lqn', 'mathieu_a', 'mathieu_b', 'mathieu_even_coef', 'mathieu_odd_coef', 'ndtri', 'obl_cv_seq', 'pbdn_seq', 'pbdv_seq', 'pbvv_seq', 'perm', 'polygamma', 'pro_cv_seq', 'psi', 'riccati_jn', 'riccati_yn', 'sinc', 'y0_zeros', 'y1_zeros', 'y1p_zeros', 'yn_zeros', 'ynp_zeros', 'yv', 'yvp', 'zeta'] def diric(x, n): """Periodic sinc function, also called the Dirichlet function. The Dirichlet function is defined as:: diric(x) = sin(x * n/2) / (n * sin(x / 2)), where `n` is a positive integer. Parameters ---------- x : array_like Input data n : int Integer defining the periodicity. Returns ------- diric : ndarray Examples -------- >>> from scipy import special >>> import matplotlib.pyplot as plt >>> x = np.linspace(-8*np.pi, 8*np.pi, num=201) >>> plt.figure(figsize=(8, 8)); >>> for idx, n in enumerate([2, 3, 4, 9]): ... 
plt.subplot(2, 2, idx+1) ... plt.plot(x, special.diric(x, n)) ... plt.title('diric, n={}'.format(n)) >>> plt.show() The following example demonstrates that `diric` gives the magnitudes (modulo the sign and scaling) of the Fourier coefficients of a rectangular pulse. Suppress output of values that are effectively 0: >>> np.set_printoptions(suppress=True) Create a signal `x` of length `m` with `k` ones: >>> m = 8 >>> k = 3 >>> x = np.zeros(m) >>> x[:k] = 1 Use the FFT to compute the Fourier transform of `x`, and inspect the magnitudes of the coefficients: >>> np.abs(np.fft.fft(x)) array([ 3. , 2.41421356, 1. , 0.41421356, 1. , 0.41421356, 1. , 2.41421356]) Now find the same values (up to sign) using `diric`. We multiply by `k` to account for the different scaling conventions of `numpy.fft.fft` and `diric`: >>> theta = np.linspace(0, 2*np.pi, m, endpoint=False) >>> k * special.diric(theta, k) array([ 3. , 2.41421356, 1. , -0.41421356, -1. , -0.41421356, 1. , 2.41421356]) """ x, n = asarray(x), asarray(n) n = asarray(n + (x-x)) x = asarray(x + (n-n)) if issubdtype(x.dtype, inexact): ytype = x.dtype else: ytype = float y = zeros(x.shape, ytype) # empirical minval for 32, 64 or 128 bit float computations # where sin(x/2) < minval, result is fixed at +1 or -1 if np.finfo(ytype).eps < 1e-18: minval = 1e-11 elif np.finfo(ytype).eps < 1e-15: minval = 1e-7 else: minval = 1e-3 mask1 = (n <= 0) | (n != floor(n)) place(y, mask1, nan) x = x / 2 denom = sin(x) mask2 = (1-mask1) & (abs(denom) < minval) xsub = extract(mask2, x) nsub = extract(mask2, n) zsub = xsub / pi place(y, mask2, pow(-1, np.round(zsub)*(nsub-1))) mask = (1-mask1) & (1-mask2) xsub = extract(mask, x) nsub = extract(mask, n) dsub = extract(mask, denom) place(y, mask, sin(nsub*xsub)/(nsub*dsub)) return y def jnjnp_zeros(nt): """Compute zeros of integer-order Bessel functions Jn and Jn'. Results are arranged in order of the magnitudes of the zeros. 
Parameters ---------- nt : int Number (<=1200) of zeros to compute Returns ------- zo[l-1] : ndarray Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`. n[l-1] : ndarray Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`. m[l-1] : ndarray Serial number of the zeros of Jn(x) or Jn'(x) associated with lth zero. Of length `nt`. t[l-1] : ndarray 0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of length `nt`. See Also -------- jn_zeros, jnp_zeros : to get separated arrays of zeros. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. http://jin.ece.illinois.edu/specfunc.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200): raise ValueError("Number must be integer <= 1200.") nt = int(nt) n, m, t, zo = specfun.jdzo(nt) return zo[1:nt+1], n[:nt], m[:nt], t[:nt] def jnyn_zeros(n, nt): """Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x). Returns 4 arrays of length `nt`, corresponding to the first `nt` zeros of Jn(x), Jn'(x), Yn(x), and Yn'(x), respectively. Parameters ---------- n : int Order of the Bessel functions nt : int Number (<=1200) of zeros to compute See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. http://jin.ece.illinois.edu/specfunc.html """ if not (isscalar(nt) and isscalar(n)): raise ValueError("Arguments must be scalars.") if (floor(n) != n) or (floor(nt) != nt): raise ValueError("Arguments must be integers.") if (nt <= 0): raise ValueError("nt > 0") return specfun.jyzo(abs(n), nt) def jn_zeros(n, nt): """Compute zeros of integer-order Bessel function Jn(x). Parameters ---------- n : int Order of Bessel function nt : int Number of zeros to return References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. 
"Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. http://jin.ece.illinois.edu/specfunc.html """ return jnyn_zeros(n, nt)[0] def jnp_zeros(n, nt): """Compute zeros of integer-order Bessel function derivative Jn'(x). Parameters ---------- n : int Order of Bessel function nt : int Number of zeros to return References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. http://jin.ece.illinois.edu/specfunc.html """ return jnyn_zeros(n, nt)[1] def yn_zeros(n, nt): """Compute zeros of integer-order Bessel function Yn(x). Parameters ---------- n : int Order of Bessel function nt : int Number of zeros to return References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. http://jin.ece.illinois.edu/specfunc.html """ return jnyn_zeros(n, nt)[2] def ynp_zeros(n, nt): """Compute zeros of integer-order Bessel function derivative Yn'(x). Parameters ---------- n : int Order of Bessel function nt : int Number of zeros to return References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. http://jin.ece.illinois.edu/specfunc.html """ return jnyn_zeros(n, nt)[3] def y0_zeros(nt, complex=False): """Compute nt zeros of Bessel function Y0(z), and derivative at each zero. The derivatives are given by Y0'(z0) = -Y1(z0) at each zero z0. Parameters ---------- nt : int Number of zeros to return complex : bool, default False Set to False to return only the real zeros; set to True to return only the complex zeros with negative real part and positive imaginary part. Note that the complex conjugates of the latter are also zeros of the function, but are not returned by this routine. Returns ------- z0n : ndarray Location of nth zero of Y0(z) y0pz0n : ndarray Value of derivative Y0'(z0) for nth zero References ---------- .. 
[1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. http://jin.ece.illinois.edu/specfunc.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("Arguments must be scalar positive integer.") kf = 0 kc = not complex return specfun.cyzo(nt, kf, kc) def y1_zeros(nt, complex=False): """Compute nt zeros of Bessel function Y1(z), and derivative at each zero. The derivatives are given by Y1'(z1) = Y0(z1) at each zero z1. Parameters ---------- nt : int Number of zeros to return complex : bool, default False Set to False to return only the real zeros; set to True to return only the complex zeros with negative real part and positive imaginary part. Note that the complex conjugates of the latter are also zeros of the function, but are not returned by this routine. Returns ------- z1n : ndarray Location of nth zero of Y1(z) y1pz1n : ndarray Value of derivative Y1'(z1) for nth zero References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. http://jin.ece.illinois.edu/specfunc.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("Arguments must be scalar positive integer.") kf = 1 kc = not complex return specfun.cyzo(nt, kf, kc) def y1p_zeros(nt, complex=False): """Compute nt zeros of Bessel derivative Y1'(z), and value at each zero. The values are given by Y1(z1) at each z1 where Y1'(z1)=0. Parameters ---------- nt : int Number of zeros to return complex : bool, default False Set to False to return only the real zeros; set to True to return only the complex zeros with negative real part and positive imaginary part. Note that the complex conjugates of the latter are also zeros of the function, but are not returned by this routine. Returns ------- z1pn : ndarray Location of nth zero of Y1'(z) y1z1pn : ndarray Value of derivative Y1(z1) for nth zero References ---------- .. 
[1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. http://jin.ece.illinois.edu/specfunc.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("Arguments must be scalar positive integer.") kf = 2 kc = not complex return specfun.cyzo(nt, kf, kc) def _bessel_diff_formula(v, z, n, L, phase): # from AMS55. # L(v, z) = J(v, z), Y(v, z), H1(v, z), H2(v, z), phase = -1 # L(v, z) = I(v, z) or exp(v*pi*i)K(v, z), phase = 1 # For K, you can pull out the exp((v-k)*pi*i) into the caller v = asarray(v) p = 1.0 s = L(v-n, z) for i in xrange(1, n+1): p = phase * (p * (n-i+1)) / i # = choose(k, i) s += p*L(v-n + i*2, z) return s / (2.**n) bessel_diff_formula = np.deprecate(_bessel_diff_formula, message="bessel_diff_formula is a private function, do not use it!") def jvp(v, z, n=1): """Compute nth derivative of Bessel function Jv(z) with respect to `z`. Parameters ---------- v : float Order of Bessel function z : complex Argument at which to evaluate the derivative n : int, default 1 Order of derivative Notes ----- The derivative is computed using the relation DLFM 10.6.7 [2]_. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. http://jin.ece.illinois.edu/specfunc.html .. [2] NIST Digital Library of Mathematical Functions. http://dlmf.nist.gov/10.6.E7 """ if not isinstance(n, int) or (n < 0): raise ValueError("n must be a non-negative integer.") if n == 0: return jv(v, z) else: return _bessel_diff_formula(v, z, n, jv, -1) def yvp(v, z, n=1): """Compute nth derivative of Bessel function Yv(z) with respect to `z`. Parameters ---------- v : float Order of Bessel function z : complex Argument at which to evaluate the derivative n : int, default 1 Order of derivative Notes ----- The derivative is computed using the relation DLFM 10.6.7 [2]_. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. 
"Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. http://jin.ece.illinois.edu/specfunc.html .. [2] NIST Digital Library of Mathematical Functions. http://dlmf.nist.gov/10.6.E7 """ if not isinstance(n, int) or (n < 0): raise ValueError("n must be a non-negative integer.") if n == 0: return yv(v, z) else: return _bessel_diff_formula(v, z, n, yv, -1) def kvp(v, z, n=1): """Compute nth derivative of real-order modified Bessel function Kv(z) Kv(z) is the modified Bessel function of the second kind. Derivative is calculated with respect to `z`. Parameters ---------- v : array_like of float Order of Bessel function z : array_like of complex Argument at which to evaluate the derivative n : int Order of derivative. Default is first derivative. Returns ------- out : ndarray The results Examples -------- Calculate multiple values at order 5: >>> from scipy.special import kvp >>> kvp(5, (1, 2, 3+5j)) array([-1849.0354+0.j , -25.7735+0.j , -0.0307+0.0875j]) Calculate for a single value at multiple orders: >>> kvp((4, 4.5, 5), 1) array([ -184.0309, -568.9585, -1849.0354]) Notes ----- The derivative is computed using the relation DLFM 10.29.5 [2]_. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 6. http://jin.ece.illinois.edu/specfunc.html .. [2] NIST Digital Library of Mathematical Functions. http://dlmf.nist.gov/10.29.E5 """ if not isinstance(n, int) or (n < 0): raise ValueError("n must be a non-negative integer.") if n == 0: return kv(v, z) else: return (-1)**n * _bessel_diff_formula(v, z, n, kv, 1) def ivp(v, z, n=1): """Compute nth derivative of modified Bessel function Iv(z) with respect to `z`. Parameters ---------- v : array_like of float Order of Bessel function z : array_like of complex Argument at which to evaluate the derivative n : int, default 1 Order of derivative Notes ----- The derivative is computed using the relation DLFM 10.29.5 [2]_. 
References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 6. http://jin.ece.illinois.edu/specfunc.html .. [2] NIST Digital Library of Mathematical Functions. http://dlmf.nist.gov/10.29.E5 """ if not isinstance(n, int) or (n < 0): raise ValueError("n must be a non-negative integer.") if n == 0: return iv(v, z) else: return _bessel_diff_formula(v, z, n, iv, 1) def h1vp(v, z, n=1): """Compute nth derivative of Hankel function H1v(z) with respect to `z`. Parameters ---------- v : float Order of Hankel function z : complex Argument at which to evaluate the derivative n : int, default 1 Order of derivative Notes ----- The derivative is computed using the relation DLFM 10.6.7 [2]_. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. http://jin.ece.illinois.edu/specfunc.html .. [2] NIST Digital Library of Mathematical Functions. http://dlmf.nist.gov/10.6.E7 """ if not isinstance(n, int) or (n < 0): raise ValueError("n must be a non-negative integer.") if n == 0: return hankel1(v, z) else: return _bessel_diff_formula(v, z, n, hankel1, -1) def h2vp(v, z, n=1): """Compute nth derivative of Hankel function H2v(z) with respect to `z`. Parameters ---------- v : float Order of Hankel function z : complex Argument at which to evaluate the derivative n : int, default 1 Order of derivative Notes ----- The derivative is computed using the relation DLFM 10.6.7 [2]_. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. http://jin.ece.illinois.edu/specfunc.html .. [2] NIST Digital Library of Mathematical Functions. 
http://dlmf.nist.gov/10.6.E7 """ if not isinstance(n, int) or (n < 0): raise ValueError("n must be a non-negative integer.") if n == 0: return hankel2(v, z) else: return _bessel_diff_formula(v, z, n, hankel2, -1) def riccati_jn(n, x): r"""Compute Ricatti-Bessel function of the first kind and its derivative. The Ricatti-Bessel function of the first kind is defined as :math:`x j_n(x)`, where :math:`j_n` is the spherical Bessel function of the first kind of order :math:`n`. This function computes the value and first derivative of the Ricatti-Bessel function for all orders up to and including `n`. Parameters ---------- n : int Maximum order of function to compute x : float Argument at which to evaluate Returns ------- jn : ndarray Value of j0(x), ..., jn(x) jnp : ndarray First derivative j0'(x), ..., jn'(x) Notes ----- The computation is carried out via backward recurrence, using the relation DLMF 10.51.1 [2]_. Wrapper for a Fortran routine created by Shanjie Zhang and Jianming Jin [1]_. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html .. [2] NIST Digital Library of Mathematical Functions. http://dlmf.nist.gov/10.51.E1 """ if not (isscalar(n) and isscalar(x)): raise ValueError("arguments must be scalars.") if (n != floor(n)) or (n < 0): raise ValueError("n must be a non-negative integer.") if (n == 0): n1 = 1 else: n1 = n nm, jn, jnp = specfun.rctj(n1, x) return jn[:(n+1)], jnp[:(n+1)] def riccati_yn(n, x): """Compute Ricatti-Bessel function of the second kind and its derivative. The Ricatti-Bessel function of the second kind is defined as :math:`x y_n(x)`, where :math:`y_n` is the spherical Bessel function of the second kind of order :math:`n`. This function computes the value and first derivative of the function for all orders up to and including `n`. 
Parameters ---------- n : int Maximum order of function to compute x : float Argument at which to evaluate Returns ------- yn : ndarray Value of y0(x), ..., yn(x) ynp : ndarray First derivative y0'(x), ..., yn'(x) Notes ----- The computation is carried out via ascending recurrence, using the relation DLMF 10.51.1 [2]_. Wrapper for a Fortran routine created by Shanjie Zhang and Jianming Jin [1]_. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html .. [2] NIST Digital Library of Mathematical Functions. http://dlmf.nist.gov/10.51.E1 """ if not (isscalar(n) and isscalar(x)): raise ValueError("arguments must be scalars.") if (n != floor(n)) or (n < 0): raise ValueError("n must be a non-negative integer.") if (n == 0): n1 = 1 else: n1 = n nm, jn, jnp = specfun.rcty(n1, x) return jn[:(n+1)], jnp[:(n+1)] def erfinv(y): """Inverse function for erf. """ return ndtri((y+1)/2.0)/sqrt(2) def erfcinv(y): """Inverse function for erfc. """ return -ndtri(0.5*y)/sqrt(2) def erf_zeros(nt): """Compute nt complex zeros of error function erf(z). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html """ if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt): raise ValueError("Argument must be positive scalar integer.") return specfun.cerzo(nt) def fresnelc_zeros(nt): """Compute nt complex zeros of cosine Fresnel integral C(z). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html """ if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt): raise ValueError("Argument must be positive scalar integer.") return specfun.fcszo(1, nt) def fresnels_zeros(nt): """Compute nt complex zeros of sine Fresnel integral S(z). References ---------- .. 
[1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html """ if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt): raise ValueError("Argument must be positive scalar integer.") return specfun.fcszo(2, nt) def fresnel_zeros(nt): """Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html """ if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt): raise ValueError("Argument must be positive scalar integer.") return specfun.fcszo(2, nt), specfun.fcszo(1, nt) def assoc_laguerre(x, n, k=0.0): """Compute the generalized (associated) Laguerre polynomial of degree n and order k. The polynomial :math:`L^{(k)}_n(x)` is orthogonal over ``[0, inf)``, with weighting function ``exp(-x) * x**k`` with ``k > -1``. Notes ----- `assoc_laguerre` is a simple wrapper around `eval_genlaguerre`, with reversed argument order ``(x, n, k=0.0) --> (n, k, x)``. """ return orthogonal.eval_genlaguerre(n, k, x) digamma = psi def polygamma(n, x): """Polygamma function n. This is the nth derivative of the digamma (psi) function. Parameters ---------- n : array_like of int The order of the derivative of `psi`. x : array_like Where to evaluate the polygamma function. Returns ------- polygamma : ndarray The result. Examples -------- >>> from scipy import special >>> x = [2, 3, 25.5] >>> special.polygamma(1, x) array([ 0.64493407, 0.39493407, 0.03999467]) >>> special.polygamma(0, x) == special.psi(x) array([ True, True, True], dtype=bool) """ n, x = asarray(n), asarray(x) fac2 = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1, x) return where(n == 0, psi(x), fac2) def mathieu_even_coef(m, q): r"""Fourier coefficients for even Mathieu and modified Mathieu functions. 
The Fourier series of the even solutions of the Mathieu differential equation are of the form .. math:: \mathrm{ce}_{2n}(z, q) = \sum_{k=0}^{\infty} A_{(2n)}^{(2k)} \cos 2kz .. math:: \mathrm{ce}_{2n+1}(z, q) = \sum_{k=0}^{\infty} A_{(2n+1)}^{(2k+1)} \cos (2k+1)z This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input m=2n+1. Parameters ---------- m : int Order of Mathieu functions. Must be non-negative. q : float (>=0) Parameter of Mathieu functions. Must be non-negative. Returns ------- Ak : ndarray Even or odd Fourier coefficients, corresponding to even or odd m. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html .. [2] NIST Digital Library of Mathematical Functions http://dlmf.nist.gov/28.4#i """ if not (isscalar(m) and isscalar(q)): raise ValueError("m and q must be scalars.") if (q < 0): raise ValueError("q >=0") if (m != floor(m)) or (m < 0): raise ValueError("m must be an integer >=0.") if (q <= 1): qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q else: qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q km = int(qm + 0.5*m) if km > 251: print("Warning, too many predicted coefficients.") kd = 1 m = int(floor(m)) if m % 2: kd = 2 a = mathieu_a(m, q) fc = specfun.fcoef(kd, m, q, a) return fc[:km] def mathieu_odd_coef(m, q): r"""Fourier coefficients for even Mathieu and modified Mathieu functions. The Fourier series of the odd solutions of the Mathieu differential equation are of the form .. math:: \mathrm{se}_{2n+1}(z, q) = \sum_{k=0}^{\infty} B_{(2n+1)}^{(2k+1)} \sin (2k+1)z .. math:: \mathrm{se}_{2n+2}(z, q) = \sum_{k=0}^{\infty} B_{(2n+2)}^{(2k+2)} \sin (2k+2)z This function returns the coefficients :math:`B_{(2n+2)}^{(2k+2)}` for even input m=2n+2, and the coefficients :math:`B_{(2n+1)}^{(2k+1)}` for odd input m=2n+1. 
Parameters ---------- m : int Order of Mathieu functions. Must be non-negative. q : float (>=0) Parameter of Mathieu functions. Must be non-negative. Returns ------- Bk : ndarray Even or odd Fourier coefficients, corresponding to even or odd m. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html """ if not (isscalar(m) and isscalar(q)): raise ValueError("m and q must be scalars.") if (q < 0): raise ValueError("q >=0") if (m != floor(m)) or (m <= 0): raise ValueError("m must be an integer > 0") if (q <= 1): qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q else: qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q km = int(qm + 0.5*m) if km > 251: print("Warning, too many predicted coefficients.") kd = 4 m = int(floor(m)) if m % 2: kd = 3 b = mathieu_b(m, q) fc = specfun.fcoef(kd, m, q, b) return fc[:km] def lpmn(m, n, z): """Sequence of associated Legendre functions of the first kind. Computes the associated Legendre function of the first kind of order m and degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``. Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``. This function takes a real argument ``z``. For complex arguments ``z`` use clpmn instead. Parameters ---------- m : int ``|m| <= n``; the order of the Legendre function. n : int where ``n >= 0``; the degree of the Legendre function. Often called ``l`` (lower case L) in descriptions of the associated Legendre function z : float Input value. Returns ------- Pmn_z : (m+1, n+1) array Values for all orders 0..m and degrees 0..n Pmn_d_z : (m+1, n+1) array Derivatives for all orders 0..m and degrees 0..n See Also -------- clpmn: associated Legendre functions of the first kind for complex z Notes ----- In the interval (-1, 1), Ferrer's function of the first kind is returned. 
The phase convention used for the intervals (1, inf) and (-inf, -1) is such that the result is always real. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html .. [2] NIST Digital Library of Mathematical Functions http://dlmf.nist.gov/14.3 """ if not isscalar(m) or (abs(m) > n): raise ValueError("m must be <= n.") if not isscalar(n) or (n < 0): raise ValueError("n must be a non-negative integer.") if not isscalar(z): raise ValueError("z must be scalar.") if iscomplex(z): raise ValueError("Argument must be real. Use clpmn instead.") if (m < 0): mp = -m mf, nf = mgrid[0:mp+1, 0:n+1] with ufuncs.errstate(all='ignore'): if abs(z) < 1: # Ferrer function; DLMF 14.9.3 fixarr = where(mf > nf, 0.0, (-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1)) else: # Match to clpmn; DLMF 14.9.13 fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1)) else: mp = m p, pd = specfun.lpmn(mp, n, z) if (m < 0): p = p * fixarr pd = pd * fixarr return p, pd def clpmn(m, n, z, type=3): """Associated Legendre function of the first kind for complex arguments. Computes the associated Legendre function of the first kind of order m and degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``. Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``. Parameters ---------- m : int ``|m| <= n``; the order of the Legendre function. n : int where ``n >= 0``; the degree of the Legendre function. Often called ``l`` (lower case L) in descriptions of the associated Legendre function z : float or complex Input value. 
type : int, optional takes values 2 or 3 2: cut on the real axis ``|x| > 1`` 3: cut on the real axis ``-1 < x < 1`` (default) Returns ------- Pmn_z : (m+1, n+1) array Values for all orders ``0..m`` and degrees ``0..n`` Pmn_d_z : (m+1, n+1) array Derivatives for all orders ``0..m`` and degrees ``0..n`` See Also -------- lpmn: associated Legendre functions of the first kind for real z Notes ----- By default, i.e. for ``type=3``, phase conventions are chosen according to [1]_ such that the function is analytic. The cut lies on the interval (-1, 1). Approaching the cut from above or below in general yields a phase factor with respect to Ferrer's function of the first kind (cf. `lpmn`). For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values on the interval (-1, 1) in the complex plane yields Ferrer's function of the first kind. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html .. [2] NIST Digital Library of Mathematical Functions http://dlmf.nist.gov/14.21 """ if not isscalar(m) or (abs(m) > n): raise ValueError("m must be <= n.") if not isscalar(n) or (n < 0): raise ValueError("n must be a non-negative integer.") if not isscalar(z): raise ValueError("z must be scalar.") if not(type == 2 or type == 3): raise ValueError("type must be either 2 or 3.") if (m < 0): mp = -m mf, nf = mgrid[0:mp+1, 0:n+1] with ufuncs.errstate(all='ignore'): if type == 2: fixarr = where(mf > nf, 0.0, (-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1)) else: fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1)) else: mp = m p, pd = specfun.clpmn(mp, n, real(z), imag(z), type) if (m < 0): p = p * fixarr pd = pd * fixarr return p, pd def lqmn(m, n, z): """Sequence of associated Legendre functions of the second kind. 
Computes the associated Legendre function of the second kind of order m and degree n, ``Qmn(z)`` = :math:`Q_n^m(z)`, and its derivative, ``Qmn'(z)``. Returns two arrays of size ``(m+1, n+1)`` containing ``Qmn(z)`` and ``Qmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``. Parameters ---------- m : int ``|m| <= n``; the order of the Legendre function. n : int where ``n >= 0``; the degree of the Legendre function. Often called ``l`` (lower case L) in descriptions of the associated Legendre function z : complex Input value. Returns ------- Qmn_z : (m+1, n+1) array Values for all orders 0..m and degrees 0..n Qmn_d_z : (m+1, n+1) array Derivatives for all orders 0..m and degrees 0..n References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html """ if not isscalar(m) or (m < 0): raise ValueError("m must be a non-negative integer.") if not isscalar(n) or (n < 0): raise ValueError("n must be a non-negative integer.") if not isscalar(z): raise ValueError("z must be scalar.") m = int(m) n = int(n) # Ensure neither m nor n == 0 mm = max(1, m) nn = max(1, n) if iscomplex(z): q, qd = specfun.clqmn(mm, nn, z) else: q, qd = specfun.lqmn(mm, nn, z) return q[:(m+1), :(n+1)], qd[:(m+1), :(n+1)] def bernoulli(n): """Bernoulli numbers B0..Bn (inclusive). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html """ if not isscalar(n) or (n < 0): raise ValueError("n must be a non-negative integer.") n = int(n) if (n < 2): n1 = 2 else: n1 = n return specfun.bernob(int(n1))[:(n+1)] def euler(n): """Euler numbers E0..En (inclusive). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. 
http://jin.ece.illinois.edu/specfunc.html """ if not isscalar(n) or (n < 0): raise ValueError("n must be a non-negative integer.") n = int(n) if (n < 2): n1 = 2 else: n1 = n return specfun.eulerb(n1)[:(n+1)] def lpn(n, z): """Legendre function of the first kind. Compute sequence of Legendre functions of the first kind (polynomials), Pn(z) and derivatives for all degrees from 0 to n (inclusive). See also special.legendre for polynomial class. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html """ if not (isscalar(n) and isscalar(z)): raise ValueError("arguments must be scalars.") if (n != floor(n)) or (n < 0): raise ValueError("n must be a non-negative integer.") if (n < 1): n1 = 1 else: n1 = n if iscomplex(z): pn, pd = specfun.clpn(n1, z) else: pn, pd = specfun.lpn(n1, z) return pn[:(n+1)], pd[:(n+1)] def lqn(n, z): """Legendre function of the second kind. Compute sequence of Legendre functions of the second kind, Qn(z) and derivatives for all degrees from 0 to n (inclusive). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html """ if not (isscalar(n) and isscalar(z)): raise ValueError("arguments must be scalars.") if (n != floor(n)) or (n < 0): raise ValueError("n must be a non-negative integer.") if (n < 1): n1 = 1 else: n1 = n if iscomplex(z): qn, qd = specfun.clqn(n1, z) else: qn, qd = specfun.lqnb(n1, z) return qn[:(n+1)], qd[:(n+1)] def ai_zeros(nt): """ Compute `nt` zeros and values of the Airy function Ai and its derivative. Computes the first `nt` zeros, `a`, of the Airy function Ai(x); first `nt` zeros, `ap`, of the derivative of the Airy function Ai'(x); the corresponding values Ai(a'); and the corresponding values Ai'(a). 
Parameters ---------- nt : int Number of zeros to compute Returns ------- a : ndarray First `nt` zeros of Ai(x) ap : ndarray First `nt` zeros of Ai'(x) ai : ndarray Values of Ai(x) evaluated at first `nt` zeros of Ai'(x) aip : ndarray Values of Ai'(x) evaluated at first `nt` zeros of Ai(x) References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html """ kf = 1 if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be a positive integer scalar.") return specfun.airyzo(nt, kf) def bi_zeros(nt): """ Compute `nt` zeros and values of the Airy function Bi and its derivative. Computes the first `nt` zeros, b, of the Airy function Bi(x); first `nt` zeros, b', of the derivative of the Airy function Bi'(x); the corresponding values Bi(b'); and the corresponding values Bi'(b). Parameters ---------- nt : int Number of zeros to compute Returns ------- b : ndarray First `nt` zeros of Bi(x) bp : ndarray First `nt` zeros of Bi'(x) bi : ndarray Values of Bi(x) evaluated at first `nt` zeros of Bi'(x) bip : ndarray Values of Bi'(x) evaluated at first `nt` zeros of Bi(x) References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html """ kf = 2 if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be a positive integer scalar.") return specfun.airyzo(nt, kf) def lmbda(v, x): r"""Jahnke-Emden Lambda function, Lambdav(x). This function is defined as [2]_, .. math:: \Lambda_v(x) = \Gamma(v+1) \frac{J_v(x)}{(x/2)^v}, where :math:`\Gamma` is the gamma function and :math:`J_v` is the Bessel function of the first kind. 
Parameters ---------- v : float Order of the Lambda function x : float Value at which to evaluate the function and derivatives Returns ------- vl : ndarray Values of Lambda_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. dl : ndarray Derivatives Lambda_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html .. [2] Jahnke, E. and Emde, F. "Tables of Functions with Formulae and Curves" (4th ed.), Dover, 1945 """ if not (isscalar(v) and isscalar(x)): raise ValueError("arguments must be scalars.") if (v < 0): raise ValueError("argument must be > 0.") n = int(v) v0 = v - n if (n < 1): n1 = 1 else: n1 = n v1 = n1 + v0 if (v != floor(v)): vm, vl, dl = specfun.lamv(v1, x) else: vm, vl, dl = specfun.lamn(v1, x) return vl[:(n+1)], dl[:(n+1)] def pbdv_seq(v, x): """Parabolic cylinder functions Dv(x) and derivatives. Parameters ---------- v : float Order of the parabolic cylinder function x : float Value at which to evaluate the function and derivatives Returns ------- dv : ndarray Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. dp : ndarray Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 13. http://jin.ece.illinois.edu/specfunc.html """ if not (isscalar(v) and isscalar(x)): raise ValueError("arguments must be scalars.") n = int(v) v0 = v-n if (n < 1): n1 = 1 else: n1 = n v1 = n1 + v0 dv, dp, pdf, pdd = specfun.pbdv(v1, x) return dv[:n1+1], dp[:n1+1] def pbvv_seq(v, x): """Parabolic cylinder functions Vv(x) and derivatives. Parameters ---------- v : float Order of the parabolic cylinder function x : float Value at which to evaluate the function and derivatives Returns ------- dv : ndarray Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. 
dp : ndarray Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 13. http://jin.ece.illinois.edu/specfunc.html """ if not (isscalar(v) and isscalar(x)): raise ValueError("arguments must be scalars.") n = int(v) v0 = v-n if (n <= 1): n1 = 1 else: n1 = n v1 = n1 + v0 dv, dp, pdf, pdd = specfun.pbvv(v1, x) return dv[:n1+1], dp[:n1+1] def pbdn_seq(n, z): """Parabolic cylinder functions Dn(z) and derivatives. Parameters ---------- n : int Order of the parabolic cylinder function z : complex Value at which to evaluate the function and derivatives Returns ------- dv : ndarray Values of D_i(z), for i=0, ..., i=n. dp : ndarray Derivatives D_i'(z), for i=0, ..., i=n. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 13. http://jin.ece.illinois.edu/specfunc.html """ if not (isscalar(n) and isscalar(z)): raise ValueError("arguments must be scalars.") if (floor(n) != n): raise ValueError("n must be an integer.") if (abs(n) <= 1): n1 = 1 else: n1 = n cpb, cpd = specfun.cpbdn(n1, z) return cpb[:n1+1], cpd[:n1+1] def ber_zeros(nt): """Compute nt zeros of the Kelvin function ber(x). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be positive integer scalar.") return specfun.klvnzo(nt, 1) def bei_zeros(nt): """Compute nt zeros of the Kelvin function bei(x). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. 
http://jin.ece.illinois.edu/specfunc.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be positive integer scalar.") return specfun.klvnzo(nt, 2) def ker_zeros(nt): """Compute nt zeros of the Kelvin function ker(x). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be positive integer scalar.") return specfun.klvnzo(nt, 3) def kei_zeros(nt): """Compute nt zeros of the Kelvin function kei(x). """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be positive integer scalar.") return specfun.klvnzo(nt, 4) def berp_zeros(nt): """Compute nt zeros of the Kelvin function ber'(x). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be positive integer scalar.") return specfun.klvnzo(nt, 5) def beip_zeros(nt): """Compute nt zeros of the Kelvin function bei'(x). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be positive integer scalar.") return specfun.klvnzo(nt, 6) def kerp_zeros(nt): """Compute nt zeros of the Kelvin function ker'(x). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. 
http://jin.ece.illinois.edu/specfunc.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be positive integer scalar.") return specfun.klvnzo(nt, 7) def keip_zeros(nt): """Compute nt zeros of the Kelvin function kei'(x). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be positive integer scalar.") return specfun.klvnzo(nt, 8) def kelvin_zeros(nt): """Compute nt zeros of all Kelvin functions. Returned in a length-8 tuple of arrays of length nt. The tuple contains the arrays of zeros of (ber, bei, ker, kei, ber', bei', ker', kei'). References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html """ if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0): raise ValueError("nt must be positive integer scalar.") return (specfun.klvnzo(nt, 1), specfun.klvnzo(nt, 2), specfun.klvnzo(nt, 3), specfun.klvnzo(nt, 4), specfun.klvnzo(nt, 5), specfun.klvnzo(nt, 6), specfun.klvnzo(nt, 7), specfun.klvnzo(nt, 8)) def pro_cv_seq(m, n, c): """Characteristic values for prolate spheroidal wave functions. Compute a sequence of characteristic values for the prolate spheroidal wave functions for mode m and n'=m..n and spheroidal parameter c. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. 
http://jin.ece.illinois.edu/specfunc.html """ if not (isscalar(m) and isscalar(n) and isscalar(c)): raise ValueError("Arguments must be scalars.") if (n != floor(n)) or (m != floor(m)): raise ValueError("Modes must be integers.") if (n-m > 199): raise ValueError("Difference between n and m is too large.") maxL = n-m+1 return specfun.segv(m, n, c, 1)[1][:maxL] def obl_cv_seq(m, n, c): """Characteristic values for oblate spheroidal wave functions. Compute a sequence of characteristic values for the oblate spheroidal wave functions for mode m and n'=m..n and spheroidal parameter c. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996. http://jin.ece.illinois.edu/specfunc.html """ if not (isscalar(m) and isscalar(n) and isscalar(c)): raise ValueError("Arguments must be scalars.") if (n != floor(n)) or (m != floor(m)): raise ValueError("Modes must be integers.") if (n-m > 199): raise ValueError("Difference between n and m is too large.") maxL = n-m+1 return specfun.segv(m, n, c, -1)[1][:maxL] def ellipk(m): r"""Complete elliptic integral of the first kind. This function is defined as .. math:: K(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{-1/2} dt Parameters ---------- m : array_like The parameter of the elliptic integral. Returns ------- K : array_like Value of the elliptic integral. Notes ----- For more precision around point m = 1, use `ellipkm1`, which this function calls. The parameterization in terms of :math:`m` follows that of section 17.2 in [1]_. Other parameterizations in terms of the complementary parameter :math:`1 - m`, modular angle :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also used, so be careful that you choose the correct parameter. 
See Also -------- ellipkm1 : Complete elliptic integral of the first kind around m = 1 ellipkinc : Incomplete elliptic integral of the first kind ellipe : Complete elliptic integral of the second kind ellipeinc : Incomplete elliptic integral of the second kind References ---------- .. [1] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972. """ return ellipkm1(1 - asarray(m)) def comb(N, k, exact=False, repetition=False): """The number of combinations of N things taken k at a time. This is often expressed as "N choose k". Parameters ---------- N : int, ndarray Number of things. k : int, ndarray Number of elements taken. exact : bool, optional If `exact` is False, then floating point precision is used, otherwise exact long integer is computed. repetition : bool, optional If `repetition` is True, then the number of combinations with repetition is computed. Returns ------- val : int, float, ndarray The total number of combinations. See Also -------- binom : Binomial coefficient ufunc Notes ----- - Array arguments accepted only for exact=False case. - If k > N, N < 0, or k < 0, then a 0 is returned. Examples -------- >>> from scipy.special import comb >>> k = np.array([3, 4]) >>> n = np.array([10, 10]) >>> comb(n, k, exact=False) array([ 120., 210.]) >>> comb(10, 3, exact=True) 120L >>> comb(10, 3, exact=True, repetition=True) 220L """ if repetition: return comb(N + k - 1, k, exact) if exact: return _comb_int(N, k) else: k, N = asarray(k), asarray(N) cond = (k <= N) & (N >= 0) & (k >= 0) vals = binom(N, k) if isinstance(vals, np.ndarray): vals[~cond] = 0 elif not cond: vals = np.float64(0) return vals def perm(N, k, exact=False): """Permutations of N things taken k at a time, i.e., k-permutations of N. It's also known as "partial permutations". Parameters ---------- N : int, ndarray Number of things. k : int, ndarray Number of elements taken. 
exact : bool, optional If `exact` is False, then floating point precision is used, otherwise exact long integer is computed. Returns ------- val : int, ndarray The number of k-permutations of N. Notes ----- - Array arguments accepted only for exact=False case. - If k > N, N < 0, or k < 0, then a 0 is returned. Examples -------- >>> from scipy.special import perm >>> k = np.array([3, 4]) >>> n = np.array([10, 10]) >>> perm(n, k) array([ 720., 5040.]) >>> perm(10, 3, exact=True) 720 """ if exact: if (k > N) or (N < 0) or (k < 0): return 0 val = 1 for i in xrange(N - k + 1, N + 1): val *= i return val else: k, N = asarray(k), asarray(N) cond = (k <= N) & (N >= 0) & (k >= 0) vals = poch(N - k + 1, k) if isinstance(vals, np.ndarray): vals[~cond] = 0 elif not cond: vals = np.float64(0) return vals # http://stackoverflow.com/a/16327037/125507 def _range_prod(lo, hi): """ Product of a range of numbers. Returns the product of lo * (lo+1) * (lo+2) * ... * (hi-2) * (hi-1) * hi = hi! / (lo-1)! Breaks into smaller products first for speed: _range_prod(2, 9) = ((2*3)*(4*5))*((6*7)*(8*9)) """ if lo + 1 < hi: mid = (hi + lo) // 2 return _range_prod(lo, mid) * _range_prod(mid + 1, hi) if lo == hi: return lo return lo * hi def factorial(n, exact=False): """ The factorial of a number or array of numbers. The factorial of non-negative integer `n` is the product of all positive integers less than or equal to `n`:: n! = n * (n - 1) * (n - 2) * ... * 1 Parameters ---------- n : int or array_like of ints Input values. If ``n < 0``, the return value is 0. exact : bool, optional If True, calculate the answer exactly using long integer arithmetic. If False, result is approximated in floating point rapidly using the `gamma` function. Default is False. Returns ------- nf : float or int or ndarray Factorial of `n`, as integer or float depending on `exact`. 
Notes ----- For arrays with ``exact=True``, the factorial is computed only once, for the largest input, with each other result computed in the process. The output dtype is increased to ``int64`` or ``object`` if necessary. With ``exact=False`` the factorial is approximated using the gamma function: .. math:: n! = \\Gamma(n+1) Examples -------- >>> from scipy.special import factorial >>> arr = np.array([3, 4, 5]) >>> factorial(arr, exact=False) array([ 6., 24., 120.]) >>> factorial(arr, exact=True) array([ 6, 24, 120]) >>> factorial(5, exact=True) 120L """ if exact: if np.ndim(n) == 0: return 0 if n < 0 else math.factorial(n) else: n = asarray(n) un = np.unique(n).astype(object) # Convert to object array of long ints if np.int can't handle size if un[-1] > 20: dt = object elif un[-1] > 12: dt = np.int64 else: dt = np.int out = np.empty_like(n, dtype=dt) # Handle invalid/trivial values un = un[un > 1] out[n < 2] = 1 out[n < 0] = 0 # Calculate products of each range of numbers if un.size: val = math.factorial(un[0]) out[n == un[0]] = val for i in xrange(len(un) - 1): prev = un[i] + 1 current = un[i + 1] val *= _range_prod(prev, current) out[n == current] = val return out else: n = asarray(n) vals = gamma(n + 1) return where(n >= 0, vals, 0) def factorial2(n, exact=False): """Double factorial. This is the factorial with every second value skipped. E.g., ``7!! = 7 * 5 * 3 * 1``. It can be approximated numerically as:: n!! = special.gamma(n/2+1)*2**((m+1)/2)/sqrt(pi) n odd = 2**(n/2) * (n/2)! n even Parameters ---------- n : int or array_like Calculate ``n!!``. Arrays are only supported with `exact` set to False. If ``n < 0``, the return value is 0. exact : bool, optional The result can be approximated rapidly using the gamma-formula above (default). If `exact` is set to True, calculate the answer exactly using integer arithmetic. Returns ------- nff : float or int Double factorial of `n`, as an int or a float depending on `exact`. 
Examples -------- >>> from scipy.special import factorial2 >>> factorial2(7, exact=False) array(105.00000000000001) >>> factorial2(7, exact=True) 105L """ if exact: if n < -1: return 0 if n <= 0: return 1 val = 1 for k in xrange(n, 0, -2): val *= k return val else: n = asarray(n) vals = zeros(n.shape, 'd') cond1 = (n % 2) & (n >= -1) cond2 = (1-(n % 2)) & (n >= -1) oddn = extract(cond1, n) evenn = extract(cond2, n) nd2o = oddn / 2.0 nd2e = evenn / 2.0 place(vals, cond1, gamma(nd2o + 1) / sqrt(pi) * pow(2.0, nd2o + 0.5)) place(vals, cond2, gamma(nd2e + 1) * pow(2.0, nd2e)) return vals def factorialk(n, k, exact=True): """Multifactorial of n of order k, n(!!...!). This is the multifactorial of n skipping k values. For example, factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1 In particular, for any integer ``n``, we have factorialk(n, 1) = factorial(n) factorialk(n, 2) = factorial2(n) Parameters ---------- n : int Calculate multifactorial. If `n` < 0, the return value is 0. k : int Order of multifactorial. exact : bool, optional If exact is set to True, calculate the answer exactly using integer arithmetic. Returns ------- val : int Multifactorial of `n`. Raises ------ NotImplementedError Raises when exact is False Examples -------- >>> from scipy.special import factorialk >>> factorialk(5, 1, exact=True) 120L >>> factorialk(5, 3, exact=True) 10L """ if exact: if n < 1-k: return 0 if n <= 0: return 1 val = 1 for j in xrange(n, 0, -k): val = val*j return val else: raise NotImplementedError def zeta(x, q=None, out=None): r""" Riemann or Hurwitz zeta function. Parameters ---------- x : array_like of float Input data, must be real q : array_like of float, optional Input data, must be real. Defaults to Riemann zeta. out : ndarray, optional Output array for the computed values. Notes ----- The two-argument version is the Hurwitz zeta function: .. math:: \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x}, Riemann zeta function corresponds to ``q = 1``. 
See also -------- zetac """ if q is None: q = 1 return _zeta(x, q, out)
bsd-3-clause
passiweinberger/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/bezier.py
70
14387
""" A module providing some utility functions regarding bezier path manipulation. """ import numpy as np from math import sqrt from matplotlib.path import Path from operator import xor # some functions def get_intersection(cx1, cy1, cos_t1, sin_t1, cx2, cy2, cos_t2, sin_t2): """ return a intersecting point between a line through (cx1, cy1) and having angle t1 and a line through (cx2, cy2) and angle t2. """ # line1 => sin_t1 * (x - cx1) - cos_t1 * (y - cy1) = 0. # line1 => sin_t1 * x + cos_t1 * y = sin_t1*cx1 - cos_t1*cy1 line1_rhs = sin_t1 * cx1 - cos_t1 * cy1 line2_rhs = sin_t2 * cx2 - cos_t2 * cy2 # rhs matrix a, b = sin_t1, -cos_t1 c, d = sin_t2, -cos_t2 ad_bc = a*d-b*c if ad_bc == 0.: raise ValueError("Given lines do not intersect") #rhs_inverse a_, b_ = d, -b c_, d_ = -c, a a_, b_, c_, d_ = [k / ad_bc for k in [a_, b_, c_, d_]] x = a_* line1_rhs + b_ * line2_rhs y = c_* line1_rhs + d_ * line2_rhs return x, y def get_normal_points(cx, cy, cos_t, sin_t, length): """ For a line passing through (*cx*, *cy*) and having a angle *t*, return locations of the two points located along its perpendicular line at the distance of *length*. """ if length == 0.: return cx, cy, cx, cy cos_t1, sin_t1 = sin_t, -cos_t cos_t2, sin_t2 = -sin_t, cos_t x1, y1 = length*cos_t1 + cx, length*sin_t1 + cy x2, y2 = length*cos_t2 + cx, length*sin_t2 + cy return x1, y1, x2, y2 ## BEZIER routines # subdividing bezier curve # http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-sub.html def _de_casteljau1(beta, t): next_beta = beta[:-1] * (1-t) + beta[1:] * t return next_beta def split_de_casteljau(beta, t): """split a bezier segment defined by its controlpoints *beta* into two separate segment divided at *t* and return their control points. 
""" beta = np.asarray(beta) beta_list = [beta] while True: beta = _de_casteljau1(beta, t) beta_list.append(beta) if len(beta) == 1: break left_beta = [beta[0] for beta in beta_list] right_beta = [beta[-1] for beta in reversed(beta_list)] return left_beta, right_beta def find_bezier_t_intersecting_with_closedpath(bezier_point_at_t, inside_closedpath, t0=0., t1=1., tolerence=0.01): """ Find a parameter t0 and t1 of the given bezier path which bounds the intersecting points with a provided closed path(*inside_closedpath*). Search starts from *t0* and *t1* and it uses a simple bisecting algorithm therefore one of the end point must be inside the path while the orther doesn't. The search stop when |t0-t1| gets smaller than the given tolerence. value for - bezier_point_at_t : a function which returns x, y coordinates at *t* - inside_closedpath : return True if the point is insed the path """ # inside_closedpath : function start = bezier_point_at_t(t0) end = bezier_point_at_t(t1) start_inside = inside_closedpath(start) end_inside = inside_closedpath(end) if not xor(start_inside, end_inside): raise ValueError("the segment does not seemed to intersect with the path") while 1: # return if the distance is smaller than the tolerence if (start[0]-end[0])**2 + (start[1]-end[1])**2 < tolerence**2: return t0, t1 # calculate the middle point middle_t = 0.5*(t0+t1) middle = bezier_point_at_t(middle_t) middle_inside = inside_closedpath(middle) if xor(start_inside, middle_inside): t1 = middle_t end = middle end_inside = middle_inside else: t0 = middle_t start = middle start_inside = middle_inside class BezierSegment: """ A simple class of a 2-dimensional bezier segment """ # Highrt order bezier lines can be supported by simplying adding # correcponding values. _binom_coeff = {1:np.array([1., 1.]), 2:np.array([1., 2., 1.]), 3:np.array([1., 3., 3., 1.])} def __init__(self, control_points): """ *control_points* : location of contol points. 
It needs have a shpae of n * 2, where n is the order of the bezier line. 1<= n <= 3 is supported. """ _o = len(control_points) self._orders = np.arange(_o) _coeff = BezierSegment._binom_coeff[_o - 1] _control_points = np.asarray(control_points) xx = _control_points[:,0] yy = _control_points[:,1] self._px = xx * _coeff self._py = yy * _coeff def point_at_t(self, t): "evaluate a point at t" one_minus_t_powers = np.power(1.-t, self._orders)[::-1] t_powers = np.power(t, self._orders) tt = one_minus_t_powers * t_powers _x = sum(tt * self._px) _y = sum(tt * self._py) return _x, _y def split_bezier_intersecting_with_closedpath(bezier, inside_closedpath, tolerence=0.01): """ bezier : control points of the bezier segment inside_closedpath : a function which returns true if the point is inside the path """ bz = BezierSegment(bezier) bezier_point_at_t = bz.point_at_t t0, t1 = find_bezier_t_intersecting_with_closedpath(bezier_point_at_t, inside_closedpath, tolerence=tolerence) _left, _right = split_de_casteljau(bezier, (t0+t1)/2.) return _left, _right def find_r_to_boundary_of_closedpath(inside_closedpath, xy, cos_t, sin_t, rmin=0., rmax=1., tolerence=0.01): """ Find a radius r (centered at *xy*) between *rmin* and *rmax* at which it intersect with the path. inside_closedpath : function cx, cy : center cos_t, sin_t : cosine and sine for the angle rmin, rmax : """ cx, cy = xy def _f(r): return cos_t*r + cx, sin_t*r + cy find_bezier_t_intersecting_with_closedpath(_f, inside_closedpath, t0=rmin, t1=rmax, tolerence=tolerence) ## matplotlib specific def split_path_inout(path, inside, tolerence=0.01, reorder_inout=False): """ divide a path into two segment at the point where inside(x, y) becomes False. 
""" path_iter = path.iter_segments() ctl_points, command = path_iter.next() begin_inside = inside(ctl_points[-2:]) # true if begin point is inside bezier_path = None ctl_points_old = ctl_points concat = np.concatenate iold=0 i = 1 for ctl_points, command in path_iter: iold=i i += len(ctl_points)/2 if inside(ctl_points[-2:]) != begin_inside: bezier_path = concat([ctl_points_old[-2:], ctl_points]) break ctl_points_old = ctl_points if bezier_path is None: raise ValueError("The path does not seem to intersect with the patch") bp = zip(bezier_path[::2], bezier_path[1::2]) left, right = split_bezier_intersecting_with_closedpath(bp, inside, tolerence) if len(left) == 2: codes_left = [Path.LINETO] codes_right = [Path.MOVETO, Path.LINETO] elif len(left) == 3: codes_left = [Path.CURVE3, Path.CURVE3] codes_right = [Path.MOVETO, Path.CURVE3, Path.CURVE3] elif len(left) == 4: codes_left = [Path.CURVE4, Path.CURVE4, Path.CURVE4] codes_right = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4] else: raise ValueError() verts_left = left[1:] verts_right = right[:] #i += 1 if path.codes is None: path_in = Path(concat([path.vertices[:i], verts_left])) path_out = Path(concat([verts_right, path.vertices[i:]])) else: path_in = Path(concat([path.vertices[:iold], verts_left]), concat([path.codes[:iold], codes_left])) path_out = Path(concat([verts_right, path.vertices[i:]]), concat([codes_right, path.codes[i:]])) if reorder_inout and begin_inside == False: path_in, path_out = path_out, path_in return path_in, path_out def inside_circle(cx, cy, r): r2 = r**2 def _f(xy): x, y = xy return (x-cx)**2 + (y-cy)**2 < r2 return _f # quadratic bezier lines def get_cos_sin(x0, y0, x1, y1): dx, dy = x1-x0, y1-y0 d = (dx*dx + dy*dy)**.5 return dx/d, dy/d def get_parallels(bezier2, width): """ Given the quadraitc bezier control points *bezier2*, returns control points of quadrativ bezier lines roughly parralel to given one separated by *width*. 
""" # The parallel bezier lines constructed by following ways. # c1 and c2 are contol points representing the begin and end of the bezier line. # cm is the middle point c1x, c1y = bezier2[0] cmx, cmy = bezier2[1] c2x, c2y = bezier2[2] # t1 and t2 is the anlge between c1 and cm, cm, c2. # They are also a angle of the tangential line of the path at c1 and c2 cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy) cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c2x, c2y) # find c1_left, c1_right which are located along the lines # throught c1 and perpendicular to the tangential lines of the # bezier path at a distance of width. Same thing for c2_left and # c2_right with respect to c2. c1x_left, c1y_left, c1x_right, c1y_right = \ get_normal_points(c1x, c1y, cos_t1, sin_t1, width) c2x_left, c2y_left, c2x_right, c2y_right = \ get_normal_points(c2x, c2y, cos_t2, sin_t2, width) # find cm_left which is the intersectng point of a line through # c1_left with angle t1 and a line throught c2_left with angle # t2. Same with cm_right. cmx_left, cmy_left = get_intersection(c1x_left, c1y_left, cos_t1, sin_t1, c2x_left, c2y_left, cos_t2, sin_t2) cmx_right, cmy_right = get_intersection(c1x_right, c1y_right, cos_t1, sin_t1, c2x_right, c2y_right, cos_t2, sin_t2) # the parralel bezier lines are created with control points of # [c1_left, cm_left, c2_left] and [c1_right, cm_right, c2_right] path_left = [(c1x_left, c1y_left), (cmx_left, cmy_left), (c2x_left, c2y_left)] path_right = [(c1x_right, c1y_right), (cmx_right, cmy_right), (c2x_right, c2y_right)] return path_left, path_right def make_wedged_bezier2(bezier2, length, shrink_factor=0.5): """ Being similar to get_parallels, returns control points of two quadrativ bezier lines having a width roughly parralel to given one separated by *width*. 
""" xx1, yy1 = bezier2[2] xx2, yy2 = bezier2[1] xx3, yy3 = bezier2[0] cx, cy = xx3, yy3 x0, y0 = xx2, yy2 dist = sqrt((x0-cx)**2 + (y0-cy)**2) cos_t, sin_t = (x0-cx)/dist, (y0-cy)/dist, x1, y1, x2, y2 = get_normal_points(cx, cy, cos_t, sin_t, length) xx12, yy12 = (xx1+xx2)/2., (yy1+yy2)/2., xx23, yy23 = (xx2+xx3)/2., (yy2+yy3)/2., dist = sqrt((xx12-xx23)**2 + (yy12-yy23)**2) cos_t, sin_t = (xx12-xx23)/dist, (yy12-yy23)/dist, xm1, ym1, xm2, ym2 = get_normal_points(xx2, yy2, cos_t, sin_t, length*shrink_factor) l_plus = [(x1, y1), (xm1, ym1), (xx1, yy1)] l_minus = [(x2, y2), (xm2, ym2), (xx1, yy1)] return l_plus, l_minus def find_control_points(c1x, c1y, mmx, mmy, c2x, c2y): """ Find control points of the bezier line throught c1, mm, c2. We simply assume that c1, mm, c2 which have parameteric value 0, 0.5, and 1. """ cmx = .5 * (4*mmx - (c1x + c2x)) cmy = .5 * (4*mmy - (c1y + c2y)) return [(c1x, c1y), (cmx, cmy), (c2x, c2y)] def make_wedged_bezier2(bezier2, width, w1=1., wm=0.5, w2=0.): """ Being similar to get_parallels, returns control points of two quadrativ bezier lines having a width roughly parralel to given one separated by *width*. """ # c1, cm, c2 c1x, c1y = bezier2[0] cmx, cmy = bezier2[1] c3x, c3y = bezier2[2] # t1 and t2 is the anlge between c1 and cm, cm, c3. # They are also a angle of the tangential line of the path at c1 and c3 cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy) cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c3x, c3y) # find c1_left, c1_right which are located along the lines # throught c1 and perpendicular to the tangential lines of the # bezier path at a distance of width. Same thing for c3_left and # c3_right with respect to c3. 
c1x_left, c1y_left, c1x_right, c1y_right = \ get_normal_points(c1x, c1y, cos_t1, sin_t1, width*w1) c3x_left, c3y_left, c3x_right, c3y_right = \ get_normal_points(c3x, c3y, cos_t2, sin_t2, width*w2) # find c12, c23 and c123 which are middle points of c1-cm, cm-c3 and c12-c23 c12x, c12y = (c1x+cmx)*.5, (c1y+cmy)*.5 c23x, c23y = (cmx+c3x)*.5, (cmy+c3y)*.5 c123x, c123y = (c12x+c23x)*.5, (c12y+c23y)*.5 # tangential angle of c123 (angle between c12 and c23) cos_t123, sin_t123 = get_cos_sin(c12x, c12y, c23x, c23y) c123x_left, c123y_left, c123x_right, c123y_right = \ get_normal_points(c123x, c123y, cos_t123, sin_t123, width*wm) path_left = find_control_points(c1x_left, c1y_left, c123x_left, c123y_left, c3x_left, c3y_left) path_right = find_control_points(c1x_right, c1y_right, c123x_right, c123y_right, c3x_right, c3y_right) return path_left, path_right if 0: path = Path([(0, 0), (1, 0), (2, 2)], [Path.MOVETO, Path.CURVE3, Path.CURVE3]) left, right = divide_path_inout(path, inside) clf() ax = gca()
agpl-3.0
djgagne/scikit-learn
sklearn/neighbors/tests/test_nearest_centroid.py
305
4121
""" Testing for the nearest centroid module. """ import numpy as np from scipy import sparse as sp from numpy.testing import assert_array_equal from numpy.testing import assert_equal from sklearn.neighbors import NearestCentroid from sklearn import datasets from sklearn.metrics.pairwise import pairwise_distances # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] X_csr = sp.csr_matrix(X) # Sparse matrix y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] T_csr = sp.csr_matrix(T) true_result = [-1, 1, 1] # also load the iris dataset # and randomly permute it iris = datasets.load_iris() rng = np.random.RandomState(1) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] def test_classification_toy(): # Check classification on a toy dataset, including sparse versions. clf = NearestCentroid() clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) # Same test, but with a sparse matrix to fit and test. clf = NearestCentroid() clf.fit(X_csr, y) assert_array_equal(clf.predict(T_csr), true_result) # Fit with sparse, test with non-sparse clf = NearestCentroid() clf.fit(X_csr, y) assert_array_equal(clf.predict(T), true_result) # Fit with non-sparse, test with sparse clf = NearestCentroid() clf.fit(X, y) assert_array_equal(clf.predict(T_csr), true_result) # Fit and predict with non-CSR sparse matrices clf = NearestCentroid() clf.fit(X_csr.tocoo(), y) assert_array_equal(clf.predict(T_csr.tolil()), true_result) def test_precomputed(): clf = NearestCentroid(metric="precomputed") clf.fit(X, y) S = pairwise_distances(T, clf.centroids_) assert_array_equal(clf.predict(S), true_result) def test_iris(): # Check consistency on dataset iris. 
for metric in ('euclidean', 'cosine'): clf = NearestCentroid(metric=metric).fit(iris.data, iris.target) score = np.mean(clf.predict(iris.data) == iris.target) assert score > 0.9, "Failed with score = " + str(score) def test_iris_shrinkage(): # Check consistency on dataset iris, when using shrinkage. for metric in ('euclidean', 'cosine'): for shrink_threshold in [None, 0.1, 0.5]: clf = NearestCentroid(metric=metric, shrink_threshold=shrink_threshold) clf = clf.fit(iris.data, iris.target) score = np.mean(clf.predict(iris.data) == iris.target) assert score > 0.8, "Failed with score = " + str(score) def test_pickle(): import pickle # classification obj = NearestCentroid() obj.fit(iris.data, iris.target) score = obj.score(iris.data, iris.target) s = pickle.dumps(obj) obj2 = pickle.loads(s) assert_equal(type(obj2), obj.__class__) score2 = obj2.score(iris.data, iris.target) assert_array_equal(score, score2, "Failed to generate same score" " after pickling (classification).") def test_shrinkage_threshold_decoded_y(): clf = NearestCentroid(shrink_threshold=0.01) y_ind = np.asarray(y) y_ind[y_ind == -1] = 0 clf.fit(X, y_ind) centroid_encoded = clf.centroids_ clf.fit(X, y) assert_array_equal(centroid_encoded, clf.centroids_) def test_predict_translated_data(): # Test that NearestCentroid gives same results on translated data rng = np.random.RandomState(0) X = rng.rand(50, 50) y = rng.randint(0, 3, 50) noise = rng.rand(50) clf = NearestCentroid(shrink_threshold=0.1) clf.fit(X, y) y_init = clf.predict(X) clf = NearestCentroid(shrink_threshold=0.1) X_noise = X + noise clf.fit(X_noise, y) y_translate = clf.predict(X_noise) assert_array_equal(y_init, y_translate) def test_manhattan_metric(): # Test the manhattan metric. clf = NearestCentroid(metric='manhattan') clf.fit(X, y) dense_centroid = clf.centroids_ clf.fit(X_csr, y) assert_array_equal(clf.centroids_, dense_centroid) assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
bsd-3-clause
seokjunbing/cs75
src/data_processing/compare_data_processing.py
1
2664
from Bio import SeqIO from os import listdir import pandas as pd from data_processing.read_dicts import construct_dicts """ This file should be used for reading the data files from other papers we are comparing the results to. This is required since their formats are somewhat different """ # FOLDERS DATA_FOLDER = '../../data' AAINDEX_FOLDER = '%s/aaindex' % DATA_FOLDER COMPARE_FOLDER = '%s/compare' % DATA_FOLDER # FILES AAINDEX_FILE = '%s/aaindex1.txt' % AAINDEX_FOLDER AAINDEX_USED_FILE = '%s/aaindex_used.txt' % AAINDEX_FOLDER OUTPUT_FILE = "%s/%s" % (COMPARE_FOLDER, 'compare_scores.csv') def read_all_fasta_files(dir): """ Reads all the fasta files in a directory and puts them in a dataframe. :param dir: directory where all the files are :return: dataframe w/ each point labeled by its filename """ files_and_labels = [('%s/%s' % (dir, f), f.split('.')[0]) for f in listdir(dir) if f.split('.')[1] == 'fasta'] sequence_matrix = [(SeqIO.parse(file, 'fasta'), label) for file, label in files_and_labels] cols = ['seq', 'label'] main_df = pd.DataFrame(columns=cols) for sequence_list, label in sequence_matrix: sequence_list = list(sequence_list) label_list = [label for i in range(len(sequence_list))] current_label_df = pd.DataFrame(data=[[str(line.seq) for line in sequence_list], label_list]) current_label_df = current_label_df.transpose() current_label_df.columns = cols main_df = main_df.append(current_label_df) return main_df def get_scores(sequence_matrix, scoring_file, indices_used): """ Takes a DataFrame of sequences and adds one score for each index used to each sequence. 
:param sequence_matrix: DataFrame containing sequences and labels :param scoring_file: string of file path of the aaindex scoring file :param indices_used: list of index accession names for the indices to be used to score the sequences :return: sequence_matrix with added rows, one for each index in indices_used """ index_scores = construct_dicts(scoring_file)[0] for index in indices_used: scores = [] for seq in sequence_matrix.seq: val = sum([index_scores.get(index).get(char, 0) for char in seq]) scores.append(val) sequence_matrix[index] = pd.Series(scores) return sequence_matrix if __name__ == '__main__': with open(AAINDEX_USED_FILE) as f: indices_used = [line.strip() for line in f.readlines()] print(indices_used) mat = read_all_fasta_files(COMPARE_FOLDER) scores = get_scores(mat, AAINDEX_FILE, indices_used) scores.to_csv(OUTPUT_FILE)
gpl-3.0
maximus009/kaggle-galaxies
predict_augmented_npy_maxout2048.py
8
9452
""" Load an analysis file and redo the predictions on the validation set / test set, this time with augmented data and averaging. Store them as numpy files. """ import numpy as np # import pandas as pd import theano import theano.tensor as T import layers import cc_layers import custom import load_data import realtime_augmentation as ra import time import csv import os import cPickle as pickle BATCH_SIZE = 32 # 16 NUM_INPUT_FEATURES = 3 CHUNK_SIZE = 8000 # 10000 # this should be a multiple of the batch size # ANALYSIS_PATH = "analysis/try_convnet_cc_multirot_3x69r45_untied_bias.pkl" ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048.pkl" DO_VALID = True # disable this to not bother with the validation set evaluation DO_TEST = True # disable this to not generate predictions on the testset target_filename = os.path.basename(ANALYSIS_PATH).replace(".pkl", ".npy.gz") target_path_valid = os.path.join("predictions/final/augmented/valid", target_filename) target_path_test = os.path.join("predictions/final/augmented/test", target_filename) print "Loading model data etc." 
analysis = np.load(ANALYSIS_PATH) input_sizes = [(69, 69), (69, 69)] ds_transforms = [ ra.build_ds_transform(3.0, target_size=input_sizes[0]), ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)] num_input_representations = len(ds_transforms) # split training data into training + a small validation set num_train = load_data.num_train num_valid = num_train // 10 # integer division num_train -= num_valid num_test = load_data.num_test valid_ids = load_data.train_ids[num_train:] train_ids = load_data.train_ids[:num_train] test_ids = load_data.test_ids train_indices = np.arange(num_train) valid_indices = np.arange(num_train, num_train+num_valid) test_indices = np.arange(num_test) y_valid = np.load("data/solutions_train.npy")[num_train:] print "Build model" l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1]) l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1]) l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True) l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r) l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True) l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2) l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True) l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2) l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True) l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True) l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2) l3s = cc_layers.ShuffleC01BToBC01Layer(l3) j3 = 
layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts # l4 = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5) l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity) l4 = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape') # l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity) l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity) # l6 = layers.OutputLayer(l5, error_measure='mse') l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.) xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)] idx = T.lscalar('idx') givens = { l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE], l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE], } compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens) print "Load model parameters" layers.set_param_values(l6, analysis['param_values']) print "Create generators" # set here which transforms to use to make predictions augmentation_transforms = [] for zoom in [1 / 1.2, 1.0, 1.2]: for angle in np.linspace(0, 360, 10, endpoint=False): augmentation_transforms.append(ra.build_augmentation_transform(rotation=angle, zoom=zoom)) augmentation_transforms.append(ra.build_augmentation_transform(rotation=(angle + 180), zoom=zoom, shear=180)) # flipped print " %d augmentation transforms." 
% len(augmentation_transforms) augmented_data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms) valid_gen = load_data.buffered_gen_mp(augmented_data_gen_valid, buffer_size=1) augmented_data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms) test_gen = load_data.buffered_gen_mp(augmented_data_gen_test, buffer_size=1) approx_num_chunks_valid = int(np.ceil(num_valid * len(augmentation_transforms) / float(CHUNK_SIZE))) approx_num_chunks_test = int(np.ceil(num_test * len(augmentation_transforms) / float(CHUNK_SIZE))) print "Approximately %d chunks for the validation set" % approx_num_chunks_valid print "Approximately %d chunks for the test set" % approx_num_chunks_test if DO_VALID: print print "VALIDATION SET" print "Compute predictions" predictions_list = [] start_time = time.time() for e, (chunk_data, chunk_length) in enumerate(valid_gen): print "Chunk %d" % (e + 1) xs_chunk = chunk_data # need to transpose the chunks to move the 'channels' dimension up xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] print " load data onto GPU" for x_shared, x_chunk in zip(xs_shared, xs_chunk): x_shared.set_value(x_chunk) num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # make predictions, don't forget to cute off the zeros at the end predictions_chunk_list = [] for b in xrange(num_batches_chunk): if b % 1000 == 0: print " batch %d/%d" % (b + 1, num_batches_chunk) predictions = compute_output(b) predictions_chunk_list.append(predictions) predictions_chunk = np.vstack(predictions_chunk_list) predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding print " compute average over transforms" predictions_chunk_avg = 
predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1) predictions_list.append(predictions_chunk_avg) time_since_start = time.time() - start_time print " %s since start" % load_data.hms(time_since_start) all_predictions = np.vstack(predictions_list) print "Write predictions to %s" % target_path_valid load_data.save_gz(target_path_valid, all_predictions) print "Evaluate" rmse_valid = analysis['losses_valid'][-1] rmse_augmented = np.sqrt(np.mean((y_valid - all_predictions)**2)) print " MSE (last iteration):\t%.6f" % rmse_valid print " MSE (augmented):\t%.6f" % rmse_augmented if DO_TEST: print print "TEST SET" print "Compute predictions" predictions_list = [] start_time = time.time() for e, (chunk_data, chunk_length) in enumerate(test_gen): print "Chunk %d" % (e + 1) xs_chunk = chunk_data # need to transpose the chunks to move the 'channels' dimension up xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] print " load data onto GPU" for x_shared, x_chunk in zip(xs_shared, xs_chunk): x_shared.set_value(x_chunk) num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # make predictions, don't forget to cute off the zeros at the end predictions_chunk_list = [] for b in xrange(num_batches_chunk): if b % 1000 == 0: print " batch %d/%d" % (b + 1, num_batches_chunk) predictions = compute_output(b) predictions_chunk_list.append(predictions) predictions_chunk = np.vstack(predictions_chunk_list) predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding print " compute average over transforms" predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1) predictions_list.append(predictions_chunk_avg) time_since_start = time.time() - start_time print " %s since start" % load_data.hms(time_since_start) all_predictions = np.vstack(predictions_list) print "Write predictions to %s" % target_path_test load_data.save_gz(target_path_test, all_predictions) print "Done!"
bsd-3-clause
pelson/cartopy
lib/cartopy/examples/wmts_time.py
3
1957
""" Web Map Tile Service time dimension demonstration ------------------------------------------------- This example further demonstrates WMTS support within cartopy. Optional keyword arguments can be supplied to the OGC WMTS 'gettile' method. This allows for the specification of the 'time' dimension for a WMTS layer which supports it. The example shows satellite imagery retrieved from NASA's Global Imagery Browse Services for 5th Feb 2016. A true color MODIS image is shown on the left, with the MODIS false color 'snow RGB' shown on the right. """ __tags__ = ['Web services'] import matplotlib.pyplot as plt import matplotlib.patheffects as PathEffects from owslib.wmts import WebMapTileService import cartopy.crs as ccrs def main(): # URL of NASA GIBS URL = 'http://gibs.earthdata.nasa.gov/wmts/epsg4326/best/wmts.cgi' wmts = WebMapTileService(URL) # Layers for MODIS true color and snow RGB layers = ['MODIS_Terra_SurfaceReflectance_Bands143', 'MODIS_Terra_CorrectedReflectance_Bands367'] date_str = '2016-02-05' # Plot setup plot_CRS = ccrs.Mercator() geodetic_CRS = ccrs.Geodetic() x0, y0 = plot_CRS.transform_point(4.6, 43.1, geodetic_CRS) x1, y1 = plot_CRS.transform_point(11.0, 47.4, geodetic_CRS) ysize = 8 xsize = 2 * ysize * (x1 - x0) / (y1 - y0) fig = plt.figure(figsize=(xsize, ysize), dpi=100) for layer, offset in zip(layers, [0, 0.5]): ax = fig.add_axes([offset, 0, 0.5, 1], projection=plot_CRS) ax.set_xlim((x0, x1)) ax.set_ylim((y0, y1)) ax.add_wmts(wmts, layer, wmts_kwargs={'time': date_str}) txt = ax.text(4.7, 43.2, wmts[layer].title, fontsize=18, color='wheat', transform=geodetic_CRS) txt.set_path_effects([PathEffects.withStroke(linewidth=5, foreground='black')]) plt.show() if __name__ == '__main__': main()
lgpl-3.0
keirl/bigdata
code/plot_gas_per_capita.py
1
1522
#Import the numpy and pandas libraries import pandas as pd import plotly.plotly as plotly import os #Get the OS folder/directory path data_path = os.path.join(os.path.dirname(__file__),os.pardir,'rawdata') #Read the csv file with the state statistic results df = pd.read_csv(data_path+'/STATE_SUMMARY.CSV'); #Define a colorscale for the map clrscl = [[0.0,'rgb(0,255,0)'],[0.25,'rgb(128,255,0)'],[0.5,'rgb(255,255,0)'],\ [0.75,'rgb(255,128,0)'],[1.0,'rgb(255,0,0)']] #Define the data to be plotted and labels to be applied data = [ dict( type = 'choropleth', colorscale = clrscl, autocolorscale = False, locations=df['STATEABBR'], z = df['GASCAPITA'].astype(float), locationmode = 'USA-states', text = 'Annual Gasoline Consumption Per Capita', marker = dict( line = dict(color = 'rgb(255,255,255)',width = 2) ), colorbar = dict(title = "Gallons") ) ] #Define which type of map to use layout = dict( title = 'US Gasoline Consumption by State Per Capita', geo = dict( scope='usa', projection=dict( type='albers usa' ), showlakes = True, lakecolor = 'rgb(255, 255, 255)', ), ) #Create the figure from the data and the layout defined fig = dict( data=data, layout=layout ) #Plot the figure and load it as an html in a browser url = plotly.plot( fig, filename='gasoline_consumption_per_capita_map' )
mit
marcsans/cnn-physics-perception
phy/lib/python2.7/site-packages/sklearn/neural_network/tests/test_rbm.py
225
6278
import sys import re import numpy as np from scipy.sparse import csc_matrix, csr_matrix, lil_matrix from sklearn.utils.testing import (assert_almost_equal, assert_array_equal, assert_true) from sklearn.datasets import load_digits from sklearn.externals.six.moves import cStringIO as StringIO from sklearn.neural_network import BernoulliRBM from sklearn.utils.validation import assert_all_finite np.seterr(all='warn') Xdigits = load_digits().data Xdigits -= Xdigits.min() Xdigits /= Xdigits.max() def test_fit(): X = Xdigits.copy() rbm = BernoulliRBM(n_components=64, learning_rate=0.1, batch_size=10, n_iter=7, random_state=9) rbm.fit(X) assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0) # in-place tricks shouldn't have modified X assert_array_equal(X, Xdigits) def test_partial_fit(): X = Xdigits.copy() rbm = BernoulliRBM(n_components=64, learning_rate=0.1, batch_size=20, random_state=9) n_samples = X.shape[0] n_batches = int(np.ceil(float(n_samples) / rbm.batch_size)) batch_slices = np.array_split(X, n_batches) for i in range(7): for batch in batch_slices: rbm.partial_fit(batch) assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0) assert_array_equal(X, Xdigits) def test_transform(): X = Xdigits[:100] rbm1 = BernoulliRBM(n_components=16, batch_size=5, n_iter=5, random_state=42) rbm1.fit(X) Xt1 = rbm1.transform(X) Xt2 = rbm1._mean_hiddens(X) assert_array_equal(Xt1, Xt2) def test_small_sparse(): # BernoulliRBM should work on small sparse matrices. 
X = csr_matrix(Xdigits[:4]) BernoulliRBM().fit(X) # no exception def test_small_sparse_partial_fit(): for sparse in [csc_matrix, csr_matrix]: X_sparse = sparse(Xdigits[:100]) X = Xdigits[:100].copy() rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1, batch_size=10, random_state=9) rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1, batch_size=10, random_state=9) rbm1.partial_fit(X_sparse) rbm2.partial_fit(X) assert_almost_equal(rbm1.score_samples(X).mean(), rbm2.score_samples(X).mean(), decimal=0) def test_sample_hiddens(): rng = np.random.RandomState(0) X = Xdigits[:100] rbm1 = BernoulliRBM(n_components=2, batch_size=5, n_iter=5, random_state=42) rbm1.fit(X) h = rbm1._mean_hiddens(X[0]) hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0) assert_almost_equal(h, hs, decimal=1) def test_fit_gibbs(): # Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] # from the same input rng = np.random.RandomState(42) X = np.array([[0.], [1.]]) rbm1 = BernoulliRBM(n_components=2, batch_size=2, n_iter=42, random_state=rng) # you need that much iters rbm1.fit(X) assert_almost_equal(rbm1.components_, np.array([[0.02649814], [0.02009084]]), decimal=4) assert_almost_equal(rbm1.gibbs(X), X) return rbm1 def test_fit_gibbs_sparse(): # Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from # the same input even when the input is sparse, and test against non-sparse rbm1 = test_fit_gibbs() rng = np.random.RandomState(42) from scipy.sparse import csc_matrix X = csc_matrix([[0.], [1.]]) rbm2 = BernoulliRBM(n_components=2, batch_size=2, n_iter=42, random_state=rng) rbm2.fit(X) assert_almost_equal(rbm2.components_, np.array([[0.02649814], [0.02009084]]), decimal=4) assert_almost_equal(rbm2.gibbs(X), X.toarray()) assert_almost_equal(rbm1.components_, rbm2.components_) def test_gibbs_smoke(): # Check if we don't get NaNs sampling the full digits dataset. # Also check that sampling again will yield different results. 
X = Xdigits rbm1 = BernoulliRBM(n_components=42, batch_size=40, n_iter=20, random_state=42) rbm1.fit(X) X_sampled = rbm1.gibbs(X) assert_all_finite(X_sampled) X_sampled2 = rbm1.gibbs(X) assert_true(np.all((X_sampled != X_sampled2).max(axis=1))) def test_score_samples(): # Test score_samples (pseudo-likelihood) method. # Assert that pseudo-likelihood is computed without clipping. # See Fabian's blog, http://bit.ly/1iYefRk rng = np.random.RandomState(42) X = np.vstack([np.zeros(1000), np.ones(1000)]) rbm1 = BernoulliRBM(n_components=10, batch_size=2, n_iter=10, random_state=rng) rbm1.fit(X) assert_true((rbm1.score_samples(X) < -300).all()) # Sparse vs. dense should not affect the output. Also test sparse input # validation. rbm1.random_state = 42 d_score = rbm1.score_samples(X) rbm1.random_state = 42 s_score = rbm1.score_samples(lil_matrix(X)) assert_almost_equal(d_score, s_score) # Test numerical stability (#2785): would previously generate infinities # and crash with an exception. with np.errstate(under='ignore'): rbm1.score_samples([np.arange(1000) * 100]) def test_rbm_verbose(): rbm = BernoulliRBM(n_iter=2, verbose=10) old_stdout = sys.stdout sys.stdout = StringIO() try: rbm.fit(Xdigits) finally: sys.stdout = old_stdout def test_sparse_and_verbose(): # Make sure RBM works with sparse input when verbose=True old_stdout = sys.stdout sys.stdout = StringIO() from scipy.sparse import csc_matrix X = csc_matrix([[0.], [1.]]) rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1, random_state=42, verbose=True) try: rbm.fit(X) s = sys.stdout.getvalue() # make sure output is sound assert_true(re.match(r"\[BernoulliRBM\] Iteration 1," r" pseudo-likelihood = -?(\d)+(\.\d+)?," r" time = (\d|\.)+s", s)) finally: sys.stdout = old_stdout
mit
jeffery-do/Vizdoombot
doom/lib/python3.5/site-packages/matplotlib/backends/qt_editor/formlayout.py
4
20138
# -*- coding: utf-8 -*- """ formlayout ========== Module creating Qt form dialogs/layouts to edit various type of parameters formlayout License Agreement (MIT License) ------------------------------------------ Copyright (c) 2009 Pierre Raybaut Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" from __future__ import (absolute_import, division, print_function, unicode_literals) from matplotlib.externals import six from matplotlib.externals.six.moves import xrange # History: # 1.0.10: added float validator (disable "Ok" and "Apply" button when not valid) # 1.0.7: added support for "Apply" button # 1.0.6: code cleaning __version__ = '1.0.10' __license__ = __doc__ DEBUG = False import sys STDERR = sys.stderr from matplotlib.colors import is_color_like from matplotlib.colors import rgb2hex from matplotlib.colors import colorConverter from matplotlib.backends.qt_compat import QtGui, QtWidgets, QtCore if not hasattr(QtWidgets, 'QFormLayout'): raise ImportError("Warning: formlayout requires PyQt4 >v4.3 or PySide") import datetime BLACKLIST = set(["title", "label"]) def col2hex(color): """Convert matplotlib color to hex before passing to Qt""" return rgb2hex(colorConverter.to_rgb(color)) class ColorButton(QtWidgets.QPushButton): """ Color choosing push button """ colorChanged = QtCore.Signal(QtGui.QColor) def __init__(self, parent=None): QtWidgets.QPushButton.__init__(self, parent) self.setFixedSize(20, 20) self.setIconSize(QtCore.QSize(12, 12)) self.clicked.connect(self.choose_color) self._color = QtGui.QColor() def choose_color(self): color = QtWidgets.QColorDialog.getColor(self._color, self.parentWidget(), '') if color.isValid(): self.set_color(color) def get_color(self): return self._color @QtCore.Slot(QtGui.QColor) def set_color(self, color): if color != self._color: self._color = color self.colorChanged.emit(self._color) pixmap = QtGui.QPixmap(self.iconSize()) pixmap.fill(color) self.setIcon(QtGui.QIcon(pixmap)) color = QtCore.Property(QtGui.QColor, get_color, set_color) def col2hex(color): """Convert matplotlib color to hex before passing to Qt""" return rgb2hex(colorConverter.to_rgb(color)) def to_qcolor(color): """Create a QColor from a matplotlib color""" qcolor = QtGui.QColor() color = str(color) try: color = col2hex(color) except ValueError: 
#print('WARNING: ignoring invalid color %r' % color) return qcolor # return invalid QColor qcolor.setNamedColor(color) # set using hex color return qcolor # return valid QColor class ColorLayout(QtWidgets.QHBoxLayout): """Color-specialized QLineEdit layout""" def __init__(self, color, parent=None): QtWidgets.QHBoxLayout.__init__(self) assert isinstance(color, QtGui.QColor) self.lineedit = QtWidgets.QLineEdit(color.name(), parent) self.lineedit.editingFinished.connect(self.update_color) self.addWidget(self.lineedit) self.colorbtn = ColorButton(parent) self.colorbtn.color = color self.colorbtn.colorChanged.connect(self.update_text) self.addWidget(self.colorbtn) def update_color(self): color = self.text() qcolor = to_qcolor(color) self.colorbtn.color = qcolor # defaults to black if not qcolor.isValid() def update_text(self, color): self.lineedit.setText(color.name()) def text(self): return self.lineedit.text() def font_is_installed(font): """Check if font is installed""" return [fam for fam in QtGui.QFontDatabase().families() if six.text_type(fam) == font] def tuple_to_qfont(tup): """ Create a QFont from tuple: (family [string], size [int], italic [bool], bold [bool]) """ if not isinstance(tup, tuple) or len(tup) != 4 \ or not font_is_installed(tup[0]) \ or not isinstance(tup[1], int) \ or not isinstance(tup[2], bool) \ or not isinstance(tup[3], bool): return None font = QtGui.QFont() family, size, italic, bold = tup font.setFamily(family) font.setPointSize(size) font.setItalic(italic) font.setBold(bold) return font def qfont_to_tuple(font): return (six.text_type(font.family()), int(font.pointSize()), font.italic(), font.bold()) class FontLayout(QtWidgets.QGridLayout): """Font selection""" def __init__(self, value, parent=None): QtWidgets.QGridLayout.__init__(self) font = tuple_to_qfont(value) assert font is not None # Font family self.family = QtWidgets.QFontComboBox(parent) self.family.setCurrentFont(font) self.addWidget(self.family, 0, 0, 1, -1) # Font size 
self.size = QtWidgets.QComboBox(parent) self.size.setEditable(True) sizelist = list(xrange(6, 12)) + list(xrange(12, 30, 2)) + [36, 48, 72] size = font.pointSize() if size not in sizelist: sizelist.append(size) sizelist.sort() self.size.addItems([str(s) for s in sizelist]) self.size.setCurrentIndex(sizelist.index(size)) self.addWidget(self.size, 1, 0) # Italic or not self.italic = QtWidgets.QCheckBox(self.tr("Italic"), parent) self.italic.setChecked(font.italic()) self.addWidget(self.italic, 1, 1) # Bold or not self.bold = QtWidgets.QCheckBox(self.tr("Bold"), parent) self.bold.setChecked(font.bold()) self.addWidget(self.bold, 1, 2) def get_font(self): font = self.family.currentFont() font.setItalic(self.italic.isChecked()) font.setBold(self.bold.isChecked()) font.setPointSize(int(self.size.currentText())) return qfont_to_tuple(font) def is_edit_valid(edit): text = edit.text() state = edit.validator().validate(text, 0)[0] return state == QtGui.QDoubleValidator.Acceptable class FormWidget(QtWidgets.QWidget): update_buttons = QtCore.Signal() def __init__(self, data, comment="", parent=None): QtWidgets.QWidget.__init__(self, parent) from copy import deepcopy self.data = deepcopy(data) self.widgets = [] self.formlayout = QtWidgets.QFormLayout(self) if comment: self.formlayout.addRow(QtWidgets.QLabel(comment)) self.formlayout.addRow(QtWidgets.QLabel(" ")) if DEBUG: print("\n"+("*"*80)) print("DATA:", self.data) print("*"*80) print("COMMENT:", comment) print("*"*80) def get_dialog(self): """Return FormDialog instance""" dialog = self.parent() while not isinstance(dialog, QtWidgets.QDialog): dialog = dialog.parent() return dialog def setup(self): for label, value in self.data: if DEBUG: print("value:", value) if label is None and value is None: # Separator: (None, None) self.formlayout.addRow(QtWidgets.QLabel(" "), QtWidgets.QLabel(" ")) self.widgets.append(None) continue elif label is None: # Comment self.formlayout.addRow(QtWidgets.QLabel(value)) 
self.widgets.append(None) continue elif tuple_to_qfont(value) is not None: field = FontLayout(value, self) elif label.lower() not in BLACKLIST and is_color_like(value): field = ColorLayout(to_qcolor(value), self) elif isinstance(value, six.string_types): field = QtWidgets.QLineEdit(value, self) elif isinstance(value, (list, tuple)): if isinstance(value, tuple): value = list(value) selindex = value.pop(0) field = QtWidgets.QComboBox(self) if isinstance(value[0], (list, tuple)): keys = [key for key, _val in value] value = [val for _key, val in value] else: keys = value field.addItems(value) if selindex in value: selindex = value.index(selindex) elif selindex in keys: selindex = keys.index(selindex) elif not isinstance(selindex, int): print("Warning: '%s' index is invalid (label: " "%s, value: %s)" % (selindex, label, value), file=STDERR) selindex = 0 field.setCurrentIndex(selindex) elif isinstance(value, bool): field = QtWidgets.QCheckBox(self) if value: field.setCheckState(QtCore.Qt.Checked) else: field.setCheckState(QtCore.Qt.Unchecked) elif isinstance(value, float): field = QtWidgets.QLineEdit(repr(value), self) field.setCursorPosition(0) field.setValidator(QtGui.QDoubleValidator(field)) field.validator().setLocale(QtCore.QLocale("C")) dialog = self.get_dialog() dialog.register_float_field(field) field.textChanged.connect(lambda text: dialog.update_buttons()) elif isinstance(value, int): field = QtWidgets.QSpinBox(self) field.setRange(-1e9, 1e9) field.setValue(value) elif isinstance(value, datetime.datetime): field = QtWidgets.QDateTimeEdit(self) field.setDateTime(value) elif isinstance(value, datetime.date): field = QtWidgets.QDateEdit(self) field.setDate(value) else: field = QtWidgets.QLineEdit(repr(value), self) self.formlayout.addRow(label, field) self.widgets.append(field) def get(self): valuelist = [] for index, (label, value) in enumerate(self.data): field = self.widgets[index] if label is None: # Separator / Comment continue elif tuple_to_qfont(value) is 
not None: value = field.get_font() elif isinstance(value, six.string_types) or is_color_like(value): value = six.text_type(field.text()) elif isinstance(value, (list, tuple)): index = int(field.currentIndex()) if isinstance(value[0], (list, tuple)): value = value[index][0] else: value = value[index] elif isinstance(value, bool): value = field.checkState() == QtCore.Qt.Checked elif isinstance(value, float): value = float(str(field.text())) elif isinstance(value, int): value = int(field.value()) elif isinstance(value, datetime.datetime): value = field.dateTime().toPyDateTime() elif isinstance(value, datetime.date): value = field.date().toPyDate() else: value = eval(str(field.text())) valuelist.append(value) return valuelist class FormComboWidget(QtWidgets.QWidget): update_buttons = QtCore.Signal() def __init__(self, datalist, comment="", parent=None): QtWidgets.QWidget.__init__(self, parent) layout = QtWidgets.QVBoxLayout() self.setLayout(layout) self.combobox = QtWidgets.QComboBox() layout.addWidget(self.combobox) self.stackwidget = QtWidgets.QStackedWidget(self) layout.addWidget(self.stackwidget) self.combobox.currentIndexChanged.connect(self.stackwidget.setCurrentIndex) self.widgetlist = [] for data, title, comment in datalist: self.combobox.addItem(title) widget = FormWidget(data, comment=comment, parent=self) self.stackwidget.addWidget(widget) self.widgetlist.append(widget) def setup(self): for widget in self.widgetlist: widget.setup() def get(self): return [widget.get() for widget in self.widgetlist] class FormTabWidget(QtWidgets.QWidget): update_buttons = QtCore.Signal() def __init__(self, datalist, comment="", parent=None): QtWidgets.QWidget.__init__(self, parent) layout = QtWidgets.QVBoxLayout() self.tabwidget = QtWidgets.QTabWidget() layout.addWidget(self.tabwidget) self.setLayout(layout) self.widgetlist = [] for data, title, comment in datalist: if len(data[0]) == 3: widget = FormComboWidget(data, comment=comment, parent=self) else: widget = 
FormWidget(data, comment=comment, parent=self) index = self.tabwidget.addTab(widget, title) self.tabwidget.setTabToolTip(index, comment) self.widgetlist.append(widget) def setup(self): for widget in self.widgetlist: widget.setup() def get(self): return [widget.get() for widget in self.widgetlist] class FormDialog(QtWidgets.QDialog): """Form Dialog""" def __init__(self, data, title="", comment="", icon=None, parent=None, apply=None): QtWidgets.QDialog.__init__(self, parent) self.apply_callback = apply # Form if isinstance(data[0][0], (list, tuple)): self.formwidget = FormTabWidget(data, comment=comment, parent=self) elif len(data[0]) == 3: self.formwidget = FormComboWidget(data, comment=comment, parent=self) else: self.formwidget = FormWidget(data, comment=comment, parent=self) layout = QtWidgets.QVBoxLayout() layout.addWidget(self.formwidget) self.float_fields = [] self.formwidget.setup() # Button box self.bbox = bbox = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel) self.formwidget.update_buttons.connect(self.update_buttons) if self.apply_callback is not None: apply_btn = bbox.addButton(QtWidgets.QDialogButtonBox.Apply) apply_btn.clicked.connect(self.apply) bbox.accepted.connect(self.accept) bbox.rejected.connect(self.reject) layout.addWidget(bbox) self.setLayout(layout) self.setWindowTitle(title) if not isinstance(icon, QtGui.QIcon): icon = QtWidgets.QWidget().style().standardIcon(QtWidgets.QStyle.SP_MessageBoxQuestion) self.setWindowIcon(icon) def register_float_field(self, field): self.float_fields.append(field) def update_buttons(self): valid = True for field in self.float_fields: if not is_edit_valid(field): valid = False for btn_type in (QtWidgets.QDialogButtonBox.Ok, QtWidgets.QDialogButtonBox.Apply): btn = self.bbox.button(btn_type) if btn is not None: btn.setEnabled(valid) def accept(self): self.data = self.formwidget.get() QtWidgets.QDialog.accept(self) def reject(self): self.data = None 
QtWidgets.QDialog.reject(self) def apply(self): self.apply_callback(self.formwidget.get()) def get(self): """Return form result""" return self.data def fedit(data, title="", comment="", icon=None, parent=None, apply=None): """ Create form dialog and return result (if Cancel button is pressed, return None) data: datalist, datagroup title: string comment: string icon: QIcon instance parent: parent QWidget apply: apply callback (function) datalist: list/tuple of (field_name, field_value) datagroup: list/tuple of (datalist *or* datagroup, title, comment) -> one field for each member of a datalist -> one tab for each member of a top-level datagroup -> one page (of a multipage widget, each page can be selected with a combo box) for each member of a datagroup inside a datagroup Supported types for field_value: - int, float, str, unicode, bool - colors: in Qt-compatible text form, i.e. in hex format or name (red,...) (automatically detected from a string) - list/tuple: * the first element will be the selected index (or value) * the other elements can be couples (key, value) or only values """ # Create a QApplication instance if no instance currently exists # (e.g., if the module is used directly from the interpreter) if QtWidgets.QApplication.startingUp(): _app = QtWidgets.QApplication([]) dialog = FormDialog(data, title, comment, icon, parent, apply) if dialog.exec_(): return dialog.get() if __name__ == "__main__": def create_datalist_example(): return [('str', 'this is a string'), ('list', [0, '1', '3', '4']), ('list2', ['--', ('none', 'None'), ('--', 'Dashed'), ('-.', 'DashDot'), ('-', 'Solid'), ('steps', 'Steps'), (':', 'Dotted')]), ('float', 1.2), (None, 'Other:'), ('int', 12), ('font', ('Arial', 10, False, True)), ('color', '#123409'), ('bool', True), ('date', datetime.date(2010, 10, 10)), ('datetime', datetime.datetime(2010, 10, 10)), ] def create_datagroup_example(): datalist = create_datalist_example() return ((datalist, "Category 1", "Category 1 comment"), 
(datalist, "Category 2", "Category 2 comment"), (datalist, "Category 3", "Category 3 comment")) #--------- datalist example datalist = create_datalist_example() def apply_test(data): print("data:", data) print("result:", fedit(datalist, title="Example", comment="This is just an <b>example</b>.", apply=apply_test)) #--------- datagroup example datagroup = create_datagroup_example() print("result:", fedit(datagroup, "Global title")) #--------- datagroup inside a datagroup example datalist = create_datalist_example() datagroup = create_datagroup_example() print("result:", fedit(((datagroup, "Title 1", "Tab 1 comment"), (datalist, "Title 2", "Tab 2 comment"), (datalist, "Title 3", "Tab 3 comment")), "Global title"))
mit
RLPAgroScience/ROIseries
ROIseries/sub_routines/sub_routines.py
1
1059
import os
import pandas as pd
import numpy as np


def file_search(top_dir, extension):
    """Recursively collect file paths under *top_dir* whose name ends with *extension*.

    The match is case-insensitive on the file name, so *extension* should be
    given in lower case (e.g. ``'.tif'``). Returns a list of full paths in
    ``os.walk`` order.
    """
    return [os.path.join(dir_path, name)
            for dir_path, _dir_names, files in os.walk(top_dir)
            for name in files
            if name.lower().endswith(extension)]


def sort_index_columns_inplace(df):
    """Sort *df* by its row index (axis 0) and its column index (axis 1), in place."""
    for axis in (0, 1):
        df.sort_index(axis=axis, inplace=True)


def idx_corners(n_vars, direction):
    """Return ``[x, y]`` integer index arrays addressing one triangular corner
    (diagonal included) of an ``n_vars`` x ``n_vars`` matrix.

    Parameters
    ----------
    n_vars : int
        Side length of the square matrix.
    direction : str
        One of ``'up_right'``, ``'down_right'``, ``'down_left'``, ``'up_left'``.

    Raises
    ------
    ValueError
        If *direction* is not one of the four supported corners.
    """
    idx_range = np.arange(n_vars)
    # Base case is the upper-right triangle: row i is paired with
    # columns i..n_vars-1, so row i contributes n_vars - i entries.
    x = np.repeat(idx_range, (idx_range + 1)[::-1])
    y = np.concatenate([idx_range[i:] for i in range(n_vars)])
    # The other corners are reflections of the base triangle.
    if direction == 'up_right':
        pass
    elif direction == 'down_right':
        x = (n_vars - 1) - x
    elif direction == 'down_left':
        x = (n_vars - 1) - x
        y = (n_vars - 1) - y
    elif direction == 'up_left':
        y = (n_vars - 1) - y
    else:
        # Fixed message: the original listed 'up_right' twice and
        # omitted 'down_right'.
        raise ValueError("direction not in "
                         "['up_right','down_right','down_left','up_left']")
    return [x, y]
agpl-3.0
scienceguyrob/KnownSourceMatcher
KnownSourceMatcher/dist/Interactive.py
2
29344
""" This file is part of the KnownSourceMatcher. KnownSourceMatcher is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. KnownSourceMatcher is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with KnownSourceMatcher. If not, see <http://www.gnu.org/licenses/>. File name: Interactive.py Created: February 7th, 2014 Author: Rob Lyon Contact: rob@scienceguyrob.com or robert.lyon@postgrad.manchester.ac.uk Web: <http://www.scienceguyrob.com> or <http://www.cs.manchester.ac.uk> or <http://www.jb.man.ac.uk> This code runs on python 2.4 or later. """ import copy, gzip, os, math, string, ordereddict, operator import KnownSource import numpy as np import PFDFile as pfd from Utilities import Utilities from xml.dom import minidom # For viewing candidates. from PIL import Image # @UnresolvedImport - Ignore this comment, simply stops my IDE complaining. import matplotlib.pyplot as plt import matplotlib.image as mpimg # ****************************** # # CLASS DEFINITION # # ****************************** class Interactive(Utilities): """ Script used to interactively match against a collection of known sources from a pulsar catalog, given user specified period or DM values. """ # ****************************** # # INIT FUNCTION # # ****************************** def __init__(self,debugFlag, db,output): """ Initialises the class, and passes the pulsar catalog to it. 
""" Utilities.__init__(self,debugFlag) self.db = db self.harmonics = [1, 0.5, 0.3, 0.25, 0.2, 0.16, 0.142, 0.125, 0.111, 0.1, 0.0909,0.0833,0.0769,0.0714,0.0666,0.0625,0.03125,0.015625] self.width = 10 # The width of the image viewing panel. self.height = 8 # The height of the image viewing panel. # ****************************** # # FUNCTIONS. # # ****************************** def run(self): """ Begins the process of interactively search a pulsar catalog. """ print "Running interactively, press '3' to exit." self.showMenu() # **************************************************************************************************** def showMenu(self): """ Shows the first menu that provides a choice between searching a catalog, or exiting the application. """ exit = False while(exit == False): print "********************" print "1. Search...." print "2. Manually match" print "3. Exit" print "********************\n" choice = 0 while (choice <= 0 or choice >=4): try: choice = int(raw_input("Enter choice (1-3): ")) except: choice = 0 if(choice == 1): self.processChoice() elif(choice == 2): self.manuallyMatch() elif(choice == 3): exit = True # **************************************************************************************************** def processChoice(self): """ Processes the users choice to search the catalog. Provides the option to search by period or DM. """ print "\n\t********************" print "\t1. Search by period" print "\t2. Search by DM" print "\t********************\n" choice = 0 while (choice <= 0 or choice >=3): try: choice = int(raw_input("Enter choice (1 or 2): ")) except: choice = 0 if(choice == 1): self.searchByPeriod() elif(choice == 2): self.searchByDM() # **************************************************************************************************** def manuallyMatch(self): """ Enables manual matching. 
""" print "\n\t*************************" print "\tManually match candidates" print "\t*************************\n" directory = "" while (not os.path.isdir(directory)): try: directory = str(raw_input("Enter path to directory containing candidates to match (or x to exit): ")) if(directory=='x'): # User wants to exit. return True except: directory = "" outputFile = "" while (not os.path.exists(outputFile)): try: outputFile = str(raw_input("Enter a valid file path to write matches to (or x to exit): ")) if(outputFile=='x'): # User wants to exit. return True open(outputFile, 'a').close() if(self.fileExists(outputFile)): self.appendToFile(outputFile, "Manual match log,,,,,,,,',\n") self.appendToFile(outputFile, "Candidate,RAJ,DECJ,P0,DM,Known Source,RAJ,DECJ,P0,DM,Harmonic,Angular Separation\n") except: outputFile = "" maxAngSep = 0 while (maxAngSep <= 0 or maxAngSep >=1000): try: maxAngSep = float(raw_input("Enter max angular separation you're willing to tolerate between matches: ")) except: maxAngSep = 0 count = 0 print "\n\tWill now loop over candidates found to be matched. Press x to break loop.\n\n" # Temporary values RAJ = "" DECJ = "" P0 = 0 DM = 0 matches = [] imageShown = False # Search the supplied directory recursively. for root, directories, files in os.walk(directory): for file in files: # If the file found isn't some form of candidate file, then ignore it. if(not file.endswith('.phcx.gz') and not file.endswith('.pfd')): continue # Else if we reach here we must have a candidate file. print "\n\n************************************************************************************************************************\nProcessing file: ", file, "\n" # If we have a HTRU candidate if file.endswith('.phcx.gz'): # Create a KnownSource object from the candidate file. 
candidate = self.processPHCX(os.path.join(root, file)) count += 1 RAJ = candidate.getParameterAtIndex("RAJ", 0) DECJ = candidate.getParameterAtIndex("DECJ",0) P0 = float(candidate.getParameterAtIndex("P0",0)) DM = candidate.getParameterAtIndex("DM",0) # Output formatting and the details of the candidate found print '{:<55}'.format("Name") + "\t" + '{:<12}'.format("RA") + "\t" + '{:<13}'.format("DEC") + "\t" + '{:<20}'.format("Period") + "\t" + '{:<15}'.format("DM") print '{:<55}'.format(file) + "\t" + '{:<12}'.format(str(RAJ)) + "\t" + '{:<13}'.format(str(DECJ)) + "\t" + '{:<20}'.format(str(P0)) + "\t" + '{:<15}'.format(str(DM)) print "\nPossible matches to check\n" # Look for potential matches. matches=self.searchPeriod(P0) pngPath = os.path.join(root, file) + ".png" if(self.fileExists(pngPath)): fig=plt.figure(figsize=(self.width,self.height))# @UnusedVariable plt.ion() candidateImage = mpimg.imread(pngPath) plt.imshow(candidateImage, aspect='auto') plt.show() imageShown = True # If we have a PFD candidate elif file.endswith('.pfd'): # Create a KnownSource object from the candidate file. candidate = self.processPFD(os.path.join(root, file)) count += 1 RAJ = candidate.getParameterAtIndex("RAJ", 0) DECJ = candidate.getParameterAtIndex("DECJ",0) P0 = float(candidate.getParameterAtIndex("P0",0)) DM = candidate.getParameterAtIndex("DM",0) # Output formatting and the details of the candidate found print '{:<55}'.format("Name") + "\t" + '{:<12}'.format("RA") + "\t" + '{:<13}'.format("DEC") + "\t" + '{:<20}'.format("Period") + "\t" + '{:<15}'.format("DM") print '{:<55}'.format(file) + "\t" + '{:<12}'.format(str(RAJ)) + "\t" + '{:<13}'.format(str(DECJ)) + "\t" + '{:<20}'.format(str(P0)) + "\t" + '{:<15}'.format(str(DM)) print "\nPossible matches to check\n" # Look for potential matches. matches=self.searchPeriod(P0) # If no matches found and we are dealing with a candidate file of some sort... 
if(len(matches)==0 and (file.endswith('.phcx.gz') or file.endswith('.pfd'))): print "No match" #detail = file + "," + RAJ + "," + DECJ + "," + str(P0) + "," + str(DM) + ",,,,\n" #.appendToFile(outputFile, detail) # Else there is at least 1 potential match elif(len(matches) > 0 and (file.endswith('.phcx.gz') or file.endswith('.pfd'))): # First print out the potential matches. These need to be ordered according # to angular separation. This is important as there could be many matches, especially # when considering that harmonics could match. # At this point matches contains tuples of known sources along with the potential reason for # a match, e.g. 1st harmonic or 8th harmonic. But these matches are not sorted in any way. print '{:<5}'.format("Match") + "\t" +'{:<10}'.format("Name") + "\t" + '{:<12}'.format("RA") + "\t" + '{:<13}'.format("DEC") + "\t" + '{:<20}'.format("Period") + "\t" + '{:<15}'.format("DM") + "\t" + '{:<15}'.format("Harmonic") + "\t" + '{:<15}'.format("Separation") # Here we filter according the the angular separation specified by the user. separationFilteredMatches={} separationFilteredDetails={} count=0 for source, reasonForMatch in matches: angularSeparation = self.findAngularSep(source.getParameterAtIndex("RAJ", 0),source.getParameterAtIndex("DECJ",0), RAJ, DECJ) if(angularSeparation <= maxAngSep ): count+=1 harmonic = int(1.0/float(reasonForMatch)) source.harmonic = harmonic source.angularSeparation = angularSeparation separationFilteredMatches[source.sourceName]= source separationFilteredDetails[source.sourceName]=source.shortStr() + "\t" + '{:<15}'.format(str(harmonic)) + "\t" + '{:<15}'.format(str(angularSeparation)) #print str(count) + "\t" + source.shortStr() + "\t" + '{:<15}'.format(str(reasonForMatch)) + "\t" + '{:<15}'.format(str(angularSeparation)) # Add an extra candidate in case the user would like to match to RFI. 
# This is a quick messy fix to allow you to match an RFI candidate explicitly # to RFI, if any such RFI candidates squeezed through previous filtering steps. rfi = KnownSource.KnownSource("RFI") rfi.angularSeparation = 100000 rfi.harmonic = 1 rfi.addParameter("PSRJ RFI 0 0") rfi.addParameter("RAJ 00:00:00 0 0") rfi.addParameter("DECJ 00:00:00 0 0") rfi.addParameter("P0 0 0 0") rfi.addParameter("F0 0 0 0") rfi.addParameter("DM 0 0 0") separationFilteredMatches["RFI"]= rfi separationFilteredDetails["RFI"]= rfi.shortStr() + "\t" + '{:<15}'.format(str("RFI")) + "\t" + '{:<15}'.format(str("NaN")) # Now order according to location in the sky. orderedSourcesDict = ordereddict.OrderedDict() for s in (sorted(separationFilteredMatches.values(), key=operator.attrgetter('angularSeparation'))): orderedSourcesDict[copy.copy(s.sourceName)] = copy.deepcopy(s) count=0 matches=[] # Reset matches.... for key in orderedSourcesDict.keys(): value = orderedSourcesDict[key] count+=1 matches.append(value) # Re-populate matches. print str(count) + "\t" + separationFilteredDetails[key] print "Which match to record (0 for none, last in the list for RFI, otherwise choose the appropriate number)." choice = -2 while (choice <= -2 or choice >=len(separationFilteredMatches)): try: c = raw_input("Enter choice (or x for exit): ") if(c=='x'): # User wants to exit. return True choice = int(c)-1 # user must have chosen a match except: choice = -6 # Arbitrary value. # If user has chosen a match, then record it. if(choice >= 0): detail_a = str(os.path.join(root, file)) + "," + RAJ + "," + DECJ + "," + str(P0) + "," + str(DM) + "," detail_b = matches[choice].shortStrCSV() + "," + str(matches[choice].harmonic) + "," + str(matches[choice].angularSeparation) + "\n" detail_c = detail_a+detail_b self.appendToFile(outputFile, detail_c) if(imageShown): plt.clf() plt.close() imageShown = False print "Compared ", count , " candidates to ", len(self.db.orderedSourcesDict), " known sources. 
" # **************************************************************************************************** def searchByPeriod(self): """ Searches the loaded catalog by period. """ period = 0.0 while (period <= 0.0): try: period = float(raw_input("Enter period(s): ")) except: period =0.0 print "Name \tRA \tDEC \tPeriod(s)\tDM" for key in self.db.orderedSourcesDict.keys(): try: knownSource = self.db.orderedSourcesDict[key] cand_period = float(knownSource.getParameterAtIndex("P0",0)) acc = (float(self.db.accuracy)/100)*float(cand_period) for i in range(0,len(self.harmonics)): # Evaluate the search condition search_cond = float(cand_period) > (float(period) * float(self.harmonics[i])) - float(acc) and\ (float(cand_period) < (float(period) * float(self.harmonics[i])) + float(acc)) if(search_cond): print knownSource.shortStr() + " Harmonic: " + str(self.harmonics[i]) except ValueError as ve: pass # **************************************************************************************************** def searchPeriod(self,period): """ Searches the loaded catalog by period. """ #print "Name \tRA \tDEC \tPeriod(s)\tDM" matches = [] count = 0 for key in self.db.orderedSourcesDict.keys(): try: knownSource = self.db.orderedSourcesDict[key] cand_period = float(knownSource.getParameterAtIndex("P0",0)) acc = (float(self.db.accuracy)/100)*float(cand_period) for i in range(0,len(self.harmonics)): # Evaluate the search condition search_cond = float(cand_period) > (float(period) * float(self.harmonics[i])) - float(acc) and\ (float(cand_period) < (float(period) * float(self.harmonics[i])) + float(acc)) if(search_cond): count+=1 #print str(count) + "\t" + knownSource.shortStr() + " Harmonic: " + str(self.harmonics[i]) matches.append(tuple([knownSource,str(self.harmonics[i])])) except ValueError as ve: pass return matches # **************************************************************************************************** def searchByDM(self): """ Searches the loaded catalog by DM. 
""" dm = 0.0 while (dm <= 0.0): try: dm = float(raw_input("Enter DM: ")) except: dm =0.0 print "Name \tRA \tDEC \tPeriod(s)\t\tDM" for key in self.db.orderedSourcesDict.keys(): try: knownSource = self.db.orderedSourcesDict[key] cand_dm = float(knownSource.getParameterAtIndex("DM",0)) acc = (float(self.db.accuracy)/100)*float(cand_dm) # Evaluate the search condition search_cond = float(cand_dm) > (float(dm) ) - float(acc) and\ (float(cand_dm) < (float(dm) ) + float(acc)) if(search_cond): print knownSource.shortStr() except ValueError as ve: pass # **************************************************************************************************** def processPHCX(self,path): """ Compares a candidate in a ".phcx.gz" file to the known sources in the ATNF catalog. Each ".phcx.gz" file is a compressed XML file, so here we use XML parsing modules to extract the candidate parameters. """ self.o("Processing PHCX file at: " + path + "\n") contents = gzip.open(path,'rb') xmldata = minidom.parse(contents) contents.close() # Build candidate by extracting data from .phcx file. period = float(xmldata.getElementsByTagName('BaryPeriod')[1].childNodes[0].data) RAJ = xmldata.getElementsByTagName('RA')[0].childNodes[0].data DECJ = xmldata.getElementsByTagName('Dec')[0].childNodes[0].data DM = float(xmldata.getElementsByTagName('Dm')[1].childNodes[0].data) SNR = float(xmldata.getElementsByTagName('Snr')[1].childNodes[0].data) # BUILD the candidate. # Here there are two possible cases to watch out for. Either RAJ and DECJ # are user specified strings, or they are numerical values extracted from # a candidate file. If these are numerical values, then they must be converted # in to the correct string format for comparison. 
if (not isinstance(RAJ, str)): # Convert the RA in degrees into HH:MM:SS ra_hrs = float(RAJ) * 24.0 / 360.0 ra_hr = int( ra_hrs ) ra_min = int( float( ra_hrs - ra_hr ) * 60.0 ) ra_sec = int( float( ra_hrs - ra_hr - ra_min / 60 ) * 60.0 ) # Convert the DEC in degrees into HH:MM:SS dec_deg = int(float(DECJ)) dec_abs = np.abs(float(DECJ)) dec_min = int((float(dec_abs) - np.abs(dec_deg)) * 60.0 ) dec_sec = int((float(dec_abs) - np.abs(dec_deg) - dec_min / 60.0 ) * 60.0 ) RAJ_str = str(ra_hr) + ":"+str(ra_min)+":"+str(ra_sec) DECJ_str = str(dec_deg) + ":" + str(dec_min) + ":" + str(dec_sec) #print "RAJ: " + RAJ_str + " DECJ: "+DECJ_str # DEBUGGING self.o( "Candidate -> "+ path + " Period = " + str(period) + " RAJ = " + str(RAJ_str) + " DECJ = " + str(DECJ_str) + " DM = " + str(DM) ) # Build a KnownSource object from this candidate. We will then find its # position in the ordered dictionary, and if no known sources have a sortAttribute # within a user specified threshold ( the self.searchPadding variable). # Note that not all sources will have a DM value. candidateSource = KnownSource.KnownSource() candidateSource.addParameter("PSRJ " + path + " 0") if (isinstance(RAJ, str)): candidateSource.addParameter("RAJ " + RAJ + " 0") candidateSource.addParameter("DECJ " + DECJ + " 0") else: candidateSource.addParameter("RAJ " + RAJ_str + " 0") candidateSource.addParameter("DECJ " + DECJ_str + " 0") candidateSource.addParameter("DM " + str(DM) + " 0") candidateSource.addParameter("P0 " + str(period) + " 0") candidateSource.addParameter("SNR " + str(SNR) + " 0") return candidateSource # **************************************************************************************************** def processPFD(self,path): """ Compares a candidate in a ".pfd" file to the known sources in the ATNF catalog. """ self.o("Processing PFD file at: " + path + "\n") cand = pfd.PFD(self.debug,path) cand.load() # Build candidate by extracting data from .phcx file. 
period = float(cand.getPeriod()) RAJ = cand.getRA() DECJ = cand.getDEC() DM = cand.getDM() SNR = cand.getSNR() # BUILD the candidate. # Here there are two possible cases to watch out for. Either RAJ and DECJ # are user specified strings, or they are numerical values extracted from # a candidate file. If these are numerical values, then they must be converted # in to the correct string format for comparison. if (not isinstance(RAJ, str)): # Convert the RA in degrees into HH:MM:SS ra_hrs = float(RAJ) * 24.0 / 360.0 ra_hr = int( ra_hrs ) ra_min = int( float( ra_hrs - ra_hr ) * 60.0 ) ra_sec = int( float( ra_hrs - ra_hr - ra_min / 60 ) * 60.0 ) # Convert the DEC in degrees into HH:MM:SS dec_deg = int(float(DECJ)) dec_abs = np.abs(float(DECJ)) dec_min = int((float(dec_abs) - np.abs(dec_deg)) * 60.0 ) dec_sec = int((float(dec_abs) - np.abs(dec_deg) - dec_min / 60.0 ) * 60.0 ) RAJ_str = str(ra_hr) + ":"+str(ra_min)+":"+str(ra_sec) DECJ_str = str(dec_deg) + ":" + str(dec_min) + ":" + str(dec_sec) #print "RAJ: " + RAJ_str + " DECJ: "+DECJ_str else: RAJ_str = RAJ DECJ_str = DECJ # DEBUGGING self.o( "Candidate -> "+ path + " Period = " + str(period) + " RAJ = " + str(RAJ_str) + " DECJ = " + str(DECJ_str) + " DM = " + str(DM) ) # Build a KnownSource object from this candidate. We will then find its # position in the ordered dictionary, and if no known sources have a sortAttribute # within a user specified threshold ( the self.searchPadding variable). # Note that not all sources will have a DM value. 
candidateSource = KnownSource.KnownSource() candidateSource.addParameter("PSRJ " + path + " 0") if (isinstance(RAJ, str)): candidateSource.addParameter("RAJ " + RAJ + " 0") candidateSource.addParameter("DECJ " + DECJ + " 0") else: candidateSource.addParameter("RAJ " + RAJ_str + " 0") candidateSource.addParameter("DECJ " + DECJ_str + " 0") candidateSource.addParameter("DM " + str(DM) + " 0") candidateSource.addParameter("P0 " + str(period) + " 0") candidateSource.addParameter("SNR " + str(SNR) + " 0") return candidateSource # **************************************************************************************************** def findAngularSep(self, knownSource_RAJ, knownSource_DECJ, candidate_RAJ, candidate_DECJ): """ Calculates the angular separation between a known source and a candidate pulsar. The expected input is four strings, such that each string is of the form: 00:00:00 These correspond to the right ascension and declination of each of the sources. The value returned is the separation between the two sources theta. Code originally written by Ben Stappers. 
""" # Split the strings list_RAJ_A = string.split(knownSource_RAJ, ":") list_DECJ_A = string.split(knownSource_DECJ, ":") #print "Candidate -> RAJ = ", candidate_RAJ," DECJ = ", candidate_DECJ #print "KnownSource -> RAJ = ", knownSource_RAJ," DECJ = ", knownSource_DECJ pointA_RA_dec = float(str(list_RAJ_A[0])) + float(str(list_RAJ_A[1]))/60.0 + float(str(list_RAJ_A[2]))/3600.0 pointA_DEC_dec = float(str(list_DECJ_A[0])) + float(str(list_DECJ_A[1]))/60.0 + float(str(list_DECJ_A[2]))/3600.0 list_RAJ_B = string.split(candidate_RAJ, ":") list_DECJ_B = string.split(candidate_DECJ, ":") if(len(list_RAJ_B)==3): pointB_RA_dec = float(str(list_RAJ_B[0])) + float(str(list_RAJ_B[1]))/60.0 + float(str(list_RAJ_B[2]))/3600.0 elif(len(list_RAJ_B)==2): pointB_RA_dec = float(str(list_RAJ_B[0])) + float(str(list_RAJ_B[1]))/60.0 if(len(list_DECJ_B)==3): pointB_DEC_dec = float(str(list_DECJ_B[0])) + float(str(list_DECJ_B[1]))/60.0 + float(str(list_DECJ_B[2]))/3600.0 elif(len(list_DECJ_B) == 2): pointB_DEC_dec = float(str(list_DECJ_B[0])) + float(str(list_DECJ_B[1]))/60.0 # Convert to Radians r1 = (pointA_RA_dec/360) * 2 * math.pi d1 = (pointA_DEC_dec/360)* 2 * math.pi r2 = (pointB_RA_dec/360) * 2 * math.pi d2 = (pointB_DEC_dec/360) * 2 * math.pi # Calculate the angular separation theta atanpart = math.atan(math.sqrt( math.cos(d2)*math.cos(d2)*math.pow((math.sin(r2-r1)),2) + math.pow((math.cos(d1)*math.sin(d2)-math.sin(d1)*math.cos(d2)*math.cos(r2-r1)),2 )) / (math.sin(d1)*math.sin(d2)+math.cos(d1)*math.cos(d2)*math.cos(r2-r1))) if(atanpart < 0): theta = (atanpart*180/math.pi) + 180 else: theta = atanpart*180/math.pi return theta # ****************************************************************************************************
gpl-2.0
zhoujh30/folium
folium/element.py
1
15488
# -*- coding: utf-8 -*- """ Elements ------ A generic class for creating Elements. """ from uuid import uuid4 from jinja2 import Environment, PackageLoader, Template ENV = Environment(loader=PackageLoader('folium', 'templates')) from collections import OrderedDict import json from .six import urlopen from .utilities import _camelify, _parse_size class Element(object): """Basic Element object that does nothing. Other Elements may inherit from this one.""" def __init__(self, template=None, template_name=None): """Creates a Element.""" self._name = 'Element' self._id = uuid4().hex self._env = ENV self._children = OrderedDict() self._parent = None self._template = Template(template) if template is not None\ else ENV.get_template(template_name) if template_name is not None\ else Template(u""" {% for name, element in this._children.items() %} {{element.render(**kwargs)}} {% endfor %} """) def get_name(self): return _camelify(self._name) + '_' +self._id def add_children(self, child, name=None, index=None): """Add a children.""" if name is None: name = child.get_name() if index is None: self._children[name] = child else: items = [item for item in self._children.items() if item[0] != name] items.insert(int(index),(name,child)) self._children = items child._parent = self def add_to(self, parent, name=None, index=None): """Add element to a parent.""" parent.add_children(self, name=name, index=index) def to_dict(self, depth=-1, ordered=True, **kwargs): if ordered: dict_fun = OrderedDict else: dict_fun = dict out = dict_fun() out['name'] = self._name out['id'] = self._id if depth != 0: out['children'] = dict_fun([(name, child.to_dict(depth=depth-1))\ for name,child in self._children.items()]) return out def to_json(self, depth=-1, **kwargs): return json.dumps(self.to_dict(depth=depth, ordered=True), **kwargs) def get_root(self): """Returns the root of the elements tree.""" if self._parent is None: return self else: return self._parent.get_root() def render(self, **kwargs): 
"""TODO : docstring here.""" return self._template.render(this=self, kwargs=kwargs) class Link(Element): def get_code(self): if self.code is None: self.code = urlopen(self.url).read() return self.code def to_dict(self, depth=-1, **kwargs): out = super(Link, self).to_dict(depth=-1, **kwargs) out['url'] = self.url return out class JavascriptLink(Link): def __init__(self, url, download=False): """Create a JavascriptLink object based on a url. Parameters ---------- url : str The url to be linked download : bool, default False Whether the target document shall be loaded right now. """ super(JavascriptLink, self).__init__() self._name = 'JavascriptLink' self.url = url self.code = None if download: self.get_code() self._template = Template(u""" {% if kwargs.get("embedded",False) %} <script>{{this.get_code()}}</script> {% else %} <script src="{{this.url}}"></script> {% endif %} """) class CssLink(Link): def __init__(self, url, download=False): """Create a CssLink object based on a url. Parameters ---------- url : str The url to be linked download : bool, default False Whether the target document shall be loaded right now. 
""" super(CssLink, self).__init__() self._name = 'CssLink' self.url = url self.code = None if download: self.get_code() self._template = Template(u""" {% if kwargs.get("embedded",False) %} <style>{{this.get_code()}}</style> {% else %} <link rel="stylesheet" href="{{this.url}}" /> {% endif %} """) _default_js = [ ('leaflet', "https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.3/leaflet.js"), ('jquery', "https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"), ('bootstrap', "https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/js/bootstrap.min.js"), ('awesome_markers', "https://rawgithub.com/lvoogdt/Leaflet.awesome-markers/2.0/develop/dist/leaflet.awesome-markers.js"), ('marker_cluster_src', "https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/0.4.0/leaflet.markercluster-src.js"), ('marker_cluster', "https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/0.4.0/leaflet.markercluster.js"), ] _default_css = [ ("leaflet_css", "https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.3/leaflet.css"), ("bootstrap_css", "https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css"), ("bootstrap_theme_css", "https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap-theme.min.css"), ("awesome_markers_font_css", "https://maxcdn.bootstrapcdn.com/font-awesome/4.1.0/css/font-awesome.min.css"), ("awesome_markers_css", "https://rawgit.com/lvoogdt/Leaflet.awesome-markers/2.0/develop/dist/leaflet.awesome-markers.css"), ("marker_cluster_default_css", "https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/0.4.0/MarkerCluster.Default.css"), ("marker_cluster_css", "https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/0.4.0/MarkerCluster.css"), ("awesome_rotate_css", "https://raw.githubusercontent.com/python-visualization/folium/master/folium/templates/leaflet.awesome.rotate.css"), ] class Figure(Element): def __init__(self, figsize=(17,10)): super(Figure, self).__init__() self._name = 'Figure' self.header = Element() self.html = Element() 
self.script = Element() self.header._parent = self self.html._parent = self self.script._parent = self self.figsize = figsize self._template = Template(u""" <!DOCTYPE html> <head> {{this.header.render(**kwargs)}} </head> <body> {{this.html.render(**kwargs)}} </body> <script> {{this.script.render(**kwargs)}} </script> """) # Create the meta tag self.header.add_children(Element( '<meta http-equiv="content-type" content="text/html; charset=UTF-8" />'), name='meta_http') # Import Javascripts for name, url in _default_js: self.header.add_children(JavascriptLink(url), name=name) # Import Css for name, url in _default_css: self.header.add_children(CssLink(url), name=name) self.header.add_children(Element(""" <style> html, body { width: 100%; height: 100%; margin: 0; padding: 0; } #map { position:absolute; top:0; bottom:0; right:0; left:0; } </style> """), name='css_style') def to_dict(self, depth=-1, **kwargs): out = super(Figure, self).to_dict(depth=depth, **kwargs) out['header'] = self.header.to_dict(depth=depth-1, **kwargs) out['html'] = self.html.to_dict(depth=depth-1, **kwargs) out['script'] = self.script.to_dict(depth=depth-1, **kwargs) return out def render(self, **kwargs): """TODO : docstring here.""" for name, child in self._children.items(): child.render(**kwargs) return self._template.render(this=self, kwargs=kwargs) def _repr_html_(self, **kwargs): """Displays the Figure in a Jupyter notebook. Parameters ---------- self : folium.Map object The map you want to display figsize : tuple of length 2, default (17,10) The size of the output you expect in inches. Output is 60dpi so that the output has same size as a matplotlib figure with the same figsize. 
""" html = self.render(**kwargs) width, height = self.figsize iframe = '<iframe src="{html}" width="{width}px" height="{height}px"></iframe>'\ .format(\ html = "data:text/html;base64,"+html.encode('utf8').encode('base64'), #html = self.HTML.replace('"','&quot;'), width = int(60.*width), height= int(60.*height), ) return iframe def add_subplot(self, x,y,n,margin=0.05): width = 1./y height = 1./x left = ((n-1)%y)*width top = ((n-1)//y)*height left = left+width*margin top = top+height*margin width = width*(1-2.*margin) height = height*(1-2.*margin) div = Div(position='absolute', width="{}%".format(100.*width), height="{}%".format(100.*height), left="{}%".format(100.*left), top="{}%".format(100.*top), ) self.add_children(div) return div class Html(Element): def __init__(self, data, width="100%", height="100%"): """TODO : docstring here""" super(Html, self).__init__() self._name = 'Html' self.data = data self.width = _parse_size(width) self.height = _parse_size(height) self._template = Template(u""" <div id="{{this.get_name()}}" style="width: {{this.width[0]}}{{this.width[1]}}; height: {{this.height[0]}}{{this.height[1]}};"> {{this.data}}</div> """) class Div(Figure): def __init__(self, width='100%', height='100%', left="0%", top="0%", position='relative'): """Create a Map with Folium and Leaflet.js """ super(Figure, self).__init__() self._name = 'Div' # Size Parameters. 
self.width = _parse_size(width) self.height = _parse_size(height) self.left = _parse_size(left) self.top = _parse_size(top) self.position = position self.header = Element() self.html = Element(""" {% for name, element in this._children.items() %} {{element.render(**kwargs)}} {% endfor %} """) self.script = Element() self.header._parent = self self.html._parent = self self.script._parent = self self._template = Template(u""" {% macro header(this, kwargs) %} <style> #{{this.get_name()}} { position : {{this.position}}; width : {{this.width[0]}}{{this.width[1]}}; height: {{this.height[0]}}{{this.height[1]}}; left: {{this.left[0]}}{{this.left[1]}}; top: {{this.top[0]}}{{this.top[1]}}; </style> {% endmacro %} {% macro html(this, kwargs) %} <div id="{{this.get_name()}}"> {{this.html.render(**kwargs)}} </div> {% endmacro %} """) def get_root(self): return self def render(self, **kwargs): """TODO : docstring here.""" figure = self._parent assert isinstance(figure,Figure), ("You cannot render this Element " "if it's not in a Figure.") for name, element in self._children.items(): element.render(**kwargs) for name, element in self.header._children.items(): figure.header.add_children(element, name=name) for name, element in self.script._children.items(): figure.script.add_children(element, name=name) header = self._template.module.__dict__.get('header',None) if header is not None: figure.header.add_children(Element(header(self, kwargs)), name=self.get_name()) html = self._template.module.__dict__.get('html',None) if html is not None: figure.html.add_children(Element(html(self, kwargs)), name=self.get_name()) script = self._template.module.__dict__.get('script',None) if script is not None: figure.script.add_children(Element(script(self, kwargs)), name=self.get_name()) def _repr_html_(self, figsize=(17,10), **kwargs): """Displays the Map in a Jupyter notebook. 
Parameters ---------- self : folium.Map object The map you want to display figsize : tuple of length 2, default (17,10) The size of the output you expect in inches. Output is 60dpi so that the output has same size as a matplotlib figure with the same figsize. """ if self._parent is None: self.add_to(Figure()) out = self._parent._repr_html_(figsize=figsize, **kwargs) self._parent = None else: out = self._parent._repr_html_(figsize=figsize, **kwargs) return out class MacroElement(Element): """This is a parent class for Elements defined by a macro template. To compute your own element, all you have to do is: * To inherit from this class * Overwrite the '_name' attribute * Overwrite the '_template' attribute with something of the form: {% macro header(this, kwargs) %} ... {% endmacro %} {% macro html(this, kwargs) %} ... {% endmacro %} {% macro script(this, kwargs) %} ... {% endmacro %} """ def __init__(self): """TODO : docstring here""" super(MacroElement, self).__init__() self._name = 'MacroElement' self._template = Template(u"") def render(self, **kwargs): figure = self.get_root() assert isinstance(figure,Figure), ("You cannot render this Element " "if it's not in a Figure.") header = self._template.module.__dict__.get('header',None) if header is not None: figure.header.add_children(Element(header(self, kwargs)), name=self.get_name()) html = self._template.module.__dict__.get('html',None) if html is not None: figure.html.add_children(Element(html(self, kwargs)), name=self.get_name()) script = self._template.module.__dict__.get('script',None) if script is not None: figure.script.add_children(Element(script(self, kwargs)), name=self.get_name()) for name, element in self._children.items(): element.render(**kwargs)
mit
nhejazi/scikit-learn
sklearn/feature_selection/tests/test_from_model.py
7
7314
import numpy as np from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import skip_if_32bit from sklearn import datasets from sklearn.linear_model import LogisticRegression, SGDClassifier, Lasso from sklearn.svm import LinearSVC from sklearn.feature_selection import SelectFromModel from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import PassiveAggressiveClassifier iris = datasets.load_iris() data, y = iris.data, iris.target rng = np.random.RandomState(0) def test_invalid_input(): clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=None, tol=None) for threshold in ["gobbledigook", ".5 * gobbledigook"]: model = SelectFromModel(clf, threshold=threshold) model.fit(data, y) assert_raises(ValueError, model.transform, data) def test_input_estimator_unchanged(): # Test that SelectFromModel fits on a clone of the estimator. 
est = RandomForestClassifier() transformer = SelectFromModel(estimator=est) transformer.fit(data, y) assert_true(transformer.estimator is est) @skip_if_32bit def test_feature_importances(): X, y = datasets.make_classification( n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) est = RandomForestClassifier(n_estimators=50, random_state=0) for threshold, func in zip(["mean", "median"], [np.mean, np.median]): transformer = SelectFromModel(estimator=est, threshold=threshold) transformer.fit(X, y) assert_true(hasattr(transformer.estimator_, 'feature_importances_')) X_new = transformer.transform(X) assert_less(X_new.shape[1], X.shape[1]) importances = transformer.estimator_.feature_importances_ feature_mask = np.abs(importances) > func(importances) assert_array_almost_equal(X_new, X[:, feature_mask]) # Check with sample weights sample_weight = np.ones(y.shape) sample_weight[y == 1] *= 100 est = RandomForestClassifier(n_estimators=50, random_state=0) transformer = SelectFromModel(estimator=est) transformer.fit(X, y, sample_weight=sample_weight) importances = transformer.estimator_.feature_importances_ transformer.fit(X, y, sample_weight=3 * sample_weight) importances_bis = transformer.estimator_.feature_importances_ assert_almost_equal(importances, importances_bis) # For the Lasso and related models, the threshold defaults to 1e-5 transformer = SelectFromModel(estimator=Lasso(alpha=0.1)) transformer.fit(X, y) X_new = transformer.transform(X) mask = np.abs(transformer.estimator_.coef_) > 1e-5 assert_array_equal(X_new, X[:, mask]) @skip_if_32bit def test_feature_importances_2d_coef(): X, y = datasets.make_classification( n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0, n_classes=4) est = LogisticRegression() for threshold, func in zip(["mean", "median"], [np.mean, np.median]): for order in [1, 2, np.inf]: # Fit SelectFromModel a multi-class problem transformer 
= SelectFromModel(estimator=LogisticRegression(), threshold=threshold, norm_order=order) transformer.fit(X, y) assert_true(hasattr(transformer.estimator_, 'coef_')) X_new = transformer.transform(X) assert_less(X_new.shape[1], X.shape[1]) # Manually check that the norm is correctly performed est.fit(X, y) importances = np.linalg.norm(est.coef_, axis=0, ord=order) feature_mask = importances > func(importances) assert_array_equal(X_new, X[:, feature_mask]) def test_partial_fit(): est = PassiveAggressiveClassifier(random_state=0, shuffle=False, max_iter=5, tol=None) transformer = SelectFromModel(estimator=est) transformer.partial_fit(data, y, classes=np.unique(y)) old_model = transformer.estimator_ transformer.partial_fit(data, y, classes=np.unique(y)) new_model = transformer.estimator_ assert_true(old_model is new_model) X_transform = transformer.transform(data) transformer.fit(np.vstack((data, data)), np.concatenate((y, y))) assert_array_equal(X_transform, transformer.transform(data)) # check that if est doesn't have partial_fit, neither does SelectFromModel transformer = SelectFromModel(estimator=RandomForestClassifier()) assert_false(hasattr(transformer, "partial_fit")) def test_calling_fit_reinitializes(): est = LinearSVC(random_state=0) transformer = SelectFromModel(estimator=est) transformer.fit(data, y) transformer.set_params(estimator__C=100) transformer.fit(data, y) assert_equal(transformer.estimator_.C, 100) def test_prefit(): # Test all possible combinations of the prefit parameter. # Passing a prefit parameter with the selected model # and fitting a unfit model with prefit=False should give same results. 
clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None) model = SelectFromModel(clf) model.fit(data, y) X_transform = model.transform(data) clf.fit(data, y) model = SelectFromModel(clf, prefit=True) assert_array_equal(model.transform(data), X_transform) # Check that the model is rewritten if prefit=False and a fitted model is # passed model = SelectFromModel(clf, prefit=False) model.fit(data, y) assert_array_equal(model.transform(data), X_transform) # Check that prefit=True and calling fit raises a ValueError model = SelectFromModel(clf, prefit=True) assert_raises(ValueError, model.fit, data, y) def test_threshold_string(): est = RandomForestClassifier(n_estimators=50, random_state=0) model = SelectFromModel(est, threshold="0.5*mean") model.fit(data, y) X_transform = model.transform(data) # Calculate the threshold from the estimator directly. est.fit(data, y) threshold = 0.5 * np.mean(est.feature_importances_) mask = est.feature_importances_ > threshold assert_array_equal(X_transform, data[:, mask]) def test_threshold_without_refitting(): # Test that the threshold can be set without refitting the model. clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None) model = SelectFromModel(clf, threshold="0.1 * mean") model.fit(data, y) X_transform = model.transform(data) # Set a higher threshold to filter out more features. model.threshold = "1.0 * mean" assert_greater(X_transform.shape[1], model.transform(data).shape[1])
bsd-3-clause
lukeshingles/artistools
artistools/misc.py
1
43415
#!/usr/bin/env python3 import argparse from functools import lru_cache import gzip import hashlib # import inspect import lzma import math import multiprocessing import os.path import pickle import sys import time import xattr from collections import namedtuple from itertools import chain from functools import wraps # from functools import partial import matplotlib.ticker as ticker import matplotlib.pyplot as plt from pathlib import Path from typing import Iterable # import scipy.signal import numpy as np import pandas as pd from astropy import units as u from astropy import constants as const import artistools # num_processes = 1 # num_processes = multiprocessing.cpu_count() - 1 num_processes = max(1, int(multiprocessing.cpu_count() / 2)) # print(f'Using {num_processes} processes') enable_diskcache = True figwidth = 5 commandlist = { 'artistools-writecodecomparisondata': ('artistools.writecomparisondata', 'main'), 'artistools-modeldeposition': ('artistools.deposition', 'main_analytical'), 'getartisspencerfano': ('artistools.spencerfano', 'main'), 'artistools-spencerfano': ('artistools.spencerfano', 'main'), 'listartistimesteps': ('artistools', 'showtimesteptimes'), 'artistools-timesteptimes': ('artistools', 'showtimesteptimes'), 'artistools-make1dslicefrom3dmodel': ('artistools.makemodel.1dslicefrom3d', 'main'), 'makeartismodel1dslicefromcone': ('artistools.makemodel.1dslicefromconein3dmodel', 'main'), 'makeartismodelbotyanski2017': ('artistools.makemodel.botyanski2017', 'main'), 'makeartismodelfromshen2018': ('artistools.makemodel.shen2018', 'main'), 'makeartismodelfromlapuente': ('artistools.makemodel.lapuente', 'main'), 'makeartismodelscalevelocity': ('artistools.makemodel.scalevelocity', 'main'), 'makeartismodelfullymixed': ('artistools.makemodel.fullymixed', 'main'), 'plotartisdeposition': ('artistools.deposition', 'main'), 'artistools-deposition': ('artistools.deposition', 'main'), 'plotartisestimators': ('artistools.estimators.plotestimators', 'main'), 
'artistools-estimators': ('artistools.estimators', 'main'), 'plotartislightcurve': ('artistools.lightcurve.plotlightcurve', 'main'), 'artistools-lightcurve': ('artistools.lightcurve', 'main'), 'plotartislinefluxes': ('artistools.linefluxes', 'main'), 'artistools-linefluxes': ('artistools.linefluxes', 'main'), 'plotartisnltepops': ('artistools.nltepops.plotnltepops', 'main'), 'artistools-nltepops': ('artistools.nltepops', 'main'), 'plotartismacroatom': ('artistools.macroatom', 'main'), 'artistools-macroatom': ('artistools.macroatom', 'main'), 'plotartisnonthermal': ('artistools.nonthermal', 'main'), 'artistools-nonthermal': ('artistools.nonthermal', 'main'), 'plotartisradfield': ('artistools.radfield', 'main'), 'artistools-radfield': ('artistools.radfield', 'main'), 'plotartisspectrum': ('artistools.spectra.plotspectra', 'main'), 'artistools-spectrum': ('artistools.spectra', 'main'), 'plotartistransitions': ('artistools.transitions', 'main'), 'artistools-transitions': ('artistools.transitions', 'main'), 'plotartisinitialcomposition': ('artistools.initial_composition', 'main'), 'artistools-initialcomposition': ('artistools.initial_composition', 'main'), } console_scripts = [f'{command} = {submodulename}:{funcname}' for command, (submodulename, funcname) in commandlist.items()] console_scripts.append('at = artistools:main') console_scripts.append('artistools = artistools:main') PYDIR = os.path.dirname(os.path.abspath(__file__)) plt.style.use('file://' + PYDIR + '/matplotlibrc') elsymbols = ['n'] + list(pd.read_csv(os.path.join(PYDIR, 'data', 'elements.csv'))['symbol'].values) roman_numerals = ('', 'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX', 'X', 'XI', 'XII', 'XIII', 'XIV', 'XV', 'XVI', 'XVII', 'XVIII', 'XIX', 'XX') def diskcache(ignoreargs=[], ignorekwargs=[], saveonly=False, quiet=False, savegzipped=False, funcdepends=None, funcversion=None): def printopt(*args, **kwargs): if not quiet: print(*args, **kwargs) @wraps(diskcache) def diskcacheinner(func): 
        @wraps(func)
        def wrapper(*args, **kwargs):
            # save cached files in the folder of the first file/folder specified in the arguments
            modelpath = Path()
            if 'modelpath' in kwargs:
                modelpath = kwargs['modelpath']
            else:
                for arg in args:
                    if os.path.isdir(arg):
                        modelpath = arg
                        break

                    if os.path.isfile(arg):
                        modelpath = Path(arg).parent
                        break

            cachefolder = Path(modelpath, '__artistoolscache__.nosync')

            if cachefolder.is_dir():
                # tell Dropbox not to sync the cache folder (macOS xattr)
                xattr.setxattr(cachefolder, "com.dropbox.ignored", b'1')

            # Cache key: hash of the function identity plus the stringified
            # arguments (excluding the positions/names marked as ignored).
            namearghash = hashlib.sha1()
            namearghash.update(func.__module__.encode('utf-8'))
            namearghash.update(func.__qualname__.encode('utf-8'))

            namearghash.update(
                str(tuple(arg for argindex, arg in enumerate(args) if argindex not in ignoreargs)).encode('utf-8'))

            namearghash.update(str({k: v for k, v in kwargs.items() if k not in ignorekwargs}).encode('utf-8'))

            namearghash_strhex = namearghash.hexdigest()
            # Two candidate cache files: plain pickle and gzipped pickle.
            filename_nogz = Path(cachefolder, f'cached-{func.__module__}.{func.__qualname__}-{namearghash_strhex}.tmp')
            filename_gz = Path(cachefolder, f'cached-{func.__module__}.{func.__qualname__}-{namearghash_strhex}.tmp.gz')

            execfunc = True
            saveresult = False
            functime = -1

            if (filename_nogz.exists() or filename_gz.exists()) and not saveonly:
                # found a candidate file, so load it
                filename = filename_nogz if filename_nogz.exists() else filename_gz

                filesize = Path(filename).stat().st_size / 1024 / 1024

                try:
                    printopt(f"diskcache: Loading '(unknown)' ({filesize:.1f} MiB)...")

                    with zopen(filename, 'rb') as f:
                        result, version_filein = pickle.load(f)

                    if version_filein == str_funcversion:
                        execfunc = False
                    elif (not funcversion) and (not version_filein.startswith('funcversion_')):
                        # no version requested and the file predates versioning
                        execfunc = False
                    # elif version_filein == sourcehash_strhex:
                    #     execfunc = False
                    else:
                        printopt(f"diskcache: Overwriting '(unknown)' (function version mismatch)")

                except Exception as ex:
                    # ex = sys.exc_info()[0]
                    # Any load failure (corrupt/unreadable cache) falls through
                    # to re-executing the function.
                    printopt(f"diskcache: Overwriting '(unknown)' (Error: {ex})")
                    pass

            if execfunc:
                timestart = time.time()
                result = func(*args, **kwargs)
                functime = time.time() - timestart

            if functime > 1:
                # slow functions are worth saving to disk
                saveresult = True
            else:
                # check if we need to replace the gzipped or non-gzipped file with the correct one
                # if we so, need to save the new file even though functime is unknown since we read
                # from disk version instead of executing the function
                if savegzipped and filename_nogz.exists():
                    saveresult = True
                elif not savegzipped and filename_gz.exists():
                    saveresult = True

            if saveresult:
                # if the cache folder doesn't exist, create it
                if not cachefolder.is_dir():
                    cachefolder.mkdir(parents=True, exist_ok=True)
                    xattr.setxattr(cachefolder, "com.dropbox.ignored", b'1')

                # remove any stale cache file of either compression variant
                if filename_nogz.exists():
                    filename_nogz.unlink()
                if filename_gz.exists():
                    filename_gz.unlink()

                fopen, filename = (gzip.open, filename_gz) if savegzipped else (open, filename_nogz)
                with fopen(filename, 'wb') as f:
                    pickle.dump((result, str_funcversion), f, protocol=pickle.HIGHEST_PROTOCOL)

                filesize = Path(filename).stat().st_size / 1024 / 1024
                printopt(f"diskcache: Saved '(unknown)' ({filesize:.1f} MiB, functime {functime:.1f}s)")

            return result

        # sourcehash = hashlib.sha1()
        # sourcehash.update(inspect.getsource(func).encode('utf-8'))
        # if funcdepends:
        #     try:
        #         for f in funcdepends:
        #             sourcehash.update(inspect.getsource(f).encode('utf-8'))
        #     except TypeError:
        #         sourcehash.update(inspect.getsource(funcdepends).encode('utf-8'))
        #
        # sourcehash_strhex = sourcehash.hexdigest()
        # NOTE(review): the second alternative is an f-string with no
        # placeholders; a plain 'funcversion_none' literal would be equivalent.
        str_funcversion = f'funcversion_{funcversion}' if funcversion else f'funcversion_none'

        # honour the module-level kill switch for disk caching
        return wrapper if enable_diskcache else func

    return diskcacheinner


class AppendPath(argparse.Action):
    """argparse action that converts its value(s) to pathlib.Path objects."""

    def __call__(self, parser, args, values, option_string=None):
        # if getattr(args, self.dest) is None:
        #     setattr(args, self.dest, [])
        # NOTE(review): str is itself Iterable, so a single string value takes
        # the first branch and is iterated character by character — presumably
        # this action is always used with nargs so values is a list; verify.
        if isinstance(values, Iterable):
            for pathstr in values:
                getattr(args, self.dest).append(Path(pathstr))
        else:
            setattr(args, self.dest, Path(values))


class ExponentLabelFormatter(ticker.ScalarFormatter):
    """Formatter to move the 'x10^x' offset text into the axis label."""

    def __init__(self, labeltemplate, useMathText=True, decimalplaces=None):
        self.set_labeltemplate(labeltemplate)
        self.decimalplaces = decimalplaces
        super().__init__(useOffset=True, useMathText=useMathText)
        # ticker.ScalarFormatter.__init__(self, useOffset=useOffset, useMathText=useMathText)

    def _set_formatted_label_text(self):
        # substitute the order-of-magnitude offset text into the label template
        # or use self.orderOfMagnitude
        stroffset = self.get_offset().replace(r'$\times', '$') + ' '
        strnewlabel = self.labeltemplate.format(stroffset)
        self.axis.set_label_text(strnewlabel)
        assert(self.offset == 0)
        self.axis.offsetText.set_visible(False)

    def set_labeltemplate(self, labeltemplate):
        # the template must contain a '{}' slot for the offset text
        assert '{' in labeltemplate
        self.labeltemplate = labeltemplate

    def set_locs(self, locs):
        if self.decimalplaces is not None:
            # fix the number of decimal places in the tick label format
            self.format = '%1.' + str(self.decimalplaces) + 'f'
            if self._usetex:
                self.format = '$%s$' % self.format
            elif self._useMathText:
                self.format = '$%s$' % ('\\mathdefault{%s}' % self.format)
        super().set_locs(locs)

        if self.decimalplaces is not None:
            # rounding the tick labels will make the locations incorrect unless we round these too
            newlocs = [float(('%1.' + str(self.decimalplaces) + 'f') % (x / (10 ** self.orderOfMagnitude)))
                       * (10 ** self.orderOfMagnitude) for x in self.locs]
            super().set_locs(newlocs)

        self._set_formatted_label_text()

    def set_axis(self, axis):
        super().set_axis(axis)
        self._set_formatted_label_text()


def make_namedtuple(typename, **fields):
    """Make a namedtuple from a dictionary of attributes and values.
    Example: make_namedtuple('mytuple', x=2, y=3)"""
    return namedtuple(typename, fields)(*fields.values())


def showtimesteptimes(modelpath=None, numberofcolumns=5, args=None):
    """Print a table showing the timesteps and their corresponding times."""
    if modelpath is None:
        modelpath = Path()

    print('Timesteps and midpoint times in days:\n')

    times = get_timestep_times_float(modelpath, loc='mid')
    # lay the timestep list out column-major across numberofcolumns columns
    indexendofcolumnone = math.ceil((len(times) - 1) / numberofcolumns)
    for rownum in range(0, indexendofcolumnone):
        strline = ""
        for colnum in range(numberofcolumns):
            if colnum > 0:
                strline += '\t'
            newindex = rownum + colnum * indexendofcolumnone
            if newindex + 1 < len(times):
                strline += f'{newindex:4d}: {float(times[newindex + 1]):.3f}d'
        print(strline)


@lru_cache(maxsize=8)
def get_composition_data(filename):
    """Return a pandas DataFrame containing details of included elements and ions."""
    if os.path.isdir(Path(filename)):
        # a model folder was given; read its compositiondata.txt
        filename = os.path.join(filename, 'compositiondata.txt')

    columns = ('Z,nions,lowermost_ionstage,uppermost_ionstage,nlevelsmax_readin,'
               'abundance,mass,startindex').split(',')

    compdf = pd.DataFrame()

    with open(filename, 'r') as fcompdata:
        nelements = int(fcompdata.readline())
        fcompdata.readline()  # T_preset
        fcompdata.readline()  # homogeneous_abundances
        startindex = 0
        for _ in range(nelements):
            line = fcompdata.readline()
            linesplit = line.split()
            # first five columns are ints, the rest floats, plus running startindex
            row_list = list(map(int, linesplit[:5])) + list(map(float, linesplit[5:])) + [startindex]

            rowdf = pd.DataFrame([row_list], columns=columns)
            # NOTE(review): DataFrame.append is deprecated/removed in newer
            # pandas — pd.concat would be needed to modernise this.
            compdf = compdf.append(rowdf, ignore_index=True)

            startindex += int(rowdf['nions'])

    return compdf


def get_composition_data_from_outputfile(modelpath):
    """Read ion list from output file"""
    atomic_composition = {}

    output = open(modelpath / "output_0-0.txt", 'r').read().splitlines()
    ioncount = 0
    for row in output:
        if row.split()[0] == '[input.c]':
            split_row = row.split()
            if split_row[1] == 'element':
                Z = int(split_row[4])
                ioncount = 0
            elif split_row[1] == 'ion':
                ioncount += 1
                atomic_composition[Z] = ioncount

    composition_df = pd.DataFrame(
        [(Z, atomic_composition[Z]) for Z in atomic_composition.keys()], columns=['Z', 'nions'])
    # ion stages are not recorded per ion in the log, so assume 1..nions
    composition_df['lowermost_ionstage'] = [1] * composition_df.shape[0]
    composition_df['uppermost_ionstage'] = composition_df['nions']

    return composition_df


def gather_res_data(res_df, index_of_repeated_value=1):
    """res files repeat output for each angle. index_of_repeated_value is the value to look for repeating eg. time of ts 0.
    In spec_res files it's 1, but in lc_res file it's 0"""
    # rows where the marker column repeats its first value start a new angle block
    index_to_split = res_df.index[res_df.iloc[:, index_of_repeated_value]
                                  == res_df.iloc[0, index_of_repeated_value]]
    res_data = []
    for i, index_value in enumerate(index_to_split):
        if index_value != index_to_split[-1]:
            chunk = res_df.iloc[index_to_split[i]:index_to_split[i + 1], :]
        else:
            chunk = res_df.iloc[index_to_split[i]:, :]
        res_data.append(chunk)
    return res_data


def get_vpkt_config(modelpath):
    """Read the virtual-packet configuration from the model's vpkt.txt file."""
    filename = Path(modelpath, 'vpkt.txt')
    vpkt_config = {}
    with open(filename, 'r') as vpkt_txt:
        vpkt_config['nobsdirections'] = int(vpkt_txt.readline())
        vpkt_config['cos_theta'] = [float(x) for x in vpkt_txt.readline().split()]
        vpkt_config['phi'] = [float(x) for x in vpkt_txt.readline().split()]
        nspecflag = int(vpkt_txt.readline())

        if nspecflag == 1:
            vpkt_config['nspectraperobs'] = int(vpkt_txt.readline())
            # skip the per-spectrum parameter lines; they are not parsed here
            for i in range(vpkt_config['nspectraperobs']):
                vpkt_txt.readline()
        else:
            vpkt_config['nspectraperobs'] = 1

        vpkt_config['time_limits_enabled'], vpkt_config['initial_time'], vpkt_config['final_time'] = [
            int(x) for x in vpkt_txt.readline().split()]

    return vpkt_config


@lru_cache(maxsize=8)
def get_grid_mapping(modelpath):
    """Return dict with the associated propagation cells for each model grid cell and
    a dict with the associated model grid cell of each propagration cell."""
    if os.path.isdir(modelpath):
        filename = firstexisting(['grid.out.xz', 'grid.out.gz', 'grid.out'], path=modelpath)
    else:
        # a direct path to a grid file was given
        filename = modelpath

    assoc_cells = {}
    mgi_of_propcells = {}
    with open(filename, 'r') as fgrid:
        for line in fgrid:
            row = line.split()
            propcellid, mgi = int(row[0]), int(row[1])
            if mgi not in assoc_cells:
                assoc_cells[mgi] = []
            assoc_cells[mgi].append(propcellid)
            mgi_of_propcells[propcellid] = mgi

    return assoc_cells, mgi_of_propcells


def get_wid_init(modelpath):
    """Return the propagation-grid cell width (cm) at time tmin."""
    # cell width in cm at time tmin
    tmin = get_timestep_times_float(modelpath, loc='start')[0] * u.day.to('s')
    _, _, vmax = artistools.inputmodel.get_modeldata(modelpath)

    rmax = vmax * tmin

    coordmax0 = rmax
    ncoordgrid0 = 50  # NOTE(review): hard-coded grid size — confirm it matches the ARTIS default

    wid_init = 2 * coordmax0 / ncoordgrid0

    return wid_init


@lru_cache(maxsize=16)
def get_nu_grid(modelpath):
    """Get an array of frequencies at which the ARTIS spectra are binned by exspec."""
    specfilename = firstexisting(['spec.out.gz', 'spec.out', 'specpol.out'], path=modelpath)
    specdata = pd.read_csv(specfilename, delim_whitespace=True)

    return specdata.loc[:, '0'].values


def get_deposition(modelpath):
    """Return a DataFrame of the deposition rates from deposition.out, indexed by timestep."""
    times = get_timestep_times_float(modelpath)
    depdata = pd.read_csv(Path(modelpath, 'deposition.out'), delim_whitespace=True, header=None, names=[
        'time', 'gammadep_over_Lsun', 'posdep_over_Lsun', 'total_dep_over_Lsun'])
    depdata.index.name = 'timestep'

    # no timesteps are given in deposition.out, so ensure that
    # the times in days match up with the times of our assumed timesteps
    for timestep, row in depdata.iterrows():
        assert(abs(times[timestep] / row['time'] - 1) < 0.01)

    return depdata


@lru_cache(maxsize=16)
def get_timestep_times(modelpath):
    """Return a list of the mid time in days of each timestep from a spec.out file."""
    try:
        specfilename = firstexisting(['spec.out.gz', 'spec.out', 'specpol.out'], path=modelpath)
        # the spec.out column headers (after the first) are the timestep times
        time_columns = pd.read_csv(specfilename, delim_whitespace=True, nrows=0)

        return time_columns.columns[1:]
    except FileNotFoundError:
        # no spectrum file: fall back to computed timestep times
        return [f'{tdays:.3f}' for tdays in get_timestep_times_float(modelpath, loc='mid')]


@lru_cache(maxsize=16)
def get_timestep_times_float(modelpath, loc='mid'):
    """Return a list of the time in days of each timestep."""
    # custom timestep file
    tsfilepath = Path(modelpath, 'timesteps.out')
    if tsfilepath.exists():
        dftimesteps = pd.read_csv(tsfilepath, delim_whitespace=True, escapechar='#', index_col='timestep')
        if loc == 'mid':
            return dftimesteps.tmid_days.values
        elif loc == 'start':
            return dftimesteps.tstart_days.values
        elif loc == 'end':
            return dftimesteps.tstart_days.values + dftimesteps.twidth_days.values
        elif loc == 'delta':
            return dftimesteps.twidth_days.values
        else:
            raise ValueError("loc must be one of 'mid', 'start', 'end', or 'delta'")

    # older versions of Artis always used logarithmic timesteps and didn't produce a timesteps.out file
    inputparams = get_inputparams(modelpath)
    tmin = inputparams['tmin']
    dlogt = (math.log(inputparams['tmax']) - math.log(tmin)) / inputparams['ntstep']
    timesteps = range(inputparams['ntstep'])
    if loc == 'mid':
        tmids = np.array([tmin * math.exp((ts + 0.5) * dlogt) for ts in timesteps])
        return tmids
    elif loc == 'start':
        tstarts = np.array([tmin * math.exp(ts * dlogt) for ts in timesteps])
        return tstarts
    elif loc == 'end':
        tends = np.array([tmin * math.exp((ts + 1) * dlogt) for ts in timesteps])
        return tends
    elif loc == 'delta':
        tdeltas = np.array([tmin * (math.exp((ts + 1) * dlogt) - math.exp(ts * dlogt)) for ts in timesteps])
        return tdeltas
    else:
        raise ValueError("loc must be one of 'mid', 'start', 'end', or 'delta'")


def get_timestep_of_timedays(modelpath, timedays):
    """Return the timestep containing the given time in days."""
    try:
        # could be a string like '330d'
        timedays_float = float(timedays.rstrip('d'))
    except AttributeError:
        # already a number, not a string
        timedays_float = float(timedays)

    arr_tstart = get_timestep_times_float(modelpath, loc='start')
    arr_tend = get_timestep_times_float(modelpath, loc='end')
    # to avoid roundoff errors, use the next timestep's tstart at each timestep's tend (t_width is not exact)
    arr_tend[:-1] = arr_tstart[1:]

    for ts, (tstart, tend) in enumerate(zip(arr_tstart, arr_tend)):
        if timedays_float >= tstart and timedays_float < tend:
            return ts

    raise ValueError(f"Could not find timestep bracketing time {timedays_float}")
    # NOTE(review): the following two statements are unreachable (after raise)
    assert(False)
    return


def get_time_range(modelpath, timestep_range_str, timemin, timemax, timedays_range_str):
    """Handle a time range specified in either days or timesteps.

    Returns (timestepmin, timestepmax, time_days_lower, time_days_upper), or
    (-1, -1, timemin, timemax) when the requested range lies entirely outside
    the model's timesteps.
    """
    # assertions make sure time is specified either by timesteps or times in days, but not both!
    tstarts = get_timestep_times_float(modelpath, loc='start')
    tmids = get_timestep_times_float(modelpath, loc='mid')
    tends = get_timestep_times_float(modelpath, loc='end')

    timedays_is_specified = (timemin is not None and timemax is not None) or timedays_range_str is not None

    if timemin and timemin > tends[-1]:
        print(f"{get_model_name(modelpath)}: WARNING timemin {timemin} is after the last timestep at {tends[-1]:.1f}")
        return -1, -1, timemin, timemax
    elif timemax and timemax < tstarts[0]:
        print(f"{get_model_name(modelpath)}: WARNING timemax {timemax} is before the first timestep at {tstarts[0]:.1f}")
        return -1, -1, timemin, timemax

    if timestep_range_str is not None:
        if timedays_is_specified:
            raise ValueError("Cannot specify both time in days and timestep numbers.")

        # a timestep range can be 'a-b' or a single timestep number
        if isinstance(timestep_range_str, str) and '-' in timestep_range_str:
            timestepmin, timestepmax = [int(nts) for nts in timestep_range_str.split('-')]
        else:
            timestepmin = int(timestep_range_str)
            timestepmax = timestepmin
    elif timedays_is_specified:
        timestepmin = None
        timestepmax = None
        if timedays_range_str is not None:
            if isinstance(timedays_range_str, str) and '-' in timedays_range_str:
                timemin, timemax = [float(timedays) for timedays in timedays_range_str.split('-')]
            else:
                # a single time in days selects the single bracketing timestep
                timeavg = float(timedays_range_str)
                timestepmin = get_timestep_of_timedays(modelpath, timeavg)
                timestepmax = timestepmin
                timemin = tstarts[timestepmin]
                timemax = tends[timestepmax]
                # timedelta = 10
                # timemin, timemax = timeavg - timedelta, timeavg + timedelta

        # first timestep whose midpoint is at or after timemin
        for timestep, tmid in enumerate(tmids):
            if tmid >= float(timemin):
                timestepmin = timestep
                break

        if timestepmin is None:
            print(f"Time min {timemin} is greater than all timesteps ({tstarts[0]} to {tends[-1]})")
            raise ValueError

        if not timemax:
            timemax = tends[-1]
        # last timestep whose midpoint is at or before timemax
        for timestep, tmid in enumerate(tmids):
            if tmid <= float(timemax):
                timestepmax = timestep

        if timestepmax < timestepmin:
            raise ValueError("Specified time range does not include any full timesteps.")
    else:
        raise ValueError("Either time or timesteps must be specified.")

    # clamp to the last available timestep
    timesteplast = len(tmids) - 1
    if timestepmax > timesteplast:
        print(f"Warning timestepmax {timestepmax} > timesteplast {timesteplast}")
        timestepmax = timesteplast
    time_days_lower = tstarts[timestepmin]
    time_days_upper = tends[timestepmax]

    return timestepmin, timestepmax, time_days_lower, time_days_upper


def get_timestep_time(modelpath, timestep):
    """Return the time in days of a timestep number using a spec.out file."""
    timearray = get_timestep_times_float(modelpath, loc='mid')
    if timearray is not None:
        return timearray[timestep]

    return -1


@lru_cache(maxsize=8)
def get_model_name(path):
    """Get the name of an ARTIS model from the path to any file inside it.

    Name will be either from a special plotlabel.txt file if it exists or the enclosing directory name
    """
    abspath = os.path.abspath(path)

    modelpath = abspath if os.path.isdir(abspath) else os.path.dirname(abspath)

    try:
        plotlabelfile = os.path.join(modelpath, 'plotlabel.txt')
        return open(plotlabelfile, mode='r').readline().strip()
    except FileNotFoundError:
        return os.path.basename(modelpath)


def get_atomic_number(elsymbol):
    """Return the atomic number for an element symbol, or -1 if not found."""
    assert elsymbol is not None
    if elsymbol.title() in elsymbols:
        return elsymbols.index(elsymbol.title())
    return -1


def decode_roman_numeral(strin):
    """Return the integer value of a Roman numeral string (I to XX), or -1 if not found."""
    if strin.upper() in roman_numerals:
        return roman_numerals.index(strin.upper())
    return -1


@lru_cache(maxsize=16)
def get_ionstring(atomic_number, ionstage, spectral=True, nospace=False):
    """Return a string such as 'Fe II' (spectral) or 'Fe+' (ion notation) for an ion."""
    if ionstage == 'ALL' or ionstage is None:
        return f'{elsymbols[atomic_number]}'
    elif spectral:
        return f"{elsymbols[atomic_number]}{' ' if not nospace else ''}{roman_numerals[ionstage]}"
    else:
        # ion notion e.g. Co+, Fe2+
        if ionstage > 2:
            strcharge = r'$^{' + str(ionstage - 1) + r'{+}}$'
        elif ionstage == 2:
            strcharge = r'$^{+}$'
        else:
            strcharge = ''
        return f'{elsymbols[atomic_number]}{strcharge}'


# based on code from https://gist.github.com/kgaughan/2491663/b35e9a117b02a3567c8107940ac9b2023ba34ced
# NOTE(review): dictvars uses a mutable default argument ({}); it is only
# read here, so this is benign but worth tidying.
def parse_range(rng, dictvars={}):
    """Parse a string with an integer range and return a list of numbers, replacing special variables in dictvars."""
    parts = rng.split('-')

    if len(parts) not in [1, 2]:
        raise ValueError("Bad range: '%s'" % (rng,))

    parts = [int(i) if i not in dictvars else dictvars[i] for i in parts]
    start = parts[0]
    end = start if len(parts) == 1 else parts[1]

    if start > end:
        end, start = start, end

    return range(start, end + 1)


def parse_range_list(rngs, dictvars={}):
    """Parse a string with comma-separated ranges or a list of range strings.

    Return a sorted list of integers in any of the ranges.
    """
    if isinstance(rngs, list):
        rngs = ','.join(rngs)
    elif not hasattr(rngs, 'split'):
        # not a string or list of strings: pass through as a single value
        return [rngs]

    return sorted(set(chain.from_iterable([parse_range(rng, dictvars) for rng in rngs.split(',')])))


def makelist(x):
    """If x is not a list (or is a string), make a list containing x."""
    if x is None:
        return []
    elif isinstance(x, (str, Path)):
        return [x, ]
    else:
        return x


def trim_or_pad(requiredlength, *listoflistin):
    """Make lists equal in length to requiedlength either by padding with None or truncating"""
    for listin in listoflistin:
        listin = makelist(listin)

        if len(listin) < requiredlength:
            listout = listin.copy()
            listout.extend([None for _ in range(requiredlength - len(listin))])
        elif len(listin) > requiredlength:
            listout = listin[:requiredlength]
        else:
            listout = listin

        assert(len(listout) == requiredlength)
        yield listout


def flatten_list(listin):
    """Flatten one level of nesting: sublists are expanded, other elements kept."""
    listout = []
    for elem in listin:
        if isinstance(elem, list):
            listout.extend(elem)
        else:
            listout.append(elem)

    return listout


def zopen(filename, mode):
    """Open filename.xz, filename.gz or filename."""
    # prefer an xz-compressed variant, then gz, then the plain file
    filenamexz = str(filename) if str(filename).endswith(".xz") else str(filename) + '.xz'
    filenamegz = str(filename) if str(filename).endswith(".gz") else str(filename) + '.gz'
    if os.path.exists(filenamexz):
        return lzma.open(filenamexz, mode)
    elif os.path.exists(filenamegz):
        return gzip.open(filenamegz, mode)
    else:
        return open(filename, mode)


def firstexisting(filelist, path=Path('.')):
    """Return the first existing file in file list."""
    fullpaths = [Path(path) / filename for filename in filelist]
    for fullpath in fullpaths:
        if fullpath.exists():
            return fullpath

    raise FileNotFoundError(f'None of these files exist: {", ".join([str(x) for x in fullpaths])}')


def readnoncommentline(file):
    """Read a line from the text file, skipping blank and comment lines that begin with #"""
    line = ''

    while not line.strip() or line.strip().lstrip().startswith('#'):
        line = file.readline()

    return line


@lru_cache(maxsize=24)
def get_file_metadata(filepath):
    """Return the metadata dict for a reference file, or {} if none is found."""
    import yaml
    filepath = Path(filepath)

    # check if the reference file (e.g. spectrum.txt) has an metadata file (spectrum.txt.meta.yml)
    individualmetafile = filepath.with_suffix(filepath.suffix + '.meta.yml')
    if individualmetafile.exists():
        with individualmetafile.open('r') as yamlfile:
            metadata = yaml.load(yamlfile, Loader=yaml.FullLoader)
        return metadata

    # check if the metadata is in the big combined metadata file (todo: eliminate this file)
    combinedmetafile = Path(filepath.parent.resolve(), 'metadata.yml')
    if combinedmetafile.exists():
        with combinedmetafile.open('r') as yamlfile:
            combined_metadata = yaml.load(yamlfile, Loader=yaml.FullLoader)
        metadata = combined_metadata.get(str(filepath), {})
        return metadata

    return {}


def get_filterfunc(args, mode='interp'):
    """Using command line arguments to determine the appropriate filter function."""
    if hasattr(args, "filtermovingavg") and args.filtermovingavg > 0:
        # moving-average filter of window size args.filtermovingavg
        def filterfunc(ylist):
            n = args.filtermovingavg
            arr_padded = np.pad(ylist, (n // 2, n - 1 - n // 2), mode='edge')
            return np.convolve(arr_padded, np.ones((n,)) / n, mode='valid')
    elif hasattr(args, "filtersavgol") and args.filtersavgol:
        import scipy.signal
        window_length, poly_order = [int(x) for x in args.filtersavgol]

        def filterfunc(ylist):
            return scipy.signal.savgol_filter(ylist, window_length, poly_order, mode=mode)
        print("Applying Savitzky–Golay filter")
    else:
        filterfunc = None

    return filterfunc


def join_pdf_files(pdf_list, modelpath_list):
    """Merge the given PDF files into one and delete the originals."""
    from PyPDF2 import PdfFileMerger

    merger = PdfFileMerger()

    for pdf, modelpath in zip(pdf_list, modelpath_list):
        fullpath = firstexisting([pdf], path=modelpath)
        merger.append(open(fullpath, 'rb'))
        os.remove(fullpath)

    resultfilename = f'{pdf_list[0].split(".")[0]}-{pdf_list[-1].split(".")[0]}'
    with open(f'{resultfilename}.pdf', 'wb') as resultfile:
        merger.write(resultfile)

    print(f'Files merged and saved to {resultfilename}.pdf')


@lru_cache(maxsize=2)
def get_bflist(modelpath, returntype='dict'):
    compositiondata = get_composition_data(modelpath)
    bflist = {}
    with zopen(Path(modelpath, 'bflist.dat'), 'rt') as filein:
        bflistcount = int(filein.readline())

        for k in range(bflistcount):
            rowints = [int(x) for x in filein.readline().split()]
            i, elementindex, ionindex, level = rowints[:4]
            # a fifth column (upper ion level) is present in newer files only
            if len(rowints) > 4:
                upperionlevel = rowints[4]
            else:
                upperionlevel = -1
            atomic_number = compositiondata.Z[elementindex]
            ion_stage = ionindex + compositiondata.lowermost_ionstage[elementindex]
            bflist[i] = (atomic_number, ion_stage, level, upperionlevel)

    return bflist


@lru_cache(maxsize=16)
def get_linelist(modelpath, returntype='dict'):
    """Load linestat.out containing transitions wavelength, element, ion, upper and lower levels."""
    with zopen(Path(modelpath, 'linestat.out'), 'rt') as linestatfile:
        # wavelengths are in cm in the file; convert to Angstroms
        lambda_angstroms = [float(wl) * 1e+8 for wl in linestatfile.readline().split()]
        nlines = len(lambda_angstroms)

        atomic_numbers = [int(z) for z in linestatfile.readline().split()]
        assert len(atomic_numbers) == nlines
        ion_stages = [int(ion_stage) for ion_stage in linestatfile.readline().split()]
        assert len(ion_stages) == nlines

        # the file adds one to the levelindex, i.e. lowest level is 1
        upper_levels = [int(levelplusone) - 1 for levelplusone in linestatfile.readline().split()]
        assert len(upper_levels) == nlines
        lower_levels = [int(levelplusone) - 1 for levelplusone in linestatfile.readline().split()]
        assert len(lower_levels) == nlines

    if returntype == 'dict':
        linetuple = namedtuple('line', 'lambda_angstroms atomic_number ionstage upperlevelindex lowerlevelindex')
        linelistdict = {
            index: linetuple(lambda_a, Z, ionstage, upper, lower) for index, lambda_a, Z, ionstage, upper, lower
            in zip(range(nlines), lambda_angstroms, atomic_numbers, ion_stages, upper_levels, lower_levels)}
        return linelistdict
    elif returntype == 'dataframe':
        # considering our standard lineline is about 1.5 million lines,
        # using a dataframe make the lookup process very slow
        dflinelist = pd.DataFrame({
            'lambda_angstroms': lambda_angstroms,
            'atomic_number': atomic_numbers,
            'ionstage': ion_stages,
            'upperlevelindex': upper_levels,
            'lowerlevelindex': lower_levels,
        })
        dflinelist.index.name = 'linelistindex'

        return dflinelist


@lru_cache(maxsize=8)
def get_npts_model(modelpath):
    """Return the number of cell in the model.txt."""
    with Path(modelpath, 'model.txt').open('r') as modelfile:
        npts_model = int(modelfile.readline())
    return npts_model


@lru_cache(maxsize=8)
def get_nprocs(modelpath):
    """Return the number of MPI processes specified in input.txt."""
    # NOTE(review): relies on the value being at fixed line 22 of input.txt —
    # verify against the ARTIS input file layout.
    return int(Path(modelpath, 'input.txt').read_text().split('\n')[21].split('#')[0])


@lru_cache(maxsize=8)
def get_inputparams(modelpath):
    """Return parameters specified in input.txt."""
    params = {}
    with Path(modelpath, 'input.txt').open('r') as inputfile:
        # random number seed
        params['pre_zseed'] = int(readnoncommentline(inputfile).split('#')[0])

        # number of time steps
        params['ntstep'] = int(readnoncommentline(inputfile).split('#')[0])

        # number of start and end time step
        params['itstep'], params['ftstep'] = [int(x) for x in readnoncommentline(inputfile).split('#')[0].split()]

        params['tmin'], params['tmax'] = [float(x) for x in
                                          readnoncommentline(inputfile).split('#')[0].split()]

        # synthesis frequency limits are given in MeV; convert to Hz via E = h*nu
        params['nusyn_min'], params['nusyn_max'] = [
            (float(x) * u.MeV / const.h).to('Hz') for x in readnoncommentline(inputfile).split('#')[0].split()]

        # number of times for synthesis
        params['nsyn_time'] = int(readnoncommentline(inputfile).split('#')[0])

        # start and end times for synthesis
        params['nsyn_time_start'], params['nsyn_time_end'] = [
            float(x) for x in readnoncommentline(inputfile).split('#')[0].split()]

        params['n_dimensions'] = int(readnoncommentline(inputfile).split('#')[0])

        # there are more parameters in the file that are not read yet...

    return params


@lru_cache(maxsize=16)
def get_runfolder_timesteps(folderpath):
    """Get the set of timesteps covered by the output files in an ARTIS run folder."""
    folder_timesteps = set()
    try:
        with zopen(Path(folderpath, 'estimators_0000.out'), 'rt') as estfile:
            restart_timestep = -1
            for line in estfile:
                if line.startswith('timestep '):
                    timestep = int(line.split()[1])

                    if (restart_timestep < 0 and timestep != 0 and 0 not in folder_timesteps):
                        # the first timestep of a restarted run is duplicate and should be ignored
                        restart_timestep = timestep

                    if timestep != restart_timestep:
                        folder_timesteps.add(timestep)

    except FileNotFoundError:
        # no estimator file in this folder: return an empty tuple
        pass

    return tuple(folder_timesteps)


def get_runfolders(modelpath, timestep=None, timesteps=None):
    """Get a list of folders containing ARTIS output files from a modelpath, optionally with a timestep restriction.

    The folder list may include non-ARTIS folders if a timestep is not specified."""
    # candidate folders: every subdirectory plus the model folder itself
    folderlist_all = tuple(sorted([child for child in Path(modelpath).iterdir() if child.is_dir()]) + [Path(modelpath)])
    folder_list_matching = []
    if (timestep is not None and timestep > -1) or (timesteps is not None and len(timesteps) > 0):
        for folderpath in folderlist_all:
            folder_timesteps = get_runfolder_timesteps(folderpath)
            if timesteps is None and timestep is not None and timestep in folder_timesteps:
                # a single requested timestep can only be in one folder
                return (folderpath,)

            elif timesteps is not None and any([ts in folder_timesteps for ts in timesteps]):
                folder_list_matching.append(folderpath)

        return tuple(folder_list_matching)

    # no timestep restriction: keep any folder that has estimator output
    return [folderpath for folderpath in folderlist_all if get_runfolder_timesteps(folderpath)]


def get_mpiranklist(modelpath, modelgridindex=None):
    """Return the MPI ranks responsible for the given model grid cell(s), or all ranks if unspecified."""
    if modelgridindex is None or modelgridindex == []:
        return range(min(get_nprocs(modelpath), get_npts_model(modelpath)))
    else:
        try:
            mpiranklist = set()
            for mgi in modelgridindex:
                if mgi < 0:
                    # a negative index means all cells, hence all ranks
                    return range(min(get_nprocs(modelpath), get_npts_model(modelpath)))
                else:
                    mpiranklist.add(get_mpirankofcell(mgi, modelpath=modelpath))

            return sorted(list(mpiranklist))
        except TypeError:
            # in case modelgridindex is a single number rather than an iterable
            if modelgridindex < 0:
                return range(min(get_nprocs(modelpath), get_npts_model(modelpath)))
            else:
                return [get_mpirankofcell(modelgridindex, modelpath=modelpath)]


def get_cellsofmpirank(mpirank, modelpath):
    """Return an iterable of the cell numbers processed by a given MPI rank."""
    npts_model = get_npts_model(modelpath)
    nprocs = get_nprocs(modelpath)

    assert mpirank < nprocs

    # cells are distributed as evenly as possible; the first n_leftover ranks
    # each take one extra cell
    nblock = npts_model // nprocs
    n_leftover = npts_model % nprocs

    if mpirank < n_leftover:
        ndo = nblock + 1
        nstart = mpirank * (nblock + 1)
    else:
        ndo = nblock
        nstart = n_leftover + mpirank * nblock

    return list(range(nstart, nstart + ndo))


def get_mpirankofcell(modelgridindex, modelpath):
    """Return the rank number of the MPI process responsible for handling a specified
    cell's updating and output."""
    npts_model = get_npts_model(modelpath)
    assert modelgridindex < npts_model

    nprocs = get_nprocs(modelpath)

    if nprocs > npts_model:
        # more ranks than cells: one cell per rank
        mpirank = modelgridindex
    else:
        # inverse of the distribution used in get_cellsofmpirank()
        nblock = npts_model // nprocs
        n_leftover = npts_model % nprocs

        if modelgridindex <= n_leftover * (nblock + 1):
            mpirank = modelgridindex // (nblock + 1)
        else:
            mpirank = n_leftover + (modelgridindex - n_leftover * (nblock + 1)) // nblock

    # cross-check against the forward mapping
    assert modelgridindex in get_cellsofmpirank(mpirank, modelpath)

    return mpirank


def get_artis_constants(modelpath=None, srcpath=None, printdefs=False):
    """Return a dict of preprocessor #define constants parsed from the ARTIS source headers."""
    # get artis options specified as preprocessor macro definitions in artisoptions.h and other header files
    if not srcpath:
        srcpath = Path(modelpath, 'artis')
        if not modelpath:
            raise ValueError('Either modelpath or srcpath must be specified in call to get_defines()')
    cfiles = [
        # Path(srcpath, 'constants.h'),
        # Path(srcpath, 'decay.h'),
        Path(srcpath, 'artisoptions.h'),
        # Path(srcpath, 'sn3d.h'),
    ]
    definedict = {
        'true': True,
        'false': False,
    }
    for filepath in cfiles:
        definedict.update(parse_cdefines(srcfilepath=filepath))
    # evaluate booleans, numbers, and references to other constants
    # NOTE(review): the eval call below is commented out, so this loop is
    # currently a no-op (the try body is just 'pass').
    for k, strvalue in definedict.copy().items():
        try:
            # definedict[k] = eval(strvalue, definedict)
            # print(f"{k} = '{strvalue}' = {definedict[k]}")
            pass
        except SyntaxError:
            pass
            # print(f"{k} = '{strvalue}' = (COULD NOT EVALUATE)")
        except TypeError:
            pass
            # print(f"{k} = '{strvalue}' = (COULD NOT EVALUATE)")

    # if printdefs:
    #     for k in definedict:
    #         print(f"{k} = '{definedict[k]}'")

    return definedict


def parse_cdefines(srcfilepath=None, printdefs=False):
    """Parse C '#define NAME value' lines from a source file into a dict of strings."""
    # adapted from h2py.py in Python source
    import re

    # p_define = re.compile('^[\t ]*#[\t ]*define[\t ]+([a-zA-Z0-9_]+)[\t ]+')
    p_define = re.compile(r'^[\t ]*#[\t ]*define[\t ]+([a-zA-Z0-9_]+)+')
    p_const = re.compile(r'(?:\w+\s+)([a-zA-Z_=][a-zA-Z0-9_=]*)*(?<!=)=(?!=)')

    p_comment = re.compile(r'/\*([^*]+|\*+[^/])*(\*+/)?')
    p_cpp_comment = re.compile('//.*')

    ignores = [p_comment, p_cpp_comment]

    p_char = re.compile(r"'(\\.[^\\]*|[^\\])'")
    p_hex = re.compile(r"0x([0-9a-fA-F]+)L?")

    def pytify(body):
        # translate a C macro body into a Python-evaluable expression string
        # replace ignored patterns by spaces
        for p in ignores:
            body = p.sub(' ', body)
        # replace char literals by ord(...)
        body = p_char.sub('ord(\\0)', body)
        # Compute negative hexadecimal constants
        start = 0
        UMAX = 2*(sys.maxsize+1)
        while 1:
            m = p_hex.search(body, start)
            if not m:
                break
            s, e = m.span()
            val = int(body[slice(*m.span(1))], 16)
            if val > sys.maxsize:
                val -= UMAX
                body = body[:s] + "(" + str(val) + ")" + body[e:]
            start = s + 1
        return body

    definedict = {}
    lineno = 0
    with open(srcfilepath, 'r') as optfile:
        while 1:
            line = optfile.readline()
            if not line:
                break
            lineno = lineno + 1
            match = p_define.match(line)
            if match:
                # gobble up continuation lines
                while line[-2:] == '\\\n':
                    nextline = optfile.readline()
                    if not nextline:
                        break
                    lineno = lineno + 1
                    line = line + nextline
                name = match.group(1)
                body = line[match.end():]
                body = pytify(body)

                definedict[name] = body.strip()

            match = p_const.match(line)
            if match:
                # const-style declarations are detected but not yet parsed
                print('CONST', tuple(p_const.findall(line)))
                # if '=' in line and ';' in line:
                #     tokens = line.replace('==', 'IGNORE').replace('=', ' = ').split()
                #     varname = tokens.indexof('=')[-1]

    if printdefs:
        for k in definedict:
            print(f"{k} = '{definedict[k]}'")

    return definedict
mit
Nikea/VisTrails
vistrails/tests/runtestsuite.py
2
20120
#!/usr/bin/env python # pragma: no testimport ############################################################################### ## ## Copyright (C) 2011-2014, NYU-Poly. ## Copyright (C) 2006-2011, University of Utah. ## All rights reserved. ## Contact: contact@vistrails.org ## ## This file is part of VisTrails. ## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are met: ## ## - Redistributions of source code must retain the above copyright notice, ## this list of conditions and the following disclaimer. ## - Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in the ## documentation and/or other materials provided with the distribution. ## - Neither the name of the University of Utah nor the names of its ## contributors may be used to endorse or promote products derived from ## this software without specific prior written permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR ## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; ## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, ## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR ## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." 
## ############################################################################### """Runs all tests available in VisTrails modules by importing all of them, stealing the classes that look like unit tests, and running all of them. runtestsuite.py also reports all VisTrails modules that don't export any unit tests, as a crude measure of code coverage. """ import atexit from distutils.version import LooseVersion #import doctest import locale import os import sys import traceback from optparse import OptionParser import platform import re import shutil import tempfile if 'vistrails' not in sys.modules: # Makes sure we can import modules as if we were running VisTrails # from the root directory _this_dir = os.path.dirname(os.path.realpath(__file__)) _root_directory = os.path.realpath(os.path.join(_this_dir, '..')) sys.path.insert(0, os.path.realpath(os.path.join(_root_directory, '..'))) # Use a different temporary directory test_temp_dir = tempfile.mkdtemp(prefix='vt_testsuite_') tempfile.tempdir = test_temp_dir @apply class clean_tempdir(object): def __init__(self): atexit.register(self.clean) self.listdir = os.listdir self.isdir = os.path.isdir self.test_temp_dir = test_temp_dir self.rmtree = shutil.rmtree self.out = sys.stdout.write def clean(self): nb_dirs = 0 nb_files = 0 for f in self.listdir(self.test_temp_dir): if self.isdir(f): nb_dirs += 1 else: nb_files += 1 if nb_dirs > 0 or nb_files > 0: self.out("Warning: %d dirs and %d files were left behind in " "tempdir, cleaning up\n" % (nb_dirs, nb_files)) self.rmtree(self.test_temp_dir, ignore_errors=True) # Parse the command-line usage = "Usage: %prog [options] [module1 module2 ...]" parser = OptionParser(usage=usage) parser.add_option("-V", "--verbose", action="store", type="int", default=0, dest="verbose", help="set verboseness level(0--2, default=0, " "higher means more verbose)") parser.add_option("-v", "--vistrails-verbose", action="store", type="int", default=0, dest="debugLevel", help="set the debugLevel in 
VisTrails (0--2, default=0)") parser.add_option("-e", "--examples", action="store_true", default=False, help="run vistrails examples") parser.add_option("-i", "--images", action="store_true", default=False, help="perform image comparisons") parser.add_option("--installbundles", action='store_true', default=False, help=("Attempt to install missing Python packages " "automatically")) parser.add_option("-S", "--startup", action="store", type="str", default=None, dest="dotVistrails", help="Set startup file (default is temporary directory)") parser.add_option('-L', '--locale', action='store', type='str', default='', dest='locale', help="set locale to this string") parser.add_option('-D', '--debug', action='store_true', default=False, help="start interactive debugger on unexpected error") parser.add_option('--no-unbuffered', action='store_false', dest='unbuffered', default=True, help="Don't make output stream unbuffered") (options, test_modules) = parser.parse_args() # remove empty strings test_modules = filter(len, test_modules) verbose = options.verbose locale.setlocale(locale.LC_ALL, options.locale or '') test_examples = options.examples test_images = options.images installbundles = options.installbundles dotVistrails = options.dotVistrails debug_mode = options.debug vistrails_verbose = options.debugLevel # Makes stdout unbuffered, so python -u is not needed class Unbuffered(object): def __init__(self, stream): self.stream = stream def write(self, data): self.stream.write(data) self.stream.flush() def __getattr__(self, attr): return getattr(self.stream, attr) if options.unbuffered: sys.stdout = Unbuffered(sys.stdout) sys.stderr = Unbuffered(sys.stderr) # Use PyQt API v2 def setNewPyQtAPI(): try: import sip # We now use the new PyQt API - IPython needs it sip.setapi('QString', 2) sip.setapi('QVariant', 2) except Exception: print "Could not set PyQt API, is PyQt4 installed?" 
setNewPyQtAPI() # Log to the console import vistrails.core.debug vistrails.core.debug.DebugPrint.getInstance().log_to_console() import vistrails.tests import vistrails.core import vistrails.core.db.io import vistrails.core.db.locator from vistrails.core import debug import vistrails.gui.application from vistrails.core.system import vistrails_root_directory, \ vistrails_examples_directory from vistrails.core.packagemanager import get_package_manager # VisTrails does funny stuff with unittest/unittest2, be sure to load that # after vistrails import unittest root_directory = os.path.realpath(vistrails_root_directory()) ############################################################################### # Testing Examples EXAMPLES_PATH = vistrails_examples_directory() #dictionary of examples that will be run with the workflows that will be ignored VT_EXAMPLES = { 'EMBOSS_webservices.vt': ["ProphetOutput"], 'KEGGPathway.vt': [], 'KEGG_SearchEntities_webservice.vt': [], 'KEGG_webservices.vt': [], 'brain_vistrail.vt': [], 'chebi_webservice.vt': [], 'head.vt': [], 'infovis.vt': [], 'noaa_webservices.vt': [], 'offscreen.vt': [], 'plot.vt': [], 'spx.vt': [], 'structure_or_id_webservice.vt': [], 'terminator.vt': ["Isosurface Script"], 'triangle_area.vt': [], 'vtk.vt': [], 'vtk_book_3rd_p189.vt': ["quadric", "SmapleFunction", "Almost there"], 'vtk_book_3rd_p193.vt': ["modules", "connections", "lookup table"], 'vtk_http.vt': [], } ############################################################################### # Utility def sub_print(s, overline=False): """Prints line with underline (and optionally overline) ASCII dashes.""" if overline: print "-" * len(s) print s print "-" * len(s) ############################################################################### if len(test_modules) > 0: test_modules = test_modules else: test_modules = None if os.path.exists(EXAMPLES_PATH): test_images = True def module_filter(name): if test_modules is None: return True for mod in test_modules: if 
name.startswith(mod): return True return False ############################################################################### # reinitializing arguments and options so VisTrails does not try parsing them sys.argv = sys.argv[:1] # creates the app so that testing can happen # We need the windows so we can test events, etc. optionsDict = { 'batch': False, 'executionLog': False, 'singleInstance': False, 'installBundles': installbundles, 'enablePackagesSilently': True, 'handlerDontAsk': True, 'developerDebugger': debug_mode, 'debugLevel': vistrails_verbose } if dotVistrails: optionsDict['dotVistrails'] = dotVistrails else: optionsDict['spawned'] = True v = vistrails.gui.application.start_application(optionsDict) if v != 0: app = vistrails.gui.application.get_vistrails_application() if app: app.finishSession() sys.exit(v) # make sure that fixedCellSize is turned on spreadsheet_conf = get_package_manager().get_package_configuration("spreadsheet") spreadsheet_conf.fixedCellSize = True # disable first vistrail app = vistrails.gui.application.get_vistrails_application() app.builderWindow.auto_view = False app.builderWindow.close_all_vistrails(True) print "Test Suite for VisTrails" print "Locale settings: %s" % ', '.join('%s: %s' % (s, locale.setlocale(getattr(locale, s), None)) for s in ('LC_ALL', 'LC_TIME')) print "Running on %s" % ', '.join(platform.uname()) print "Python is %s" % sys.version try: from PyQt4 import QtCore print "Using PyQt4 %s with Qt %s" % (QtCore.PYQT_VERSION_STR, QtCore.qVersion()) except ImportError: print "PyQt4 not available" for pkg in ('numpy', 'scipy', 'matplotlib'): try: ipkg = __import__(pkg, globals(), locals(), [], -1) print "Using %s %s" % (pkg, ipkg.__version__) except ImportError: print "%s not available" % pkg try: import vtk print "Using vtk %s" % vtk.vtkVersion().GetVTKVersion() except ImportError: print "vtk not available" print "" tests_passed = True main_test_suite = unittest.TestSuite() test_loader = unittest.TestLoader() 
import_skip_regex = re.compile(r'(?i)# *pragma[: ]*no *testimport') if test_modules: sub_print("Trying to import some of the modules") else: sub_print("Trying to import all modules") for (p, subdirs, files) in os.walk(root_directory): # skip subversion subdirectories if p.find('.svn') != -1 or p.find('.git') != -1 : continue for filename in files: # skip files that don't look like VisTrails python modules if not filename.endswith('.py'): continue module_file = os.path.join(p, filename) module = os.path.join("vistrails", p[len(root_directory)+1:], filename[:-3]) if (module.startswith(os.sep) or ('#' in module)): continue # use qualified import names with periods instead of # slashes to avoid duplicates in sys.modules module = module.replace('/','.') module = module.replace('\\','.') if module.endswith('__init__'): module = module[:-9] if not module_filter(module): continue if module.startswith('vistrails.tests.resources'): continue if ('.system.' in module and not module.endswith('__init__')): continue with open(module_file) as fp: l = fp.readline() if l.startswith('#!'): # shebang l = fp.readline() if import_skip_regex.match(l): if verbose >= 1: print >>sys.stderr, ("Skipping %s, not an importable " "module" % module) continue m = None try: if '.' 
in module: m = __import__(module, globals(), locals(), ['foo']) else: m = __import__(module) except BaseException: print >>sys.stderr, "ERROR: Could not import module: %s" % module if verbose >= 1: traceback.print_exc(file=sys.stderr) continue # Load the unittest TestCases suite = test_loader.loadTestsFromModule(m) # Load the doctests #try: # suite.addTests(doctest.DocTestSuite(m)) #except ValueError: # pass # No doctest is fine, we check that some tests exist later # The doctests are currently opt-in; a load_tests method can be # defined to build a DocTestSuite # This is because some modules have interpreter-formatted examples that # are NOT doctests, and because mining the codebase for doctests is # painfully slow main_test_suite.addTests(suite) if suite.countTestCases() == 0 and verbose >= 1: print >>sys.stderr, "WARNING: module has no tests: %s" % module elif verbose >= 2: print >>sys.stderr, "OK: module as %d test cases: %s" % ( suite.countTestCases(), module) sub_print("Imported modules. Running %d tests%s..." 
% ( main_test_suite.countTestCases(), ", and thumbnails comparison" if test_images else ''), overline=True) ############## TEST VISTRAIL IMAGES #################### # Compares thumbnails with the generated images to detect broken visualizations image_tests = [("terminator.vt", [("terminator_isosurface", "Isosurface"), ("terminator_VRSW", "Volume Rendering SW"), ("terminator_CPSW", "Clipping Plane SW"), ("terminator_CRSW", "Combined Rendering SW"), ("terminator_ISSW", "Image Slices SW")]) ] compare_use_vtk = False try: import vtk if LooseVersion(vtk.vtkVersion().GetVTKVersion()) >= LooseVersion('5.8.0'): compare_use_vtk = True except ImportError: pass if compare_use_vtk: def compare_thumbnails(prev, next): #vtkImageDifference assumes RGB, so strip alpha def removeAlpha(file): freader = vtk.vtkPNGReader() freader.SetFileName(file) removealpha = vtk.vtkImageExtractComponents() removealpha.SetComponents(0,1,2) removealpha.SetInputConnection(freader.GetOutputPort()) removealpha.Update() return removealpha.GetOutput() #do the image comparison a = removeAlpha(prev) b = removeAlpha(next) idiff = vtk.vtkImageDifference() idiff.SetInput(a) idiff.SetImage(b) idiff.Update() return idiff.GetThresholdedError() else: try: from scipy.misc import imread except ImportError: imread = None if test_images: print "Warning: old VTK version detected, NOT comparing thumbnails" if imread is not None: def compare_thumbnails(prev, next): prev_img = imread(prev) next_img = imread(next) assert len(prev_img.shape) == 3 assert len(next_img.shape) == 3 if prev_img.shape[:2] == next_img.shape[:2]: return 0 else: return float('Inf') else: def compare_thumbnails(prev, next): if os.path.isfile(prev) and os.path.isfile(next): return 0 else: return float('Inf') def image_test_generator(vtfile, version): from vistrails.core.db.locator import FileLocator from vistrails.core.db.io import load_vistrail import vistrails.core.console_mode def test(self): try: errs = [] filename = os.path.join(EXAMPLES_PATH, 
vtfile) locator = FileLocator(os.path.abspath(filename)) (v, abstractions, thumbnails, mashups) = load_vistrail(locator) errs = vistrails.core.console_mode.run( [(locator, version)], update_vistrail=False, extra_info={'compare_thumbnails': compare_thumbnails}) if len(errs) > 0: for err in errs: print(" *** Error in %s:%s:%s -- %s" % err) self.fail(str(err)) except Exception, e: self.fail(debug.format_exception(e)) return test class TestVistrailImages(unittest.TestCase): pass if test_images: for vt, t in image_tests: for name, version in t: test_name = 'test_%s' % name test = image_test_generator(vt, version) setattr(TestVistrailImages, test_name, test) main_test_suite.addTest(TestVistrailImages(test_name)) ############## RUN TEST SUITE #################### class TestResult(unittest.TextTestResult): def addSkip(self, test, reason): self.stream.writeln("skipped '{0}': {1}".format(str(test), reason)) super(TestResult, self).addSkip(test, reason) runner = unittest.TextTestRunner( verbosity=max(verbose, 1), resultclass=TestResult) result = runner.run(main_test_suite) if not result.wasSuccessful(): tests_passed = False sub_print("Tests finished.", overline=True) if test_examples: import vistrails.core.console_mode sub_print("Testing examples:") summary = {} nworkflows = 0 nvtfiles = 0 for vtfile in VT_EXAMPLES.keys(): try: errs = [] filename = os.path.join(EXAMPLES_PATH, vtfile) print filename locator = vistrails.core.db.locator.FileLocator(os.path.abspath(filename)) (v, abstractions, thumbnails, mashups) = vistrails.core.db.io.load_vistrail(locator) w_list = [] for version,tag in v.get_tagMap().iteritems(): if tag not in VT_EXAMPLES[vtfile]: w_list.append((locator,version)) nworkflows += 1 if len(w_list) > 0: errs = vistrails.core.console_mode.run(w_list, update_vistrail=False) summary[vtfile] = errs except Exception, e: errs.append((vtfile,"None", "None", debug.format_exception(e))) summary[vtfile] = errs nvtfiles += 1 print "-" * 79 print "Summary of Examples: %s 
workflows in %s vistrail files" % ( nworkflows, nvtfiles) print "" errors = False for vtfile, errs in summary.iteritems(): print vtfile if len(errs) > 0: for err in errs: print(" *** Error in %s:%s:%s -- %s" % err) errors = True else: print " Ok." print "-" * 79 if errors: tests_passed = False sub_print("There were errors. See summary for more information") else: sub_print("Examples ran successfully.") vistrails.gui.application.get_vistrails_application().finishSession() vistrails.gui.application.stop_application() # Test Runners can use the return value to know if the tests passed sys.exit(0 if tests_passed else 1)
bsd-3-clause
johnbachman/belpy
indra/sources/trrust/processor.py
4
2252
from copy import deepcopy from indra.databases import hgnc_client from indra.statements import Agent, IncreaseAmount, DecreaseAmount, Evidence class TrrustProcessor(object): """Processor to extract INDRA Statements from Trrust data frame. Attributes ---------- df : pandas.DataFrame The Trrust table to process. statements : list[indra.statements.Statement] The list of INDRA Statements extracted from the table. """ def __init__(self, df): self.df = df self.statements = [] def extract_statements(self): """Process the table to extract Statements.""" for _, (tf, target, effect, refs) in self.df.iterrows(): tf_agent = get_grounded_agent(tf) target_agent = get_grounded_agent(target) if effect == 'Activation': stmt_cls = IncreaseAmount elif effect == 'Repression': stmt_cls = DecreaseAmount else: continue pmids = refs.split(';') for pmid in pmids: stmt = make_stmt(stmt_cls, tf_agent, target_agent, pmid) self.statements.append(stmt) def make_stmt(stmt_cls, tf_agent, target_agent, pmid): """Return a Statement based on its type, agents, and PMID.""" ev = Evidence(source_api='trrust', pmid=pmid) return stmt_cls(deepcopy(tf_agent), deepcopy(target_agent), evidence=[ev]) def get_grounded_agent(gene_name): """Return a grounded Agent based on an HGNC symbol.""" db_refs = {'TEXT': gene_name} if gene_name in hgnc_map: gene_name = hgnc_map[gene_name] hgnc_id = hgnc_client.get_hgnc_id(gene_name) if not hgnc_id: hgnc_id = hgnc_client.get_current_hgnc_id(gene_name) if hgnc_id: db_refs['HGNC'] = hgnc_id up_id = hgnc_client.get_uniprot_id(hgnc_id) if up_id and ',' not in up_id: db_refs['UP'] = up_id agent = Agent(gene_name, db_refs=db_refs) return agent hgnc_map = { 'CTGF': 'CCN2', 'CYR61': 'CCN1', 'MKL1': 'MRTFA', 'NOV': 'CCN3', 'RFWD2': 'COP1', 'SALL4A': 'SALL4', 'STAT5': 'STAT5A', 'TRAP': 'ACP5', 'AES': 'TLE5', 'SEPT7': 'SEPTIN7' }
mit
rubikloud/scikit-learn
sklearn/metrics/cluster/bicluster.py
359
2797
from __future__ import division import numpy as np from sklearn.utils.linear_assignment_ import linear_assignment from sklearn.utils.validation import check_consistent_length, check_array __all__ = ["consensus_score"] def _check_rows_and_columns(a, b): """Unpacks the row and column arrays and checks their shape.""" check_consistent_length(*a) check_consistent_length(*b) checks = lambda x: check_array(x, ensure_2d=False) a_rows, a_cols = map(checks, a) b_rows, b_cols = map(checks, b) return a_rows, a_cols, b_rows, b_cols def _jaccard(a_rows, a_cols, b_rows, b_cols): """Jaccard coefficient on the elements of the two biclusters.""" intersection = ((a_rows * b_rows).sum() * (a_cols * b_cols).sum()) a_size = a_rows.sum() * a_cols.sum() b_size = b_rows.sum() * b_cols.sum() return intersection / (a_size + b_size - intersection) def _pairwise_similarity(a, b, similarity): """Computes pairwise similarity matrix. result[i, j] is the Jaccard coefficient of a's bicluster i and b's bicluster j. """ a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b) n_a = a_rows.shape[0] n_b = b_rows.shape[0] result = np.array(list(list(similarity(a_rows[i], a_cols[i], b_rows[j], b_cols[j]) for j in range(n_b)) for i in range(n_a))) return result def consensus_score(a, b, similarity="jaccard"): """The similarity of two sets of biclusters. Similarity between individual biclusters is computed. Then the best matching between sets is found using the Hungarian algorithm. The final score is the sum of similarities divided by the size of the larger set. Read more in the :ref:`User Guide <biclustering>`. Parameters ---------- a : (rows, columns) Tuple of row and column indicators for a set of biclusters. b : (rows, columns) Another set of biclusters like ``a``. 
similarity : string or function, optional, default: "jaccard" May be the string "jaccard" to use the Jaccard coefficient, or any function that takes four arguments, each of which is a 1d indicator vector: (a_rows, a_columns, b_rows, b_columns). References ---------- * Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis for bicluster acquisition <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__. """ if similarity == "jaccard": similarity = _jaccard matrix = _pairwise_similarity(a, b, similarity) indices = linear_assignment(1. - matrix) n_a = len(a[0]) n_b = len(b[0]) return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
bsd-3-clause
alexrudy/AstroObject
Examples/spectra.py
1
2361
#!/usr/bin/env python # -*- coding: utf-8 -*- # # spectra.py # AstroObject # # Created by Alexander Rudy on 2012-04-17. # Copyright 2012 Alexander Rudy. All rights reserved. # import numpy as np import matplotlib.pyplot as plt from AstroObject.loggers import * from AstroObject.anaspec import InterpolatedSpectrum,GaussianSpectrum,FlatSpectrum,BlackBodySpectrum,UnitarySpectrum,Resolver from AstroObject.spectra import SpectraStack from AstroObject.util import npArrayInfo from AstroObject.util.functions import get_resolution_spectrum LOG = logging.getLogger('AstroObject') LOG.configure_from_file('Examples/config.yml') LOG.start() WAVELENGTHS = ((np.arange(98)+1)/2.0 + 1.0) * 1e-7 HIGH_R =WAVELENGTHS[1:]/np.diff(WAVELENGTHS) WAVELENGTHS_LOWR = ((np.arange(23)+0.25)*2.0 + 1.0) * 1e-7 LOWR = WAVELENGTHS_LOWR[1:]/np.diff(WAVELENGTHS_LOWR)/2 VALID = np.array([(np.arange(50) + 1.0) * 1e-7, np.sin(np.arange(50)/2.0)+2.0 + np.arange(50)/25.0]) OBJECT = SpectraStack() OBJECT.read("Examples/SNIa.R1000.dat") OBJECT["Interpolateable"] = InterpolatedSpectrum(OBJECT.d,"Interpolateable") wl, rs = get_resolution_spectrum(np.min(OBJECT.f.wavelengths),np.max(OBJECT.f.wavelengths),200) OBJECT["Raw Data"] = OBJECT.f(wavelengths = wl, resolution = rs, method = 'resample') OBJECT.show() wl, rs = get_resolution_spectrum(np.min(OBJECT.f.wavelengths),np.max(OBJECT.f.wavelengths),50) OBJECT["Low Res Data"] = OBJECT["Interpolateable"](wavelengths = wl, resolution = rs, method = 'resample') OBJECT.show() for line in OBJECT.info(): print line print "Valid:",OBJECT.valid() OBJECT["Raw Data"].logarize() OBJECT["Logarized"] = OBJECT["Raw Data"] for line in OBJECT.info(): print line print "Valid:",OBJECT.valid() OBJECT.show() OBJECT["Raw Data"].linearize() OBJECT["Linearized"] = OBJECT["Raw Data"] for line in OBJECT.info(): print line print "Valid:",OBJECT.valid() OBJECT.show() try: for line in OBJECT.info(): print line print "Valid:",OBJECT.valid() print "Is Log:",OBJECT.f.x_is_log() print "Is 
Linear:",OBJECT.f.x_is_linear() print "dx, dlogx:",np.mean(OBJECT.f.dx()),np.mean(OBJECT.f.dlogx()) OBJECT["Raw Data"].logarize(strict=True) OBJECT["Logarized Strict"] = OBJECT["Raw Data"] OBJECT.show() except Exception, e: print e plt.legend(loc=2) plt.title("Wavelength Scale Tests") plt.show()
gpl-3.0
blakeboswell/valence
build/lib/pyvalence/build/agilentgcms.py
1
20264
""" Read GCMS files produced by Agilent """ import re import csv import os import struct import numpy as np import pandas as pd import scipy.sparse class AgilentGcmsTableBase(object): """ Base class for Agilent GCMS builders. This class should not be instantiated directly. """ @classmethod def _clean_name(cls, item): """ return clean name from colstr item. """ return item[0] @classmethod def _np_type(cls, item): """ return numpy type from colstr item. """ return item[1] @classmethod def _pd_columns(cls, header, colstr): """ return column headers from colstr item. """ return [cls._clean_name(colstr[col]) for col in header] @classmethod def _column_structure(cls, header, keys): """ match table colstr with header read from file """ for key, val in keys.items(): if set(header) == set(val.keys()): return key, val raise Exception( 'expected column structure: {}, found {}'.format( val.keys(), header) ) def __init__(self, col_keys, reader, file_path): if self.__class__.__name__ == 'AgilentGcmsTableBase': raise ValueError('This class is not intended' 'to be instantiated directly.') self.col_keys = col_keys self._meta, self._tables = reader(file_path) self._data = {} def _as_dataframe(self, header, data): """ transform results of reader function to pandas.DataFrame with appropriate column names and data types """ key, colstr = self._column_structure(header, self.col_keys) df = (pd.DataFrame(data, columns=self._pd_columns(header, colstr)) .apply(pd.to_numeric, errors='ignore')) return (key, df) def _build_data(self): """ convert list of tables to dictionary of pandas dataframe """ def build(tbl): return self._as_dataframe(tbl[0], tbl[1:]) return {key: df for key, df in map(build, self._tables)} def _access(self, key): """ provide access to key in data with appropriate exception handling """ if not self._data: self._data = self._build_data() if key not in self._data: self._data[key] = None return self._data[key] def __getitem__(self, key): """ provide access to key in data """ 
return self._access(key) class AgilentGcmsResults(AgilentGcmsTableBase): """ Manages reading of Agilent RESULT.CSV and mutation of tables into single pandas df Arguments: file_path: path to RESULTS.CSV file """ __tic_colstr = { 'Header=': ('header=', 'O'), 'Peak': ('peak', 'i4'), 'R.T.': ('rt', 'f4'), 'First': ('first', 'i4'), 'Max': ('max', 'i4'), 'Last': ('last', 'i4'), 'PK TY': ('pk_ty', 'O'), 'Height': ('height', 'i4'), 'Area': ('area', 'i4'), 'Pct Max': ('pct_max', 'f4'), 'Pct Total': ('pct_total', 'f4') } __lib_colstr = { 'Header=': ('header=', 'O'), 'PK': ('pk', 'i4'), 'RT': ('rt', 'f4'), 'Area Pct': ('pct_area', 'f4'), 'Library/ID': ('library_id', 'O'), 'Ref': ('ref', 'i4'), 'CAS': ('cas', 'O'), 'Qual': ('qual', 'i4'), } __fid_colstr = { 'Header=': ('header=', 'O'), 'Peak': ('peak', 'i4'), 'R.T.': ('rt', 'f4'), 'Start': ('first', 'i4'), 'End': ('end', 'i4'), 'PK TY': ('pk_ty', '0'), 'Height': ('height', 'i4'), 'Area': ('area', 'i4'), 'Pct Max': ('pct_max', 'f4'), 'Pct Total': ('pct_total', 'f4') } __colstr_key = { 'tic': __tic_colstr, 'lib': __lib_colstr, 'fid': __fid_colstr } @staticmethod def _results_reader(file_path): """ read Agilent RESULTS.CSV into list of lists where each list item is the lines representing a tic, fid, or lib table """ def istablerow(line): """ return true if line is table row """ return re.match('\d+=', line) def isheader(line): """ return true if line is header """ return line[0] == 'Header=' def seek_rows(header, gen): """ gen is at position after header seek until no more table rows or stopiter exception """ table = [header] try: while True: line = next(gen) if not istablerow(line[0]): break table.append(line) except StopIteration as e: line = None finally: return line, table def scan_csv(gen): """ split csv generator into meta information and list of individual tables of tokens """ meta, tables = [], [] try: while True: line = next(gen) if isheader(line): line, table = seek_rows(line, gen) tables.append(table) if line: 
meta.append(line) except StopIteration as e: return meta, tables return scan_csv(csv.reader(open(file_path))) def __init__(self, file_path): super().__init__(self.__colstr_key, self._results_reader, file_path) @property def tic(self): ''' return tic table ''' return self['tic'] @property def lib(self): ''' return lib table ''' return self['lib'] @property def fid(self): ''' return fid table ''' return self['fid'] class AgilentGcmsDataMs(AgilentGcmsTableBase): """ Manages reading of Agilent DATA.MS file and stacking into single pandas DataFrame Parameters ---------- file_path : str Path to DATA.MS file. """ __chrom_colstr = { 'tic': ('tic', 'f4'), 'tme': ('tme', 'f4') } __colstr_key = { 'chromatogram': __chrom_colstr } @classmethod def access_colstr(attr): ''' return header only ''' if attr in __colstr_key: return __colstr_key[attr] return None @staticmethod def _read_chromatogram(file_path): """ Extract tic and tme data from DATA.MS file Args: file_path (str): path to DATA.MS file Returns: ([meta], [data]): ``meta`` is the metadata lines in the DATA.MS file. ``data`` is the tic and tme lines from the DATA.MS file as [tic, tme]. """ f = open(file_path, 'rb') f.seek(0x5) # get number of scans to read in if f.read(4) == 'GC': f.seek(0x142) else: f.seek(0x118) nscans = struct.unpack('>H', f.read(2))[0] f.seek(0x10A) # find the starting location of the data f.seek(2 * struct.unpack('>H', f.read(2))[0] - 2) tme = np.zeros(nscans) tic = np.zeros(nscans) for i in range(nscans): npos = f.tell() + 2 * struct.unpack('>H', f.read(2))[0] tme[i] = struct.unpack('>I', f.read(4))[0] / 60000. 
f.seek(npos - 4) tic[i] = struct.unpack('>I', f.read(4))[0] f.seek(npos) f.close() jj = [['tic', 'tme']] + [list(a) for a in list(zip(tic, tme))] return [], [jj] @staticmethod def _read_spectra(file_path): """ Extract chromatogram data from DATA.MS file Args: file_path (str): path to DATA.MS file Returns: pandas DataFrame with chromatograph ions as columns, time as index, measurements as values """ f = open(file_path, 'rb') f.seek(0x5) # get number of scans to read in if f.read(4) == 'GC': # GC and LC chemstation store in different places f.seek(0x142) else: f.seek(0x118) nscans = struct.unpack('>H', f.read(2))[0] f.seek(0x10A) f.seek(2 * struct.unpack('>H', f.read(2))[0] - 2) dstart = f.tell() tot_pts = 0 # determine total number of measurements in file rowst = np.empty(nscans + 1, dtype=int) rowst[0] = 0 for scn in range(nscans): # get the position of the next scan npos = f.tell() + 2 * struct.unpack('>H', f.read(2))[0] # keep a running total of how many measurements tot_pts += (npos - f.tell() - 26) / 4 rowst[scn + 1] = tot_pts f.seek(npos) # move forward # go back to the beginning and load all the other data f.seek(dstart) ions = [] i_lkup = {} cols = np.empty(int(tot_pts), dtype=np.int) vals = np.empty(int(tot_pts), dtype=np.int) times = np.empty(nscans) for scn in range(nscans): npos = f.tell() + 2 * struct.unpack('>H', f.read(2))[0] # the sampling rate is evidentally 60 kHz on all Agilent's MS's times[scn] = struct.unpack('>I', f.read(4))[0] / 60000. 
f.seek(f.tell() + 12) npts = rowst[scn + 1] - rowst[scn] mzs = struct.unpack('>' + npts * 'HH', f.read(npts * 4)) nions = set(mzs[0::2]).difference(i_lkup) i_lkup.update({ion: i + len(ions) for i, ion in enumerate(nions)}) ions += nions cols[rowst[scn]:rowst[scn + 1]] = [i_lkup[i] for i in mzs[0::2]] vals[rowst[scn]:rowst[scn + 1]] = mzs[1::2] f.seek(npos) f.close() vals = ((vals & 16383) * 8 ** (vals >> 14)).astype(float) data = scipy.sparse.csr_matrix( (vals, cols, rowst), shape=(nscans, len(ions)), dtype=float ) ions = np.array(ions) / 20. return pd.DataFrame(data=data.todense(), index=times, columns=ions) def __init__(self, file_path): self._spectra = self._read_spectra(file_path) super().__init__(self.__colstr_key, self._read_chromatogram, file_path) @property def spectra(self): """ WIP: For testing chromatogram build """ return self._spectra @property def chromatogram(self): """ """ return self['chromatogram'] class AgilentGcfid(AgilentGcmsTableBase): """ Manages reading of Agilent DATA.MS file and stacking into single pandas DataFrame Parameters ---------- file_path : str Path to DATA.MS file. """ __chrom_colstr = { 'fid': ('fid', 'f4'), 'tme': ('tme', 'f4') } __colstr_key = { 'chromatogram_fid': __chrom_colstr } @classmethod def access_colstr(attr): ''' return header only ''' if attr in __colstr_key: return __colstr_key[attr] return None @staticmethod def _read_chromatogram_fid(file_path): """ Extract fid and tme data from FID1A.ch file Args: file_path (str): path to FID1A.ch file Returns: ([meta], [data]): ``meta`` is the metadata lines in the FID1A.ch file. ``data`` is the fid and tme lines from the FID1A.ch file as [fid, tme]. """ f = open(file_path, 'rb') f.seek(0x11A) start_time = struct.unpack('>f', f.read(4))[0] / 60000. end_time = struct.unpack('>f', f.read(4))[0] / 60000. 
f.seek(0x1800) fid = np.fromfile(f, '<f8') tme = np.linspace(start_time, end_time, fid.shape[0]) jj = [['fid', 'tme']] + [list(a) for a in list(zip(fid, tme))] return [], [jj] def __init__(self, file_path): super().__init__(self.__colstr_key, self._read_chromatogram_fid, file_path) @property def spectra(self): """ WIP: For testing chromatogram build """ return self._spectra @property def chromatogram(self): """ """ return self['chromatogram_fid'] class AgilentGcmsDir(object): """ Read all GCMS files from Agilent .D folder. Parameters ---------- dir_path : str Path to Agilent .D folder. """ __file_str = { 'acqmeth.txt': None, 'audit.txt': None, 'cnorm.ini': None, 'data.ms': AgilentGcmsDataMs, 'fames-ha.res': None, 'fames-ha.xls': None, 'fileinfo.txt': None, 'ls_report': None, 'percent.txt': None, 'pre_post.ini': None, 'qreport.txt': None, 'results.csv': AgilentGcmsResults, 'fid1a.ch':AgilentGcfid } @classmethod def _diriter(cls, dir_path): """ Non-public method that returns all files in Agilent .D folder. Parameters ---------- dir_path : str Path to Agilent .D folder. Returns ------- Generator Itarable over files in ``dir_path``. """ return ((f, os.path.join(root, f)) for root, dirs, files in os.walk(dir_path) for f in files if f.lower() in cls.__file_str) def __init__(self, dir_path): self._dir_path = dir_path self._files = {fn.lower(): fp for fn, fp in AgilentGcmsDir._diriter(dir_path)} self._data = {fn.lower(): None for fn in self._files} def _key_validate(self, key): """ Non-public method to validate build of file in Agilent .D folder. Parameters ---------- key : str Name of file in Agilent .D folder. Raises ------ KeyError Raised iff ``key`` not a known Agilent .D file, or if ``key`` is a known Agilent .D file, but was not present in the provided ``_dir_path``. NotImplementedError If build object associated with ``key`` is not implemented. 
""" if key not in self._data: raise KeyError( '{} not in {}'.format(key, self._dir_path) ) if key not in AgilentGcmsDir.__file_str: raise KeyError( '{} not a recognized file'.format(key) ) if not AgilentGcmsDir.__file_str[key]: raise NotImplementedError( 'parser for {} not yet implemented'.format(key) ) def _data_cache(self, key): """ Non-public accessor for loading or getting data associated with ``key``. Performs appropriate validation on ``key``. Parameters ---------- key : str File key associated with data to load or get. Returns ------- DataFrame Data loaded from file associated with ``key``. """ self._key_validate(key) if self._data[key] is None: self._data[key] = AgilentGcmsDir.__file_str[key](self._files[key]) return self._data[key] @property def datams(self): """ obj: AgilentGcmsDataMs built from DATA.MS file in Agilent .D folder """ return self._data_cache('data.ms') @property def datafid(self): return self._data_cache('fid1a.ch') @property def results(self): """ obj: AgilentGcmsResult built from RESULTS.CSV file in Agilent .D folder. """ return self._data_cache('results.csv') class AgilentGcms(object): """ Read GCMS files from one or more Agilent .D folders into a collection of pandas.DataFrame. Parameters ---------- dir_list : list(str) Paths to Agilent .D folders. dir_keys : list(str) Optional. Provide custom names for the .D folders. If omitted, the folders' names are used. """ @classmethod def from_dir(cls, agilent_dir): """ Initialize AgilentGcms from single Agilent .D folder. Parameters ---------- agilent_dir : str Path to Agilent .D folder. Returns ------- obj AgilentGcms object constructed from single Agilent .D folder """ dir_list = [agilent_dir] return cls(dir_list) @classmethod def from_root(cls, root_dir): """ Initialize AgilentGcms from root folder containing at least one Agilent .D folder. Parameters ---------- root_dir : str Path to folder containing at least one Agilent .D folder. 
Returns ------- obj AgilentGcms object constructed from a folder containing one or more Agilent .D folders """ dir_list = [os.path.join(root_dir, path) for path in next(os.walk(root_dir))[1]] return cls(dir_list) def _pandas_stack(self, accessor, attr): """ Non-public method for stacking all data """ dfs = [] for key, val in self._folders.items(): try: df = getattr(val, accessor)[attr] if df is not None: dfs.append(df.assign(key=key)) except KeyError: # dfs.append(pd.DataFrame({'key': [key]})) print(f'missing `{attr}` from `{accessor}` in {key}') if not dfs: return None return pd.concat(dfs, axis=0).set_index('key') def _dict_stack(self, accessor, attr): """ temporary hack to accomodate unstackable spectra """ stack = {} for key, val in self._folders.items(): try: stack[key] = getattr(getattr(val, accessor), attr) except KeyError: print(f'cannot access `{attr}` from `{accessor}` file') return stack def __init__(self, dir_list, dir_keys=None): if not dir_keys: dir_keys = [os.path.basename(path) for path in dir_list] self._folders = {k: AgilentGcmsDir(v) for k, v in zip(dir_keys, dir_list)} self._results_tic = self._pandas_stack('results', 'tic') self._results_fid = self._pandas_stack('results', 'fid') self._results_lib = self._pandas_stack('results', 'lib') self._chromatogram = self._pandas_stack('datams', 'chromatogram') self._chromatogram_fid = self._pandas_stack('datafid','chromatogram_fid') self._spectra = self._dict_stack('datams', 'spectra') @property def keys(self): """ list(str): Keys representing .D folder names. """ return self._folders.keys() @property def chromatogram(self): """ pandas.DataFrame: DATA.MS data extracted from .D folders. """ return self._chromatogram @property def chromatogram_fid(self): """ pandas.DataFrame: FID.ch data extracted from .D folders. 
""" return self._chromatogram_fid @property def spectra(self): """ """ return self._spectra @property def results_fid(self): """ pandas.DataFrame: RESULTS.CSV fid data from .D folders """ return self._results_fid @property def results_lib(self): """ pandas.DataFrame: RESULTS.CSV lib data from .D folders """ return self._results_lib @property def results_tic(self): """ pandas.DataFrame: RESULTS.CSV tic data from all .D folders """ return self._results_tic
bsd-3-clause
jefflyn/buddha
src/mlia/Ch05/EXTRAS/plot2D.py
4
1233
''' Created on Oct 6, 2010 @author: Peter ''' from numpy import * import matplotlib import matplotlib.pyplot as plt from matplotlib.patches import Rectangle import logRegres dataMat,labelMat=logRegres.loadDataSet() dataArr = array(dataMat) weights = logRegres.stocGradAscent0(dataArr,labelMat) n = shape(dataArr)[0] #number of points to create xcord1 = []; ycord1 = [] xcord2 = []; ycord2 = [] markers =[] colors =[] for i in range(n): if int(labelMat[i])== 1: xcord1.append(dataArr[i,1]); ycord1.append(dataArr[i,2]) else: xcord2.append(dataArr[i,1]); ycord2.append(dataArr[i,2]) fig = plt.figure() ax = fig.add_subplot(111) #ax.scatter(xcord,ycord, c=colors, s=markers) type1 = ax.scatter(xcord1, ycord1, s=30, c='red', marker='s') type2 = ax.scatter(xcord2, ycord2, s=30, c='green') x = arange(-3.0, 3.0, 0.1) #weights = [-2.9, 0.72, 1.29] #weights = [-5, 1.09, 1.42] weights = [13.03822793, 1.32877317, -1.96702074] weights = [4.12, 0.48, -0.6168] y = (-weights[0]-weights[1]*x)/weights[2] type3 = ax.plot(x, y) #ax.legend([type1, type2, type3], ["Did Not Like", "Liked in Small Doses", "Liked in Large Doses"], loc=2) #ax.axis([-5000,100000,-2,25]) plt.xlabel('X1') plt.ylabel('X2') plt.show()
artistic-2.0
mne-tools/mne-tools.github.io
0.20/_downloads/df37da09f8cc04b503dd281d8471168a/plot_dics.py
2
12493
# -*- coding: utf-8 -*- """ DICS for power mapping ====================== In this tutorial, we'll simulate two signals originating from two locations on the cortex. These signals will be sinusoids, so we'll be looking at oscillatory activity (as opposed to evoked activity). We'll use dynamic imaging of coherent sources (DICS) [1]_ to map out spectral power along the cortex. Let's see if we can find our two simulated sources. """ # Author: Marijn van Vliet <w.m.vanvliet@gmail.com> # # License: BSD (3-clause) ############################################################################### # Setup # ----- # We first import the required packages to run this tutorial and define a list # of filenames for various things we'll be using. import os.path as op import numpy as np from scipy.signal import welch, coherence, unit_impulse from matplotlib import pyplot as plt import mne from mne.simulation import simulate_raw, add_noise from mne.datasets import sample from mne.minimum_norm import make_inverse_operator, apply_inverse from mne.time_frequency import csd_morlet from mne.beamformer import make_dics, apply_dics_csd # We use the MEG and MRI setup from the MNE-sample dataset data_path = sample.data_path(download=False) subjects_dir = op.join(data_path, 'subjects') # Filenames for various files we'll be using meg_path = op.join(data_path, 'MEG', 'sample') raw_fname = op.join(meg_path, 'sample_audvis_raw.fif') fwd_fname = op.join(meg_path, 'sample_audvis-meg-eeg-oct-6-fwd.fif') cov_fname = op.join(meg_path, 'sample_audvis-cov.fif') fwd = mne.read_forward_solution(fwd_fname) # Seed for the random number generator rand = np.random.RandomState(42) ############################################################################### # Data simulation # --------------- # # The following function generates a timeseries that contains an oscillator, # whose frequency fluctuates a little over time, but stays close to 10 Hz. # We'll use this function to generate our two signals. sfreq = 50. 
# Sampling frequency of the generated signal n_samp = int(round(10. * sfreq)) times = np.arange(n_samp) / sfreq # 10 seconds of signal n_times = len(times) def coh_signal_gen(): """Generate an oscillating signal. Returns ------- signal : ndarray The generated signal. """ t_rand = 0.001 # Variation in the instantaneous frequency of the signal std = 0.1 # Std-dev of the random fluctuations added to the signal base_freq = 10. # Base frequency of the oscillators in Hertz n_times = len(times) # Generate an oscillator with varying frequency and phase lag. signal = np.sin(2.0 * np.pi * (base_freq * np.arange(n_times) / sfreq + np.cumsum(t_rand * rand.randn(n_times)))) # Add some random fluctuations to the signal. signal += std * rand.randn(n_times) # Scale the signal to be in the right order of magnitude (~100 nAm) # for MEG data. signal *= 100e-9 return signal ############################################################################### # Let's simulate two timeseries and plot some basic information about them. signal1 = coh_signal_gen() signal2 = coh_signal_gen() fig, axes = plt.subplots(2, 2, figsize=(8, 4)) # Plot the timeseries ax = axes[0][0] ax.plot(times, 1e9 * signal1, lw=0.5) ax.set(xlabel='Time (s)', xlim=times[[0, -1]], ylabel='Amplitude (Am)', title='Signal 1') ax = axes[0][1] ax.plot(times, 1e9 * signal2, lw=0.5) ax.set(xlabel='Time (s)', xlim=times[[0, -1]], title='Signal 2') # Power spectrum of the first timeseries f, p = welch(signal1, fs=sfreq, nperseg=128, nfft=256) ax = axes[1][0] # Only plot the first 100 frequencies ax.plot(f[:100], 20 * np.log10(p[:100]), lw=1.) ax.set(xlabel='Frequency (Hz)', xlim=f[[0, 99]], ylabel='Power (dB)', title='Power spectrum of signal 1') # Compute the coherence between the two timeseries f, coh = coherence(signal1, signal2, fs=sfreq, nperseg=100, noverlap=64) ax = axes[1][1] ax.plot(f[:50], coh[:50], lw=1.) 
ax.set(xlabel='Frequency (Hz)', xlim=f[[0, 49]], ylabel='Coherence', title='Coherence between the timeseries') fig.tight_layout() ############################################################################### # Now we put the signals at two locations on the cortex. We construct a # :class:`mne.SourceEstimate` object to store them in. # # The timeseries will have a part where the signal is active and a part where # it is not. The techniques we'll be using in this tutorial depend on being # able to contrast data that contains the signal of interest versus data that # does not (i.e. it contains only noise). # The locations on the cortex where the signal will originate from. These # locations are indicated as vertex numbers. vertices = [[146374], [33830]] # Construct SourceEstimates that describe the signals at the cortical level. data = np.vstack((signal1, signal2)) stc_signal = mne.SourceEstimate( data, vertices, tmin=0, tstep=1. / sfreq, subject='sample') stc_noise = stc_signal * 0. ############################################################################### # Before we simulate the sensor-level data, let's define a signal-to-noise # ratio. You are encouraged to play with this parameter and see the effect of # noise on our results. snr = 1. # Signal-to-noise ratio. Decrease to add more noise. ############################################################################### # Now we run the signal through the forward model to obtain simulated sensor # data. To save computation time, we'll only simulate gradiometer data. You can # try simulating other types of sensors as well. # # Some noise is added based on the baseline noise covariance matrix from the # sample dataset, scaled to implement the desired SNR. # Read the info from the sample dataset. This defines the location of the # sensors and such. 
info = mne.io.read_info(raw_fname) info.update(sfreq=sfreq, bads=[]) # Only use gradiometers picks = mne.pick_types(info, meg='grad', stim=True, exclude=()) mne.pick_info(info, picks, copy=False) # Define a covariance matrix for the simulated noise. In this tutorial, we use # a simple diagonal matrix. cov = mne.cov.make_ad_hoc_cov(info) cov['data'] *= (20. / snr) ** 2 # Scale the noise to achieve the desired SNR # Simulate the raw data, with a lowpass filter on the noise stcs = [(stc_signal, unit_impulse(n_samp, dtype=int) * 1), (stc_noise, unit_impulse(n_samp, dtype=int) * 2)] # stacked in time duration = (len(stc_signal.times) * 2) / sfreq raw = simulate_raw(info, stcs, forward=fwd) add_noise(raw, cov, iir_filter=[4, -4, 0.8], random_state=rand) ############################################################################### # We create an :class:`mne.Epochs` object containing two trials: one with # both noise and signal and one with just noise events = mne.find_events(raw, initial_event=True) tmax = (len(stc_signal.times) - 1) / sfreq epochs = mne.Epochs(raw, events, event_id=dict(signal=1, noise=2), tmin=0, tmax=tmax, baseline=None, preload=True) assert len(epochs) == 2 # ensure that we got the two expected events # Plot some of the channels of the simulated data that are situated above one # of our simulated sources. picks = mne.pick_channels(epochs.ch_names, mne.read_selection('Left-frontal')) epochs.plot(picks=picks) ############################################################################### # Power mapping # ------------- # With our simulated dataset ready, we can now pretend to be researchers that # have just recorded this from a real subject and are going to study what parts # of the brain communicate with each other. # # First, we'll create a source estimate of the MEG data. We'll use both a # straightforward MNE-dSPM inverse solution for this, and the DICS beamformer # which is specifically designed to work with oscillatory data. 
############################################################################### # Computing the inverse using MNE-dSPM: # Compute the inverse operator fwd = mne.read_forward_solution(fwd_fname) inv = make_inverse_operator(epochs.info, fwd, cov) # Apply the inverse model to the trial that also contains the signal. s = apply_inverse(epochs['signal'].average(), inv) # Take the root-mean square along the time dimension and plot the result. s_rms = np.sqrt((s ** 2).mean()) title = 'MNE-dSPM inverse (RMS)' brain = s_rms.plot('sample', subjects_dir=subjects_dir, hemi='both', figure=1, size=600, time_label=title, title=title) # Indicate the true locations of the source activity on the plot. brain.add_foci(vertices[0][0], coords_as_verts=True, hemi='lh') brain.add_foci(vertices[1][0], coords_as_verts=True, hemi='rh') # Rotate the view and add a title. brain.show_view(view={'azimuth': 0, 'elevation': 0, 'distance': 550, 'focalpoint': [0, 0, 0]}) ############################################################################### # We will now compute the cortical power map at 10 Hz. using a DICS beamformer. # A beamformer will construct for each vertex a spatial filter that aims to # pass activity originating from the vertex, while dampening activity from # other sources as much as possible. # # The :func:`mne.beamformer.make_dics` function has many switches that offer # precise control # over the way the filter weights are computed. Currently, there is no clear # consensus regarding the best approach. This is why we will demonstrate two # approaches here: # # 1. The approach as described in [2]_, which first normalizes the forward # solution and computes a vector beamformer. # 2. The scalar beamforming approach based on [3]_, which uses weight # normalization instead of normalizing the forward solution. # Estimate the cross-spectral density (CSD) matrix on the trial containing the # signal. 
csd_signal = csd_morlet(epochs['signal'], frequencies=[10]) # Compute the spatial filters for each vertex, using two approaches. filters_approach1 = make_dics( info, fwd, csd_signal, reg=0.05, pick_ori='max-power', normalize_fwd=True, inversion='single', weight_norm=None) print(filters_approach1) filters_approach2 = make_dics( info, fwd, csd_signal, reg=0.1, pick_ori='max-power', normalize_fwd=False, inversion='matrix', weight_norm='unit-noise-gain') print(filters_approach2) # You can save these to disk with: # filters_approach1.save('filters_1-dics.h5') # Compute the DICS power map by applying the spatial filters to the CSD matrix. power_approach1, f = apply_dics_csd(csd_signal, filters_approach1) power_approach2, f = apply_dics_csd(csd_signal, filters_approach2) # Plot the DICS power maps for both approaches. for approach, power in enumerate([power_approach1, power_approach2], 1): title = 'DICS power map, approach %d' % approach brain = power.plot('sample', subjects_dir=subjects_dir, hemi='both', figure=approach + 1, size=600, time_label=title, title=title) # Indicate the true locations of the source activity on the plot. brain.add_foci(vertices[0][0], coords_as_verts=True, hemi='lh') brain.add_foci(vertices[1][0], coords_as_verts=True, hemi='rh') # Rotate the view and add a title. brain.show_view(view={'azimuth': 0, 'elevation': 0, 'distance': 550, 'focalpoint': [0, 0, 0]}) ############################################################################### # Excellent! All methods found our two simulated sources. Of course, with a # signal-to-noise ratio (SNR) of 1, is isn't very hard to find them. You can # try playing with the SNR and see how the MNE-dSPM and DICS approaches hold up # in the presence of increasing noise. In the presence of more noise, you may # need to increase the regularization parameter of the DICS beamformer. # # References # ---------- # .. [1] Gross et al. (2001). 
Dynamic imaging of coherent sources: Studying # neural interactions in the human brain. Proceedings of the National # Academy of Sciences, 98(2), 694-699. # https://doi.org/10.1073/pnas.98.2.694 # .. [2] van Vliet, et al. (2018) Analysis of functional connectivity and # oscillatory power using DICS: from raw MEG data to group-level # statistics in Python. bioRxiv, 245530. https://doi.org/10.1101/245530 # .. [3] Sekihara & Nagarajan. Adaptive spatial filters for electromagnetic # brain imaging (2008) Springer Science & Business Media
bsd-3-clause
udibr/fast-rcnn
lib/roi_data_layer/minibatch.py
44
7337
# -------------------------------------------------------- # Fast R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Ross Girshick # -------------------------------------------------------- """Compute minibatch blobs for training a Fast R-CNN network.""" import numpy as np import numpy.random as npr import cv2 from fast_rcnn.config import cfg from utils.blob import prep_im_for_blob, im_list_to_blob def get_minibatch(roidb, num_classes): """Given a roidb, construct a minibatch sampled from it.""" num_images = len(roidb) # Sample random scales to use for each image in this batch random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES), size=num_images) assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \ 'num_images ({}) must divide BATCH_SIZE ({})'. \ format(num_images, cfg.TRAIN.BATCH_SIZE) rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image) # Get the input image blob, formatted for caffe im_blob, im_scales = _get_image_blob(roidb, random_scale_inds) # Now, build the region of interest and label blobs rois_blob = np.zeros((0, 5), dtype=np.float32) labels_blob = np.zeros((0), dtype=np.float32) bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32) bbox_loss_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32) # all_overlaps = [] for im_i in xrange(num_images): labels, overlaps, im_rois, bbox_targets, bbox_loss \ = _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image, num_classes) # Add to RoIs blob rois = _project_im_rois(im_rois, im_scales[im_i]) batch_ind = im_i * np.ones((rois.shape[0], 1)) rois_blob_this_image = np.hstack((batch_ind, rois)) rois_blob = np.vstack((rois_blob, rois_blob_this_image)) # Add to labels, bbox targets, and bbox loss blobs labels_blob = np.hstack((labels_blob, labels)) bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets)) bbox_loss_blob = np.vstack((bbox_loss_blob, 
bbox_loss)) # all_overlaps = np.hstack((all_overlaps, overlaps)) # For debug visualizations # _vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps) blobs = {'data': im_blob, 'rois': rois_blob, 'labels': labels_blob} if cfg.TRAIN.BBOX_REG: blobs['bbox_targets'] = bbox_targets_blob blobs['bbox_loss_weights'] = bbox_loss_blob return blobs def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes): """Generate a random sample of RoIs comprising foreground and background examples. """ # label = class RoI has max overlap with labels = roidb['max_classes'] overlaps = roidb['max_overlaps'] rois = roidb['boxes'] # Select foreground RoIs as those with >= FG_THRESH overlap fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0] # Guard against the case when an image has fewer than fg_rois_per_image # foreground RoIs fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size) # Sample foreground regions without replacement if fg_inds.size > 0: fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image, replace=False) # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI) bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) & (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0] # Compute number of background RoIs to take from this image (guarding # against there being fewer than desired) bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image bg_rois_per_this_image = np.minimum(bg_rois_per_this_image, bg_inds.size) # Sample foreground regions without replacement if bg_inds.size > 0: bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False) # The indices that we're selecting (both fg and bg) keep_inds = np.append(fg_inds, bg_inds) # Select sampled values from various arrays: labels = labels[keep_inds] # Clamp labels for the background RoIs to 0 labels[fg_rois_per_this_image:] = 0 overlaps = overlaps[keep_inds] rois = rois[keep_inds] bbox_targets, bbox_loss_weights = \ 
_get_bbox_regression_labels(roidb['bbox_targets'][keep_inds, :], num_classes) return labels, overlaps, rois, bbox_targets, bbox_loss_weights def _get_image_blob(roidb, scale_inds): """Builds an input blob from the images in the roidb at the specified scales. """ num_images = len(roidb) processed_ims = [] im_scales = [] for i in xrange(num_images): im = cv2.imread(roidb[i]['image']) if roidb[i]['flipped']: im = im[:, ::-1, :] target_size = cfg.TRAIN.SCALES[scale_inds[i]] im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.MAX_SIZE) im_scales.append(im_scale) processed_ims.append(im) # Create a blob to hold the input images blob = im_list_to_blob(processed_ims) return blob, im_scales def _project_im_rois(im_rois, im_scale_factor): """Project image RoIs into the rescaled training image.""" rois = im_rois * im_scale_factor return rois def _get_bbox_regression_labels(bbox_target_data, num_classes): """Bounding-box regression targets are stored in a compact form in the roidb. This function expands those targets into the 4-of-4*K representation used by the network (i.e. only one class has non-zero targets). The loss weights are similarly expanded. Returns: bbox_target_data (ndarray): N x 4K blob of regression targets bbox_loss_weights (ndarray): N x 4K blob of loss weights """ clss = bbox_target_data[:, 0] bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32) bbox_loss_weights = np.zeros(bbox_targets.shape, dtype=np.float32) inds = np.where(clss > 0)[0] for ind in inds: cls = clss[ind] start = 4 * cls end = start + 4 bbox_targets[ind, start:end] = bbox_target_data[ind, 1:] bbox_loss_weights[ind, start:end] = [1., 1., 1., 1.] 
return bbox_targets, bbox_loss_weights def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps): """Visualize a mini-batch for debugging.""" import matplotlib.pyplot as plt for i in xrange(rois_blob.shape[0]): rois = rois_blob[i, :] im_ind = rois[0] roi = rois[1:] im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy() im += cfg.PIXEL_MEANS im = im[:, :, (2, 1, 0)] im = im.astype(np.uint8) cls = labels_blob[i] plt.imshow(im) print 'class: ', cls, ' overlap: ', overlaps[i] plt.gca().add_patch( plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0], roi[3] - roi[1], fill=False, edgecolor='r', linewidth=3) ) plt.show()
mit
icdishb/scikit-learn
sklearn/linear_model/tests/test_base.py
120
10082
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Fabian Pedregosa <fabian.pedregosa@inria.fr> # # License: BSD 3 clause import numpy as np from scipy import sparse from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.linear_model.base import LinearRegression from sklearn.linear_model.base import center_data, sparse_center_data from sklearn.utils import check_random_state from sklearn.datasets.samples_generator import make_sparse_uncorrelated from sklearn.datasets.samples_generator import make_regression def test_linear_regression(): # Test LinearRegression on a simple dataset. # a simple dataset X = [[1], [2]] Y = [1, 2] clf = LinearRegression() clf.fit(X, Y) assert_array_almost_equal(clf.coef_, [1]) assert_array_almost_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.predict(X), [1, 2]) # test it also for degenerate input X = [[1]] Y = [0] clf = LinearRegression() clf.fit(X, Y) assert_array_almost_equal(clf.coef_, [0]) assert_array_almost_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.predict(X), [0]) def test_fit_intercept(): # Test assertions on betas shape. 
X2 = np.array([[0.38349978, 0.61650022], [0.58853682, 0.41146318]]) X3 = np.array([[0.27677969, 0.70693172, 0.01628859], [0.08385139, 0.20692515, 0.70922346]]) y = np.array([1, 1]) lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y) lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y) lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y) lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y) assert_equal(lr2_with_intercept.coef_.shape, lr2_without_intercept.coef_.shape) assert_equal(lr3_with_intercept.coef_.shape, lr3_without_intercept.coef_.shape) assert_equal(lr2_without_intercept.coef_.ndim, lr3_without_intercept.coef_.ndim) def test_linear_regression_sparse(random_state=0): "Test that linear regression also works with sparse data" random_state = check_random_state(random_state) for i in range(10): n = 100 X = sparse.eye(n, n) beta = random_state.rand(n) y = X * beta[:, np.newaxis] ols = LinearRegression() ols.fit(X, y.ravel()) assert_array_almost_equal(beta, ols.coef_ + ols.intercept_) assert_array_almost_equal(ols.residues_, 0) def test_linear_regression_multiple_outcome(random_state=0): "Test multiple-outcome linear regressions" X, y = make_regression(random_state=random_state) Y = np.vstack((y, y)).T n_features = X.shape[1] clf = LinearRegression(fit_intercept=True) clf.fit((X), Y) assert_equal(clf.coef_.shape, (2, n_features)) Y_pred = clf.predict(X) clf.fit(X, y) y_pred = clf.predict(X) assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3) def test_linear_regression_sparse_multiple_outcome(random_state=0): "Test multiple-outcome linear regressions with sparse data" random_state = check_random_state(random_state) X, y = make_sparse_uncorrelated(random_state=random_state) X = sparse.coo_matrix(X) Y = np.vstack((y, y)).T n_features = X.shape[1] ols = LinearRegression() ols.fit(X, Y) assert_equal(ols.coef_.shape, (2, n_features)) Y_pred = ols.predict(X) ols.fit(X, 
y.ravel()) y_pred = ols.predict(X) assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3) def test_center_data(): n_samples = 200 n_features = 2 rng = check_random_state(0) X = rng.rand(n_samples, n_features) y = rng.rand(n_samples) expected_X_mean = np.mean(X, axis=0) # XXX: currently scaled to variance=n_samples expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0]) expected_y_mean = np.mean(y, axis=0) Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False, normalize=False) assert_array_almost_equal(X_mean, np.zeros(n_features)) assert_array_almost_equal(y_mean, 0) assert_array_almost_equal(X_std, np.ones(n_features)) assert_array_almost_equal(Xt, X) assert_array_almost_equal(yt, y) Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True, normalize=False) assert_array_almost_equal(X_mean, expected_X_mean) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(X_std, np.ones(n_features)) assert_array_almost_equal(Xt, X - expected_X_mean) assert_array_almost_equal(yt, y - expected_y_mean) Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True, normalize=True) assert_array_almost_equal(X_mean, expected_X_mean) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(X_std, expected_X_std) assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std) assert_array_almost_equal(yt, y - expected_y_mean) def test_center_data_multioutput(): n_samples = 200 n_features = 3 n_outputs = 2 rng = check_random_state(0) X = rng.rand(n_samples, n_features) y = rng.rand(n_samples, n_outputs) expected_y_mean = np.mean(y, axis=0) args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))] for center, X in args: _, yt, _, y_mean, _ = center(X, y, fit_intercept=False, normalize=False) assert_array_almost_equal(y_mean, np.zeros(n_outputs)) assert_array_almost_equal(yt, y) _, yt, _, y_mean, _ = center(X, y, fit_intercept=True, normalize=False) 
assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(yt, y - y_mean) _, yt, _, y_mean, _ = center(X, y, fit_intercept=True, normalize=True) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(yt, y - y_mean) def test_center_data_weighted(): n_samples = 200 n_features = 2 rng = check_random_state(0) X = rng.rand(n_samples, n_features) y = rng.rand(n_samples) sample_weight = rng.rand(n_samples) expected_X_mean = np.average(X, axis=0, weights=sample_weight) expected_y_mean = np.average(y, axis=0, weights=sample_weight) # XXX: if normalize=True, should we expect a weighted standard deviation? # Currently not weighted, but calculated with respect to weighted mean # XXX: currently scaled to variance=n_samples expected_X_std = (np.sqrt(X.shape[0]) * np.mean((X - expected_X_mean) ** 2, axis=0) ** .5) Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True, normalize=False, sample_weight=sample_weight) assert_array_almost_equal(X_mean, expected_X_mean) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(X_std, np.ones(n_features)) assert_array_almost_equal(Xt, X - expected_X_mean) assert_array_almost_equal(yt, y - expected_y_mean) Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True, normalize=True, sample_weight=sample_weight) assert_array_almost_equal(X_mean, expected_X_mean) assert_array_almost_equal(y_mean, expected_y_mean) assert_array_almost_equal(X_std, expected_X_std) assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std) assert_array_almost_equal(yt, y - expected_y_mean) def test_sparse_center_data(): n_samples = 200 n_features = 2 rng = check_random_state(0) # random_state not supported yet in sparse.rand X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng X = X.tolil() y = rng.rand(n_samples) XA = X.toarray() # XXX: currently scaled to variance=n_samples expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0]) Xt, yt, X_mean, 
y_mean, X_std = sparse_center_data(X, y, fit_intercept=False, normalize=False) assert_array_almost_equal(X_mean, np.zeros(n_features)) assert_array_almost_equal(y_mean, 0) assert_array_almost_equal(X_std, np.ones(n_features)) assert_array_almost_equal(Xt.A, XA) assert_array_almost_equal(yt, y) Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y, fit_intercept=True, normalize=False) assert_array_almost_equal(X_mean, np.mean(XA, axis=0)) assert_array_almost_equal(y_mean, np.mean(y, axis=0)) assert_array_almost_equal(X_std, np.ones(n_features)) assert_array_almost_equal(Xt.A, XA) assert_array_almost_equal(yt, y - np.mean(y, axis=0)) Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y, fit_intercept=True, normalize=True) assert_array_almost_equal(X_mean, np.mean(XA, axis=0)) assert_array_almost_equal(y_mean, np.mean(y, axis=0)) assert_array_almost_equal(X_std, expected_X_std) assert_array_almost_equal(Xt.A, XA / expected_X_std) assert_array_almost_equal(yt, y - np.mean(y, axis=0)) def test_csr_sparse_center_data(): # Test output format of sparse_center_data, when input is csr X, y = make_regression() X[X < 2.5] = 0.0 csr = sparse.csr_matrix(X) csr_, y, _, _, _ = sparse_center_data(csr, y, True) assert_equal(csr_.getformat(), 'csr')
bsd-3-clause
mne-tools/mne-python
tutorials/preprocessing/25_background_filtering.py
3
48286
# -*- coding: utf-8 -*- r""" .. _disc-filtering: =================================== Background information on filtering =================================== Here we give some background information on filtering in general, and how it is done in MNE-Python in particular. Recommended reading for practical applications of digital filter design can be found in Parks & Burrus (1987) :footcite:`ParksBurrus1987` and Ifeachor & Jervis (2002) :footcite:`IfeachorJervis2002`, and for filtering in an M/EEG context we recommend reading Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`. .. note:: This tutorial goes pretty deep into the mathematics of filtering and the design decisions that go into choosing a filter. If you just want to know how to apply the default filters in MNE-Python to your data, skip this tutorial and read :ref:`tut-filter-resample` instead (but someday, you should come back and read this one too 🙂). Problem statement ================= Practical issues with filtering electrophysiological data are covered in Widmann *et al.* (2012) :footcite:`WidmannSchroger2012`, where they conclude with this statement: Filtering can result in considerable distortions of the time course (and amplitude) of a signal as demonstrated by VanRullen (2011) :footcite:`VanRullen2011`. Thus, filtering should not be used lightly. However, if effects of filtering are cautiously considered and filter artifacts are minimized, a valid interpretation of the temporal dynamics of filtered electrophysiological data is possible and signals missed otherwise can be detected with filtering. In other words, filtering can increase signal-to-noise ratio (SNR), but if it is not used carefully, it can distort data. Here we hope to cover some filtering basics so users can better understand filtering trade-offs and why MNE-Python has chosen particular defaults. .. _tut-filtering-basics: Filtering basics ================ Let's get some of the basic math down. 
In the frequency domain, digital filters have a transfer function that is given by: .. math:: H(z) &= \frac{b_0 + b_1 z^{-1} + b_2 z^{-2} + \ldots + b_M z^{-M}} {1 + a_1 z^{-1} + a_2 z^{-2} + \ldots + a_N z^{-M}} \\ &= \frac{\sum_{k=0}^Mb_kz^{-k}}{\sum_{k=1}^Na_kz^{-k}} In the time domain, the numerator coefficients :math:`b_k` and denominator coefficients :math:`a_k` can be used to obtain our output data :math:`y(n)` in terms of our input data :math:`x(n)` as: .. math:: :label: summations y(n) &= b_0 x(n) + b_1 x(n-1) + \ldots + b_M x(n-M) - a_1 y(n-1) - a_2 y(n - 2) - \ldots - a_N y(n - N)\\ &= \sum_{k=0}^M b_k x(n-k) - \sum_{k=1}^N a_k y(n-k) In other words, the output at time :math:`n` is determined by a sum over 1. the numerator coefficients :math:`b_k`, which get multiplied by the previous input values :math:`x(n-k)`, and 2. the denominator coefficients :math:`a_k`, which get multiplied by the previous output values :math:`y(n-k)`. Note that these summations correspond to (1) a weighted `moving average`_ and (2) an autoregression_. Filters are broken into two classes: FIR_ (finite impulse response) and IIR_ (infinite impulse response) based on these coefficients. FIR filters use a finite number of numerator coefficients :math:`b_k` (:math:`\forall k, a_k=0`), and thus each output value of :math:`y(n)` depends only on the :math:`M` previous input values. IIR filters depend on the previous input and output values, and thus can have effectively infinite impulse responses. As outlined in Parks & Burrus (1987) :footcite:`ParksBurrus1987`, FIR and IIR have different trade-offs: * A causal FIR filter can be linear-phase -- i.e., the same time delay across all frequencies -- whereas a causal IIR filter cannot. The phase and group delay characteristics are also usually better for FIR filters. * IIR filters can generally have a steeper cutoff than an FIR filter of equivalent order. 
* IIR filters are generally less numerically stable, in part due to accumulating error (due to its recursive calculations). In MNE-Python we default to using FIR filtering. As noted in Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`: Despite IIR filters often being considered as computationally more efficient, they are recommended only when high throughput and sharp cutoffs are required (Ifeachor and Jervis, 2002 :footcite:`IfeachorJervis2002`, p. 321)... FIR filters are easier to control, are always stable, have a well-defined passband, can be corrected to zero-phase without additional computations, and can be converted to minimum-phase. We therefore recommend FIR filters for most purposes in electrophysiological data analysis. When designing a filter (FIR or IIR), there are always trade-offs that need to be considered, including but not limited to: 1. Ripple in the pass-band 2. Attenuation of the stop-band 3. Steepness of roll-off 4. Filter order (i.e., length for FIR filters) 5. Time-domain ringing In general, the sharper something is in frequency, the broader it is in time, and vice-versa. This is a fundamental time-frequency trade-off, and it will show up below. FIR Filters =========== First, we will focus on FIR filters, which are the default filters used by MNE-Python. """ ############################################################################### # Designing FIR filters # --------------------- # Here we'll try to design a low-pass filter and look at trade-offs in terms # of time- and frequency-domain filter characteristics. Later, in # :ref:`tut-effect-on-signals`, we'll look at how such filters can affect # signals when they are used. # # First let's import some useful tools for filtering, and set some default # values for our data that are reasonable for M/EEG. 
import numpy as np from numpy.fft import fft, fftfreq from scipy import signal import matplotlib.pyplot as plt from mne.time_frequency.tfr import morlet from mne.viz import plot_filter, plot_ideal_filter import mne sfreq = 1000. f_p = 40. flim = (1., sfreq / 2.) # limits for plotting ############################################################################### # Take for example an ideal low-pass filter, which would give a magnitude # response of 1 in the pass-band (up to frequency :math:`f_p`) and a magnitude # response of 0 in the stop-band (down to frequency :math:`f_s`) such that # :math:`f_p=f_s=40` Hz here (shown to a lower limit of -60 dB for simplicity): nyq = sfreq / 2. # the Nyquist frequency is half our sample rate freq = [0, f_p, f_p, nyq] gain = [1, 1, 0, 0] third_height = np.array(plt.rcParams['figure.figsize']) * [1, 1. / 3.] ax = plt.subplots(1, figsize=third_height)[1] plot_ideal_filter(freq, gain, ax, title='Ideal %s Hz lowpass' % f_p, flim=flim) ############################################################################### # This filter hypothetically achieves zero ripple in the frequency domain, # perfect attenuation, and perfect steepness. However, due to the discontinuity # in the frequency response, the filter would require infinite ringing in the # time domain (i.e., infinite order) to be realized. Another way to think of # this is that a rectangular window in the frequency domain is actually a sinc_ # function in the time domain, which requires an infinite number of samples # (and thus infinite time) to represent. So although this filter has ideal # frequency suppression, it has poor time-domain characteristics. 
# # Let's try to naïvely make a brick-wall filter of length 0.1 s, and look # at the filter itself in the time domain and the frequency domain: n = int(round(0.1 * sfreq)) n -= n % 2 - 1 # make it odd t = np.arange(-(n // 2), n // 2 + 1) / sfreq # center our sinc h = np.sinc(2 * f_p * t) / (4 * np.pi) plot_filter(h, sfreq, freq, gain, 'Sinc (0.1 s)', flim=flim, compensate=True) ############################################################################### # This is not so good! Making the filter 10 times longer (1 s) gets us a # slightly better stop-band suppression, but still has a lot of ringing in # the time domain. Note the x-axis is an order of magnitude longer here, # and the filter has a correspondingly much longer group delay (again equal # to half the filter length, or 0.5 seconds): n = int(round(1. * sfreq)) n -= n % 2 - 1 # make it odd t = np.arange(-(n // 2), n // 2 + 1) / sfreq h = np.sinc(2 * f_p * t) / (4 * np.pi) plot_filter(h, sfreq, freq, gain, 'Sinc (1.0 s)', flim=flim, compensate=True) ############################################################################### # Let's make the stop-band tighter still with a longer filter (10 s), # with a resulting larger x-axis: n = int(round(10. * sfreq)) n -= n % 2 - 1 # make it odd t = np.arange(-(n // 2), n // 2 + 1) / sfreq h = np.sinc(2 * f_p * t) / (4 * np.pi) plot_filter(h, sfreq, freq, gain, 'Sinc (10.0 s)', flim=flim, compensate=True) ############################################################################### # Now we have very sharp frequency suppression, but our filter rings for the # entire 10 seconds. So this naïve method is probably not a good way to build # our low-pass filter. # # Fortunately, there are multiple established methods to design FIR filters # based on desired response characteristics. These include: # # 1. The Remez_ algorithm (:func:`scipy.signal.remez`, `MATLAB firpm`_) # 2. 
Windowed FIR design (:func:`scipy.signal.firwin2`, # :func:`scipy.signal.firwin`, and `MATLAB fir2`_) # 3. Least squares designs (:func:`scipy.signal.firls`, `MATLAB firls`_) # 4. Frequency-domain design (construct filter in Fourier # domain and use an :func:`IFFT <numpy.fft.ifft>` to invert it) # # .. note:: Remez and least squares designs have advantages when there are # "do not care" regions in our frequency response. However, we want # well controlled responses in all frequency regions. # Frequency-domain construction is good when an arbitrary response # is desired, but generally less clean (due to sampling issues) than # a windowed approach for more straightforward filter applications. # Since our filters (low-pass, high-pass, band-pass, band-stop) # are fairly simple and we require precise control of all frequency # regions, we will primarily use and explore windowed FIR design. # # If we relax our frequency-domain filter requirements a little bit, we can # use these functions to construct a lowpass filter that instead has a # *transition band*, or a region between the pass frequency :math:`f_p` # and stop frequency :math:`f_s`, e.g.: trans_bandwidth = 10 # 10 Hz transition band f_s = f_p + trans_bandwidth # = 50 Hz freq = [0., f_p, f_s, nyq] gain = [1., 1., 0., 0.] ax = plt.subplots(1, figsize=third_height)[1] title = '%s Hz lowpass with a %s Hz transition' % (f_p, trans_bandwidth) plot_ideal_filter(freq, gain, ax, title=title, flim=flim) ############################################################################### # Accepting a shallower roll-off of the filter in the frequency domain makes # our time-domain response potentially much better. We end up with a more # gradual slope through the transition region, but a *much* cleaner time # domain signal. 
Here again for the 1 s filter: h = signal.firwin2(n, freq, gain, nyq=nyq) plot_filter(h, sfreq, freq, gain, 'Windowed 10 Hz transition (1.0 s)', flim=flim, compensate=True) ############################################################################### # Since our lowpass is around 40 Hz with a 10 Hz transition, we can actually # use a shorter filter (5 cycles at 10 Hz = 0.5 s) and still get acceptable # stop-band attenuation: n = int(round(sfreq * 0.5)) + 1 h = signal.firwin2(n, freq, gain, nyq=nyq) plot_filter(h, sfreq, freq, gain, 'Windowed 10 Hz transition (0.5 s)', flim=flim, compensate=True) ############################################################################### # But if we shorten the filter too much (2 cycles of 10 Hz = 0.2 s), # our effective stop frequency gets pushed out past 60 Hz: n = int(round(sfreq * 0.2)) + 1 h = signal.firwin2(n, freq, gain, nyq=nyq) plot_filter(h, sfreq, freq, gain, 'Windowed 10 Hz transition (0.2 s)', flim=flim, compensate=True) ############################################################################### # If we want a filter that is only 0.1 seconds long, we should probably use # something more like a 25 Hz transition band (0.2 s = 5 cycles @ 25 Hz): trans_bandwidth = 25 f_s = f_p + trans_bandwidth freq = [0, f_p, f_s, nyq] h = signal.firwin2(n, freq, gain, nyq=nyq) plot_filter(h, sfreq, freq, gain, 'Windowed 50 Hz transition (0.2 s)', flim=flim, compensate=True) ############################################################################### # So far, we have only discussed *non-causal* filtering, which means that each # sample at each time point :math:`t` is filtered using samples that come # after (:math:`t + \Delta t`) *and* before (:math:`t - \Delta t`) the current # time point :math:`t`. # In this sense, each sample is influenced by samples that come both before # and after it. This is useful in many cases, especially because it does not # delay the timing of events. 
# # However, sometimes it can be beneficial to use *causal* filtering, # whereby each sample :math:`t` is filtered only using time points that came # after it. # # Note that the delay is variable (whereas for linear/zero-phase filters it # is constant) but small in the pass-band. Unlike zero-phase filters, which # require time-shifting backward the output of a linear-phase filtering stage # (and thus becoming non-causal), minimum-phase filters do not require any # compensation to achieve small delays in the pass-band. Note that as an # artifact of the minimum phase filter construction step, the filter does # not end up being as steep as the linear/zero-phase version. # # We can construct a minimum-phase filter from our existing linear-phase # filter with the :func:`scipy.signal.minimum_phase` function, and note # that the falloff is not as steep: h_min = signal.minimum_phase(h) plot_filter(h_min, sfreq, freq, gain, 'Minimum-phase', flim=flim) ############################################################################### # .. _tut-effect-on-signals: # # Applying FIR filters # -------------------- # # Now lets look at some practical effects of these filters by applying # them to some data. # # Let's construct a Gaussian-windowed sinusoid (i.e., Morlet imaginary part) # plus noise (random and line). Note that the original clean signal contains # frequency content in both the pass band and transition bands of our # low-pass filter. dur = 10. center = 2. morlet_freq = f_p tlim = [center - 0.2, center + 0.2] tticks = [tlim[0], center, tlim[1]] flim = [20, 70] x = np.zeros(int(sfreq * dur) + 1) blip = morlet(sfreq, [morlet_freq], n_cycles=7)[0].imag / 20. n_onset = int(center * sfreq) - len(blip) // 2 x[n_onset:n_onset + len(blip)] += blip x_orig = x.copy() rng = np.random.RandomState(0) x += rng.randn(len(x)) / 1000. x += np.sin(2. * np.pi * 60. * np.arange(len(x)) / sfreq) / 2000. 
############################################################################### # Filter it with a shallow cutoff, linear-phase FIR (which allows us to # compensate for the constant filter delay): transition_band = 0.25 * f_p f_s = f_p + transition_band freq = [0., f_p, f_s, sfreq / 2.] gain = [1., 1., 0., 0.] # This would be equivalent: h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p, fir_design='firwin', verbose=True) x_v16 = np.convolve(h, x) # this is the linear->zero phase, causal-to-non-causal conversion / shift x_v16 = x_v16[len(h) // 2:] plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.16 default', flim=flim, compensate=True) ############################################################################### # Filter it with a different design method ``fir_design="firwin2"``, and also # compensate for the constant filter delay. This method does not produce # quite as sharp a transition compared to ``fir_design="firwin"``, despite # being twice as long: transition_band = 0.25 * f_p f_s = f_p + transition_band freq = [0., f_p, f_s, sfreq / 2.] gain = [1., 1., 0., 0.] # This would be equivalent: # filter_dur = 6.6 / transition_band # sec # n = int(sfreq * filter_dur) # h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.) h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p, fir_design='firwin2', verbose=True) x_v14 = np.convolve(h, x)[len(h) // 2:] plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.14 default', flim=flim, compensate=True) ############################################################################### # Let's also filter with the MNE-Python 0.13 default, which is a # long-duration, steep cutoff FIR that gets applied twice: transition_band = 0.5 # Hz f_s = f_p + transition_band filter_dur = 10. # sec freq = [0., f_p, f_s, sfreq / 2.] gain = [1., 1., 0., 0.] # This would be equivalent # n = int(sfreq * filter_dur) # h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.) 
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p, h_trans_bandwidth=transition_band, filter_length='%ss' % filter_dur, fir_design='firwin2', verbose=True) x_v13 = np.convolve(np.convolve(h, x)[::-1], h)[::-1][len(h) - 1:-len(h) - 1] # the effective h is one that is applied to the time-reversed version of itself h_eff = np.convolve(h, h[::-1]) plot_filter(h_eff, sfreq, freq, gain, 'MNE-Python 0.13 default', flim=flim, compensate=True) ############################################################################### # Let's also filter it with the MNE-C default, which is a long-duration # steep-slope FIR filter designed using frequency-domain techniques: h = mne.filter.design_mne_c_filter(sfreq, l_freq=None, h_freq=f_p + 2.5) x_mne_c = np.convolve(h, x)[len(h) // 2:] transition_band = 5 # Hz (default in MNE-C) f_s = f_p + transition_band freq = [0., f_p, f_s, sfreq / 2.] gain = [1., 1., 0., 0.] plot_filter(h, sfreq, freq, gain, 'MNE-C default', flim=flim, compensate=True) ############################################################################### # And now an example of a minimum-phase filter: h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p, phase='minimum', fir_design='firwin', verbose=True) x_min = np.convolve(h, x) transition_band = 0.25 * f_p f_s = f_p + transition_band filter_dur = 6.6 / transition_band # sec n = int(sfreq * filter_dur) freq = [0., f_p, f_s, sfreq / 2.] gain = [1., 1., 0., 0.] plot_filter(h, sfreq, freq, gain, 'Minimum-phase filter', flim=flim) ############################################################################### # Both the MNE-Python 0.13 and MNE-C filters have excellent frequency # attenuation, but it comes at a cost of potential # ringing (long-lasting ripples) in the time domain. Ringing can occur with # steep filters, especially in signals with frequency content around the # transition band. 
Our Morlet wavelet signal has power in our transition band, # and the time-domain ringing is thus more pronounced for the steep-slope, # long-duration filter than the shorter, shallower-slope filter: axes = plt.subplots(1, 2)[1] def plot_signal(x, offset): """Plot a signal.""" t = np.arange(len(x)) / sfreq axes[0].plot(t, x + offset) axes[0].set(xlabel='Time (s)', xlim=t[[0, -1]]) X = fft(x) freqs = fftfreq(len(x), 1. / sfreq) mask = freqs >= 0 X = X[mask] freqs = freqs[mask] axes[1].plot(freqs, 20 * np.log10(np.maximum(np.abs(X), 1e-16))) axes[1].set(xlim=flim) yscale = 30 yticklabels = ['Original', 'Noisy', 'FIR-firwin (0.16)', 'FIR-firwin2 (0.14)', 'FIR-steep (0.13)', 'FIR-steep (MNE-C)', 'Minimum-phase'] yticks = -np.arange(len(yticklabels)) / yscale plot_signal(x_orig, offset=yticks[0]) plot_signal(x, offset=yticks[1]) plot_signal(x_v16, offset=yticks[2]) plot_signal(x_v14, offset=yticks[3]) plot_signal(x_v13, offset=yticks[4]) plot_signal(x_mne_c, offset=yticks[5]) plot_signal(x_min, offset=yticks[6]) axes[0].set(xlim=tlim, title='FIR, Lowpass=%d Hz' % f_p, xticks=tticks, ylim=[-len(yticks) / yscale, 1. / yscale], yticks=yticks, yticklabels=yticklabels) for text in axes[0].get_yticklabels(): text.set(rotation=45, size=8) axes[1].set(xlim=flim, ylim=(-60, 10), xlabel='Frequency (Hz)', ylabel='Magnitude (dB)') mne.viz.tight_layout() plt.show() ############################################################################### # IIR filters # =========== # # MNE-Python also offers IIR filtering functionality that is based on the # methods from :mod:`scipy.signal`. Specifically, we use the general-purpose # functions :func:`scipy.signal.iirfilter` and :func:`scipy.signal.iirdesign`, # which provide unified interfaces to IIR filter design. # # Designing IIR filters # --------------------- # # Let's continue with our design of a 40 Hz low-pass filter and look at # some trade-offs of different IIR filters. 
# # Often the default IIR filter is a `Butterworth filter`_, which is designed # to have a *maximally flat pass-band*. Let's look at a few filter orders, # i.e., a few different number of coefficients used and therefore steepness # of the filter: # # .. note:: Notice that the group delay (which is related to the phase) of # the IIR filters below are not constant. In the FIR case, we can # design so-called linear-phase filters that have a constant group # delay, and thus compensate for the delay (making the filter # non-causal) if necessary. This cannot be done with IIR filters, as # they have a non-linear phase (non-constant group delay). As the # filter order increases, the phase distortion near and in the # transition band worsens. However, if non-causal (forward-backward) # filtering can be used, e.g. with :func:`scipy.signal.filtfilt`, # these phase issues can theoretically be mitigated. sos = signal.iirfilter(2, f_p / nyq, btype='low', ftype='butter', output='sos') plot_filter(dict(sos=sos), sfreq, freq, gain, 'Butterworth order=2', flim=flim, compensate=True) x_shallow = signal.sosfiltfilt(sos, x) del sos ############################################################################### # The falloff of this filter is not very steep. # # .. note:: Here we have made use of second-order sections (SOS) # by using :func:`scipy.signal.sosfilt` and, under the # hood, :func:`scipy.signal.zpk2sos` when passing the # ``output='sos'`` keyword argument to # :func:`scipy.signal.iirfilter`. The filter definitions # given :ref:`above <tut-filtering-basics>` use the polynomial # numerator/denominator (sometimes called "tf") form ``(b, a)``, # which are theoretically equivalent to the SOS form used here. # In practice, however, the SOS form can give much better results # due to issues with numerical precision (see # :func:`scipy.signal.sosfilt` for an example), so SOS should be # used whenever possible. 
# # Let's increase the order, and note that now we have better attenuation, # with a longer impulse response. Let's also switch to using the MNE filter # design function, which simplifies a few things and gives us some information # about the resulting filter: iir_params = dict(order=8, ftype='butter') filt = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p, method='iir', iir_params=iir_params, verbose=True) plot_filter(filt, sfreq, freq, gain, 'Butterworth order=8', flim=flim, compensate=True) x_steep = signal.sosfiltfilt(filt['sos'], x) ############################################################################### # There are other types of IIR filters that we can use. For a complete list, # check out the documentation for :func:`scipy.signal.iirdesign`. Let's # try a Chebychev (type I) filter, which trades off ripple in the pass-band # to get better attenuation in the stop-band: iir_params.update(ftype='cheby1', rp=1., # dB of acceptable pass-band ripple ) filt = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p, method='iir', iir_params=iir_params, verbose=True) plot_filter(filt, sfreq, freq, gain, 'Chebychev-1 order=8, ripple=1 dB', flim=flim, compensate=True) ############################################################################### # If we can live with even more ripple, we can get it slightly steeper, # but the impulse response begins to ring substantially longer (note the # different x-axis scale): iir_params['rp'] = 6. filt = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p, method='iir', iir_params=iir_params, verbose=True) plot_filter(filt, sfreq, freq, gain, 'Chebychev-1 order=8, ripple=6 dB', flim=flim, compensate=True) ############################################################################### # Applying IIR filters # -------------------- # # Now let's look at how our shallow and steep Butterworth IIR filters # perform on our Morlet signal from before: axes = plt.subplots(1, 2)[1] yticks = np.arange(4) / -30. 
yticklabels = ['Original', 'Noisy', 'Butterworth-2', 'Butterworth-8'] plot_signal(x_orig, offset=yticks[0]) plot_signal(x, offset=yticks[1]) plot_signal(x_shallow, offset=yticks[2]) plot_signal(x_steep, offset=yticks[3]) axes[0].set(xlim=tlim, title='IIR, Lowpass=%d Hz' % f_p, xticks=tticks, ylim=[-0.125, 0.025], yticks=yticks, yticklabels=yticklabels,) for text in axes[0].get_yticklabels(): text.set(rotation=45, size=8) axes[1].set(xlim=flim, ylim=(-60, 10), xlabel='Frequency (Hz)', ylabel='Magnitude (dB)') mne.viz.adjust_axes(axes) mne.viz.tight_layout() plt.show() ############################################################################### # Some pitfalls of filtering # ========================== # # Multiple recent papers have noted potential risks of drawing # errant inferences due to misapplication of filters. # # Low-pass problems # ----------------- # # Filters in general, especially those that are non-causal (zero-phase), can # make activity appear to occur earlier or later than it truly did. As # mentioned in VanRullen (2011) :footcite:`VanRullen2011`, # investigations of commonly (at the time) # used low-pass filters created artifacts when they were applied to simulated # data. However, such deleterious effects were minimal in many real-world # examples in Rousselet (2012) :footcite:`Rousselet2012`. # # Perhaps more revealing, it was noted in Widmann & Schröger (2012) # :footcite:`WidmannSchroger2012` that the problematic low-pass filters from # VanRullen (2011) :footcite:`VanRullen2011`: # # 1. Used a least-squares design (like :func:`scipy.signal.firls`) that # included "do-not-care" transition regions, which can lead to # uncontrolled behavior. # 2. Had a filter length that was independent of the transition bandwidth, # which can cause excessive ringing and signal distortion. # # .. 
_tut-filtering-hp-problems: # # High-pass problems # ------------------ # # When it comes to high-pass filtering, using corner frequencies above 0.1 Hz # were found in Acunzo *et al.* (2012) :footcite:`AcunzoEtAl2012` to: # # "... generate a systematic bias easily leading to misinterpretations of # neural activity.” # # In a related paper, Widmann *et al.* (2015) :footcite:`WidmannEtAl2015` # also came to suggest a 0.1 Hz highpass. More evidence followed in # Tanner *et al.* (2015) :footcite:`TannerEtAl2015` of such distortions. # Using data from language ERP studies of semantic and # syntactic processing (i.e., N400 and P600), using a high-pass above 0.3 Hz # caused significant effects to be introduced implausibly early when compared # to the unfiltered data. From this, the authors suggested the optimal # high-pass value for language processing to be 0.1 Hz. # # We can recreate a problematic simulation from # Tanner *et al.* (2015) :footcite:`TannerEtAl2015`: # # "The simulated component is a single-cycle cosine wave with an amplitude # of 5µV [sic], onset of 500 ms poststimulus, and duration of 800 ms. The # simulated component was embedded in 20 s of zero values to avoid # filtering edge effects... Distortions [were] caused by 2 Hz low-pass # and high-pass filters... No visible distortion to the original # waveform [occurred] with 30 Hz low-pass and 0.01 Hz high-pass filters... # Filter frequencies correspond to the half-amplitude (-6 dB) cutoff # (12 dB/octave roll-off)." # # .. 
note:: This simulated signal contains energy not just within the # pass-band, but also within the transition and stop-bands -- perhaps # most easily understood because the signal has a non-zero DC value, # but also because it is a shifted cosine that has been # *windowed* (here multiplied by a rectangular window), which # makes the cosine and DC frequencies spread to other frequencies # (multiplication in time is convolution in frequency, so multiplying # by a rectangular window in the time domain means convolving a sinc # function with the impulses at DC and the cosine frequency in the # frequency domain). # x = np.zeros(int(2 * sfreq)) t = np.arange(0, len(x)) / sfreq - 0.2 onset = np.where(t >= 0.5)[0][0] cos_t = np.arange(0, int(sfreq * 0.8)) / sfreq sig = 2.5 - 2.5 * np.cos(2 * np.pi * (1. / 0.8) * cos_t) x[onset:onset + len(sig)] = sig iir_lp_30 = signal.iirfilter(2, 30. / sfreq, btype='lowpass') iir_hp_p1 = signal.iirfilter(2, 0.1 / sfreq, btype='highpass') iir_lp_2 = signal.iirfilter(2, 2. / sfreq, btype='lowpass') iir_hp_2 = signal.iirfilter(2, 2. 
/ sfreq, btype='highpass') x_lp_30 = signal.filtfilt(iir_lp_30[0], iir_lp_30[1], x, padlen=0) x_hp_p1 = signal.filtfilt(iir_hp_p1[0], iir_hp_p1[1], x, padlen=0) x_lp_2 = signal.filtfilt(iir_lp_2[0], iir_lp_2[1], x, padlen=0) x_hp_2 = signal.filtfilt(iir_hp_2[0], iir_hp_2[1], x, padlen=0) xlim = t[[0, -1]] ylim = [-2, 6] xlabel = 'Time (sec)' ylabel = r'Amplitude ($\mu$V)' tticks = [0, 0.5, 1.3, t[-1]] axes = plt.subplots(2, 2)[1].ravel() for ax, x_f, title in zip(axes, [x_lp_2, x_lp_30, x_hp_2, x_hp_p1], ['LP$_2$', 'LP$_{30}$', 'HP$_2$', 'LP$_{0.1}$']): ax.plot(t, x, color='0.5') ax.plot(t, x_f, color='k', linestyle='--') ax.set(ylim=ylim, xlim=xlim, xticks=tticks, title=title, xlabel=xlabel, ylabel=ylabel) mne.viz.adjust_axes(axes) mne.viz.tight_layout() plt.show() ############################################################################### # Similarly, in a P300 paradigm reported by # Kappenman & Luck (2010) :footcite:`KappenmanLuck2010`, # they found that applying a 1 Hz high-pass decreased the probability of # finding a significant difference in the N100 response, likely because # the P300 response was smeared (and inverted) in time by the high-pass # filter such that it tended to cancel out the increased N100. However, # they nonetheless note that some high-passing can still be useful to deal # with drifts in the data. # # Even though these papers generally advise a 0.1 Hz or lower frequency for # a high-pass, it is important to keep in mind (as most authors note) that # filtering choices should depend on the frequency content of both the # signal(s) of interest and the noise to be suppressed. For example, in # some of the MNE-Python examples involving the :ref:`sample-dataset` dataset, # high-pass values of around 1 Hz are used when looking at auditory # or visual N100 responses, because we analyze standard (not deviant) trials # and thus expect that contamination by later or slower components will # be limited. # # Baseline problems (or solutions?) 
# --------------------------------- # # In an evolving discussion, Tanner *et al.* (2015) :footcite:`TannerEtAl2015` # suggest using baseline correction to remove slow drifts in data. However, # Maess *et al.* (2016) :footcite:`MaessEtAl2016` # suggest that baseline correction, which is a form of high-passing, does # not offer substantial advantages over standard high-pass filtering. # Tanner *et al.* (2016) :footcite:`TannerEtAl2016` # rebutted that baseline correction can correct for problems with filtering. # # To see what they mean, consider again our old simulated signal ``x`` from # before: def baseline_plot(x): all_axes = plt.subplots(3, 2)[1] for ri, (axes, freq) in enumerate(zip(all_axes, [0.1, 0.3, 0.5])): for ci, ax in enumerate(axes): if ci == 0: iir_hp = signal.iirfilter(4, freq / sfreq, btype='highpass', output='sos') x_hp = signal.sosfiltfilt(iir_hp, x, padlen=0) else: x_hp -= x_hp[t < 0].mean() ax.plot(t, x, color='0.5') ax.plot(t, x_hp, color='k', linestyle='--') if ri == 0: ax.set(title=('No ' if ci == 0 else '') + 'Baseline Correction') ax.set(xticks=tticks, ylim=ylim, xlim=xlim, xlabel=xlabel) ax.set_ylabel('%0.1f Hz' % freq, rotation=0, horizontalalignment='right') mne.viz.adjust_axes(axes) mne.viz.tight_layout() plt.suptitle(title) plt.show() baseline_plot(x) ############################################################################### # In response, Maess *et al.* (2016) :footcite:`MaessEtAl2016a` # note that these simulations do not # address cases of pre-stimulus activity that is shared across conditions, as # applying baseline correction will effectively copy the topology outside the # baseline period. We can see this if we give our signal ``x`` with some # consistent pre-stimulus activity, which makes everything look bad. # # .. note:: An important thing to keep in mind with these plots is that they # are for a single simulated sensor. 
#           In multi-electrode recordings
#           the topology (i.e., spatial pattern) of the pre-stimulus activity
#           will leak into the post-stimulus period. This will likely create a
#           spatially varying distortion of the time-domain signals, as the
#           averaged pre-stimulus spatial pattern gets subtracted from the
#           sensor time courses.
#
# Putting some activity in the baseline period:

# Add a raised-cosine bump over the entire pre-stimulus (t < 0) segment of
# ``x``, then re-run the baseline-correction comparison on the result.
n_pre = (t < 0).sum()  # number of pre-stimulus samples
sig_pre = 1 - np.cos(2 * np.pi * np.arange(n_pre) / (0.5 * n_pre))
x[:n_pre] += sig_pre
baseline_plot(x)

###############################################################################
# Both groups seem to acknowledge that the choices of filtering cutoffs, and
# perhaps even the application of baseline correction, depend on the
# characteristics of the data being investigated, especially when it comes to:
#
# 1. The frequency content of the underlying evoked activity relative
#    to the filtering parameters.
# 2. The validity of the assumption of no consistent evoked activity
#    in the baseline period.
#
# We thus recommend carefully applying baseline correction and/or high-pass
# values based on the characteristics of the data to be analyzed.
#
#
# Filtering defaults
# ==================
#
# .. _tut-filtering-in-python:
#
# Defaults in MNE-Python
# ----------------------
#
# Most often, filtering in MNE-Python is done at the :class:`mne.io.Raw` level,
# and thus :func:`mne.io.Raw.filter` is used. This function under the hood
# (among other things) calls :func:`mne.filter.filter_data` to actually
# filter the data, which by default applies a zero-phase FIR filter designed
# using :func:`scipy.signal.firwin`.
# In Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`, they
# suggest a specific set of parameters to use for high-pass filtering,
# including:
#
# "...
providing a transition bandwidth of 25% of the lower passband # edge but, where possible, not lower than 2 Hz and otherwise the # distance from the passband edge to the critical frequency.” # # In practice, this means that for each high-pass value ``l_freq`` or # low-pass value ``h_freq`` below, you would get this corresponding # ``l_trans_bandwidth`` or ``h_trans_bandwidth``, respectively, # if the sample rate were 100 Hz (i.e., Nyquist frequency of 50 Hz): # # +------------------+-------------------+-------------------+ # | l_freq or h_freq | l_trans_bandwidth | h_trans_bandwidth | # +==================+===================+===================+ # | 0.01 | 0.01 | 2.0 | # +------------------+-------------------+-------------------+ # | 0.1 | 0.1 | 2.0 | # +------------------+-------------------+-------------------+ # | 1.0 | 1.0 | 2.0 | # +------------------+-------------------+-------------------+ # | 2.0 | 2.0 | 2.0 | # +------------------+-------------------+-------------------+ # | 4.0 | 2.0 | 2.0 | # +------------------+-------------------+-------------------+ # | 8.0 | 2.0 | 2.0 | # +------------------+-------------------+-------------------+ # | 10.0 | 2.5 | 2.5 | # +------------------+-------------------+-------------------+ # | 20.0 | 5.0 | 5.0 | # +------------------+-------------------+-------------------+ # | 40.0 | 10.0 | 10.0 | # +------------------+-------------------+-------------------+ # | 50.0 | 12.5 | 12.5 | # +------------------+-------------------+-------------------+ # # MNE-Python has adopted this definition for its high-pass (and low-pass) # transition bandwidth choices when using ``l_trans_bandwidth='auto'`` and # ``h_trans_bandwidth='auto'``. # # To choose the filter length automatically with ``filter_length='auto'``, # the reciprocal of the shortest transition bandwidth is used to ensure # decent attenuation at the stop frequency. 
# Specifically, the reciprocal
# (in samples) is multiplied by 3.1, 3.3, or 5.0 for the Hann, Hamming,
# or Blackman windows, respectively, as selected by the ``fir_window``
# argument for ``fir_design='firwin'``, and double these for
# ``fir_design='firwin2'`` mode.
#
# .. note:: For ``fir_design='firwin2'``, the multiplicative factors are
#           doubled compared to what is given in
#           Ifeachor & Jervis (2002) :footcite:`IfeachorJervis2002`
#           (p. 357), as :func:`scipy.signal.firwin2` has a smearing effect
#           on the frequency response, which we compensate for by
#           increasing the filter length. This is why
#           ``fir_design='firwin'`` is preferred to ``fir_design='firwin2'``.
#
# In 0.14, we default to using a Hamming window in filter design, as it
# provides up to 53 dB of stop-band attenuation with small pass-band ripple.
#
# .. note:: In band-pass applications, often a low-pass filter can operate
#           effectively with fewer samples than the high-pass filter, so
#           it is advisable to apply the high-pass and low-pass separately
#           when using ``fir_design='firwin2'``. For design mode
#           ``fir_design='firwin'``, there is no need to separate the
#           operations, as the lowpass and highpass elements are constructed
#           separately to meet the transition band requirements.
#
# For more information on how to use the
# MNE-Python filtering functions with real data, consult the preprocessing
# tutorial on :ref:`tut-filter-resample`.
#
# Defaults in MNE-C
# -----------------
# MNE-C by default uses:
#
# 1. 5 Hz transition band for low-pass filters.
# 2. 3-sample transition band for high-pass filters.
# 3. Filter length of 8197 samples.
#
# The filter is designed in the frequency domain, creating a linear-phase
# filter such that the delay is compensated for as is done with the MNE-Python
# ``phase='zero'`` filtering option.
#
# Squared-cosine ramps are used in the transition regions.
Because these # are used in place of more gradual (e.g., linear) transitions, # a given transition width will result in more temporal ringing but also more # rapid attenuation than the same transition width in windowed FIR designs. # # The default filter length will generally have excellent attenuation # but long ringing for the sample rates typically encountered in M/EEG data # (e.g. 500-2000 Hz). # # Defaults in other software # -------------------------- # A good but possibly outdated comparison of filtering in various software # packages is available in Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`. # Briefly: # # * EEGLAB # MNE-Python 0.14 defaults to behavior very similar to that of EEGLAB # (see the `EEGLAB filtering FAQ`_ for more information). # * FieldTrip # By default FieldTrip applies a forward-backward Butterworth IIR filter # of order 4 (band-pass and band-stop filters) or 2 (for low-pass and # high-pass filters). Similar filters can be achieved in MNE-Python when # filtering with :meth:`raw.filter(..., method='iir') <mne.io.Raw.filter>` # (see also :func:`mne.filter.construct_iir_filter` for options). # For more information, see e.g. the # `FieldTrip band-pass documentation <ftbp_>`_. # # Reporting Filters # ================= # On page 45 in Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`, # there is a convenient list of # important filter parameters that should be reported with each publication: # # 1. Filter type (high-pass, low-pass, band-pass, band-stop, FIR, IIR) # 2. Cutoff frequency (including definition) # 3. Filter order (or length) # 4. Roll-off or transition bandwidth # 5. Passband ripple and stopband attenuation # 6. Filter delay (zero-phase, linear-phase, non-linear phase) and causality # 7. 
# Direction of computation (one-pass forward/reverse, or two-pass forward
#    and reverse)
#
# In the following, we will address how to deal with these parameters in MNE:
#
#
# Filter type
# -----------
# Depending on the function or method used, the filter type can be specified.
# To name an example, in :func:`mne.filter.create_filter`, the relevant
# arguments would be ``l_freq``, ``h_freq``, ``method``, and if the method is
# FIR ``fir_window`` and ``fir_design``.
#
#
# Cutoff frequency
# ----------------
# The cutoff of FIR filters in MNE is defined as half-amplitude cutoff in the
# middle of the transition band. That is, if you construct a lowpass FIR filter
# with ``h_freq = 40``, the filter function will provide a transition
# bandwidth that depends on the ``h_trans_bandwidth`` argument. The desired
# half-amplitude cutoff of the lowpass FIR filter is then at
# ``h_freq + transition_bandwidth/2.``.
#
# Filter length (order) and transition bandwidth (roll-off)
# ---------------------------------------------------------
# In the :ref:`tut-filtering-in-python` section, we have already talked about
# the default filter lengths and transition bandwidths that are used when no
# custom values are specified using the respective filter function's arguments.
#
# If you want to find out about the filter length and transition bandwidth that
# were used through the 'auto' setting, you can use
# :func:`mne.filter.create_filter` to print out the settings once more:

# Use the same settings as when calling e.g., `raw.filter()`
fir_coefs = mne.filter.create_filter(
    data=None,  # data is only used for sanity checking, not strictly needed
    sfreq=1000.,  # sfreq of your data in Hz
    l_freq=None,
    h_freq=40.,  # assuming a lowpass of 40 Hz
    method='fir',
    fir_window='hamming',
    fir_design='firwin',
    verbose=True)

# See the printed log for the transition bandwidth and filter length.
# Alternatively, get the filter length through:
filter_length = fir_coefs.shape[0]  # number of FIR taps

###############################################################################
# .. note:: If you are using an IIR filter, :func:`mne.filter.create_filter`
#           will not print a filter length and transition bandwidth to the log.
#           Instead, you can specify the roll-off with the ``iir_params``
#           argument or stay with the default, which is a fourth order
#           (Butterworth) filter.
#
# Passband ripple and stopband attenuation
# ----------------------------------------
#
# When using the standard :func:`scipy.signal.firwin` design (as for FIR
# filters in MNE), the passband ripple and stopband attenuation depend upon
# the window used in design. For standard windows the values are listed in
# this table (see Ifeachor & Jervis (2002) :footcite:`IfeachorJervis2002`,
# p. 357):
#
# +-------------------------+-----------------+----------------------+
# | Name of window function | Passband ripple | Stopband attenuation |
# +=========================+=================+======================+
# | Hann                    | 0.0545 dB       | 44 dB                |
# +-------------------------+-----------------+----------------------+
# | Hamming                 | 0.0194 dB       | 53 dB                |
# +-------------------------+-----------------+----------------------+
# | Blackman                | 0.0017 dB       | 74 dB                |
# +-------------------------+-----------------+----------------------+
#
#
# Filter delay and direction of computation
# -----------------------------------------
# For reporting this information, it might be sufficient to read the docstring
# of the filter function or method that you apply. For example in the
# docstring of `mne.filter.create_filter`, for the phase parameter it says:
#
#     Phase of the filter, only used if ``method='fir'``.
#     By default, a symmetric linear-phase FIR filter is constructed.
#     If ``phase='zero'`` (default), the delay of this filter
#     is compensated for.
#     If ``phase='zero-double'``, then this filter
#     is applied twice, once forward, and once backward. If 'minimum',
#     then a minimum-phase, causal filter will be used.
#
#
# Summary
# =======
#
# When filtering, there are always trade-offs that should be considered.
# One important trade-off is between time-domain characteristics (like ringing)
# and frequency-domain attenuation characteristics (like effective transition
# bandwidth). Filters with sharp frequency cutoffs can produce outputs that
# ring for a long time when they operate on signals with frequency content
# in the transition band. In general, therefore, the wider a transition band
# that can be tolerated, the better behaved the filter will be in the time
# domain.
#
# References
# ==========
# .. footbibliography::
#
# .. _FIR: https://en.wikipedia.org/wiki/Finite_impulse_response
# .. _IIR: https://en.wikipedia.org/wiki/Infinite_impulse_response
# .. _sinc: https://en.wikipedia.org/wiki/Sinc_function
# .. _moving average: https://en.wikipedia.org/wiki/Moving_average
# .. _autoregression: https://en.wikipedia.org/wiki/Autoregressive_model
# .. _Remez: https://en.wikipedia.org/wiki/Remez_algorithm
# .. _matlab firpm: https://www.mathworks.com/help/signal/ref/firpm.html
# .. _matlab fir2: https://www.mathworks.com/help/signal/ref/fir2.html
# .. _matlab firls: https://www.mathworks.com/help/signal/ref/firls.html
# .. _Butterworth filter: https://en.wikipedia.org/wiki/Butterworth_filter
# .. _eeglab filtering faq: https://sccn.ucsd.edu/wiki/Firfilt_FAQ
# .. _ftbp: http://www.fieldtriptoolbox.org/reference/ft_preproc_bandpassfilter
bsd-3-clause
mihail911/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_pdf.py
69
71773
# -*- coding: iso-8859-1 -*- """ A PDF matplotlib backend (not yet complete) Author: Jouni K Seppänen <jks@iki.fi> """ from __future__ import division import os import re import sys import time import warnings import zlib import numpy as npy from cStringIO import StringIO from datetime import datetime from math import ceil, cos, floor, pi, sin try: set except NameError: from sets import Set as set import matplotlib from matplotlib import __version__, rcParams, get_data_path from matplotlib._pylab_helpers import Gcf from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\ FigureManagerBase, FigureCanvasBase from matplotlib.backends.backend_mixed import MixedModeRenderer from matplotlib.cbook import Bunch, is_string_like, reverse_dict, \ get_realpath_and_stat, is_writable_file_like, maxdict from matplotlib.mlab import quad2cubic from matplotlib.figure import Figure from matplotlib.font_manager import findfont, is_opentype_cff_font from matplotlib.afm import AFM import matplotlib.type1font as type1font import matplotlib.dviread as dviread from matplotlib.ft2font import FT2Font, FIXED_WIDTH, ITALIC, LOAD_NO_SCALE, \ LOAD_NO_HINTING, KERNING_UNFITTED from matplotlib.mathtext import MathTextParser from matplotlib.transforms import Affine2D, Bbox, BboxBase from matplotlib.path import Path from matplotlib import ttconv # Overview # # The low-level knowledge about pdf syntax lies mainly in the pdfRepr # function and the classes Reference, Name, Operator, and Stream. The # PdfFile class knows about the overall structure of pdf documents. # It provides a "write" method for writing arbitrary strings in the # file, and an "output" method that passes objects through the pdfRepr # function before writing them in the file. The output method is # called by the RendererPdf class, which contains the various draw_foo # methods. RendererPdf contains a GraphicsContextPdf instance, and # each draw_foo calls self.check_gc before outputting commands. 
# This
# method checks whether the pdf graphics state needs to be modified
# and outputs the necessary commands.  GraphicsContextPdf represents
# the graphics state, and its "delta" method returns the commands that
# modify the state.

# Add "pdf.use14corefonts: True" in your configuration file to use only
# the 14 PDF core fonts. These fonts do not need to be embedded; every
# PDF viewing application is required to have them. This results in very
# light PDF files you can use directly in LaTeX or ConTeXt documents
# generated with pdfTeX, without any conversion.

# These fonts are: Helvetica, Helvetica-Bold, Helvetica-Oblique,
# Helvetica-BoldOblique, Courier, Courier-Bold, Courier-Oblique,
# Courier-BoldOblique, Times-Roman, Times-Bold, Times-Italic,
# Times-BoldItalic, Symbol, ZapfDingbats.
#
# Some tricky points:
#
# 1. The clip path can only be widened by popping from the state
# stack. Thus the state must be pushed onto the stack before narrowing
# the clip path. This is taken care of by GraphicsContextPdf.
#
# 2. Sometimes it is necessary to refer to something (e.g. font,
# image, or extended graphics state, which contains the alpha value)
# in the page stream by a name that needs to be defined outside the
# stream. PdfFile provides the methods fontName, imageObject, and
# alphaState for this purpose. The implementations of these methods
# should perhaps be generalized.

# TODOs:
#
# * the alpha channel of images
# * image compression could be improved (PDF supports png-like compression)
# * encoding of fonts, including mathtext fonts and unicode support
# * Type 1 font support (i.e., "pdf.use_afm")
# * TTF support has lots of small TODOs, e.g. how do you know if a font
#   is serif/sans-serif, or symbolic/non-symbolic?
# * draw_markers, draw_line_collection, etc.
# * use_tex

# NOTE(review): this module predates Python 3 and uses Python-2-only
# constructs throughout (``raise Exc, msg`` statements, ``long``,
# old-style classes, ``cStringIO``).  Documented as-is.


def fill(strings, linelen=75):
    """Make one string from sequence of strings, with whitespace in between.

    The whitespace is chosen to form lines of at most linelen characters,
    if possible.

    Returns the joined string with embedded newlines at the chosen
    line breaks.
    """
    currpos = 0  # length of the line currently being filled
    lasti = 0    # index of the first string on the current line
    result = []
    for i, s in enumerate(strings):
        length = len(s)
        if currpos + length < linelen:
            # Still fits; the +1 accounts for the separating space that
            # ' '.join() will insert.
            currpos += length + 1
        else:
            # Line full: flush strings[lasti:i] as one line and start a
            # new line with s.
            result.append(' '.join(strings[lasti:i]))
            lasti = i
            currpos = length
    result.append(' '.join(strings[lasti:]))
    return '\n'.join(result)

# Matches backslashes and (un)balanced parens, which must be escaped
# inside a PDF literal string.
_string_escape_regex = re.compile(r'([\\()])')

def pdfRepr(obj):
    """Map Python objects to PDF syntax.

    Returns a string containing the PDF serialization of ``obj``.
    Raises TypeError for objects with no known PDF representation.
    """

    # Some objects defined later have their own pdfRepr method.
    if hasattr(obj, 'pdfRepr'):
        return obj.pdfRepr()

    # Floats. PDF does not have exponential notation (1.0e-10) so we
    # need to use %f with some precision.  Perhaps the precision
    # should adapt to the magnitude of the number?
    elif isinstance(obj, float):
        if not npy.isfinite(obj):
            raise ValueError, "Can only output finite numbers in PDF"
        r = "%.10f" % obj
        # Strip trailing zeros (and a trailing '.') for compactness.
        return r.rstrip('0').rstrip('.')

    # Integers are written as such.
    elif isinstance(obj, (int, long)):
        return "%d" % obj

    # Strings are written in parentheses, with backslashes and parens
    # escaped. Actually balanced parens are allowed, but it is
    # simpler to escape them all. TODO: cut long strings into lines;
    # I believe there is some maximum line length in PDF.
    elif is_string_like(obj):
        return '(' + _string_escape_regex.sub(r'\\\1', obj) + ')'

    # Dictionaries. The keys must be PDF names, so if we find strings
    # there, we make Name objects from them. The values may be
    # anything, so the caller must ensure that PDF names are
    # represented as Name objects.
    elif isinstance(obj, dict):
        r = ["<<"]
        r.extend(["%s %s" % (Name(key).pdfRepr(), pdfRepr(val))
                  for key, val in obj.items()])
        r.append(">>")
        return fill(r)

    # Lists.
    elif isinstance(obj, (list, tuple)):
        r = ["["]
        r.extend([pdfRepr(val) for val in obj])
        r.append("]")
        return fill(r)

    # Booleans.  NOTE: this branch is unreachable -- bool is a subclass
    # of int, so True/False are caught by the integer branch above and
    # serialized as 1/0.
    elif isinstance(obj, bool):
        return ['false', 'true'][obj]

    # The null keyword.
    elif obj is None:
        return 'null'

    # A date, serialized in the PDF "D:YYYYMMDDHHMMSS" form with the
    # local UTC offset appended.
    elif isinstance(obj, datetime):
        r = obj.strftime('D:%Y%m%d%H%M%S')
        if time.daylight: z = time.altzone
        else: z = time.timezone
        if z == 0:
            r += 'Z'
        elif z < 0:
            r += "+%02d'%02d'" % ((-z)//3600, (-z)%3600)
        else:
            r += "-%02d'%02d'" % (z//3600, z%3600)
        # NOTE(review): the minutes field uses the remainder in *seconds*
        # ((-z) % 3600), not minutes -- confirm intended.
        return pdfRepr(r)

    # A bounding box, written as a PDF array [x0 y0 x1 y1].
    elif isinstance(obj, BboxBase):
        return fill([pdfRepr(val) for val in obj.bounds])

    else:
        raise TypeError, \
            "Don't know a PDF representation for %s objects." \
            % type(obj)

class Reference:
    """PDF indirect-reference object ("N 0 R").

    Use PdfFile.reserveObject() to create References.
    """

    def __init__(self, id):
        self.id = id  # the PDF object number being referenced

    def __repr__(self):
        return "<Reference %d>" % self.id

    def pdfRepr(self):
        # Generation number is always 0 in this backend.
        return "%d 0 R" % self.id

    def write(self, contents, file):
        """Write ``contents`` to ``file`` as the object this reference
        points to (an "N 0 obj ... endobj" definition)."""
        write = file.write
        write("%d 0 obj\n" % self.id)
        write(pdfRepr(contents))
        write("\nendobj\n")

class Name:
    """PDF name object (serialized as "/name")."""
    # Any character outside printable ASCII must be hex-escaped.
    _regex = re.compile(r'[^!-~]')

    def __init__(self, name):
        if isinstance(name, Name):
            # Copy constructor: share the already-escaped name.
            self.name = name.name
        else:
            self.name = self._regex.sub(Name.hexify, name)

    def __repr__(self):
        return "<Name %s>" % self.name

    def hexify(match):
        # Escape one disallowed character as '#xx' (PDF name escape).
        return '#%02x' % ord(match.group())
    hexify = staticmethod(hexify)

    def pdfRepr(self):
        return '/' + self.name

class Operator:
    """PDF content-stream operator object."""

    def __init__(self, op):
        self.op = op  # the literal operator token, e.g. 'BT'

    def __repr__(self):
        return '<Operator %s>' % self.op

    def pdfRepr(self):
        return self.op

# PDF operators (not an exhaustive list)
_pdfops = dict(close_fill_stroke='b', fill_stroke='B', fill='f',
               closepath='h', close_stroke='s', stroke='S', endpath='n',
               begin_text='BT', end_text='ET',
               curveto='c', rectangle='re', lineto='l', moveto='m',
               concat_matrix='cm',
               use_xobject='Do',
               setgray_stroke='G', setgray_nonstroke='g',
               setrgb_stroke='RG', setrgb_nonstroke='rg',
               setcolorspace_stroke='CS', setcolorspace_nonstroke='cs',
               setcolor_stroke='SCN', setcolor_nonstroke='scn',
               setdash='d', setlinejoin='j', setlinecap='J', setgstate='gs',
               gsave='q', grestore='Q',
               textpos='Td', selectfont='Tf', textmatrix='Tm',
               show='Tj', showkern='TJ',
               setlinewidth='w', clip='W')

# Namespace of ready-made Operator instances, e.g. Op.begin_text.
Op = Bunch(**dict([(name, Operator(value))
                   for name, value in _pdfops.items()]))

class Stream:
    """PDF stream object.

    This has no pdfRepr method. Instead, call begin(), then output the
    contents of the stream by calling write(), and finally call end().

    NOTE(review): no begin() method is defined on this class; the
    constructor appears to play that role -- docstring may be stale.
    """

    def __init__(self, id, len, file, extra=None):
        """id: object id of stream; len: an unused Reference object for the
        length of the stream, or None (to use a memory buffer); file:
        a PdfFile; extra: a dictionary of extra key-value pairs to
        include in the stream header """
        self.id = id            # object id
        self.len = len          # id of length object
        self.pdfFile = file
        self.file = file.fh     # file to which the stream is written
        self.compressobj = None # compression object
        if extra is None: self.extra = dict()
        else: self.extra = extra

        self.pdfFile.recordXref(self.id)
        if rcParams['pdf.compression']:
            self.compressobj = zlib.compressobj(rcParams['pdf.compression'])
        if self.len is None:
            # No Length reference reserved: buffer the whole stream in
            # memory so its length is known before the header is written.
            self.file = StringIO()
        else:
            # Length will be back-patched via the reserved object, so the
            # header can be written immediately and data streamed to disk.
            self._writeHeader()
            self.pos = self.file.tell()

    def _writeHeader(self):
        """Emit 'N 0 obj', the stream dictionary, and the 'stream' keyword."""
        write = self.file.write
        write("%d 0 obj\n" % self.id)
        dict = self.extra  # NOTE(review): shadows the builtin ``dict``
        dict['Length'] = self.len
        if rcParams['pdf.compression']:
            dict['Filter'] = Name('FlateDecode')

        write(pdfRepr(dict))
        write("\nstream\n")

    def end(self):
        """Finalize stream."""

        self._flush()
        if self.len is None:
            # Memory-buffered mode: now that the length is known, write
            # the header and contents to the real file.
            contents = self.file.getvalue()
            self.len = len(contents)
            self.file = self.pdfFile.fh
            self._writeHeader()
            self.file.write(contents)
            self.file.write("\nendstream\nendobj\n")
        else:
            # Direct mode: record the measured length in the reserved
            # Length object.
            length = self.file.tell() - self.pos
            self.file.write("\nendstream\nendobj\n")
            self.pdfFile.writeObject(self.len, length)

    def write(self, data):
        """Write some data on the stream."""

        if self.compressobj is None:
            self.file.write(data)
        else:
            compressed = self.compressobj.compress(data)
            self.file.write(compressed)

    def _flush(self):
        """Flush the compression object."""

        if self.compressobj is not None:
            compressed = self.compressobj.flush()
            self.file.write(compressed)
            self.compressobj = None
class PdfFile:
    """PDF file with one page.

    Owns the object id / xref bookkeeping and all deferred PDF objects
    (fonts, images, hatches, markers) that are written out in close().
    """

    def __init__(self, width, height, dpi, filename):
        # width/height are in inches; the page MediaBox is dpi*width by
        # dpi*height PDF units.
        self.width, self.height = width, height
        self.dpi = dpi
        if rcParams['path.simplify']:
            self.simplify = (width * dpi, height * dpi)
        else:
            self.simplify = None
        self.nextObject = 1     # next free object id
        # xref entries are [byte offset, generation, debug name]; entry 0
        # is the conventional free-list head required by the PDF spec.
        self.xrefTable = [ [0, 65535, 'the zero object'] ]
        self.passed_in_file_object = False
        if is_string_like(filename):
            fh = file(filename, 'wb')
        elif is_writable_file_like(filename):
            fh = filename
            self.passed_in_file_object = True
        else:
            raise ValueError("filename must be a path or a file-like object")

        self.fh = fh
        self.currentstream = None  # stream object to write to, if any
        fh.write("%PDF-1.4\n")    # 1.4 is the first version to have alpha
        # Output some eight-bit chars as a comment so various utilities
        # recognize the file as binary by looking at the first few
        # lines (see note in section 3.4.1 of the PDF reference).
        fh.write("%\254\334 \253\272\n")

        self.rootObject = self.reserveObject('root')
        self.infoObject = self.reserveObject('info')
        pagesObject = self.reserveObject('pages')
        thePageObject = self.reserveObject('page 0')
        contentObject = self.reserveObject('contents of page 0')
        self.fontObject = self.reserveObject('fonts')
        self.alphaStateObject = self.reserveObject('extended graphics states')
        self.hatchObject = self.reserveObject('tiling patterns')
        self.XObjectObject = self.reserveObject('external objects')
        resourceObject = self.reserveObject('resources')

        root = { 'Type': Name('Catalog'),
                 'Pages': pagesObject }
        self.writeObject(self.rootObject, root)

        info = { 'Creator': 'matplotlib ' + __version__ \
                 + ', http://matplotlib.sf.net',
                 'Producer': 'matplotlib pdf backend',
                 'CreationDate': datetime.today() }

        # Possible TODO: Title, Author, Subject, Keywords
        self.writeObject(self.infoObject, info)

        pages = { 'Type': Name('Pages'),
                  'Kids': [ thePageObject ],
                  'Count': 1 }
        self.writeObject(pagesObject, pages)

        thePage = { 'Type': Name('Page'),
                    'Parent': pagesObject,
                    'Resources': resourceObject,
                    'MediaBox': [ 0, 0, dpi*width, dpi*height ],
                    'Contents': contentObject }
        self.writeObject(thePageObject, thePage)

        # self.fontNames maps filenames to internal font names
        self.fontNames = {}
        self.nextFont = 1       # next free internal font name
        self.fontInfo = {}      # information on fonts: metrics, encoding

        self.alphaStates = {}   # maps alpha values to graphics state objects
        self.nextAlphaState = 1
        self.hatchPatterns = {}
        self.nextHatch = 1

        self.images = {}
        self.nextImage = 1

        self.markers = {}
        self.multi_byte_charprocs = {}

        # The PDF spec recommends to include every procset
        procsets = [ Name(x)
                     for x in "PDF Text ImageB ImageC ImageI".split() ]

        # Write resource dictionary.
        # Possibly TODO: more general ExtGState (graphics state dictionaries)
        #                ColorSpace Pattern Shading Properties
        resources = { 'Font': self.fontObject,
                      'XObject': self.XObjectObject,
                      'ExtGState': self.alphaStateObject,
                      'Pattern': self.hatchObject,
                      'ProcSet': procsets }
        self.writeObject(resourceObject, resources)

        # Start the content stream of the page
        self.beginStream(contentObject.id,
                         self.reserveObject('length of content stream'))

    def close(self):
        """Flush all deferred objects, the xref table and the trailer,
        then close (or flush, for a caller-owned file object) the output."""
        # End the content stream and write out the various deferred
        # objects
        self.endStream()
        self.writeFonts()
        self.writeObject(self.alphaStateObject,
                         dict([(val[0], val[1])
                               for val in self.alphaStates.values()]))
        self.writeHatches()
        # XObject dictionary collects images, markers and multi-byte
        # charprocs under their internal names.
        xobjects = dict(self.images.values())
        for tup in self.markers.values():
            xobjects[tup[0]] = tup[1]
        for name, value in self.multi_byte_charprocs.items():
            xobjects[name] = value
        self.writeObject(self.XObjectObject, xobjects)
        self.writeImages()
        self.writeMarkers()
        self.writeXref()
        self.writeTrailer()
        if self.passed_in_file_object:
            self.fh.flush()
        else:
            self.fh.close()

    def write(self, data):
        # Route raw bytes either into the currently open stream object
        # or directly into the file.
        if self.currentstream is None:
            self.fh.write(data)
        else:
            self.currentstream.write(data)

    def output(self, *data):
        """Serialize each argument with pdfRepr and write them out,
        line-wrapped, followed by a newline."""
        self.write(fill(map(pdfRepr, data)))
        self.write('\n')

    def beginStream(self, id, len, extra=None):
        # Only one stream may be open at a time.
        assert self.currentstream is None
        self.currentstream = Stream(id, len, self, extra)

    def endStream(self):
        self.currentstream.end()
        self.currentstream = None

    def fontName(self, fontprop):
        """
        Select a font based on fontprop and return a name suitable for
        Op.selectfont. If fontprop is a string, it will be interpreted
        as the filename of the font.
        """

        if is_string_like(fontprop):
            filename = fontprop
        elif rcParams['pdf.use14corefonts']:
            filename = findfont(fontprop, fontext='afm')
        else:
            filename = findfont(fontprop)

        # Assign each distinct font file an internal name F1, F2, ...
        Fx = self.fontNames.get(filename)
        if Fx is None:
            Fx = Name('F%d' % self.nextFont)
            self.fontNames[filename] = Fx
            self.nextFont += 1

        return Fx

    def writeFonts(self):
        # Embed each referenced font file, dispatching on extension:
        # .afm -> core font metrics; .pfb/.pfa -> Type 1; else TrueType.
        fonts = {}
        for filename, Fx in self.fontNames.items():
            if filename.endswith('.afm'):
                fontdictObject = self._write_afm_font(filename)
            elif filename.endswith('.pfb') or filename.endswith('.pfa'):
                # a Type 1 font; limited support for now
                fontdictObject = self.embedType1(filename, self.fontInfo[Fx])
            else:
                realpath, stat_key = get_realpath_and_stat(filename)
                chars = self.used_characters.get(stat_key)
                # NOTE(review): if chars is None or empty, fontdictObject
                # is left over from the previous loop iteration (or unbound
                # on the first) — presumably that case never happens for a
                # font that made it into fontNames; verify.
                if chars is not None and len(chars[1]):
                    fontdictObject = self.embedTTF(realpath, chars[1])
            fonts[Fx] = fontdictObject
            #print >>sys.stderr, filename
        self.writeObject(self.fontObject, fonts)

    def _write_afm_font(self, filename):
        # Reference one of the standard 14 core fonts by name only; no
        # font program is embedded, just metrics from the AFM file.
        fh = file(filename)
        font = AFM(fh)
        fh.close()
        fontname = font.get_fontname()
        fontdict = { 'Type': Name('Font'),
                     'Subtype': Name('Type1'),
                     'BaseFont': Name(fontname),
                     'Encoding': Name('WinAnsiEncoding') }
        fontdictObject = self.reserveObject('font dictionary')
        self.writeObject(fontdictObject, fontdict)
        return fontdictObject

    def embedType1(self, filename, fontinfo):
        """Embed the Type 1 font program from *filename*; *fontinfo* is a
        Bunch with widths/encodingfile/dvifont (filled in by draw_tex)."""
        # TODO: font effects such as SlantFont
        fh = open(filename, 'rb')
        matplotlib.verbose.report(
            'Embedding Type 1 font ' + filename, 'debug')
        try:
            fontdata = fh.read()
        finally:
            fh.close()

        font = FT2Font(filename)

        widthsObject, fontdescObject, fontdictObject, fontfileObject = \
            [ self.reserveObject(n) for n in
              ('font widths', 'font descriptor', 'font dictionary',
               'font file') ]

        firstchar = 0
        lastchar = len(fontinfo.widths) - 1

        fontdict = {
            'Type':           Name('Font'),
            'Subtype':        Name('Type1'),
            'BaseFont':       Name(font.postscript_name),
            'FirstChar':      0,
            'LastChar':       lastchar,
            'Widths':         widthsObject,
            'FontDescriptor': fontdescObject,
            }

        # A TeX .enc file, if given, supplies the glyph-name encoding
        # as a PDF Differences array starting at code 0.
        if fontinfo.encodingfile is not None:
            enc = dviread.Encoding(fontinfo.encodingfile)
            differencesArray = [ Name(ch) for ch in enc ]
            differencesArray = [ 0 ] + differencesArray
            fontdict.update({
                    'Encoding': { 'Type': Name('Encoding'),
                                  'Differences': differencesArray },
                    })

        _, _, fullname, familyname, weight, italic_angle, fixed_pitch, \
            ul_position, ul_thickness = font.get_ps_font_info()

        # FontDescriptor flag bits per the PDF spec; the 'if 0:'/'if 1:'
        # branches are placeholders for properties not yet detected.
        flags = 0
        if fixed_pitch:   flags |= 1 << 0  # fixed width
        if 0:             flags |= 1 << 1  # TODO: serif
        if 1:             flags |= 1 << 2  # TODO: symbolic (most TeX fonts are)
        else:             flags |= 1 << 5  # non-symbolic
        if italic_angle:  flags |= 1 << 6  # italic
        if 0:             flags |= 1 << 16 # TODO: all caps
        if 0:             flags |= 1 << 17 # TODO: small caps
        if 0:             flags |= 1 << 18 # TODO: force bold

        descriptor = {
            'Type':        Name('FontDescriptor'),
            'FontName':    Name(font.postscript_name),
            'Flags':       flags,
            'FontBBox':    font.bbox,
            'ItalicAngle': italic_angle,
            'Ascent':      font.ascender,
            'Descent':     font.descender,
            'CapHeight':   1000, # TODO: find this out
            'XHeight':     500, # TODO: this one too
            'FontFile':    fontfileObject,
            'FontFamily':  familyname,
            'StemV':       50, # TODO
            # (see also revision 3874; but not all TeX distros have AFM files!)
#'FontWeight': a number where 400 = Regular, 700 = Bold } self.writeObject(fontdictObject, fontdict) self.writeObject(widthsObject, fontinfo.widths) self.writeObject(fontdescObject, descriptor) t1font = type1font.Type1Font(filename) self.beginStream(fontfileObject.id, None, { 'Length1': len(t1font.parts[0]), 'Length2': len(t1font.parts[1]), 'Length3': 0 }) self.currentstream.write(t1font.parts[0]) self.currentstream.write(t1font.parts[1]) self.endStream() return fontdictObject def _get_xobject_symbol_name(self, filename, symbol_name): return "%s-%s" % ( os.path.splitext(os.path.basename(filename))[0], symbol_name) _identityToUnicodeCMap = """/CIDInit /ProcSet findresource begin 12 dict begin begincmap /CIDSystemInfo << /Registry (Adobe) /Ordering (UCS) /Supplement 0 >> def /CMapName /Adobe-Identity-UCS def /CMapType 2 def 1 begincodespacerange <0000> <ffff> endcodespacerange %d beginbfrange %s endbfrange endcmap CMapName currentdict /CMap defineresource pop end end""" def embedTTF(self, filename, characters): """Embed the TTF font from the named file into the document.""" font = FT2Font(str(filename)) fonttype = rcParams['pdf.fonttype'] def cvt(length, upe=font.units_per_EM, nearest=True): "Convert font coordinates to PDF glyph coordinates" value = length / upe * 1000 if nearest: return round(value) # Perhaps best to round away from zero for bounding # boxes and the like if value < 0: return floor(value) else: return ceil(value) def embedTTFType3(font, characters, descriptor): """The Type 3-specific part of embedding a Truetype font""" widthsObject = self.reserveObject('font widths') fontdescObject = self.reserveObject('font descriptor') fontdictObject = self.reserveObject('font dictionary') charprocsObject = self.reserveObject('character procs') differencesArray = [] firstchar, lastchar = 0, 255 bbox = [cvt(x, nearest=False) for x in font.bbox] fontdict = { 'Type' : Name('Font'), 'BaseFont' : ps_name, 'FirstChar' : firstchar, 'LastChar' : lastchar, 
                'FontDescriptor'  : fontdescObject,
                'Subtype'         : Name('Type3'),
                'Name'            : descriptor['FontName'],
                'FontBBox'        : bbox,
                'FontMatrix'      : [ .001, 0, 0, .001, 0, 0 ],
                'CharProcs'       : charprocsObject,
                'Encoding'        : {
                    'Type'        : Name('Encoding'),
                    'Differences' : differencesArray},
                'Widths'          : widthsObject
                }

            # Make the "Widths" array
            from encodings import cp1252
            # The "decoding_map" was changed to a "decoding_table" as of Python 2.5.
            if hasattr(cp1252, 'decoding_map'):
                def decode_char(charcode):
                    return cp1252.decoding_map[charcode] or 0
            else:
                def decode_char(charcode):
                    return ord(cp1252.decoding_table[charcode])

            def get_char_width(charcode):
                unicode = decode_char(charcode)
                width = font.load_char(unicode, flags=LOAD_NO_SCALE|LOAD_NO_HINTING).horiAdvance
                return cvt(width)

            widths = [ get_char_width(charcode) for charcode in range(firstchar, lastchar+1) ]
            descriptor['MaxWidth'] = max(widths)

            # Make the "Differences" array, sort the ccodes < 255 from
            # the multi-byte ccodes, and build the whole set of glyph ids
            # that we need from this font.
            cmap = font.get_charmap()
            glyph_ids = []
            differences = []
            multi_byte_chars = set()
            for c in characters:
                ccode = c
                gind = cmap.get(ccode) or 0
                glyph_ids.append(gind)
                glyph_name = font.get_glyph_name(gind)
                if ccode <= 255:
                    differences.append((ccode, glyph_name))
                else:
                    multi_byte_chars.add(glyph_name)
            differences.sort()

            # Emit a code only where a run of consecutive codes starts;
            # subsequent names implicitly continue the run.
            last_c = -2
            for c, name in differences:
                if c != last_c + 1:
                    differencesArray.append(c)
                differencesArray.append(Name(name))
                last_c = c

            # Make the charprocs array (using ttconv to generate the
            # actual outlines)
            rawcharprocs = ttconv.get_pdf_charprocs(filename, glyph_ids)
            charprocs = {}
            charprocsRef = {}   # NOTE(review): never used below
            for charname, stream in rawcharprocs.items():
                charprocDict = { 'Length': len(stream) }
                # The 2-byte characters are used as XObjects, so they
                # need extra info in their dictionary
                if charname in multi_byte_chars:
                    charprocDict['Type'] = Name('XObject')
                    charprocDict['Subtype'] = Name('Form')
                    charprocDict['BBox'] = bbox
                    # Each glyph includes bounding box information,
                    # but xpdf and ghostscript can't handle it in a
                    # Form XObject (they segfault!!!), so we remove it
                    # from the stream here.  It's not needed anyway,
                    # since the Form XObject includes it in its BBox
                    # value.
                    stream = stream[stream.find("d1") + 2:]
                charprocObject = self.reserveObject('charProc')
                self.beginStream(charprocObject.id, None, charprocDict)
                self.currentstream.write(stream)
                self.endStream()

                # Send the glyphs with ccode > 255 to the XObject dictionary,
                # and the others to the font itself
                if charname in multi_byte_chars:
                    name = self._get_xobject_symbol_name(filename, charname)
                    self.multi_byte_charprocs[name] = charprocObject
                else:
                    charprocs[charname] = charprocObject

            # Write everything out
            self.writeObject(fontdictObject, fontdict)
            self.writeObject(fontdescObject, descriptor)
            self.writeObject(widthsObject, widths)
            self.writeObject(charprocsObject, charprocs)

            return fontdictObject

        def embedTTFType42(font, characters, descriptor):
            """The Type 42-specific part of embedding a Truetype font"""
            fontdescObject = self.reserveObject('font descriptor')
            cidFontDictObject = self.reserveObject('CID font dictionary')
            type0FontDictObject = self.reserveObject('Type 0 font dictionary')
            cidToGidMapObject = self.reserveObject('CIDToGIDMap stream')
            fontfileObject = self.reserveObject('font file stream')
            wObject = self.reserveObject('Type 0 widths')
            toUnicodeMapObject = self.reserveObject('ToUnicode map')

            cidFontDict = {
                'Type'           : Name('Font'),
                'Subtype'        : Name('CIDFontType2'),
                'BaseFont'       : ps_name,
                'CIDSystemInfo'  : {
                    'Registry'   : 'Adobe',
                    'Ordering'   : 'Identity',
                    'Supplement' : 0 },
                'FontDescriptor' : fontdescObject,
                'W'              : wObject,
                'CIDToGIDMap'    : cidToGidMapObject
                }

            type0FontDict = {
                'Type'            : Name('Font'),
                'Subtype'         : Name('Type0'),
                'BaseFont'        : ps_name,
                'Encoding'        : Name('Identity-H'),
                'DescendantFonts' : [cidFontDictObject],
                'ToUnicode'       : toUnicodeMapObject
                }

            # Make fontfile stream: copy the raw TTF file into the PDF
            # in 4k chunks, recording the decoded length for Length1.
            descriptor['FontFile2'] = fontfileObject
            length1Object = self.reserveObject('decoded length of a font')
            self.beginStream(
                fontfileObject.id,
                self.reserveObject('length of font stream'),
                {'Length1': length1Object})
            fontfile = open(filename, 'rb')
            length1 = 0
            while True:
                data = fontfile.read(4096)
                if not data: break
                length1 += len(data)
                self.currentstream.write(data)
            fontfile.close()
            self.endStream()
            self.writeObject(length1Object, length1)

            # Make the 'W' (Widths) array, CidToGidMap and ToUnicode CMap
            # at the same time
            cid_to_gid_map = [u'\u0000'] * 65536
            cmap = font.get_charmap()
            unicode_mapping = []   # NOTE(review): never used below
            widths = []
            max_ccode = 0
            for c in characters:
                ccode = c
                gind = cmap.get(ccode) or 0
                glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
                # NOTE(review): /6 presumably converts horiAdvance to
                # 1000-per-em PDF units for this font setup — confirm the
                # scale factor against the FT2Font metrics.
                widths.append((ccode, glyph.horiAdvance / 6))
                if ccode < 65536:
                    cid_to_gid_map[ccode] = unichr(gind)
                max_ccode = max(ccode, max_ccode)
            widths.sort()
            cid_to_gid_map = cid_to_gid_map[:max_ccode + 1]

            # Group consecutive character codes into W-array runs and
            # parallel Unicode bfranges.
            last_ccode = -2
            w = []
            max_width = 0
            unicode_groups = []
            for ccode, width in widths:
                if ccode != last_ccode + 1:
                    w.append(ccode)
                    w.append([width])
                    unicode_groups.append([ccode, ccode])
                else:
                    w[-1].append(width)
                    unicode_groups[-1][1] = ccode
                max_width = max(max_width, width)
                last_ccode = ccode

            unicode_bfrange = []
            for start, end in unicode_groups:
                unicode_bfrange.append(
                    "<%04x> <%04x> [%s]" %
                    (start, end,
                     " ".join(["<%04x>" % x for x in range(start, end+1)])))
            unicode_cmap = (self._identityToUnicodeCMap %
                            (len(unicode_groups),
                             "\n".join(unicode_bfrange)))

            # CIDToGIDMap stream
            cid_to_gid_map = "".join(cid_to_gid_map).encode("utf-16be")
            self.beginStream(cidToGidMapObject.id,
                             None,
                             {'Length':  len(cid_to_gid_map)})
            self.currentstream.write(cid_to_gid_map)
            self.endStream()

            # ToUnicode CMap
            # NOTE(review): 'Length' is given the CMap text itself rather
            # than its length; Stream._writeHeader overwrites the 'Length'
            # entry with the computed length, so this value is never
            # emitted — but it looks like it was meant to be len(...).
            self.beginStream(toUnicodeMapObject.id,
                             None,
                             {'Length': unicode_cmap})
            self.currentstream.write(unicode_cmap)
            self.endStream()

            descriptor['MaxWidth'] = max_width

            # Write everything out
            self.writeObject(cidFontDictObject, cidFontDict)
            self.writeObject(type0FontDictObject, type0FontDict)
            self.writeObject(fontdescObject, descriptor)
            self.writeObject(wObject, w)

            return type0FontDictObject

        # Beginning of main embedTTF function...

        # You are lost in a maze of TrueType tables, all different...
        # sfnt name table entry (1,0,0,6) is the PostScript name.
        ps_name = Name(font.get_sfnt()[(1,0,0,6)])
        pclt = font.get_sfnt_table('pclt') \
            or { 'capHeight': 0, 'xHeight': 0 }
        post = font.get_sfnt_table('post') \
            or { 'italicAngle': (0,0) }
        ff = font.face_flags
        sf = font.style_flags

        # FontDescriptor flag bits; see the corresponding code in
        # embedType1 above.
        flags = 0
        symbolic = False #ps_name.name in ('Cmsy10', 'Cmmi10', 'Cmex10')
        if ff & FIXED_WIDTH: flags |= 1 << 0
        if 0: flags |= 1 << 1 # TODO: serif
        if symbolic: flags |= 1 << 2
        else: flags |= 1 << 5
        if sf & ITALIC: flags |= 1 << 6
        if 0: flags |= 1 << 16 # TODO: all caps
        if 0: flags |= 1 << 17 # TODO: small caps
        if 0: flags |= 1 << 18 # TODO: force bold

        descriptor = {
            'Type'        : Name('FontDescriptor'),
            'FontName'    : ps_name,
            'Flags'       : flags,
            'FontBBox'    : [ cvt(x, nearest=False) for x in font.bbox ],
            'Ascent'      : cvt(font.ascender, nearest=False),
            'Descent'     : cvt(font.descender, nearest=False),
            'CapHeight'   : cvt(pclt['capHeight'], nearest=False),
            'XHeight'     : cvt(pclt['xHeight']),
            'ItalicAngle' : post['italicAngle'][1], # ???
            'StemV'       : 0 # ???
            }

        # The font subsetting to a Type 3 font does not work for
        # OpenType (.otf) that embed a Postscript CFF font, so avoid that --
        # save as a (non-subsetted) Type 42 font instead.
        if is_opentype_cff_font(filename):
            fonttype = 42
            warnings.warn(("'%s' can not be subsetted into a Type 3 font. " +
                           "The entire font will be embedded in the output.") %
                           os.path.basename(filename))

        if fonttype == 3:
            return embedTTFType3(font, characters, descriptor)
        elif fonttype == 42:
            return embedTTFType42(font, characters, descriptor)

    def alphaState(self, alpha):
        """Return name of an ExtGState that sets alpha to the given value"""

        state = self.alphaStates.get(alpha, None)
        if state is not None:
            return state[0]

        # New alpha value: register a graphics state A1, A2, ... that
        # sets both stroke (CA) and fill (ca) alpha.
        name = Name('A%d' % self.nextAlphaState)
        self.nextAlphaState += 1
        self.alphaStates[alpha] = \
            (name, { 'Type': Name('ExtGState'),
                     'CA': alpha, 'ca': alpha })
        return name

    def hatchPattern(self, lst):
        """Return name of a tiling pattern for the given hatch tuple,
        registering it on first use."""
        pattern = self.hatchPatterns.get(lst, None)
        if pattern is not None:
            return pattern[0]

        name = Name('H%d' % self.nextHatch)
        self.nextHatch += 1
        self.hatchPatterns[lst] = name
        return name

    def writeHatches(self):
        """Write out every registered hatch pattern as a tiling pattern
        stream of moveto/lineto strokes on a sidelen-square tile."""
        hatchDict = dict()
        sidelen = 144.0
        density = 24.0
        for lst, name in self.hatchPatterns.items():
            ob = self.reserveObject('hatch pattern')
            hatchDict[name] = ob
            res = { 'Procsets':
                    [ Name(x) for x in "PDF Text ImageB ImageC ImageI".split() ] }
            self.beginStream(
                ob.id, None,
                { 'Type': Name('Pattern'),
                  'PatternType': 1, 'PaintType': 1, 'TilingType': 1,
                  'BBox': [0, 0, sidelen, sidelen],
                  'XStep': sidelen, 'YStep': sidelen,
                  'Resources': res })

            # lst is a tuple of stroke color, fill color,
            # number of - lines, number of / lines,
            # number of | lines, number of \ lines
            rgb = lst[0]
            self.output(rgb[0], rgb[1], rgb[2], Op.setrgb_stroke)
            if lst[1] is not None:
                rgb = lst[1]
                self.output(rgb[0], rgb[1], rgb[2], Op.setrgb_nonstroke,
                            0, 0, sidelen, sidelen, Op.rectangle,
                            Op.fill)
            if lst[2]:  # -
                for j in npy.arange(0.0, sidelen, density/lst[2]):
                    self.output(0, j, Op.moveto,
                                sidelen, j, Op.lineto)
            if lst[3]:  # /
                for j in npy.arange(0.0, sidelen, density/lst[3]):
                    self.output(0, j, Op.moveto,
                                sidelen-j, sidelen, Op.lineto,
                                sidelen-j, 0, Op.moveto,
                                sidelen, j, Op.lineto)
            if lst[4]:  # |
                for j in npy.arange(0.0, sidelen, density/lst[4]):
                    self.output(j, 0, Op.moveto,
                                j, sidelen, Op.lineto)
            if lst[5]:  # \
                for j in npy.arange(sidelen, 0.0, -density/lst[5]):
                    self.output(sidelen, j, Op.moveto,
                                j, sidelen, Op.lineto,
                                j, 0, Op.moveto,
                                0, j, Op.lineto)
            self.output(Op.stroke)

            self.endStream()
        self.writeObject(self.hatchObject, hatchDict)

    def imageObject(self, image):
        """Return name of an image XObject representing the given image."""

        pair = self.images.get(image, None)
        if pair is not None:
            return pair[0]

        name = Name('I%d' % self.nextImage)
        ob = self.reserveObject('image %d' % self.nextImage)
        self.nextImage += 1
        self.images[image] = (name, ob)
        return name

    ## These two from backend_ps.py
    ## TODO: alpha (SMask, p. 518 of pdf spec)

    def _rgb(self, im):
        # Split an RGBA image into its RGB bytes and its alpha bytes.
        h,w,s = im.as_rgba_str()

        rgba = npy.fromstring(s, npy.uint8)
        rgba.shape = (h, w, 4)
        rgb = rgba[:,:,:3]
        a = rgba[:,:,3:]
        return h, w, rgb.tostring(), a.tostring()

    def _gray(self, im, rc=0.3, gc=0.59, bc=0.11):
        # Luminance conversion with the given R/G/B coefficients.
        rgbat = im.as_rgba_str()
        rgba = npy.fromstring(rgbat[2], npy.uint8)
        rgba.shape = (rgbat[0], rgbat[1], 4)
        rgba_f = rgba.astype(npy.float32)
        r = rgba_f[:,:,0]
        g = rgba_f[:,:,1]
        b = rgba_f[:,:,2]
        gray = (r*rc + g*gc + b*bc).astype(npy.uint8)
        return rgbat[0], rgbat[1], gray.tostring()

    def writeImages(self):
        # Write each image as an Image XObject; color images also get a
        # DeviceGray soft mask (SMask) stream carrying their alpha.
        for img, pair in self.images.items():
            img.flipud_out()
            if img.is_grayscale:
                height, width, data = self._gray(img)
                self.beginStream(
                    pair[1].id,
                    self.reserveObject('length of image stream'),
                    {'Type': Name('XObject'), 'Subtype': Name('Image'),
                     'Width': width, 'Height': height,
                     'ColorSpace': Name('DeviceGray'), 'BitsPerComponent': 8 })
                self.currentstream.write(data) # TODO: predictors (i.e., output png)
                self.endStream()
            else:
                height, width, data, adata = self._rgb(img)
                smaskObject = self.reserveObject("smask")
                stream = self.beginStream(
                    smaskObject.id,
                    self.reserveObject('length of smask stream'),
                    {'Type': Name('XObject'), 'Subtype': Name('Image'),
                     'Width': width, 'Height': height,
                     'ColorSpace': Name('DeviceGray'), 'BitsPerComponent': 8 })
                self.currentstream.write(adata) # TODO: predictors (i.e., output png)
                self.endStream()

                self.beginStream(
                    pair[1].id,
                    self.reserveObject('length of image stream'),
                    {'Type': Name('XObject'), 'Subtype': Name('Image'),
                     'Width': width, 'Height': height,
                     'ColorSpace': Name('DeviceRGB'), 'BitsPerComponent': 8,
                     'SMask': smaskObject})
                self.currentstream.write(data) # TODO: predictors (i.e., output png)
                self.endStream()

            # Undo the flip done at the top of the loop.
            img.flipud_out()

    def markerObject(self, path, trans, fillp, lw):
        """Return name of a marker XObject representing the given path."""
        key = (path, trans, fillp is not None, lw)
        result = self.markers.get(key)
        if result is None:
            name = Name('M%d' % len(self.markers))
            ob = self.reserveObject('marker %d' % len(self.markers))
            self.markers[key] = (name, ob, path, trans, fillp, lw)
        else:
            name = result[0]
        return name

    def writeMarkers(self):
        # Each marker becomes a Form XObject whose BBox is the path's
        # extent padded by half the line width.
        for tup in self.markers.values():
            name, object, path, trans, fillp, lw = tup
            bbox = path.get_extents(trans)
            bbox = bbox.padded(lw * 0.5)
            self.beginStream(
                object.id, None,
                {'Type': Name('XObject'), 'Subtype': Name('Form'),
                 'BBox': list(bbox.extents) })
            self.writePath(path, trans)
            if fillp:
                self.output(Op.fill_stroke)
            else:
                self.output(Op.stroke)
            self.endStream()

    #@staticmethod
    def pathOperations(path, transform, simplify=None):
        """Translate a transformed Path into a flat list of coordinates
        and PDF path operators (quadratic curves become cubic)."""
        tpath = transform.transform_path(path)

        cmds = []
        last_points = None
        for points, code in tpath.iter_segments(simplify):
            if code == Path.MOVETO:
                cmds.extend(points)
                cmds.append(Op.moveto)
            elif code == Path.LINETO:
                cmds.extend(points)
                cmds.append(Op.lineto)
            elif code == Path.CURVE3:
                # PDF has no quadratic Bezier operator; elevate to cubic
                # using the previous segment's endpoint as the start.
                points = quad2cubic(*(list(last_points[-2:]) + list(points)))
                cmds.extend(points[2:])
                cmds.append(Op.curveto)
            elif code == Path.CURVE4:
                cmds.extend(points)
                cmds.append(Op.curveto)
            elif code == Path.CLOSEPOLY:
                cmds.append(Op.closepath)
            last_points = points
        return cmds
    pathOperations = staticmethod(pathOperations)

    def writePath(self, path, transform):
        cmds = self.pathOperations(
            path, transform, self.simplify)
        self.output(*cmds)

    def reserveObject(self, name=''):
        """Reserve an ID for an indirect object.
        The name is used for debugging in case we forget to print out
        the object with writeObject.
        """

        id = self.nextObject
        self.nextObject += 1
        # Offset (slot 0) is unknown until the object is actually
        # written; recordXref fills it in.
        self.xrefTable.append([None, 0, name])
        return Reference(id)

    def recordXref(self, id):
        self.xrefTable[id][0] = self.fh.tell()

    def writeObject(self, object, contents):
        self.recordXref(object.id)
        object.write(contents, self)

    def writeXref(self):
        """Write out the xref table."""

        self.startxref = self.fh.tell()
        self.write("xref\n0 %d\n" % self.nextObject)
        i = 0
        borken = False
        for offset, generation, name in self.xrefTable:
            if offset is None:
                # A reserved object was never written; report all of them
                # before failing below.
                print >>sys.stderr, \
                    'No offset for object %d (%s)' % (i, name)
                borken = True
            else:
                self.write("%010d %05d n \n" % (offset, generation))
            i += 1
        if borken:
            raise AssertionError, 'Indirect object does not exist'

    def writeTrailer(self):
        """Write out the PDF trailer."""

        self.write("trailer\n")
        self.write(pdfRepr(
                {'Size': self.nextObject,
                 'Root': self.rootObject,
                 'Info': self.infoObject }))
        # Could add 'ID'
        self.write("\nstartxref\n%d\n%%%%EOF\n" % self.startxref)

class RendererPdf(RendererBase):
    """Renderer that emits drawing commands into a PdfFile."""
    # Class-level LRU caches shared by all renderer instances.
    truetype_font_cache = maxdict(50)
    afm_font_cache = maxdict(50)

    def __init__(self, file, dpi, image_dpi):
        RendererBase.__init__(self)
        self.file = file
        self.gc = self.new_gc()
        # Shared with the PdfFile so font subsetting knows which
        # characters were actually drawn.
        self.file.used_characters = self.used_characters = {}
        self.mathtext_parser = MathTextParser("Pdf")
        self.dpi = dpi
        self.image_dpi = image_dpi
        self.tex_font_map = None

    def finalize(self):
        self.file.output(*self.gc.finalize())

    def check_gc(self, gc, fillcolor=None):
        # Emit only the graphics-state delta between the tracked state
        # and the requested one.
        orig_fill = gc._fillcolor
        gc._fillcolor = fillcolor

        delta = self.gc.delta(gc)
        if delta: self.file.output(*delta)

        # Restore gc to avoid unwanted side effects
        gc._fillcolor = orig_fill

    def tex_font_mapping(self, texfont):
        # Lazily load pdftex.map the first time a TeX font is needed.
        if self.tex_font_map is None:
            self.tex_font_map = \
                dviread.PsfontsMap(dviread.find_tex_file('pdftex.map'))
        return self.tex_font_map[texfont]

    def track_characters(self, font, s):
        """Keeps track of which characters are required from
        each font."""
        if isinstance(font, (str, unicode)):
            fname = font
        else:
            fname = font.fname
        realpath, stat_key = get_realpath_and_stat(fname)
        used_characters = self.used_characters.setdefault(
            stat_key, (realpath, set()))
        used_characters[1].update([ord(x) for x in s])

    def merge_used_characters(self, other):
        for stat_key, (realpath, charset) in other.items():
            used_characters = self.used_characters.setdefault(
                stat_key, (realpath, set()))
            used_characters[1].update(charset)

    def get_image_magnification(self):
        return self.image_dpi/72.0

    def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
        # MGDTODO: Support clippath here
        gc = self.new_gc()
        if bbox is not None:
            gc.set_clip_rectangle(bbox)
        self.check_gc(gc)

        # Scale image size from image_dpi pixels back to 72-dpi points.
        h, w = im.get_size_out()
        h, w = 72.0*h/self.image_dpi, 72.0*w/self.image_dpi
        imob = self.file.imageObject(im)
        self.file.output(Op.gsave, w, 0, 0, h, x, y, Op.concat_matrix,
                         imob, Op.use_xobject, Op.grestore)

    def draw_path(self, gc, path, transform, rgbFace=None):
        self.check_gc(gc, rgbFace)
        # NOTE(review): 'stream' is never used; writePath writes directly
        # into the file.
        stream = self.file.writePath(path, transform)
        self.file.output(self.gc.paint())

    def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
        # Stamp one marker Form XObject at each vertex of the path,
        # translating incrementally between stamps.
        self.check_gc(gc, rgbFace)
        fillp = rgbFace is not None

        output = self.file.output
        marker = self.file.markerObject(
            marker_path, marker_trans, fillp, self.gc._linewidth)
        tpath = trans.transform_path(path)

        output(Op.gsave)
        lastx, lasty = 0, 0
        for vertices, code in tpath.iter_segments():
            if len(vertices):
                x, y = vertices[-2:]
                dx, dy = x - lastx, y - lasty
                output(1, 0, 0, 1, dx, dy, Op.concat_matrix,
                       marker, Op.use_xobject)
                lastx, lasty = x, y
        output(Op.grestore)

    def _setup_textpos(self, x, y, descent, angle, oldx=0, oldy=0, olddescent=0, oldangle=0):
        # Position the text cursor: a relative Td move when unrotated,
        # otherwise a full text matrix plus a descent offset.
        if angle == oldangle == 0:
            self.file.output(x - oldx, (y + descent) - (oldy + olddescent), Op.textpos)
        else:
            angle = angle / 180.0 * pi
            self.file.output( cos(angle), sin(angle),
                             -sin(angle), cos(angle),
                              x,        y,         Op.textmatrix)
            self.file.output(0, descent, Op.textpos)

    def draw_mathtext(self, gc, x, y, s, prop, angle):
        # TODO: fix positioning and encoding
        width, height, descent, glyphs, rects, used_characters = \
            self.mathtext_parser.parse(s, self.dpi, prop)
        self.merge_used_characters(used_characters)

        # When using Type 3 fonts, we can't use character codes higher
        # than 255, so we use the "Do" command to render those
        # instead.
        global_fonttype = rcParams['pdf.fonttype']

        # Set up a global transformation matrix for the whole math expression
        a = angle / 180.0 * pi
        self.file.output(Op.gsave)
        self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
                         Op.concat_matrix)

        self.check_gc(gc, gc._rgb)
        self.file.output(Op.begin_text)
        prev_font = None, None
        oldx, oldy = 0, 0
        for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
            if is_opentype_cff_font(fontname):
                fonttype = 42
            else:
                fonttype = global_fonttype

            if fonttype == 42 or num <= 255:
                self._setup_textpos(ox, oy, 0, 0, oldx, oldy)
                oldx, oldy = ox, oy
                if (fontname, fontsize) != prev_font:
                    fontsize *= self.dpi/72.0
                    self.file.output(self.file.fontName(fontname), fontsize,
                                     Op.selectfont)
                    prev_font = fontname, fontsize
                self.file.output(self.encode_string(unichr(num), fonttype), Op.show)
        self.file.output(Op.end_text)

        # If using Type 3 fonts, render all of the multi-byte characters
        # as XObjects using the 'Do' command.
        if global_fonttype == 3:
            for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
                fontsize *= self.dpi/72.0
                if is_opentype_cff_font(fontname):
                    fonttype = 42
                else:
                    fonttype = global_fonttype

                if fonttype == 3 and num > 255:
                    # Ensure the font is registered so its glyph XObjects
                    # get written, then stamp the glyph scaled to fontsize.
                    self.file.fontName(fontname)
                    self.file.output(Op.gsave,
                                     0.001 * fontsize, 0,
                                     0, 0.001 * fontsize,
                                     ox, oy, Op.concat_matrix)
                    name = self.file._get_xobject_symbol_name(
                        fontname, symbol_name)
                    self.file.output(Name(name), Op.use_xobject)
                    self.file.output(Op.grestore)

        # Draw any horizontal lines in the math layout
        for ox, oy, width, height in rects:
            self.file.output(Op.gsave, ox, oy, width, height,
                             Op.rectangle, Op.fill, Op.grestore)

        # Pop off the global transformation
        self.file.output(Op.grestore)

    def draw_tex(self, gc, x, y, s, prop, angle):
        """Render a TeX string by running TeX to a dvi file and replaying
        the dvi page into the PDF."""
        texmanager = self.get_texmanager()
        fontsize = prop.get_size_in_points()
        dvifile = texmanager.make_dvi(s, fontsize)
        dvi = dviread.Dvi(dvifile, self.dpi)
        page = iter(dvi).next()
        dvi.close()

        # Gather font information and do some setup for combining
        # characters into strings.
        oldfont, seq = None, []
        for x1, y1, dvifont, glyph, width in page.text:
            if dvifont != oldfont:
                psfont = self.tex_font_mapping(dvifont.texname)
                pdfname = self.file.fontName(psfont.filename)
                if self.file.fontInfo.get(pdfname, None) is None:
                    # Remember metrics/encoding so embedType1 can emit
                    # the right Widths and Differences later.
                    self.file.fontInfo[pdfname] = Bunch(
                        encodingfile=psfont.encoding,
                        widths=dvifont.widths,
                        dvifont=dvifont)
                seq += [['font', pdfname, dvifont.size]]
                oldfont = dvifont
            seq += [['text', x1, y1, [chr(glyph)], x1+width]]

        # Find consecutive text strings with constant x coordinate and
        # combine into a sequence of strings and kerns, or just one
        # string (if any kerns would be less than 0.1 points).
        i, curx = 0, 0
        while i < len(seq)-1:
            elt, next = seq[i:i+2]
            if elt[0] == next[0] == 'text' and elt[2] == next[2]:
                offset = elt[4] - next[1]
                if abs(offset) < 0.1:
                    # Negligible kern: just concatenate the strings.
                    elt[3][-1] += next[3][0]
                    elt[4] += next[4]-next[1]
                else:
                    # Significant kern: interleave it (in thousandths of
                    # the font size) for a TJ (showkern) array.
                    elt[3] += [offset*1000.0/dvifont.size, next[3][0]]
                    elt[4] = next[4]
                del seq[i+1]
                continue
            i += 1

        # Create a transform to map the dvi contents to the canvas.
        mytrans = Affine2D().rotate_deg(angle).translate(x, y)

        # Output the text.
        self.check_gc(gc, gc._rgb)
        self.file.output(Op.begin_text)
        curx, cury, oldx, oldy = 0, 0, 0, 0
        for elt in seq:
            if elt[0] == 'font':
                self.file.output(elt[1], elt[2], Op.selectfont)
            elif elt[0] == 'text':
                curx, cury = mytrans.transform((elt[1], elt[2]))
                self._setup_textpos(curx, cury, 0, angle, oldx, oldy)
                oldx, oldy = curx, cury
                if len(elt[3]) == 1:
                    self.file.output(elt[3][0], Op.show)
                else:
                    self.file.output(elt[3], Op.showkern)
            else:
                assert False
        self.file.output(Op.end_text)

        # Then output the boxes (e.g. variable-length lines of square
        # roots).
        boxgc = self.new_gc()
        boxgc.copy_properties(gc)
        boxgc.set_linewidth(0)
        pathops = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
                   Path.CLOSEPOLY]
        for x1, y1, h, w in page.boxes:
            path = Path([[x1, y1], [x1+w, y1], [x1+w, y1+h],
                         [x1, y1+h], [0,0]], pathops)
            self.draw_path(boxgc, path, mytrans, gc._rgb)

    def encode_string(self, s, fonttype):
        # Type 3 fonts address at most 256 codes -> cp1252; Type 42 uses
        # the Identity-H 2-byte encoding -> UTF-16BE.
        if fonttype == 3:
            return s.encode('cp1252', 'replace')
        return s.encode('utf-16be', 'replace')

    def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
        # TODO: combine consecutive texts into one BT/ET delimited section

        # This function is rather complex, since there is no way to
        # access characters of a Type 3 font with codes > 255.  (Type
        # 3 fonts can not have a CIDMap).  Therefore, we break the
        # string into chunks, where each chunk contains exclusively
        # 1-byte or exclusively 2-byte characters, and output each
        # chunk a separate command.  1-byte characters use the regular
        # text show command (Tj), whereas 2-byte characters use the
        # use XObject command (Do).  If using Type 42 fonts, all of
        # this complication is avoided, but of course, those fonts can
        # not be subsetted.

        self.check_gc(gc, gc._rgb)
        if ismath: return self.draw_mathtext(gc, x, y, s, prop, angle)

        fontsize = prop.get_size_in_points() * self.dpi/72.0

        if rcParams['pdf.use14corefonts']:
            font = self._get_font_afm(prop)
            l, b, w, h = font.get_str_bbox(s)
            descent = -b * fontsize / 1000
            fonttype = 42
        else:
            font = self._get_font_ttf(prop)
            self.track_characters(font, s)
            font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
            descent = font.get_descent() / 64.0

            fonttype = rcParams['pdf.fonttype']

            # We can't subset all OpenType fonts, so switch to Type 42
            # in that case.
            if is_opentype_cff_font(font.fname):
                fonttype = 42

        def check_simple_method(s):
            """Determine if we should use the simple or woven method
            to output this text, and chunks the string into 1-byte and
            2-byte sections if necessary."""
            use_simple_method = True
            chunks = []
            if not rcParams['pdf.use14corefonts']:
                if fonttype == 3 and not isinstance(s, str) and len(s) != 0:
                    # Break the string into chunks where each chunk is either
                    # a string of chars <= 255, or a single character > 255.
                    s = unicode(s)
                    for c in s:
                        if ord(c) <= 255:
                            char_type = 1
                        else:
                            char_type = 2
                        if len(chunks) and chunks[-1][0] == char_type:
                            chunks[-1][1].append(c)
                        else:
                            chunks.append((char_type, [c]))
                    use_simple_method = (len(chunks) == 1
                                         and chunks[-1][0] == 1)
            return use_simple_method, chunks

        def draw_text_simple():
            """Outputs text using the simple method."""
            self.file.output(Op.begin_text,
                             self.file.fontName(prop),
                             fontsize,
                             Op.selectfont)
            self._setup_textpos(x, y, descent, angle)
            self.file.output(self.encode_string(s, fonttype), Op.show, Op.end_text)

        def draw_text_woven(chunks):
            """Outputs text using the woven method, alternating
            between chunks of 1-byte characters and 2-byte characters.
            Only used for Type 3 fonts."""
            chunks = [(a, ''.join(b)) for a, b in chunks]
            cmap = font.get_charmap()

            # Do the rotation and global translation as a single matrix
            # concatenation up front
            self.file.output(Op.gsave)
            a = angle / 180.0 * pi
            self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
                             Op.concat_matrix)

            # Output all the 1-byte characters in a BT/ET group, then
            # output all the 2-byte characters.
            for mode in (1, 2):
                newx = oldx = 0
                olddescent = 0

                # Output a 1-byte character chunk
                if mode == 1:
                    self.file.output(Op.begin_text,
                                     self.file.fontName(prop),
                                     fontsize,
                                     Op.selectfont)

                for chunk_type, chunk in chunks:
                    if mode == 1 and chunk_type == 1:
                        self._setup_textpos(newx, 0, descent, 0, oldx, 0, olddescent, 0)
                        self.file.output(self.encode_string(chunk, fonttype), Op.show)
                        oldx = newx
                        olddescent = descent

                    # Advance newx over the chunk's glyphs in both modes
                    # so 2-byte XObjects land at the right x position.
                    lastgind = None
                    for c in chunk:
                        ccode = ord(c)
                        gind = cmap.get(ccode)
                        if gind is not None:
                            if mode == 2 and chunk_type == 2:
                                glyph_name = font.get_glyph_name(gind)
                                self.file.output(Op.gsave)
                                self.file.output(0.001 * fontsize, 0,
                                                 0, 0.001 * fontsize,
                                                 newx, 0, Op.concat_matrix)
                                name = self.file._get_xobject_symbol_name(
                                    font.fname, glyph_name)
                                self.file.output(Name(name), Op.use_xobject)
                                self.file.output(Op.grestore)

                            # Move the pointer based on the character width
                            # and kerning
                            glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
                            if lastgind is not None:
                                kern = font.get_kerning(
                                    lastgind, gind, KERNING_UNFITTED)
                            else:
                                kern = 0
                            lastgind = gind
                            newx += kern/64.0 + glyph.linearHoriAdvance/65536.0

                if mode == 1:
                    self.file.output(Op.end_text)

            self.file.output(Op.grestore)

        use_simple_method, chunks = check_simple_method(s)
        if use_simple_method:
            return draw_text_simple()
        else:
            return draw_text_woven(chunks)

    def get_text_width_height_descent(self, s, prop, ismath):
        if rcParams['text.usetex']:
            texmanager = self.get_texmanager()
            fontsize = prop.get_size_in_points()
            dvifile = texmanager.make_dvi(s, fontsize)
            dvi = dviread.Dvi(dvifile, self.dpi)
            page = iter(dvi).next()
dvi.close() # A total height (including the descent) needs to be returned. return page.width, page.height+page.descent, page.descent if ismath: w, h, d, glyphs, rects, used_characters = \ self.mathtext_parser.parse(s, self.dpi, prop) elif rcParams['pdf.use14corefonts']: font = self._get_font_afm(prop) l, b, w, h, d = font.get_str_bbox_and_descent(s) scale = prop.get_size_in_points() w *= scale h *= scale d *= scale else: font = self._get_font_ttf(prop) font.set_text(s, 0.0, flags=LOAD_NO_HINTING) w, h = font.get_width_height() scale = (1.0 / 64.0) w *= scale h *= scale d = font.get_descent() d *= scale return w, h, d def _get_font_afm(self, prop): key = hash(prop) font = self.afm_font_cache.get(key) if font is None: filename = findfont(prop, fontext='afm') font = self.afm_font_cache.get(filename) if font is None: fh = file(filename) font = AFM(fh) self.afm_font_cache[filename] = font fh.close() self.afm_font_cache[key] = font return font def _get_font_ttf(self, prop): key = hash(prop) font = self.truetype_font_cache.get(key) if font is None: filename = findfont(prop) font = self.truetype_font_cache.get(filename) if font is None: font = FT2Font(str(filename)) self.truetype_font_cache[filename] = font self.truetype_font_cache[key] = font font.clear() font.set_size(prop.get_size_in_points(), self.dpi) return font def flipy(self): return False def get_canvas_width_height(self): return self.file.width / self.dpi, self.file.height / self.dpi def new_gc(self): return GraphicsContextPdf(self.file) class GraphicsContextPdf(GraphicsContextBase): def __init__(self, file): GraphicsContextBase.__init__(self) self._fillcolor = (0.0, 0.0, 0.0) self.file = file self.parent = None def __repr__(self): d = dict(self.__dict__) del d['file'] del d['parent'] return `d` def _strokep(self): return (self._linewidth > 0 and self._alpha > 0 and (len(self._rgb) <= 3 or self._rgb[3] != 0.0)) def _fillp(self): return ((self._fillcolor is not None or self._hatch) and (len(self._fillcolor) <= 3 
or self._fillcolor[3] != 0.0)) def close_and_paint(self): if self._strokep(): if self._fillp(): return Op.close_fill_stroke else: return Op.close_stroke else: if self._fillp(): return Op.fill else: return Op.endpath def paint(self): if self._strokep(): if self._fillp(): return Op.fill_stroke else: return Op.stroke else: if self._fillp(): return Op.fill else: return Op.endpath capstyles = { 'butt': 0, 'round': 1, 'projecting': 2 } joinstyles = { 'miter': 0, 'round': 1, 'bevel': 2 } def capstyle_cmd(self, style): return [self.capstyles[style], Op.setlinecap] def joinstyle_cmd(self, style): return [self.joinstyles[style], Op.setlinejoin] def linewidth_cmd(self, width): return [width, Op.setlinewidth] def dash_cmd(self, dashes): offset, dash = dashes if dash is None: dash = [] offset = 0 return [list(dash), offset, Op.setdash] def alpha_cmd(self, alpha): name = self.file.alphaState(alpha) return [name, Op.setgstate] def hatch_cmd(self, hatch): if not hatch: if self._fillcolor is not None: return self.fillcolor_cmd(self._fillcolor) else: return [Name('DeviceRGB'), Op.setcolorspace_nonstroke] else: hatch = hatch.lower() lst = ( self._rgb, self._fillcolor, hatch.count('-') + hatch.count('+'), hatch.count('/') + hatch.count('x'), hatch.count('|') + hatch.count('+'), hatch.count('\\') + hatch.count('x') ) name = self.file.hatchPattern(lst) return [Name('Pattern'), Op.setcolorspace_nonstroke, name, Op.setcolor_nonstroke] def rgb_cmd(self, rgb): if rcParams['pdf.inheritcolor']: return [] if rgb[0] == rgb[1] == rgb[2]: return [rgb[0], Op.setgray_stroke] else: return list(rgb[:3]) + [Op.setrgb_stroke] def fillcolor_cmd(self, rgb): if rgb is None or rcParams['pdf.inheritcolor']: return [] elif rgb[0] == rgb[1] == rgb[2]: return [rgb[0], Op.setgray_nonstroke] else: return list(rgb[:3]) + [Op.setrgb_nonstroke] def push(self): parent = GraphicsContextPdf(self.file) parent.copy_properties(self) parent.parent = self.parent self.parent = parent return [Op.gsave] def pop(self): assert 
self.parent is not None self.copy_properties(self.parent) self.parent = self.parent.parent return [Op.grestore] def clip_cmd(self, cliprect, clippath): """Set clip rectangle. Calls self.pop() and self.push().""" cmds = [] # Pop graphics state until we hit the right one or the stack is empty while (self._cliprect, self._clippath) != (cliprect, clippath) \ and self.parent is not None: cmds.extend(self.pop()) # Unless we hit the right one, set the clip polygon if (self._cliprect, self._clippath) != (cliprect, clippath): cmds.extend(self.push()) if self._cliprect != cliprect: cmds.extend([cliprect, Op.rectangle, Op.clip, Op.endpath]) if self._clippath != clippath: cmds.extend( PdfFile.pathOperations( *clippath.get_transformed_path_and_affine()) + [Op.clip, Op.endpath]) return cmds commands = ( (('_cliprect', '_clippath'), clip_cmd), # must come first since may pop (('_alpha',), alpha_cmd), (('_capstyle',), capstyle_cmd), (('_fillcolor',), fillcolor_cmd), (('_joinstyle',), joinstyle_cmd), (('_linewidth',), linewidth_cmd), (('_dashes',), dash_cmd), (('_rgb',), rgb_cmd), (('_hatch',), hatch_cmd), # must come after fillcolor and rgb ) # TODO: _linestyle def delta(self, other): """ Copy properties of other into self and return PDF commands needed to transform self into other. """ cmds = [] for params, cmd in self.commands: different = False for p in params: ours = getattr(self, p) theirs = getattr(other, p) try: different = bool(ours != theirs) except ValueError: ours = npy.asarray(ours) theirs = npy.asarray(theirs) different = ours.shape != theirs.shape or npy.any(ours != theirs) if different: break if different: theirs = [getattr(other, p) for p in params] cmds.extend(cmd(self, *theirs)) for p in params: setattr(self, p, getattr(other, p)) return cmds def copy_properties(self, other): """ Copy properties of other into self. 
""" GraphicsContextBase.copy_properties(self, other) self._fillcolor = other._fillcolor def finalize(self): """ Make sure every pushed graphics state is popped. """ cmds = [] while self.parent is not None: cmds.extend(self.pop()) return cmds ######################################################################## # # The following functions and classes are for pylab and implement # window/figure managers, etc... # ######################################################################## def new_figure_manager(num, *args, **kwargs): """ Create a new figure manager instance """ # if a main-level app must be created, this is the usual place to # do it -- see backend_wx, backend_wxagg and backend_tkagg for # examples. Not all GUIs require explicit instantiation of a # main-level app (egg backend_gtk, backend_gtkagg) for pylab FigureClass = kwargs.pop('FigureClass', Figure) thisFig = FigureClass(*args, **kwargs) canvas = FigureCanvasPdf(thisFig) manager = FigureManagerPdf(canvas, num) return manager class FigureCanvasPdf(FigureCanvasBase): """ The canvas the figure renders into. Calls the draw and print fig methods, creates the renderers, etc... Public attribute figure - A Figure instance """ def draw(self): pass filetypes = {'pdf': 'Portable Document Format'} def get_default_filetype(self): return 'pdf' def print_pdf(self, filename, **kwargs): ppi = 72 # Postscript points in an inch image_dpi = kwargs.get('dpi', 72) # dpi to use for images self.figure.set_dpi(ppi) width, height = self.figure.get_size_inches() file = PdfFile(width, height, ppi, filename) renderer = MixedModeRenderer( width, height, ppi, RendererPdf(file, ppi, image_dpi)) self.figure.draw(renderer) renderer.finalize() file.close() class FigureManagerPdf(FigureManagerBase): pass FigureManager = FigureManagerPdf
gpl-3.0
Guneet-Dhillon/mxnet
example/kaggle-ndsb1/training_curves.py
52
1879
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

## based on https://github.com/dmlc/mxnet/issues/1302
## Parses the model fit log file and generates a train/val accuracy vs epoch plot

import matplotlib.pyplot as plt
import numpy as np
import re
import argparse

parser = argparse.ArgumentParser(
    description='Parses log file and generates train/val curves')
parser.add_argument('--log-file', type=str, default="log_tr_va",
                    help='the path of log file')
args = parser.parse_args()

# Log lines look like "... ] Train-accuracy=0.873456" /
# "... ] Validation-accuracy=0.861234"; capture the numeric part.
TR_RE = re.compile(r'.*?]\sTrain-accuracy=([\d\.]+)')
VA_RE = re.compile(r'.*?]\sValidation-accuracy=([\d\.]+)')

# Read the whole log once; a context manager closes the handle
# (the original left the file object dangling).
with open(args.log_file) as f:
    log = f.read()

log_tr = [float(x) for x in TR_RE.findall(log)]
log_va = [float(x) for x in VA_RE.findall(log)]

# Fail with a clear message instead of min()/max() raising ValueError
# on an empty sequence when the log contains no accuracy lines.
if not log_tr:
    raise SystemExit('No Train-accuracy entries found in %s' % args.log_file)

# One point per epoch; assumes train and validation lines come in pairs
# (matplotlib will raise if their counts differ) -- TODO confirm for your log.
idx = np.arange(len(log_tr))

plt.figure(figsize=(8, 6))
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.plot(idx, log_tr, 'o', linestyle='-', color="r",
         label="Train accuracy")
plt.plot(idx, log_va, 'o', linestyle='-', color="b",
         label="Validation accuracy")
plt.legend(loc="best")
plt.xticks(np.arange(min(idx), max(idx) + 1, 5))
plt.yticks(np.arange(0, 1, 0.2))
plt.ylim([0, 1])
plt.show()
apache-2.0
jplourenco/bokeh
bokeh/_legacy_charts/builder/tests/test_timeseries_builder.py
6
2807
""" This is the Bokeh charts testing interface. """ #----------------------------------------------------------------------------- # Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved. # # Powered by the Bokeh Development Team. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from __future__ import absolute_import from collections import OrderedDict import datetime import unittest import numpy as np from numpy.testing import assert_array_equal import pandas as pd from bokeh._legacy_charts import TimeSeries from ._utils import create_chart #----------------------------------------------------------------------------- # Classes and functions #----------------------------------------------------------------------------- class TestTimeSeries(unittest.TestCase): def test_supported_input(self): now = datetime.datetime.now() delta = datetime.timedelta(minutes=1) dts = [now + delta*i for i in range(5)] xyvalues = OrderedDict({'Date': dts}) y_python = xyvalues['python'] = [2, 3, 7, 5, 26] y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126] y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26] xyvaluesdf = pd.DataFrame(xyvalues) groups = ['python', 'pypy', 'jython'] for i, _xy in enumerate([xyvalues, xyvaluesdf]): ts = create_chart(TimeSeries, _xy, index='Date') builder = ts._builders[0] self.assertEqual(builder._groups, groups) assert_array_equal(builder._data['x_python'], dts) assert_array_equal(builder._data['x_pypy'], dts) assert_array_equal(builder._data['x_jython'], dts) assert_array_equal(builder._data['y_python'], y_python) assert_array_equal(builder._data['y_pypy'], y_pypy) assert_array_equal(builder._data['y_jython'], y_jython) lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 
126], [22, 43, 10, 25, 26]] for _xy in [lvalues, np.array(lvalues)]: hm = create_chart(TimeSeries, _xy, index=dts) builder = hm._builders[0] self.assertEqual(builder._groups, ['0', '1', '2']) assert_array_equal(builder._data['x_0'], dts) assert_array_equal(builder._data['x_1'], dts) assert_array_equal(builder._data['x_2'], dts) assert_array_equal(builder._data['y_0'], y_python) assert_array_equal(builder._data['y_1'], y_pypy) assert_array_equal(builder._data['y_2'], y_jython)
bsd-3-clause
dimkal/mne-python
examples/visualization/plot_evoked_topomap.py
18
1498
""" ======================================== Plotting topographic maps of evoked data ======================================== Load evoked data and plot topomaps for selected time points. """ # Authors: Christian Brodbeck <christianbrodbeck@nyu.edu> # Tal Linzen <linzen@nyu.edu> # Denis A. Engeman <denis.engemann@gmail.com> # # License: BSD (3-clause) import numpy as np import matplotlib.pyplot as plt from mne.datasets import sample from mne import read_evokeds print(__doc__) path = sample.data_path() fname = path + '/MEG/sample/sample_audvis-ave.fif' # load evoked and subtract baseline condition = 'Left Auditory' evoked = read_evokeds(fname, condition=condition, baseline=(None, 0)) # set time instants in seconds (from 50 to 150ms in a step of 10ms) times = np.arange(0.05, 0.15, 0.01) # If times is set to None only 10 regularly spaced topographies will be shown # plot magnetometer data as topomaps evoked.plot_topomap(times, ch_type='mag') # compute a 50 ms bin to stabilize topographies evoked.plot_topomap(times, ch_type='mag', average=0.05) # plot gradiometer data (plots the RMS for each pair of gradiometers) evoked.plot_topomap(times, ch_type='grad') # plot magnetometer data as topomap at 1 time point : 100 ms # and add channel labels and title evoked.plot_topomap(0.1, ch_type='mag', show_names=True, colorbar=False, size=6, res=128, title='Auditory response') plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.88)
bsd-3-clause
jereze/scikit-learn
examples/linear_model/plot_theilsen.py
232
3615
""" ==================== Theil-Sen Regression ==================== Computes a Theil-Sen Regression on a synthetic dataset. See :ref:`theil_sen_regression` for more information on the regressor. Compared to the OLS (ordinary least squares) estimator, the Theil-Sen estimator is robust against outliers. It has a breakdown point of about 29.3% in case of a simple linear regression which means that it can tolerate arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional case. The estimation of the model is done by calculating the slopes and intercepts of a subpopulation of all possible combinations of p subsample points. If an intercept is fitted, p must be greater than or equal to n_features + 1. The final slope and intercept is then defined as the spatial median of these slopes and intercepts. In certain cases Theil-Sen performs better than :ref:`RANSAC <ransac_regression>` which is also a robust method. This is illustrated in the second example below where outliers with respect to the x-axis perturb RANSAC. Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in general a priori knowledge about the data and the nature of the outliers is needed. Due to the computational complexity of Theil-Sen it is recommended to use it only for small problems in terms of number of samples and features. For larger problems the ``max_subpopulation`` parameter restricts the magnitude of all possible combinations of p subsample points to a randomly chosen subset and therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger problems with the drawback of losing some of its mathematical properties since it then works on a random subset. 
""" # Author: Florian Wilhelm -- <florian.wilhelm@gmail.com> # License: BSD 3 clause import time import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression, TheilSenRegressor from sklearn.linear_model import RANSACRegressor print(__doc__) estimators = [('OLS', LinearRegression()), ('Theil-Sen', TheilSenRegressor(random_state=42)), ('RANSAC', RANSACRegressor(random_state=42)), ] ############################################################################## # Outliers only in the y direction np.random.seed(0) n_samples = 200 # Linear model y = 3*x + N(2, 0.1**2) x = np.random.randn(n_samples) w = 3. c = 2. noise = 0.1 * np.random.randn(n_samples) y = w * x + c + noise # 10% outliers y[-20:] += -20 * x[-20:] X = x[:, np.newaxis] plt.plot(x, y, 'k+', mew=2, ms=8) line_x = np.array([-3, 3]) for name, estimator in estimators: t0 = time.time() estimator.fit(X, y) elapsed_time = time.time() - t0 y_pred = estimator.predict(line_x.reshape(2, 1)) plt.plot(line_x, y_pred, label='%s (fit time: %.2fs)' % (name, elapsed_time)) plt.axis('tight') plt.legend(loc='upper left') ############################################################################## # Outliers in the X direction np.random.seed(0) # Linear model y = 3*x + N(2, 0.1**2) x = np.random.randn(n_samples) noise = 0.1 * np.random.randn(n_samples) y = 3 * x + 2 + noise # 10% outliers x[-20:] = 9.9 y[-20:] += 22 X = x[:, np.newaxis] plt.figure() plt.plot(x, y, 'k+', mew=2, ms=8) line_x = np.array([-3, 10]) for name, estimator in estimators: t0 = time.time() estimator.fit(X, y) elapsed_time = time.time() - t0 y_pred = estimator.predict(line_x.reshape(2, 1)) plt.plot(line_x, y_pred, label='%s (fit time: %.2fs)' % (name, elapsed_time)) plt.axis('tight') plt.legend(loc='upper left') plt.show()
bsd-3-clause
flightgong/scikit-learn
benchmarks/bench_plot_lasso_path.py
301
4003
"""Benchmarks of Lasso regularization path computation using Lars and CD The input data is mostly low rank but is a fat infinite tail. """ from __future__ import print_function from collections import defaultdict import gc import sys from time import time import numpy as np from sklearn.linear_model import lars_path from sklearn.linear_model import lasso_path from sklearn.datasets.samples_generator import make_regression def compute_bench(samples_range, features_range): it = 0 results = defaultdict(lambda: []) max_it = len(samples_range) * len(features_range) for n_samples in samples_range: for n_features in features_range: it += 1 print('====================') print('Iteration %03d of %03d' % (it, max_it)) print('====================') dataset_kwargs = { 'n_samples': n_samples, 'n_features': n_features, 'n_informative': n_features / 10, 'effective_rank': min(n_samples, n_features) / 10, #'effective_rank': None, 'bias': 0.0, } print("n_samples: %d" % n_samples) print("n_features: %d" % n_features) X, y = make_regression(**dataset_kwargs) gc.collect() print("benchmarking lars_path (with Gram):", end='') sys.stdout.flush() tstart = time() G = np.dot(X.T, X) # precomputed Gram matrix Xy = np.dot(X.T, y) lars_path(X, y, Xy=Xy, Gram=G, method='lasso') delta = time() - tstart print("%0.3fs" % delta) results['lars_path (with Gram)'].append(delta) gc.collect() print("benchmarking lars_path (without Gram):", end='') sys.stdout.flush() tstart = time() lars_path(X, y, method='lasso') delta = time() - tstart print("%0.3fs" % delta) results['lars_path (without Gram)'].append(delta) gc.collect() print("benchmarking lasso_path (with Gram):", end='') sys.stdout.flush() tstart = time() lasso_path(X, y, precompute=True) delta = time() - tstart print("%0.3fs" % delta) results['lasso_path (with Gram)'].append(delta) gc.collect() print("benchmarking lasso_path (without Gram):", end='') sys.stdout.flush() tstart = time() lasso_path(X, y, precompute=False) delta = time() - tstart 
print("%0.3fs" % delta) results['lasso_path (without Gram)'].append(delta) return results if __name__ == '__main__': from mpl_toolkits.mplot3d import axes3d # register the 3d projection import matplotlib.pyplot as plt samples_range = np.linspace(10, 2000, 5).astype(np.int) features_range = np.linspace(10, 2000, 5).astype(np.int) results = compute_bench(samples_range, features_range) max_time = max(max(t) for t in results.values()) fig = plt.figure('scikit-learn Lasso path benchmark results') i = 1 for c, (label, timings) in zip('bcry', sorted(results.items())): ax = fig.add_subplot(2, 2, i, projection='3d') X, Y = np.meshgrid(samples_range, features_range) Z = np.asarray(timings).reshape(samples_range.shape[0], features_range.shape[0]) # plot the actual surface ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8) # dummy point plot to stick the legend to since surface plot do not # support legends (yet?) #ax.plot([1], [1], [1], color=c, label=label) ax.set_xlabel('n_samples') ax.set_ylabel('n_features') ax.set_zlabel('Time (s)') ax.set_zlim3d(0.0, max_time * 1.1) ax.set_title(label) #ax.legend() i += 1 plt.show()
bsd-3-clause
benjaminwilson/word2vec-norm-experiments
article_generate_cosine_similarity.py
1
1368
import sys

import numpy as np
import pandas as pd

from parameters import *
from functions import *

vectors_syn0_filename = sys.argv[1]
word = sys.argv[2]  # e.g. 'the'


def row_normalise_dataframe(df):
    """Return a copy of df whose rows are scaled to unit Euclidean norm."""
    # .values replaces DataFrame.as_matrix(), which was removed in pandas 1.0;
    # * 1. forces a float copy so the division below never mutates df.
    matrix = df.values * 1.
    norms = np.sqrt((matrix ** 2).sum(axis=1))
    normed_mat = matrix / norms[:, np.newaxis]
    return pd.DataFrame(normed_mat, index=df.index, columns=df.columns)


vectors = row_normalise_dataframe(load_word2vec_binary(vectors_syn0_filename))

# Experiment pseudoword tokens for this word, e.g. 'THE_1', 'THE_2', ...
tokens = [build_experiment_token(word, i)
          for i in range(1, max(word_freq_experiment_ratio,
                                word_freq_experiment_power_max) + 1)]
tokens = [idx for idx in tokens if idx in vectors.index]

# All non-uppercase words are non-experiment words.
non_experiment_idxs = [idx for idx in vectors.index if idx != idx.upper()]
non_experiment_vectors = vectors.loc[non_experiment_idxs]

# Emit a LaTeX table; underscores in token names must be escaped as \_.
print(r'\begin{tabular}{l | c | l}')
print(r'pseudoword & similarity to \word{%s} & most similar words in unmodified corpus\\'
      % tokens[0].replace('_', r'\_'))
print(r'\hline')
for token in tokens:
    # Rows are unit-normalised, so a dot product is the cosine similarity.
    cs = vectors.loc[token].dot(vectors.loc[tokens[0]])
    bydist = by_distance_from(non_experiment_vectors, vectors.loc[token])
    similiar_words = ', '.join(bydist[0:4].index)
    print(r'\word{%s} & %.4f & \word{%s} \\'
          % (token.replace('_', r'\_'), cs, similiar_words))
print(r'\end{tabular}')
apache-2.0
pgierz/MyPythonModules
basemap_wrappers/basemap_pacific.py
2
1642
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap

# NOTE: the original functions used `thisax=plt.gca()` as a default argument.
# Python evaluates defaults once at import time, so importing this module
# silently created a figure and every later default call drew on that same,
# possibly stale, axes.  The defaults are now None and resolved at call time.


def map_pacif(coastlines=True, thisax=None, fill_color='aqua'):
    """Draw a cylindrical Basemap of the Pacific (60S-60N).

    Parameters
    ----------
    coastlines : bool, draw coastlines when True.
    thisax : matplotlib axes to draw on; defaults to the current axes.
    fill_color : map-boundary (ocean) color; None skips continent/boundary fill.

    Returns the configured Basemap instance.
    """
    if thisax is None:
        thisax = plt.gca()
    m = Basemap(projection='cyl', llcrnrlat=-60, urcrnrlat=60,
                llcrnrlon=-270, urcrnrlon=-60, resolution='c', ax=thisax)
    if coastlines:
        m.drawcoastlines()
    if fill_color is not None:
        # assumes drawmapboundary belongs inside this guard (indentation was
        # lost in the source dump) -- TODO confirm against original file
        m.fillcontinents(color='black', lake_color='black')
        m.drawmapboundary(fill_color=fill_color)
    return m


def map_npacif(coastlines=True, thisax=None, fill_color='aqua', fancy=False):
    """Draw the North Pacific: orthographic globe if fancy, else cylindrical.

    Same parameters as map_pacif, plus `fancy` to pick an orthographic
    projection centered on (60N, 180E).  Returns the Basemap instance.
    """
    if thisax is None:
        thisax = plt.gca()
    if fancy:
        print("fancy version")
        m = Basemap(projection='ortho', lat_0=60, lon_0=180,
                    resolution='c', ax=thisax)
    else:
        m = Basemap(projection='cyl', llcrnrlat=-30, urcrnrlat=90,
                    llcrnrlon=-270, urcrnrlon=-60, resolution='c', ax=thisax)
    if coastlines:
        m.drawcoastlines()
    if fill_color is not None:
        m.fillcontinents(color='black', lake_color='black')
        m.drawmapboundary(fill_color=fill_color)
    return m


def map_pacific_enso_region(coastlines=True, thisax=None, fill_color='aqua'):
    """Draw the tropical Pacific ENSO region (20S-20N, 100E-280E).

    Same parameters and return value as map_pacif.
    """
    if thisax is None:
        thisax = plt.gca()
    m = Basemap(projection='cyl', llcrnrlat=-20, urcrnrlat=20,
                llcrnrlon=100.0, urcrnrlon=360.0 - 80.0,
                resolution='c', ax=thisax)
    if coastlines:
        m.drawcoastlines()
    if fill_color is not None:
        m.fillcontinents(color='black', lake_color='black')
        m.drawmapboundary(fill_color=fill_color)
    return m
gpl-2.0
KelumPerera/Pandas
Create Dataframes_DateTime Formating.py
1
3583
# -*- coding: utf-8 -*-
"""
Created on Fri May 05 21:25:51 2017

@author: Kelum Perera

Scratch notebook: building DataFrames from different inputs, then several
ways of parsing, deleting, and rounding datetime columns.
"""
import pandas as pd
import numpy as np
import datetime as dt

# --- Creating DataFrames from different inputs ------------------------------

# Take a 2D array as input to your DataFrame
my_2darray = np.array([[1, 2, 3], [4, 5, 6]])
print(pd.DataFrame(my_2darray))

# Take a dictionary as input to your DataFrame
my_dict = {1: ['1', '3'], 2: ['1', '2'], 3: ['2', '4']}
print(pd.DataFrame(my_dict))

# Take a DataFrame as input to your DataFrame
my_df = pd.DataFrame(data=[4, 5, 6, 7], index=range(0, 4), columns=['A'])
print(pd.DataFrame(my_df))

# Take a Series as input to your DataFrame
my_series = pd.Series({"United Kingdom": "London", "India": "New Delhi",
                       "United States": "Washington", "Belgium": "Brussels"})
print(pd.DataFrame(my_series))

# --- DATE TIME FORMATTING ---------------------------------------------------

# Sample ledger; DateTime strings are day-first with two-digit years:
# 'dd-mm-yy HH:MM'.
table = pd.DataFrame(data={
    'DateTime': ['01-01-17 16:30', '01-01-17 16:31', '02-01-17 08:45',
                 '02-01-17 08:45', '02-01-17 10:40', '02-01-17 16:40',
                 '02-01-17 16:41', '02-01-17 16:42', '03-01-17 08:45',
                 '03-01-17 08:45', '03-01-17 10:48'],
    'Amount': [1000, 2000, 1000, 1000, 50000, 4000, 5000, 9000,
               4000, 5000, 20000],
    'Ref': ['Deduct', 'Deduct', 'Add', 'Add', 'Add', 'Transfer', 'Transfer',
            'Deduct', 'Add', 'Add', 'Deduct'],
    'DrCode': [1500, 1400, 9000, 9000, 9000, 1600, 1700, 2000,
               9000, 9000, 4000],
    'CrCode': [9000, 9000, 1500, 1400, 3000, 2000, 2000, 9000,
               1600, 1700, 9000],
})

# List the columns / inspect types
list(table.columns.values)
table.dtypes
table['DateTime'].dtype

# Convert the DateTime field from object to datetime64.
# BUG FIX: the original Method 1 parsed without a format, so pandas guessed
# month-first ('02-01-17' -> Feb 1) while Method 3 parsed day-first (Jan 2);
# an explicit format / dayfirst keeps all three methods consistent.
# Method 1 - vectorised to_datetime with an explicit format (fastest, safest)
table['DateTime1'] = pd.to_datetime(table['DateTime'], format='%d-%m-%y %H:%M')
# Method 2 - to_datetime with dayfirst; replaces the original
# .astype('datetime64[ns]') cast, which is unreliable for non-ISO strings
# in modern pandas.
table['DateTime2'] = pd.to_datetime(table['DateTime'], dayfirst=True)
# Method 3 - row-wise strptime (slowest; format fixed to match the data,
# the original alternative used '%d-%m-%Y %H:%M:%S' and raised ValueError)
table['DateTime3'] = table['DateTime'].apply(
    lambda x: dt.datetime.strptime(x, '%d-%m-%y %H:%M'))

# Deleting columns.  BUG FIX: the original deleted 'DateTime2' three times,
# which raises KeyError on the second attempt; each demo now runs once.
# Method 1 - del statement
del table['DateTime3']
# Method 2 - drop and reassign (drop(..., inplace=True) is the in-place form;
# axis=1 means columns, axis=0 would mean rows)
table = table.drop('DateTime2', axis=1)
# Dropping by column *position* instead of label; the result is not
# reassigned here, so `table` itself is unchanged.
table.drop(table.columns[[3, 4]], axis=1)


def round_to_5min(t):
    """Round a datetime/Timestamp to the nearest 5 minutes (ties round up)."""
    # BUG FIX: the module imports `datetime as dt`, so the original
    # reference to `datetime.timedelta` raised NameError.
    delta = dt.timedelta(minutes=t.minute % 5, seconds=t.second,
                         microseconds=t.microsecond)
    t -= delta  # floor to the previous 5-minute mark
    if delta > dt.timedelta(minutes=2.5):
        t += dt.timedelta(minutes=5)  # closer to the next mark: round up
    return t


table['ns5MinDateTime'] = table.DateTime1.map(round_to_5min)


def round_to_10min(t):
    """Round a datetime/Timestamp to the nearest 10 minutes (ties round up)."""
    delta = dt.timedelta(minutes=t.minute % 10, seconds=t.second,
                         microseconds=t.microsecond)
    t -= delta  # floor to the previous 10-minute mark
    if delta > dt.timedelta(minutes=5):
        t += dt.timedelta(minutes=10)
    return t


table['ns10MinDateTime'] = table.DateTime1.map(round_to_10min)
gpl-3.0
chuan9/chromium-crosswalk
chrome/test/nacl_test_injection/buildbot_chrome_nacl_stage.py
39
11336
#!/usr/bin/python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Do all the steps required to build and test against nacl.""" import optparse import os.path import re import shutil import subprocess import sys import find_chrome THIS_DIR = os.path.abspath(os.path.dirname(__file__)) CHROMIUM_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '..', '..')) NACL_DIR = os.path.join(CHROMIUM_DIR, 'native_client') sys.path.append(os.path.join(CHROMIUM_DIR, 'build')) sys.path.append(NACL_DIR) import detect_host_arch import pynacl.platform # Copied from buildbot/buildbot_lib.py def TryToCleanContents(path, file_name_filter=lambda fn: True): """ Remove the contents of a directory without touching the directory itself. Ignores all failures. """ if os.path.exists(path): for fn in os.listdir(path): TryToCleanPath(os.path.join(path, fn), file_name_filter) # Copied from buildbot/buildbot_lib.py def TryToCleanPath(path, file_name_filter=lambda fn: True): """ Removes a file or directory. Ignores all failures. """ if os.path.exists(path): if file_name_filter(path): print 'Trying to remove %s' % path if os.path.isdir(path): shutil.rmtree(path, ignore_errors=True) else: try: os.remove(path) except Exception: pass else: print 'Skipping %s' % path # TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem. def CleanTempDir(): # Only delete files and directories like: # a) C:\temp\83C4.tmp # b) /tmp/.org.chromium.Chromium.EQrEzl file_name_re = re.compile( r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$') file_name_filter = lambda fn: file_name_re.search(fn) is not None path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp')) if len(path) >= 4 and os.path.isdir(path): print print "Cleaning out the temp directory." print TryToCleanContents(path, file_name_filter) else: print print "Cannot find temp directory, not cleaning it." 
print def RunCommand(cmd, cwd, env): sys.stdout.write('\nRunning %s\n\n' % ' '.join(cmd)) sys.stdout.flush() retcode = subprocess.call(cmd, cwd=cwd, env=env) if retcode != 0: sys.stdout.write('\nFailed: %s\n\n' % ' '.join(cmd)) sys.exit(retcode) def RunTests(name, cmd, env): sys.stdout.write('\n\nBuilding files needed for %s testing...\n\n' % name) RunCommand(cmd + ['do_not_run_tests=1', '-j8'], NACL_DIR, env) sys.stdout.write('\n\nRunning %s tests...\n\n' % name) RunCommand(cmd, NACL_DIR, env) def BuildAndTest(options): # Refuse to run under cygwin. if sys.platform == 'cygwin': raise Exception('I do not work under cygwin, sorry.') # By default, use the version of Python is being used to run this script. python = sys.executable if sys.platform == 'darwin': # Mac 10.5 bots tend to use a particularlly old version of Python, look for # a newer version. macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python' if os.path.exists(macpython27): python = macpython27 os_name = pynacl.platform.GetOS() arch_name = pynacl.platform.GetArch() toolchain_dir = os.path.join(NACL_DIR, 'toolchain', '%s_%s' % (os_name, arch_name)) nacl_newlib_dir = os.path.join(toolchain_dir, 'nacl_%s_newlib' % arch_name) nacl_glibc_dir = os.path.join(toolchain_dir, 'nacl_%s_glibc' % arch_name) pnacl_newlib_dir = os.path.join(toolchain_dir, 'pnacl_newlib') # Decide platform specifics. 
if options.browser_path: chrome_filename = options.browser_path else: chrome_filename = find_chrome.FindChrome(CHROMIUM_DIR, [options.mode]) if chrome_filename is None: raise Exception('Cannot find a chrome binary - specify one with ' '--browser_path?') env = dict(os.environ) if sys.platform in ['win32', 'cygwin']: if options.bits == 64: bits = 64 elif options.bits == 32: bits = 32 elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \ '64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''): bits = 64 else: bits = 32 msvs_path = ';'.join([ r'c:\Program Files\Microsoft Visual Studio 9.0\VC', r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC', r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools', r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools', r'c:\Program Files\Microsoft Visual Studio 8\VC', r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC', r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools', r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools', ]) env['PATH'] += ';' + msvs_path scons = [python, 'scons.py'] elif sys.platform == 'darwin': if options.bits == 64: bits = 64 elif options.bits == 32: bits = 32 else: p = subprocess.Popen(['file', chrome_filename], stdout=subprocess.PIPE) (p_stdout, _) = p.communicate() assert p.returncode == 0 if p_stdout.find('executable x86_64') >= 0: bits = 64 else: bits = 32 scons = [python, 'scons.py'] else: if options.bits == 64: bits = 64 elif options.bits == 32: bits = 32 elif '64' in detect_host_arch.HostArch(): bits = 64 else: bits = 32 # xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap # the entire build step rather than each test (browser_headless=1). # We also need to make sure that there are at least 24 bits per pixel. 
# https://code.google.com/p/chromium/issues/detail?id=316687 scons = [ 'xvfb-run', '--auto-servernum', '--server-args', '-screen 0 1024x768x24', python, 'scons.py', ] if options.jobs > 1: scons.append('-j%d' % options.jobs) scons.append('disable_tests=%s' % options.disable_tests) if options.buildbot is not None: scons.append('buildbot=%s' % (options.buildbot,)) # Clean the output of the previous build. # Incremental builds can get wedged in weird ways, so we're trading speed # for reliability. shutil.rmtree(os.path.join(NACL_DIR, 'scons-out'), True) # check that the HOST (not target) is 64bit # this is emulating what msvs_env.bat is doing if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \ '64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''): # 64bit HOST env['VS90COMNTOOLS'] = ('c:\\Program Files (x86)\\' 'Microsoft Visual Studio 9.0\\Common7\\Tools\\') env['VS80COMNTOOLS'] = ('c:\\Program Files (x86)\\' 'Microsoft Visual Studio 8.0\\Common7\\Tools\\') else: # 32bit HOST env['VS90COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 9.0\\' 'Common7\\Tools\\') env['VS80COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 8.0\\' 'Common7\\Tools\\') # Run nacl/chrome integration tests. # Note that we have to add nacl_irt_test to --mode in order to get # inbrowser_test_runner to run. # TODO(mseaborn): Change it so that inbrowser_test_runner is not a # special case. cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits, '--mode=opt-host,nacl,nacl_irt_test', 'chrome_browser_path=%s' % chrome_filename, 'nacl_newlib_dir=%s' % nacl_newlib_dir, 'nacl_glibc_dir=%s' % nacl_glibc_dir, 'pnacl_newlib_dir=%s' % pnacl_newlib_dir, ] if not options.integration_bot and not options.morenacl_bot: cmd.append('disable_flaky_tests=1') cmd.append('chrome_browser_tests') # Propagate path to JSON output if present. # Note that RunCommand calls sys.exit on errors, so potential errors # from one command won't be overwritten by another one. 
Overwriting # a successful results file with either success or failure is fine. if options.json_build_results_output_file: cmd.append('json_build_results_output_file=%s' % options.json_build_results_output_file) CleanTempDir() if options.enable_newlib: RunTests('nacl-newlib', cmd, env) if options.enable_glibc: RunTests('nacl-glibc', cmd + ['--nacl_glibc'], env) def MakeCommandLineParser(): parser = optparse.OptionParser() parser.add_option('-m', '--mode', dest='mode', default='Debug', help='Debug/Release mode') parser.add_option('-j', dest='jobs', default=1, type='int', help='Number of parallel jobs') parser.add_option('--enable_newlib', dest='enable_newlib', default=-1, type='int', help='Run newlib tests?') parser.add_option('--enable_glibc', dest='enable_glibc', default=-1, type='int', help='Run glibc tests?') parser.add_option('--json_build_results_output_file', help='Path to a JSON file for machine-readable output.') # Deprecated, but passed to us by a script in the Chrome repo. # Replaced by --enable_glibc=0 parser.add_option('--disable_glibc', dest='disable_glibc', action='store_true', default=False, help='Do not test using glibc.') parser.add_option('--disable_tests', dest='disable_tests', type='string', default='', help='Comma-separated list of tests to omit') builder_name = os.environ.get('BUILDBOT_BUILDERNAME', '') is_integration_bot = 'nacl-chrome' in builder_name parser.add_option('--integration_bot', dest='integration_bot', type='int', default=int(is_integration_bot), help='Is this an integration bot?') is_morenacl_bot = ( 'More NaCl' in builder_name or 'naclmore' in builder_name) parser.add_option('--morenacl_bot', dest='morenacl_bot', type='int', default=int(is_morenacl_bot), help='Is this a morenacl bot?') # Not used on the bots, but handy for running the script manually. 
parser.add_option('--bits', dest='bits', action='store', type='int', default=None, help='32/64') parser.add_option('--browser_path', dest='browser_path', action='store', type='string', default=None, help='Path to the chrome browser.') parser.add_option('--buildbot', dest='buildbot', action='store', type='string', default=None, help='Value passed to scons as buildbot= option.') return parser def Main(): parser = MakeCommandLineParser() options, args = parser.parse_args() if options.integration_bot and options.morenacl_bot: parser.error('ERROR: cannot be both an integration bot and a morenacl bot') # Set defaults for enabling newlib. if options.enable_newlib == -1: options.enable_newlib = 1 # Set defaults for enabling glibc. if options.enable_glibc == -1: if options.integration_bot or options.morenacl_bot: options.enable_glibc = 1 else: options.enable_glibc = 0 if args: parser.error('ERROR: invalid argument') BuildAndTest(options) if __name__ == '__main__': Main()
bsd-3-clause
jsilter/scipy
scipy/stats/_discrete_distns.py
2
20515
# # Author: Travis Oliphant 2002-2011 with contributions from # SciPy Developers 2004-2011 # from __future__ import division, print_function, absolute_import from scipy import special from scipy.special import entr, gammaln as gamln from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh import numpy as np from ._distn_infrastructure import ( rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names) class binom_gen(rv_discrete): """A binomial discrete random variable. %(before_notes)s Notes ----- The probability mass function for `binom` is:: binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k) for ``k`` in ``{0, 1,..., n}``. `binom` takes ``n`` and ``p`` as shape parameters. %(example)s """ def _rvs(self, n, p): return self._random_state.binomial(n, p, self._size) def _argcheck(self, n, p): self.b = n return (n >= 0) & (p >= 0) & (p <= 1) def _logpmf(self, x, n, p): k = floor(x) combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1))) return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p) def _pmf(self, x, n, p): return exp(self._logpmf(x, n, p)) def _cdf(self, x, n, p): k = floor(x) vals = special.bdtr(k, n, p) return vals def _sf(self, x, n, p): k = floor(x) return special.bdtrc(k, n, p) def _ppf(self, q, n, p): vals = ceil(special.bdtrik(q, n, p)) vals1 = np.maximum(vals - 1, 0) temp = special.bdtr(vals1, n, p) return np.where(temp >= q, vals1, vals) def _stats(self, n, p, moments='mv'): q = 1.0 - p mu = n * p var = n * p * q g1, g2 = None, None if 's' in moments: g1 = (q - p) / sqrt(var) if 'k' in moments: g2 = (1.0 - 6*p*q) / var return mu, var, g1, g2 def _entropy(self, n, p): k = np.r_[0:n + 1] vals = self._pmf(k, n, p) return np.sum(entr(vals), axis=0) binom = binom_gen(name='binom') class bernoulli_gen(binom_gen): """A Bernoulli discrete random variable. %(before_notes)s Notes ----- The probability mass function for `bernoulli` is:: bernoulli.pmf(k) = 1-p if k = 0 = p if k = 1 for ``k`` in ``{0, 1}``. 
`bernoulli` takes ``p`` as shape parameter. %(example)s """ def _rvs(self, p): return binom_gen._rvs(self, 1, p) def _argcheck(self, p): return (p >= 0) & (p <= 1) def _logpmf(self, x, p): return binom._logpmf(x, 1, p) def _pmf(self, x, p): return binom._pmf(x, 1, p) def _cdf(self, x, p): return binom._cdf(x, 1, p) def _sf(self, x, p): return binom._sf(x, 1, p) def _ppf(self, q, p): return binom._ppf(q, 1, p) def _stats(self, p): return binom._stats(1, p) def _entropy(self, p): return entr(p) + entr(1-p) bernoulli = bernoulli_gen(b=1, name='bernoulli') class nbinom_gen(rv_discrete): """A negative binomial discrete random variable. %(before_notes)s Notes ----- The probability mass function for `nbinom` is:: nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k for ``k >= 0``. `nbinom` takes ``n`` and ``p`` as shape parameters. %(example)s """ def _rvs(self, n, p): return self._random_state.negative_binomial(n, p, self._size) def _argcheck(self, n, p): return (n > 0) & (p >= 0) & (p <= 1) def _pmf(self, x, n, p): return exp(self._logpmf(x, n, p)) def _logpmf(self, x, n, p): coeff = gamln(n+x) - gamln(x+1) - gamln(n) return coeff + n*log(p) + special.xlog1py(x, -p) def _cdf(self, x, n, p): k = floor(x) return special.betainc(n, k+1, p) def _sf_skip(self, x, n, p): # skip because special.nbdtrc doesn't work for 0<n<1 k = floor(x) return special.nbdtrc(k, n, p) def _ppf(self, q, n, p): vals = ceil(special.nbdtrik(q, n, p)) vals1 = (vals-1).clip(0.0, np.inf) temp = self._cdf(vals1, n, p) return np.where(temp >= q, vals1, vals) def _stats(self, n, p): Q = 1.0 / p P = Q - 1.0 mu = n*P var = n*P*Q g1 = (Q+P)/sqrt(n*P*Q) g2 = (1.0 + 6*P*Q) / (n*P*Q) return mu, var, g1, g2 nbinom = nbinom_gen(name='nbinom') class geom_gen(rv_discrete): """A geometric discrete random variable. %(before_notes)s Notes ----- The probability mass function for `geom` is:: geom.pmf(k) = (1-p)**(k-1)*p for ``k >= 1``. `geom` takes ``p`` as shape parameter. 
%(example)s """ def _rvs(self, p): return self._random_state.geometric(p, size=self._size) def _argcheck(self, p): return (p <= 1) & (p >= 0) def _pmf(self, k, p): return np.power(1-p, k-1) * p def _logpmf(self, k, p): return special.xlog1py(k - 1, -p) + log(p) def _cdf(self, x, p): k = floor(x) return -expm1(log1p(-p)*k) def _sf(self, x, p): return np.exp(self._logsf(x, p)) def _logsf(self, x, p): k = floor(x) return k*log1p(-p) def _ppf(self, q, p): vals = ceil(log(1.0-q)/log(1-p)) temp = self._cdf(vals-1, p) return np.where((temp >= q) & (vals > 0), vals-1, vals) def _stats(self, p): mu = 1.0/p qr = 1.0-p var = qr / p / p g1 = (2.0-p) / sqrt(qr) g2 = np.polyval([1, -6, 6], p)/(1.0-p) return mu, var, g1, g2 geom = geom_gen(a=1, name='geom', longname="A geometric") class hypergeom_gen(rv_discrete): """A hypergeometric discrete random variable. The hypergeometric distribution models drawing objects from a bin. M is the total number of objects, n is total number of Type I objects. The random variate represents the number of Type I objects in N drawn without replacement from the total population. %(before_notes)s Notes ----- The probability mass function is defined as:: pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N), for max(0, N - (M-n)) <= k <= min(n, N) Examples -------- >>> from scipy.stats import hypergeom >>> import matplotlib.pyplot as plt Suppose we have a collection of 20 animals, of which 7 are dogs. 
Then if we want to know the probability of finding a given number of dogs if we choose at random 12 of the 20 animals, we can initialize a frozen distribution and plot the probability mass function: >>> [M, n, N] = [20, 7, 12] >>> rv = hypergeom(M, n, N) >>> x = np.arange(0, n+1) >>> pmf_dogs = rv.pmf(x) >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(x, pmf_dogs, 'bo') >>> ax.vlines(x, 0, pmf_dogs, lw=2) >>> ax.set_xlabel('# of dogs in our group of chosen animals') >>> ax.set_ylabel('hypergeom PMF') >>> plt.show() Instead of using a frozen distribution we can also use `hypergeom` methods directly. To for example obtain the cumulative distribution function, use: >>> prb = hypergeom.cdf(x, M, n, N) And to generate random numbers: >>> R = hypergeom.rvs(M, n, N, size=10) """ def _rvs(self, M, n, N): return self._random_state.hypergeometric(n, M-n, N, size=self._size) def _argcheck(self, M, n, N): cond = rv_discrete._argcheck(self, M, n, N) cond &= (n <= M) & (N <= M) self.a = max(N-(M-n), 0) self.b = min(n, N) return cond def _logpmf(self, k, M, n, N): tot, good = M, n bad = tot - good return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \ - gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \ + gamln(N+1) def _pmf(self, k, M, n, N): # same as the following but numerically more precise # return comb(good, k) * comb(bad, N-k) / comb(tot, N) return exp(self._logpmf(k, M, n, N)) def _stats(self, M, n, N): # tot, good, sample_size = M, n, N # "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n') M, n, N = 1.*M, 1.*n, 1.*N m = M - n p = n/M mu = N*p var = m*n*N*(M - N)*1.0/(M*M*(M-1)) g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N))) g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m g2 *= (M-1)*M*M g2 += 6.*n*N*(M-N)*m*(5.*M-6) g2 /= n * N * (M-N) * m * (M-2.) * (M-3.) 
return mu, var, g1, g2 def _entropy(self, M, n, N): k = np.r_[N - (M - n):min(n, N) + 1] vals = self.pmf(k, M, n, N) return np.sum(entr(vals), axis=0) def _sf(self, k, M, n, N): """More precise calculation, 1 - cdf doesn't cut it.""" # This for loop is needed because `k` can be an array. If that's the # case, the sf() method makes M, n and N arrays of the same shape. We # therefore unpack all inputs args, so we can do the manual # integration. res = [] for quant, tot, good, draw in zip(k, M, n, N): # Manual integration over probability mass function. More accurate # than integrate.quad. k2 = np.arange(quant + 1, draw + 1) res.append(np.sum(self._pmf(k2, tot, good, draw))) return np.asarray(res) hypergeom = hypergeom_gen(name='hypergeom') # FIXME: Fails _cdfvec class logser_gen(rv_discrete): """A Logarithmic (Log-Series, Series) discrete random variable. %(before_notes)s Notes ----- The probability mass function for `logser` is:: logser.pmf(k) = - p**k / (k*log(1-p)) for ``k >= 1``. `logser` takes ``p`` as shape parameter. %(example)s """ def _rvs(self, p): # looks wrong for p>0.5, too few k=1 # trying to use generic is worse, no k=1 at all return self._random_state.logseries(p, size=self._size) def _argcheck(self, p): return (p > 0) & (p < 1) def _pmf(self, k, p): return -np.power(p, k) * 1.0 / k / log(1 - p) def _stats(self, p): r = log(1 - p) mu = p / (p - 1.0) / r mu2p = -p / r / (p - 1.0)**2 var = mu2p - mu*mu mu3p = -p / r * (1.0+p) / (1.0 - p)**3 mu3 = mu3p - 3*mu*mu2p + 2*mu**3 g1 = mu3 / np.power(var, 1.5) mu4p = -p / r * ( 1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4) mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4 g2 = mu4 / var**2 - 3.0 return mu, var, g1, g2 logser = logser_gen(a=1, name='logser', longname='A logarithmic') class poisson_gen(rv_discrete): """A Poisson discrete random variable. %(before_notes)s Notes ----- The probability mass function for `poisson` is:: poisson.pmf(k) = exp(-mu) * mu**k / k! for ``k >= 0``. 
`poisson` takes ``mu`` as shape parameter. %(example)s """ def _rvs(self, mu): return self._random_state.poisson(mu, self._size) def _logpmf(self, k, mu): Pk = k*log(mu)-gamln(k+1) - mu return Pk def _pmf(self, k, mu): return exp(self._logpmf(k, mu)) def _cdf(self, x, mu): k = floor(x) return special.pdtr(k, mu) def _sf(self, x, mu): k = floor(x) return special.pdtrc(k, mu) def _ppf(self, q, mu): vals = ceil(special.pdtrik(q, mu)) vals1 = np.maximum(vals - 1, 0) temp = special.pdtr(vals1, mu) return np.where(temp >= q, vals1, vals) def _stats(self, mu): var = mu tmp = np.asarray(mu) g1 = sqrt(1.0 / tmp) g2 = 1.0 / tmp return mu, var, g1, g2 poisson = poisson_gen(name="poisson", longname='A Poisson') class planck_gen(rv_discrete): """A Planck discrete exponential random variable. %(before_notes)s Notes ----- The probability mass function for `planck` is:: planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k) for ``k*lambda_ >= 0``. `planck` takes ``lambda_`` as shape parameter. %(example)s """ def _argcheck(self, lambda_): if (lambda_ > 0): self.a = 0 self.b = np.inf return 1 elif (lambda_ < 0): self.a = -np.inf self.b = 0 return 1 else: return 0 def _pmf(self, k, lambda_): fact = (1-exp(-lambda_)) return fact*exp(-lambda_*k) def _cdf(self, x, lambda_): k = floor(x) return 1-exp(-lambda_*(k+1)) def _ppf(self, q, lambda_): vals = ceil(-1.0/lambda_ * log1p(-q)-1) vals1 = (vals-1).clip(self.a, np.inf) temp = self._cdf(vals1, lambda_) return np.where(temp >= q, vals1, vals) def _stats(self, lambda_): mu = 1/(exp(lambda_)-1) var = exp(-lambda_)/(expm1(-lambda_))**2 g1 = 2*cosh(lambda_/2.0) g2 = 4+2*cosh(lambda_) return mu, var, g1, g2 def _entropy(self, lambda_): l = lambda_ C = (1-exp(-l)) return l*exp(-l)/C - log(C) planck = planck_gen(name='planck', longname='A discrete exponential ') class boltzmann_gen(rv_discrete): """A Boltzmann (Truncated Discrete Exponential) random variable. 
%(before_notes)s Notes ----- The probability mass function for `boltzmann` is:: boltzmann.pmf(k) = (1-exp(-lambda_)*exp(-lambda_*k)/(1-exp(-lambda_*N)) for ``k = 0,..., N-1``. `boltzmann` takes ``lambda_`` and ``N`` as shape parameters. %(example)s """ def _pmf(self, k, lambda_, N): fact = (1-exp(-lambda_))/(1-exp(-lambda_*N)) return fact*exp(-lambda_*k) def _cdf(self, x, lambda_, N): k = floor(x) return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N)) def _ppf(self, q, lambda_, N): qnew = q*(1-exp(-lambda_*N)) vals = ceil(-1.0/lambda_ * log(1-qnew)-1) vals1 = (vals-1).clip(0.0, np.inf) temp = self._cdf(vals1, lambda_, N) return np.where(temp >= q, vals1, vals) def _stats(self, lambda_, N): z = exp(-lambda_) zN = exp(-lambda_*N) mu = z/(1.0-z)-N*zN/(1-zN) var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2 trm = (1-zN)/(1-z) trm2 = (z*trm**2 - N*N*zN) g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN) g1 = g1 / trm2**(1.5) g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN) g2 = g2 / trm2 / trm2 return mu, var, g1, g2 boltzmann = boltzmann_gen(name='boltzmann', longname='A truncated discrete exponential ') class randint_gen(rv_discrete): """A uniform discrete random variable. %(before_notes)s Notes ----- The probability mass function for `randint` is:: randint.pmf(k) = 1./(high - low) for ``k = low, ..., high - 1``. `randint` takes ``low`` and ``high`` as shape parameters. Note the difference to the numpy ``random_integers`` which returns integers on a *closed* interval ``[low, high]``. %(example)s """ def _argcheck(self, low, high): self.a = low self.b = high - 1 return (high > low) def _pmf(self, k, low, high): p = np.ones_like(k) / (high - low) return np.where((k >= low) & (k < high), p, 0.) def _cdf(self, x, low, high): k = floor(x) return (k - low + 1.) 
/ (high - low) def _ppf(self, q, low, high): vals = ceil(q * (high - low) + low) - 1 vals1 = (vals - 1).clip(low, high) temp = self._cdf(vals1, low, high) return np.where(temp >= q, vals1, vals) def _stats(self, low, high): m2, m1 = np.asarray(high), np.asarray(low) mu = (m2 + m1 - 1.0) / 2 d = m2 - m1 var = (d*d - 1) / 12.0 g1 = 0.0 g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0) return mu, var, g1, g2 def _rvs(self, low, high=None): """An array of *size* random integers >= ``low`` and < ``high``. If ``high`` is ``None``, then range is >=0 and < low """ return self._random_state.randint(low, high, self._size) def _entropy(self, low, high): return log(high - low) randint = randint_gen(name='randint', longname='A discrete uniform ' '(random integer)') # FIXME: problems sampling. class zipf_gen(rv_discrete): """A Zipf discrete random variable. %(before_notes)s Notes ----- The probability mass function for `zipf` is:: zipf.pmf(k, a) = 1/(zeta(a) * k**a) for ``k >= 1``. `zipf` takes ``a`` as shape parameter. %(example)s """ def _rvs(self, a): return self._random_state.zipf(a, size=self._size) def _argcheck(self, a): return a > 1 def _pmf(self, k, a): Pk = 1.0 / special.zeta(a, 1) / k**a return Pk def _munp(self, n, a): return _lazywhere( a > n + 1, (a, n), lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1), np.inf) zipf = zipf_gen(a=1, name='zipf', longname='A Zipf') class dlaplace_gen(rv_discrete): """A Laplacian discrete random variable. %(before_notes)s Notes ----- The probability mass function for `dlaplace` is:: dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k)) for ``a > 0``. `dlaplace` takes ``a`` as shape parameter. 
%(example)s """ def _pmf(self, k, a): return tanh(a/2.0) * exp(-a * abs(k)) def _cdf(self, x, a): k = floor(x) f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1) f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1) return _lazywhere(k >= 0, (k, a), f=f, f2=f2) def _ppf(self, q, a): const = 1 + exp(a) vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1, -log((1-q) * const) / a)) vals1 = vals - 1 return np.where(self._cdf(vals1, a) >= q, vals1, vals) def _stats(self, a): ea = exp(a) mu2 = 2.*ea/(ea-1.)**2 mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4 return 0., mu2, 0., mu4/mu2**2 - 3. def _entropy(self, a): return a / sinh(a) - log(tanh(a/2.0)) dlaplace = dlaplace_gen(a=-np.inf, name='dlaplace', longname='A discrete Laplacian') class skellam_gen(rv_discrete): """A Skellam discrete random variable. %(before_notes)s Notes ----- Probability distribution of the difference of two correlated or uncorrelated Poisson random variables. Let k1 and k2 be two Poisson-distributed r.v. with expected values lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and ``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation coefficient between k1 and k2. If the two Poisson-distributed r.v. are independent then ``rho = 0``. Parameters mu1 and mu2 must be strictly positive. For details see: http://en.wikipedia.org/wiki/Skellam_distribution `skellam` takes ``mu1`` and ``mu2`` as shape parameters. 
%(example)s """ def _rvs(self, mu1, mu2): n = self._size return (self._random_state.poisson(mu1, n) - self._random_state.poisson(mu2, n)) def _pmf(self, x, mu1, mu2): px = np.where(x < 0, _ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2, _ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2) # ncx2.pdf() returns nan's for extremely low probabilities return px def _cdf(self, x, mu1, mu2): x = floor(x) px = np.where(x < 0, _ncx2_cdf(2*mu2, -2*x, 2*mu1), 1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2)) return px def _stats(self, mu1, mu2): mean = mu1 - mu2 var = mu1 + mu2 g1 = mean / sqrt((var)**3) g2 = 1 / var return mean, var, g1, g2 skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam') # Collect names of classes and objects in this module. pairs = list(globals().items()) _distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete) __all__ = _distn_names + _distn_gen_names
bsd-3-clause
anaderi/lhcb_trigger_ml
setup.py
1
1694
from setuptools import setup import codecs with codecs.open('README.rst', encoding='utf-8') as readme_file: long_description = readme_file.read() setup( name="hep_ml", version="0.1.5", description="Machine Learning for High Energy Physics", long_description=long_description, url='https://github.com/anaderi/lhcb_trigger_ml', # Author details author='Alex Rogozhnikov', author_email='axelr@yandex-team.ru', # Choose your license license='MIT', packages=['hep_ml'], package_dir={'hep_ml': 'hep_ml'}, classifiers=[ # Indicate who your project is intended for 'Intended Audience :: CERN, LHC, LHCb experiment, HEP, particle physics', 'Topic :: YDF :: Cern Tools', # Pick your license as you wish (should match "license" above) 'License :: MIT License', # Specify the Python versions you support here. In particular, ensure # that you indicate whether you support Python 2, Python 3 or both. 'Programming Language :: Python :: 2.7', ], # What does your project relate to? keywords='machine learning, supervised learning, ' 'uncorrelated methods of machine learning, high energy physics, particle physics', # List run-time dependencies here. These will be installed by pip when your project is installed. install_requires = [ 'ipython[all] >= 2.1.0', 'pyzmq >= 14.3.0', 'matplotlib >= 1.4', 'rootpy >= 0.7.1', 'root_numpy >= 3.3.0', 'pandas >= 0.14.0', 'scikit-learn >= 0.15', 'scipy >= 0.14.0', 'numpy >= 1.8.1', 'jinja2 >= 2.7.3', 'six', ], )
mit
tachylyte/HydroGeoPy
EXAMPLES/exampleMonteCarloUni.py
2
1242
# Simple test of probabilistic modelling # Calculates time to break through assuming plug flow from simplehydro import * from monte_carlo import * from conversion import * import matplotlib.pyplot as plt x = 10 # Distance, x (m) n = 0.3 # Effective porosity, n (-) K = 1e-7 # Hydraulic conductivity, K (m/s) H = 1 # Head where x=0 (m) h = 0 # Head where x=x (m) i = gradient2(H, h, x) # Deterministic v = velocity2(K, i, n) Breakthrough = x / v print('Deterministic breakthrough in ' + str(round(secsToDays(Breakthrough), 2)) + ' days') # Probabilistic I = 100001 # Number of iterations pK = Normal(1e-7, 1e-8, I) # Input distrubution for K results = [] for iteration in range(I): results.append(velocity2(pK[iteration], i, n)) results = [round(secsToDays(x/v), 2) for v in results] # Calculate breakthroughs #print(results) # the histogram of the data n, bins, patches = plt.hist(results, 50, normed=1, facecolor='green', alpha=0.75) # add a 'best fit' line #l = plt.plot(bins, results, 'r--', linewidth=1) plt.xlabel('Breakthrough (days)') plt.ylabel('Probability density') plt.axis([0, max(results), 0, 200/I]) plt.grid(True) plt.show()
bsd-2-clause
diogo149/CauseEffectPairsPaper
configs/default_categorical_only.py
1
7287
import numpy as np from scipy.stats import skew, kurtosis, shapiro, pearsonr, ansari, mood, levene, fligner, bartlett, mannwhitneyu from scipy.spatial.distance import braycurtis, canberra, chebyshev, cityblock, correlation, cosine, euclidean, hamming, jaccard, kulsinski, matching, russellrao, sqeuclidean from sklearn.preprocessing import LabelBinarizer from sklearn.linear_model import Ridge, LinearRegression, LogisticRegression from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, RandomForestClassifier, GradientBoostingClassifier from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score, accuracy_score, roc_auc_score, average_precision_score, f1_score, hinge_loss, matthews_corrcoef, precision_score, recall_score, zero_one_loss from sklearn.metrics.cluster import adjusted_mutual_info_score, adjusted_rand_score, completeness_score, homogeneity_completeness_v_measure, homogeneity_score, mutual_info_score, normalized_mutual_info_score, v_measure_score from boomlet.utils.aggregators import to_aggregator from boomlet.metrics import max_error, error_variance, relative_error_variance, gini_loss, categorical_gini_loss from boomlet.transform.type_conversion import Discretizer from autocause.feature_functions import * """ Functions used to combine a list of features into one coherent one. Sample use: 1. to convert categorical to numerical, we perform a one hot encoding 2. treat each binary column as a separate numerical feature 3. compute numerical features as usual 4. 
use each of the following functions to create a new feature (with the input as the nth feature for each of the columns) WARNING: these will be used in various locations throughout the code base and will result in feature size growing at faster than a linear rate """ AGGREGATORS = [ to_aggregator("max"), to_aggregator("min"), to_aggregator("median"), to_aggregator("mode"), to_aggregator("mean"), # to_aggregator("sum"), ] """ Boolean flags specifying whether or not to perform conversions """ CONVERT_TO_NUMERICAL = False CONVERT_TO_CATEGORICAL = True """ Functions that compute a metric on a single 1-D array """ UNARY_NUMERICAL_FEATURES = [ normalized_entropy, skew, kurtosis, np.std, shapiro, ] UNARY_CATEGORICAL_FEATURES = [ lambda x: len(set(x)), # number of unique ] """ Functions that compute a metric on two 1-D arrays """ BINARY_NN_FEATURES = [ independent_component, chi_square, pearsonr, correlation_magnitude, braycurtis, canberra, chebyshev, cityblock, correlation, cosine, euclidean, hamming, sqeuclidean, ansari, mood, levene, fligner, bartlett, mannwhitneyu, ] BINARY_NC_FEATURES = [ ] BINARY_CN_FEATURES = [ categorical_numerical_homogeneity, bucket_variance, anova, ] BINARY_CC_FEATURES = [ categorical_categorical_homogeneity, anova, dice_, jaccard, kulsinski, matching, rogerstanimoto_, russellrao, sokalmichener_, sokalsneath_, yule_, adjusted_mutual_info_score, adjusted_rand_score, completeness_score, homogeneity_completeness_v_measure, homogeneity_score, mutual_info_score, normalized_mutual_info_score, v_measure_score, ] """ Dictionaries of input type (e.g. 
B corresponds to pairs where binary data is the input) to pairs of converter functions and a boolean flag of whether or not to aggregate over the output of the converter function converter functions should have the type signature: converter(X_raw, X_current_type, Y_raw, Y_type) where X_raw is the data to convert """ NUMERICAL_CONVERTERS = dict( N=lambda x, *args: x, # identity function B=lambda x, *args: x, # identity function C=lambda x, *args: LabelBinarizer().fit_transform(x), ) CATEGORICAL_CONVERTERS = dict( N=lambda x, *args: Discretizer().fit_transform(x).flatten(), B=lambda x, *args: x, # identity function C=lambda x, *args: x, # identity function ) """ Whether or not the converters can result in a 2D output. This must be set to True if any of the respective converts can return a 2D output. """ NUMERICAL_CAN_BE_2D = True CATEGORICAL_CAN_BE_2D = False """ Estimators used to provide a fit for a variable """ REGRESSION_ESTIMATORS = [ Ridge(), LinearRegression(), DecisionTreeRegressor(random_state=0), RandomForestRegressor(random_state=0), GradientBoostingRegressor(subsample=0.5, n_estimators=10, random_state=0), KNeighborsRegressor(), ] CLASSIFICATION_ESTIMATORS = [ LogisticRegression(random_state=0), DecisionTreeClassifier(random_state=0), RandomForestClassifier(random_state=0), GradientBoostingClassifier(subsample=0.5, n_estimators=10, random_state=0), KNeighborsClassifier(), GaussianNB(), ] """ Functions to provide a value of how good a fit on a variable is """ REGRESSION_METRICS = [ explained_variance_score, mean_absolute_error, mean_squared_error, r2_score, max_error, error_variance, relative_error_variance, gini_loss, ] + BINARY_NN_FEATURES REGRESSION_RESIDUAL_METRICS = [ ] + UNARY_NUMERICAL_FEATURES BINARY_PROBABILITY_CLASSIFICATION_METRICS = [ roc_auc_score, hinge_loss, ] + REGRESSION_METRICS RESIDUAL_PROBABILITY_CLASSIFICATION_METRICS = [ ] + REGRESSION_RESIDUAL_METRICS BINARY_CLASSIFICATION_METRICS = [ accuracy_score, average_precision_score, 
f1_score, matthews_corrcoef, precision_score, recall_score, zero_one_loss, categorical_gini_loss, ] ND_CLASSIFICATION_METRICS = [ # metrics for N-dimensional classification ] + BINARY_CC_FEATURES """ Functions to assess the model (e.g. complexity) of the fit on a numerical variable of type signature: metric(clf, X, y) """ REGRESSION_MODEL_METRICS = [ # TODO model complexity metrics ] CLASSIFICATION_MODEL_METRICS = [ # TODO use regression model metrics on predict_proba ] """ The operations to perform on the A->B features and B->A features. """ RELATIVE_FEATURES = [ # Identity functions, comment out the next 2 lines for only relative features lambda x, y: x, lambda x, y: y, lambda x, y: x - y, ] """ Whether or not to treat each observation (A,B) as two observations: (A,B) and (B,A) If this is done and training labels are given, those labels will have to be reflected as well. The reflection is performed through appending at the end. (e.g. if we have N training examples, observation N+1 in the output will be the first example reflected) """ REFLECT_DATA = False """ Whether or not metafeatures based on the types of A and B are generated. e.g. 1/0 feature on whether or not A is Numerical, etc. """ ADD_METAFEATURES = True """ Whether or not to generate combination features between the computed features and metafeatures. e.g. for each feature and metafeature, generate a new feature which is the product of the two WARNING: will generate a LOT of features (approximately 21 times as many) """ COMPUTE_METAFEATURE_COMBINATIONS = False
mit
shikhardb/scikit-learn
sklearn/utils/graph.py
50
6169
""" Graph utilities and algorithms Graphs are represented with their adjacency matrices, preferably using sparse matrices. """ # Authors: Aric Hagberg <hagberg@lanl.gov> # Gael Varoquaux <gael.varoquaux@normalesup.org> # Jake Vanderplas <vanderplas@astro.washington.edu> # License: BSD 3 clause import numpy as np from scipy import sparse from .graph_shortest_path import graph_shortest_path ############################################################################### # Path and connected component analysis. # Code adapted from networkx def single_source_shortest_path_length(graph, source, cutoff=None): """Return the shortest path length from source to all reachable nodes. Returns a dictionary of shortest path lengths keyed by target. Parameters ---------- graph: sparse matrix or 2D array (preferably LIL matrix) Adjacency matrix of the graph source : node label Starting node for path cutoff : integer, optional Depth to stop the search - only paths of length <= cutoff are returned. Examples -------- >>> from sklearn.utils.graph import single_source_shortest_path_length >>> import numpy as np >>> graph = np.array([[ 0, 1, 0, 0], ... [ 1, 0, 1, 0], ... [ 0, 1, 0, 1], ... 
[ 0, 0, 1, 0]]) >>> single_source_shortest_path_length(graph, 0) {0: 0, 1: 1, 2: 2, 3: 3} >>> single_source_shortest_path_length(np.ones((6, 6)), 2) {0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1} """ if sparse.isspmatrix(graph): graph = graph.tolil() else: graph = sparse.lil_matrix(graph) seen = {} # level (number of hops) when seen in BFS level = 0 # the current level next_level = [source] # dict of nodes to check at next level while next_level: this_level = next_level # advance to next level next_level = set() # and start a new list (fringe) for v in this_level: if v not in seen: seen[v] = level # set the level of vertex v next_level.update(graph.rows[v]) if cutoff is not None and cutoff <= level: break level += 1 return seen # return all path lengths as dictionary if hasattr(sparse, 'connected_components'): connected_components = sparse.connected_components else: from .sparsetools import connected_components ############################################################################### # Graph laplacian def graph_laplacian(csgraph, normed=False, return_diag=False): """ Return the Laplacian matrix of a directed graph. For non-symmetric graphs the out-degree is used in the computation. Parameters ---------- csgraph : array_like or sparse matrix, 2 dimensions compressed-sparse graph, with shape (N, N). normed : bool, optional If True, then compute normalized Laplacian. return_diag : bool, optional If True, then return diagonal as well as laplacian. Returns ------- lap : ndarray The N x N laplacian matrix of graph. diag : ndarray The length-N diagonal of the laplacian matrix. diag is returned only if return_diag is True. Notes ----- The Laplacian matrix of a graph is sometimes referred to as the "Kirchoff matrix" or the "admittance matrix", and is useful in many parts of spectral graph theory. In particular, the eigen-decomposition of the laplacian matrix can give insight into many properties of the graph. 
For non-symmetric directed graphs, the laplacian is computed using the out-degree of each node. """ if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]: raise ValueError('csgraph must be a square matrix or array') if normed and (np.issubdtype(csgraph.dtype, np.int) or np.issubdtype(csgraph.dtype, np.uint)): csgraph = csgraph.astype(np.float) if sparse.isspmatrix(csgraph): return _laplacian_sparse(csgraph, normed=normed, return_diag=return_diag) else: return _laplacian_dense(csgraph, normed=normed, return_diag=return_diag) def _laplacian_sparse(graph, normed=False, return_diag=False): n_nodes = graph.shape[0] if not graph.format == 'coo': lap = (-graph).tocoo() else: lap = -graph.copy() diag_mask = (lap.row == lap.col) if not diag_mask.sum() == n_nodes: # The sparsity pattern of the matrix has holes on the diagonal, # we need to fix that diag_idx = lap.row[diag_mask] diagonal_holes = list(set(range(n_nodes)).difference(diag_idx)) new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))]) new_row = np.concatenate([lap.row, diagonal_holes]) new_col = np.concatenate([lap.col, diagonal_holes]) lap = sparse.coo_matrix((new_data, (new_row, new_col)), shape=lap.shape) diag_mask = (lap.row == lap.col) lap.data[diag_mask] = 0 w = -np.asarray(lap.sum(axis=1)).squeeze() if normed: w = np.sqrt(w) w_zeros = (w == 0) w[w_zeros] = 1 lap.data /= w[lap.row] lap.data /= w[lap.col] lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype( lap.data.dtype) else: lap.data[diag_mask] = w[lap.row[diag_mask]] if return_diag: return lap, w return lap def _laplacian_dense(graph, normed=False, return_diag=False): n_nodes = graph.shape[0] lap = -np.asarray(graph) # minus sign leads to a copy # set diagonal to zero lap.flat[::n_nodes + 1] = 0 w = -lap.sum(axis=0) if normed: w = np.sqrt(w) w_zeros = (w == 0) w[w_zeros] = 1 lap /= w lap /= w[:, np.newaxis] lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype) else: lap.flat[::n_nodes + 1] = w.astype(lap.dtype) if 
return_diag: return lap, w return lap
bsd-3-clause
sidnarayanan/BAdNet
train/images/utils.py
1
3654
import numpy as np # import seaborn from collections import namedtuple from keras import backend as K from keras.engine.topology import Layer from scipy.interpolate import interp1d ## Loss functions dice_smooth = 1. def dice_coef(y_true, y_pred): y_true_f = K.flatten(y_true) y_pred_f = K.flatten(y_pred) intersection = K.sum(y_true_f * y_pred_f) return (2. * intersection + dice_smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + dice_smooth) def dice_coef_loss(y_true, y_pred): return -dice_coef(y_true, y_pred) ## Layers and ops ## plotting tools # class H1: # '''Wrapper around numpy histogram # ''' # def __init__(self,hist): # self.bin_edges = hist[1] # self.n_bins = self.bin_edges.shape[0]-1 # self.content = hist[0] # def find_bin(self,x): # if x < self.bin_edges[0]: # return -1 # for ib in self.xrange(self.n_bins): # if x>= self.bin_edges[ib]: # return ib # return self.n_bins # def get_bin(self,ib): # if ib<0 or ib>=self.n_bins: # return 0 # return self.content[ib] # def integral(self,lo=None,hi=None): # if not lo: # lo = 0 # if not hi: # hi = self.n_bins # widths = np.diff(self.bin_edges[lo:hi+1]) # return np.sum(self.content[lo:hi] * widths) # # # def plot_hists(props, hists): # plt.clf() # bins = props['bins'] # for h in hists: # plt.hist(h['vals'], bins=bins, weights=h['weights']/np.sum(h['weights']), # histtype='step', # fill=False, # color=h['color'], label=h['label']) # if 'xlabel' in props: # plt.xlabel(props['xlabel']) # if 'ylabel' in props: # plt.ylabel(props['ylabel']) # plt.legend(loc=0) # plt.savefig(props['output']+'.png',bbox_inches='tight',dpi=300) # plt.savefig(props['output']+'.pdf',bbox_inches='tight') # # # # Tagger = namedtuple('Tagger',['response','name','lo','hi','flip']) # # def create_roc(taggers, labels, weights, output, nbins=50): # colors = ['k','r','g','b'] # plt.clf() # wps = [] # for t in taggers: # color = colors[0] # del colors[0] # h_sig = H1(np.histogram(t.response[labels==1], # weights=weights[labels==1], # 
bins=nbins,range=(t.lo,t.hi), # density=True)) # h_bkg = H1(np.histogram(t.response[labels==0], # weights=weights[labels==0], # bins=nbins,range=(t.lo,t.hi), # density=True)) # # epsilons_sig = [] # epsilons_bkg = [] # for ib in xrange(nbins): # if t.flip: # esig = h_sig.integral(hi=ib) # ebkg = h_bkg.integral(hi=ib) # else: # esig = h_sig.integral(lo=ib) # ebkg = h_bkg.integral(lo=ib) # epsilons_sig.append(esig) # epsilons_bkg.append(ebkg) # # interp = interp1d(epsilons_bkg, # np.arange(t.lo,t.hi,float(t.hi-t.lo)/nbins)) # wps.append(interp(0.05)) # # plt.plot(epsilons_sig, epsilons_bkg, color+'-',label=t.name) # plt.axis([0,1,0.001,1]) # plt.yscale('log') # plt.legend(loc=0) # plt.ylabel('Background fake rate') # plt.xlabel('Signal efficiency') # plt.savefig(output+'.png',bbox_inches='tight',dpi=300) # plt.savefig(output+'.pdf',bbox_inches='tight') # # return wps
mit
Chandra-MARX/marx-test
marxtest/plot_utils.py
1
2865
from matplotlib import scale as mscale from matplotlib import transforms as mtransforms from matplotlib.ticker import AutoLocator, ScalarFormatter class PowerScale(mscale.ScaleBase): """ Scales values v with with v^pow. """ name = 'power' def __init__(self, axis, **kwargs): """ power: The p in x**p. """ self.power = kwargs.pop("power", 1.) super().__init__(axis, **kwargs) def get_transform(self): """ Override this method to return a new instance that does the actual transformation of the data. """ return self.PowerTransform(self.power) def set_default_locators_and_formatters(self, axis): """ Override to set up the locators and formatters to use with the scale. """ axis.set_major_locator(AutoLocator()) axis.set_major_formatter(ScalarFormatter()) def limit_range_for_scale(self, vmin, vmax, minpos): """ Override to limit the bounds of the axis to the domain of the transform. For power < 1, negative values are not allowed. Unlike the autoscaling provided by the tick locators, this range limiting will always be adhered to, whether the axis range is set manually, determined automatically or changed through panning and zooming. """ if self.power >= 1: return vmin, vmax else: return max(vmin, 0), max(vmax, 0) class PowerTransform(mtransforms.Transform): # There are two value members that must be defined. # ``input_dims`` and ``output_dims`` specify number of input # dimensions and output dimensions to the transformation. # These are used by the transformation framework to do some # error checking and prevent incompatible transformations from # being connected together. When defining transforms for a # scale, which are, by definition, separable and have only one # dimension, these members should always be set to 1. input_dims = 1 output_dims = 1 is_separable = True def __init__(self, power): mtransforms.Transform.__init__(self) self.power = power def transform_non_affine(self, a): """ This transform takes an Nx1 ``numpy`` array and returns a transformed copy. 
""" return a**self.power def inverted(self): """ Override this method so matplotlib knows how to get the inverse transform for this transform. """ return PowerScale.PowerTransform(1. / self.power) # Now that the Scale class has been defined, it must be registered so # that ``matplotlib`` can find it. mscale.register_scale(PowerScale)
gpl-2.0
samzhang111/scikit-learn
sklearn/datasets/tests/test_svmlight_format.py
228
11221
from bz2 import BZ2File import gzip from io import BytesIO import numpy as np import os import shutil from tempfile import NamedTemporaryFile from sklearn.externals.six import b from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import raises from sklearn.utils.testing import assert_in import sklearn from sklearn.datasets import (load_svmlight_file, load_svmlight_files, dump_svmlight_file) currdir = os.path.dirname(os.path.abspath(__file__)) datafile = os.path.join(currdir, "data", "svmlight_classification.txt") multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt") invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt") invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt") def test_load_svmlight_file(): X, y = load_svmlight_file(datafile) # test X's shape assert_equal(X.indptr.shape[0], 7) assert_equal(X.shape[0], 6) assert_equal(X.shape[1], 21) assert_equal(y.shape[0], 6) # test X's non-zero values for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5), (1, 5, 1.0), (1, 12, -3), (2, 20, 27)): assert_equal(X[i, j], val) # tests X's zero values assert_equal(X[0, 3], 0) assert_equal(X[0, 5], 0) assert_equal(X[1, 8], 0) assert_equal(X[1, 16], 0) assert_equal(X[2, 18], 0) # test can change X's values X[0, 2] *= 2 assert_equal(X[0, 2], 5) # test y assert_array_equal(y, [1, 2, 3, 4, 1, 2]) def test_load_svmlight_file_fd(): # test loading from file descriptor X1, y1 = load_svmlight_file(datafile) fd = os.open(datafile, os.O_RDONLY) try: X2, y2 = load_svmlight_file(fd) assert_array_equal(X1.data, X2.data) assert_array_equal(y1, y2) finally: os.close(fd) def test_load_svmlight_file_multilabel(): X, y = load_svmlight_file(multifile, multilabel=True) assert_equal(y, [(0, 1), (2,), (), (1, 2)]) def test_load_svmlight_files(): X_train, y_train, 
X_test, y_test = load_svmlight_files([datafile] * 2, dtype=np.float32) assert_array_equal(X_train.toarray(), X_test.toarray()) assert_array_equal(y_train, y_test) assert_equal(X_train.dtype, np.float32) assert_equal(X_test.dtype, np.float32) X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3, dtype=np.float64) assert_equal(X1.dtype, X2.dtype) assert_equal(X2.dtype, X3.dtype) assert_equal(X3.dtype, np.float64) def test_load_svmlight_file_n_features(): X, y = load_svmlight_file(datafile, n_features=22) # test X'shape assert_equal(X.indptr.shape[0], 7) assert_equal(X.shape[0], 6) assert_equal(X.shape[1], 22) # test X's non-zero values for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (1, 5, 1.0), (1, 12, -3)): assert_equal(X[i, j], val) # 21 features in file assert_raises(ValueError, load_svmlight_file, datafile, n_features=20) def test_load_compressed(): X, y = load_svmlight_file(datafile) with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp: tmp.close() # necessary under windows with open(datafile, "rb") as f: shutil.copyfileobj(f, gzip.open(tmp.name, "wb")) Xgz, ygz = load_svmlight_file(tmp.name) # because we "close" it manually and write to it, # we need to remove it manually. os.remove(tmp.name) assert_array_equal(X.toarray(), Xgz.toarray()) assert_array_equal(y, ygz) with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp: tmp.close() # necessary under windows with open(datafile, "rb") as f: shutil.copyfileobj(f, BZ2File(tmp.name, "wb")) Xbz, ybz = load_svmlight_file(tmp.name) # because we "close" it manually and write to it, # we need to remove it manually. 
os.remove(tmp.name) assert_array_equal(X.toarray(), Xbz.toarray()) assert_array_equal(y, ybz) @raises(ValueError) def test_load_invalid_file(): load_svmlight_file(invalidfile) @raises(ValueError) def test_load_invalid_order_file(): load_svmlight_file(invalidfile2) @raises(ValueError) def test_load_zero_based(): f = BytesIO(b("-1 4:1.\n1 0:1\n")) load_svmlight_file(f, zero_based=False) def test_load_zero_based_auto(): data1 = b("-1 1:1 2:2 3:3\n") data2 = b("-1 0:0 1:1\n") f1 = BytesIO(data1) X, y = load_svmlight_file(f1, zero_based="auto") assert_equal(X.shape, (1, 3)) f1 = BytesIO(data1) f2 = BytesIO(data2) X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto") assert_equal(X1.shape, (1, 4)) assert_equal(X2.shape, (1, 4)) def test_load_with_qid(): # load svmfile with qid attribute data = b(""" 3 qid:1 1:0.53 2:0.12 2 qid:1 1:0.13 2:0.1 7 qid:2 1:0.87 2:0.12""") X, y = load_svmlight_file(BytesIO(data), query_id=False) assert_array_equal(y, [3, 2, 7]) assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]]) res1 = load_svmlight_files([BytesIO(data)], query_id=True) res2 = load_svmlight_file(BytesIO(data), query_id=True) for X, y, qid in (res1, res2): assert_array_equal(y, [3, 2, 7]) assert_array_equal(qid, [1, 1, 2]) assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]]) @raises(ValueError) def test_load_invalid_file2(): load_svmlight_files([datafile, invalidfile, datafile]) @raises(TypeError) def test_not_a_filename(): # in python 3 integers are valid file opening arguments (taken as unix # file descriptors) load_svmlight_file(.42) @raises(IOError) def test_invalid_filename(): load_svmlight_file("trou pic nic douille") def test_dump(): Xs, y = load_svmlight_file(datafile) Xd = Xs.toarray() # slicing a csr_matrix can unsort its .indices, so test that we sort # those correctly Xsliced = Xs[np.arange(Xs.shape[0])] for X in (Xs, Xd, Xsliced): for zero_based in (True, False): for dtype in [np.float32, np.float64, np.int32]: f = 
BytesIO() # we need to pass a comment to get the version info in; # LibSVM doesn't grok comments so they're not put in by # default anymore. dump_svmlight_file(X.astype(dtype), y, f, comment="test", zero_based=zero_based) f.seek(0) comment = f.readline() try: comment = str(comment, "utf-8") except TypeError: # fails in Python 2.x pass assert_in("scikit-learn %s" % sklearn.__version__, comment) comment = f.readline() try: comment = str(comment, "utf-8") except TypeError: # fails in Python 2.x pass assert_in(["one", "zero"][zero_based] + "-based", comment) X2, y2 = load_svmlight_file(f, dtype=dtype, zero_based=zero_based) assert_equal(X2.dtype, dtype) assert_array_equal(X2.sorted_indices().indices, X2.indices) if dtype == np.float32: assert_array_almost_equal( # allow a rounding error at the last decimal place Xd.astype(dtype), X2.toarray(), 4) else: assert_array_almost_equal( # allow a rounding error at the last decimal place Xd.astype(dtype), X2.toarray(), 15) assert_array_equal(y, y2) def test_dump_multilabel(): X = [[1, 0, 3, 0, 5], [0, 0, 0, 0, 0], [0, 5, 0, 1, 0]] y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]] f = BytesIO() dump_svmlight_file(X, y, f, multilabel=True) f.seek(0) # make sure it dumps multilabel correctly assert_equal(f.readline(), b("1 0:1 2:3 4:5\n")) assert_equal(f.readline(), b("0,2 \n")) assert_equal(f.readline(), b("0,1 1:5 3:1\n")) def test_dump_concise(): one = 1 two = 2.1 three = 3.01 exact = 1.000000000000001 # loses the last decimal place almost = 1.0000000000000001 X = [[one, two, three, exact, almost], [1e9, 2e18, 3e27, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]] y = [one, two, three, exact, almost] f = BytesIO() dump_svmlight_file(X, y, f) f.seek(0) # make sure it's using the most concise format possible assert_equal(f.readline(), b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n")) assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n")) assert_equal(f.readline(), b("3.01 \n")) assert_equal(f.readline(), 
b("1.000000000000001 \n")) assert_equal(f.readline(), b("1 \n")) f.seek(0) # make sure it's correct too :) X2, y2 = load_svmlight_file(f) assert_array_almost_equal(X, X2.toarray()) assert_array_equal(y, y2) def test_dump_comment(): X, y = load_svmlight_file(datafile) X = X.toarray() f = BytesIO() ascii_comment = "This is a comment\nspanning multiple lines." dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False) f.seek(0) X2, y2 = load_svmlight_file(f, zero_based=False) assert_array_almost_equal(X, X2.toarray()) assert_array_equal(y, y2) # XXX we have to update this to support Python 3.x utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc") f = BytesIO() assert_raises(UnicodeDecodeError, dump_svmlight_file, X, y, f, comment=utf8_comment) unicode_comment = utf8_comment.decode("utf-8") f = BytesIO() dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False) f.seek(0) X2, y2 = load_svmlight_file(f, zero_based=False) assert_array_almost_equal(X, X2.toarray()) assert_array_equal(y, y2) f = BytesIO() assert_raises(ValueError, dump_svmlight_file, X, y, f, comment="I've got a \0.") def test_dump_invalid(): X, y = load_svmlight_file(datafile) f = BytesIO() y2d = [y] assert_raises(ValueError, dump_svmlight_file, X, y2d, f) f = BytesIO() assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f) def test_dump_query_id(): # test dumping a file with query_id X, y = load_svmlight_file(datafile) X = X.toarray() query_id = np.arange(X.shape[0]) // 2 f = BytesIO() dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True) f.seek(0) X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True) assert_array_almost_equal(X, X1.toarray()) assert_array_almost_equal(y, y1) assert_array_almost_equal(query_id, query_id1)
bsd-3-clause
sapfo/medeas
src/old_scripts/main_simul_eigenvalues_distribution.py
1
7741
#!/usr/bin/env python """ Created Wed Oct 7 15:04:36 CEST 2015 @author: sapfo """ import matplotlib #matplotlib.use('Agg') import simul_ms import python_cmdscale #import python_pca import exp import sys import numpy as np import pylab as py from scipy.stats import norm ''' We want to pick n1, n2, D, T? Simulate data Compute the distance matrix MDS the distance matrix Get coordinates Get eigenvalues, eigenvectors Plot comparing with the other eigenvalues ''' #################### FIXED ############# n = 30 n1 = 5 n2 = 5 n3 = 5 D = 0.4 D1 = 0.1 #(D1<D) nreps = 1000 ## simulate data rescaling = 2.0 verbose = False ########### 1 population ############## print "########### 1 population ##############" ## expected tree length for one population exp_tree_length = 0 for i in range(2,n+1): exp_tree_length += 2./(i-1) nsnps = [100] T_mds = {} T_pca = {} Eigenvalues_mds = [] Distances_noise = [] Expected_Delta = np.zeros((n,n)) for kk in range(1,n): Expected_Delta += np.eye(n,k=kk) Expected_Delta += np.eye(n,k=-kk) Expected_Delta *= 2./exp_tree_length print Expected_Delta for nsnp in nsnps: T_mds[nsnp] = [] T_pca[nsnp] = [] for iteration in range(nreps): params,data,tree_lengths = simul_ms.ms_one_pops(n=n,nreps=nsnp,verbose=0) Delta = simul_ms.distance_matrix(data=data,verbose=0) if verbose: print "Delta: ",Delta Diff = Delta - Expected_Delta if verbose: print "Diff: ",Diff Distances_noise += list(Diff.flatten()) #Expected_Delta = zeros evals_mds, evecs_mds, Y_mds = python_cmdscale.cmdscale(Delta) Eigenvalues_mds += list(evals_mds[:-1]) #evals_pca, evecs_pca, Y_pca = python_pca.PCA(data.T) #print "params: ",params if verbose: print "average tree length (computed with ms): ",rescaling*np.average(tree_lengths) if verbose: print "expected tree length (analytical coal): ",exp_tree_length # mds expected total tree length, bias, rmse t_mds = (2./(np.average(evals_mds[:-1])))**(1/2.) 
T_mds[nsnp].append(t_mds) if verbose: print "expected T (mds) from eigenvalues: ",T_mds # pca expected tree length, bias, rmse #t_pca = 1./np.average(evals_pca[:-1]) #T_pca[nsnp].append(t_pca) #if verbose: print "expected T (pca) from eigenvalues: ",T_pca print "expected lambda1 (mds) for (Ivan analytical): ",2./((exp_tree_length)**2) #print "expected lambda1 (pca) for (Ivan analytical): ",1./((exp_tree_length)) #print "observed lambda1 (mds procedure): ",evals_mds[0] #print "observed lambda1 (pca procedure): ",evals_pca[0] #print "observed average lambdas (mds): ",np.average(evals_mds[:-1]) #print "observed average lambdas (pca): ",np.average(evals_pca[:-1]) #print "evals (first 10): ",evals_mds[:10] mu1,std1 = norm.fit(Eigenvalues_mds) mu2,std2 = norm.fit(Distances_noise) fig = py.figure() py.suptitle("1 population, %s snps, %s rep"%(nsnp,nreps)) ax1 = fig.add_subplot(2,1,1) py.title("Eigenvalues") py.hist(Eigenvalues_mds,normed=True,alpha=0.5) py.vlines(2./((exp_tree_length)**2),0,10,color='red') xmin,xmax=py.xlim() x = np.linspace(xmin,xmax,100) p = norm.pdf(x,mu1,std1) py.plot(x,p,'k',linewidth=2) ax1 = fig.add_subplot(2,1,2) py.title("Distances") py.hist(Distances_noise,normed=True,alpha=0.5) xmin,xmax=py.xlim() x = np.linspace(xmin,xmax,100) p = norm.pdf(x,mu2,std2) py.plot(x,p,'k',linewidth=2) #py.savefig("Eigenvalues_mds_1pop.pdf") py.show() sys.exit() ### plotting one population ### py.plot(Y[:,0],(Y[:,1]),'o',color='blue') py.title("simulations 1 population n = %s, nreps = %s "%(n,nreps)) py.xlabel("dim 1 (%.2f %%)"%(1.*evals[0]/np.average(evals[:-1]))) py.ylabel("dim 2 (%.2f %%)"%(1.*evals[1]/np.average(evals[:-1]))) ########### 2 populations ############## print "########### 2 populations ##############" #ms simul params_2pops,data_2pops,tree_lengths_2pops = simul_ms.ms_two_pops(n1=n1, n2=n2, D=1./rescaling*D,nreps=nreps,verbose=0) avg_tree_length_2pops = rescaling*np.average(tree_lengths_2pops) Delta_2pops = 
simul_ms.distance_matrix(data=data_2pops,verbose=0) #cmdscale evals_2pops, evecs_2pops, Y_2pops = python_cmdscale.cmdscale(Delta_2pops) exp.T_D_two_pops(eigenvalues = evals_2pops,n1=n1,n2=n2,diploid=2) # analytical params_exp_2pops,evals_exp_2pops, evec_exp_2pops = exp.two_pops(n1=n1, n2=n2, D=D, T=avg_tree_length_2pops) print "params_2pops (ms): ",params_2pops print "params_exp_2pops: ",params_exp_2pops print "average tree length (ms): ",rescaling*np.average(tree_lengths_2pops) #print "expected tree length (coal): ",exp_tree_length print "expected lambda1 (analytical): ",evals_exp_2pops[0] print "observed lambda1 (cmdscale): ",evals_2pops[0] print "expected lambda2 (analytical): ",evals_exp_2pops[1] print "observed lambda2 (cmdscale): ",evals_2pops[1] print "average observed lambda2...n-1 (cmdscale): ",np.average(evals_2pops[1:-1]) print evals_exp_2pops[:10] print evals_2pops[:10] #print "observed lambda1 (mds): ",evals[0] #print "observed average lambdas (mds): ",np.average(evals[:-1]) ### plotting two population ### py.figure() py.plot(Y_2pops[:,0][:n1],Y_2pops[:,1][:n1],'x',color='orange') py.plot(Y_2pops[:,0][n1:],Y_2pops[:,1][n1:],'o',color='blue') py.title("simulations 2 pops n1 = %s, n2 = %s, D = %s, nreps = %s "%(n1,n2,D,nreps)) py.xlabel("dim 1") py.ylabel("dim 2") #py.xlabel("dim 1 (%.2f %%)"%(1.*evals[0]/np.average(evals[:-1]))) #py.ylabel("dim 2 (%.2f %%)"%(1.*evals[1]/np.average(evals[:-1]))) #py.show() ########### 3 populations ############## print "########### 3 populations ##############" nreps = 100 #ms simul params_3pops,data_3pops,tree_lengths_3pops = simul_ms.ms_three_pops(n1=n1, n2=n2, n3=n3, D=1./rescaling*D, D1 = 1./rescaling*D1,nreps=nreps,verbose=0) avg_tree_length_3pops = rescaling*np.average(tree_lengths_3pops) Delta_3pops = simul_ms.distance_matrix(data=data_3pops,verbose=0) #cmdscale evals_3pops, evecs_3pops, Y_3pops = python_cmdscale.cmdscale(Delta_3pops) try: Texp,Dexp,D1exp,Drescaledexp,D1rescaledexp = 
exp.T_D_D1_three_pops(eigenvalues = evals_3pops,n1=n1,n2=n2,n3=n3,diploid=2) except: Texp,Dexp,D1exp,Drescaledexp,D1rescaledexp= 1,1,1,1,1 print "average tree length (ms): ",rescaling*np.average(tree_lengths_3pops) print "params_3pops (ms): ",params_3pops # analytical params_exp_3pops,evals_exp_3pops, evec_exp_3pops = exp.three_pops(n1=n1, n2=n2, n3=n3, D=D, D1=D1, T=avg_tree_length_3pops) print "params_3pops (ms): ",params_3pops print "params_exp_3pops: ",params_exp_3pops print "average tree length (ms): ",rescaling*np.average(tree_lengths_3pops) #print "expected tree length (coal): ",exp_tree_length print "expected lambda1 (analytical): ",evals_exp_3pops[0] print "observed lambda1 (cmdscale): ",evals_3pops[0] print "" print "expected lambda2 (analytical): ",evals_exp_3pops[1] print "observed lambda2 (cmdscale): ",evals_3pops[1] print "" print "expected lambda3 (analytical): ",evals_exp_3pops[2] print "observed lambda3 (cmdscale): ",evals_3pops[2] print "average observed lambda3...n-1 (cmdscale): ",np.average(evals_3pops[2:-1]) print evals_exp_3pops[:10] print evals_3pops[:10] sys.exit() ### plotting three population ### py.figure() py.plot(Y_3pops[:,0][:n1],Y_3pops[:,1][:n1],'D',color='orange') py.plot(Y_3pops[:,0][n1:n1+n2],Y_3pops[:,1][n1:n1+n2],'o',color='blue') py.plot(Y_3pops[:,0][n1+n2:],Y_3pops[:,1][n1+n2:],'v',color='green') py.title("simulations 3 pops n1 = %(n1)s, n2 = %(n2)s, n3 = %(n3)s, D = %(D)s, D1 = %(D1)s, nreps = %(nreps)s "%params_3pops) py.xlabel("dim 1") py.ylabel("dim 2") py.show() ########### 4 populations and above ##############
gpl-3.0
Aasmi/scikit-learn
benchmarks/bench_plot_parallel_pairwise.py
297
1247
# Author: Mathieu Blondel <mathieu@mblondel.org> # License: BSD 3 clause import time import pylab as pl from sklearn.utils import check_random_state from sklearn.metrics.pairwise import pairwise_distances from sklearn.metrics.pairwise import pairwise_kernels def plot(func): random_state = check_random_state(0) one_core = [] multi_core = [] sample_sizes = range(1000, 6000, 1000) for n_samples in sample_sizes: X = random_state.rand(n_samples, 300) start = time.time() func(X, n_jobs=1) one_core.append(time.time() - start) start = time.time() func(X, n_jobs=-1) multi_core.append(time.time() - start) pl.figure('scikit-learn parallel %s benchmark results' % func.__name__) pl.plot(sample_sizes, one_core, label="one core") pl.plot(sample_sizes, multi_core, label="multi core") pl.xlabel('n_samples') pl.ylabel('Time (s)') pl.title('Parallel %s' % func.__name__) pl.legend() def euclidean_distances(X, n_jobs): return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs) def rbf_kernels(X, n_jobs): return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1) plot(euclidean_distances) plot(rbf_kernels) pl.show()
bsd-3-clause
fabioticconi/scikit-learn
sklearn/utils/tests/test_estimator_checks.py
69
3894
import scipy.sparse as sp import numpy as np import sys from sklearn.externals.six.moves import cStringIO as StringIO from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.utils.testing import assert_raises_regex, assert_true from sklearn.utils.estimator_checks import check_estimator from sklearn.utils.estimator_checks import check_estimators_unfitted from sklearn.ensemble import AdaBoostClassifier from sklearn.linear_model import MultiTaskElasticNet from sklearn.utils.validation import check_X_y, check_array class CorrectNotFittedError(ValueError): """Exception class to raise if estimator is used before fitting. Like NotFittedError, it inherits from ValueError, but not from AttributeError. Used for testing only. """ class BaseBadClassifier(BaseEstimator, ClassifierMixin): def fit(self, X, y): return self def predict(self, X): return np.ones(X.shape[0]) class NoCheckinPredict(BaseBadClassifier): def fit(self, X, y): X, y = check_X_y(X, y) return self class NoSparseClassifier(BaseBadClassifier): def fit(self, X, y): X, y = check_X_y(X, y, accept_sparse=['csr', 'csc']) if sp.issparse(X): raise ValueError("Nonsensical Error") return self def predict(self, X): X = check_array(X) return np.ones(X.shape[0]) class CorrectNotFittedErrorClassifier(BaseBadClassifier): def fit(self, X, y): X, y = check_X_y(X, y) self.coef_ = np.ones(X.shape[1]) return self def predict(self, X): if not hasattr(self, 'coef_'): raise CorrectNotFittedError("estimator is not fitted yet") X = check_array(X) return np.ones(X.shape[0]) def test_check_estimator(): # tests that the estimator actually fails on "bad" estimators. # not a complete test of all checks, which are very extensive. 
# check that we have a set_params and can clone msg = "it does not implement a 'get_params' methods" assert_raises_regex(TypeError, msg, check_estimator, object) # check that we have a fit method msg = "object has no attribute 'fit'" assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator) # check that fit does input validation msg = "TypeError not raised by fit" assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier) # check that predict does input validation (doesn't accept dicts in input) msg = "Estimator doesn't check for NaN and inf in predict" assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict) # check for sparse matrix input handling name = NoSparseClassifier.__name__ msg = "Estimator " + name + " doesn't seem to fail gracefully on sparse data" # the check for sparse input handling prints to the stdout, # instead of raising an error, so as not to remove the original traceback. # that means we need to jump through some hoops to catch it. old_stdout = sys.stdout string_buffer = StringIO() sys.stdout = string_buffer try: check_estimator(NoSparseClassifier) except: pass finally: sys.stdout = old_stdout assert_true(msg in string_buffer.getvalue()) # doesn't error on actual estimator check_estimator(AdaBoostClassifier) check_estimator(MultiTaskElasticNet) def test_check_estimators_unfitted(): # check that a ValueError/AttributeError is raised when calling predict # on an unfitted estimator msg = "AttributeError or ValueError not raised by predict" assert_raises_regex(AssertionError, msg, check_estimators_unfitted, "estimator", NoSparseClassifier) # check that CorrectNotFittedError inherit from either ValueError # or AttributeError check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
bsd-3-clause
GGiecold/ECLAIR
src/ECLAIR/Statistical_performance/Robustness_analysis.py
1
18145
#!/usr/bin/env python # ECLAIR/src/ECLAIR/Statistics/Robustness_analysis.py; # Author: Gregory Giecold for the GC Yuan Lab # Affiliation: Harvard University # Contact: g.giecold@gmail.com; ggiecold@jimmy.harvard.edu """ECLAIR is a package for the robust and scalable inference of cell lineages from gene expression data. ECLAIR achieves a higher level of confidence in the estimated lineages through the use of approximation algorithms for consensus clustering and by combining the information from an ensemble of minimum spanning trees so as to come up with an improved, aggregated lineage tree. In addition, the present package features several customized algorithms for assessing the similarity between weighted graphs or unrooted trees and for estimating the reproducibility of each edge to a given tree. References ---------- * Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C., "Robust Lineage Reconstruction from High-Dimensional Single-Cell Data". ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748 * Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework for Combining Multiple Partitions". In: Journal of Machine Learning Research, 3, pp. 583-617. 2002 * Conte, D., Foggia, P., Sansone, C. and Vento, M., "Thirty Years of Graph Matching in Pattern Recognition". In: International Journal of Pattern Recognition and Artificial Intelligence, 18, 3, pp. 265-298. 
2004 """ from __future__ import print_function from ..Build_instance import ECLAIR_core as ECLR from .Statistical_tests import robustness_metrics from collections import namedtuple import numpy as np import operator import os import pkg_resources import random from sklearn import cross_validation from sklearn.metrics import pairwise_distances_argmin_min from tempfile import NamedTemporaryFile import tarfile import time import zipfile __all__ = ['ECLAIR_generator', 'experiment_1', 'experiment_2', 'experiment_3'] def extract_file(path, output_directory = '.'): if path.endswith('.zip'): opener, mode = zipfile.ZipFile, 'r' elif path.endswith('.tar.gz') or path.endswith('.tgz'): opener, mode = tarfile.open, 'r:gz' elif path.endswith('.tar.bz2') or path.endswith('.tbz'): opener, mode = tarfile.open, 'r:bz2' else: raise ValueError, "\nERROR: ECLAIR: Robustness_analysis: failed to extract {0}; no appropriate extractor could be found".format(path) cwd = os.getcwd() os.chdir(output_directory) try: file = opener(path, mode) try: file.extractall() finally: file.close() finally: os.chdir(cwd) def ECLAIR_generator(data_file_name, sampling_fraction, N_runs, N_iter, method, k, output_directory, data_flag = 'CyTOF'): """Automatically runs the ECLAIR method on a dataset accessible via 'data_file_name' so as to generate 'N_iter' independent consensus clusterings and associated minimum spanning trees. Parameters ---------- data_file_name : file object or string A path to the dataset under consideration. Any dataset can be submitted to this routine, with the proviso that it has previously been mangled to meet a few constraints regarding headers, delimiters, etc. Those constraints are handled hereby for a qPCR dataset and an aggregation of flow cytometry mouse bone marrow samples. 
sampling_fraction : float Specifies the number of points to downsample from the dataset considered a each of 'N_iter' stages, before applying k-means clustering to this group obtained via a density-based approach. k : int The parameter used for k-means clustering each downsampled group of data-points, as required for all 'N_runs' intermediate steps of ECLAIR. N_runs : int The number of independent runs of downsampling and clustering to perform before applying our ensemble clustering algorithm to this group. N_iter : int The number of ensemble clusterings and accompanying trees to generate by k-fold cross validation, with k = N_iter. We randomly reshuffle the dataset and split it into N_iter equally-sized parts. Of the N_iter subsamples, a single subsample is kept as a 'validation data', while the other serve as the 'training data' from which we build an ensemble clustering and afferent minimum spanning tree. Only upon obtaining this ensemble clustering, do we ascribe each data point from the left-over 'validation' subsample to its nearest cluster in gene expression space. Each sample from the whole dataset therefore has a cluster label. Such vectors of cluster identities are then used in other functions of this module for various comparisons between trees and consensus clusterings. output_directory : file object or string The path to the folder where the information and figures associated with each of 'N_iter' rounds of consensus clustering are to be stored. test_set_flag : bool, optional (default = False) data_flag : string, optional (default = 'CyTOF') Allows the processing of a 'CyTOF' dataset (Supplementary dataset 2 from Qiu et al., Nature Biotechnology, Vol. 29, 10 (2011)) Returns ------- name_tags : list Within 'output_directory', records the names of the folder associated to each of 'N_iter' consensus clusterings obtained. 
""" assert method in {'hierarchical', 'k-means'} assert data_flag in {'CyTOF', 'qPCR'} # Our method has been thoroughly tested on the two corresponding datasets. # Unlike the preceding procedures, 'ECLAIR_generator' is akin to a script # due to all the peculiarities in the number of features kept # for downstream analysis, separators, etc. if data_flag == 'CyTOF': skiprows = 1 delimiter = '\t' usecols = [3, 4, 5, 7, 8, 9, 10, 12, 13] elif data_flag == 'qPCR': skiprows = 1 delimiter = '\t' usecols = xrange(1, 49) # keeping open the addition of other datasets # to be submitted to the present routine with open(data_file_name, 'r') as f: data = np.loadtxt(f, dtype = float, skiprows = skiprows, delimiter = delimiter, usecols = usecols) # in the case of the CyTOF mouse bone marrow experiment, # load the samples resulting from an arcSinh transformation # applied to the raw dataset if method == 'hierarchical': HIERARCHICAL_parameters = namedtuple('HIERARCHICAL_parameters', 'clustering_method k') clustering_parameters = HIERARCHICAL_parameters('hierarchical', k) elif method == 'k-means': KMEANS_parameters = namedtuple('KMEANS_parameters', 'clustering_method k') clustering_parameters = KMEANS_parameters('k-means', k) # leaving open the extension of this analysis to other clustering methods CC_parameters = namedtuple('CC_parameters', 'N_runs sampling_fraction N_cc') cc_parameters = CC_parameters(N_runs, sampling_fraction, k) try: os.makedirs(output_directory) except OSError: if not os.path.isdir(output_directory): print('\nECLAIR_generator\t ERROR\n') raise N_samples = data.shape[0] # separate the samples into 'N_iter' groups of equal size, # by random selection with no replacement: kf = cross_validation.KFold(N_samples, n_folds = N_iter, shuffle = True) name_tags = [] c = 1 for test_indices, train_indices in kf: training_data = np.take(data, train_indices, axis = 0) if data_flag == 'CyTOF': # replacing by cell IDs the column keeping # track of measurement times: 
training_data[:, 0] = np.arange(train_indices.size) train_indices = train_indices.reshape((1, train_indices.size)) with open(output_directory + '/training_{}.txt'.format(c), 'w') as f: np.savetxt(f, train_indices, fmt = '%d', delimiter = '\t', newline = '\n') with open(output_directory + '/training_data_{}.tsv'.format(c), 'w') as f: np.savetxt(f, training_data, fmt = '%.6f', delimiter = '\t') Data_info = namedtuple('Data_info', 'data_file_name expected_N_samples skip_rows cell_IDs_column extra_excluded_columns time_info_column') data_info = Data_info(output_directory + '/training_data_{}.tsv'.format(c), train_indices.size, 0, 0, None, -1) with NamedTemporaryFile('w', suffix = '.h5', delete = True, dir = './') as f: name_tag = ECLR.ECLAIR_processing(f.name, data_info, clustering_parameters, cc_parameters, output_directory) name_tags.append(name_tag) cluster_IDs_file = output_directory + '/ECLAIR_ensemble_clustering_files/' + str(name_tags[-1]) + '/consensus_labels.txt' with open(cluster_IDs_file, 'r') as f: cluster_IDs = np.loadtxt(f, dtype = int) method = clustering_parameters.clustering_method cluster_IDs = upsample(test_indices, cluster_IDs, data, method, xrange(1, data.shape[1])) os.remove(cluster_IDs_file) with open(cluster_IDs_file, 'w') as f: np.savetxt(f, cluster_IDs, fmt = '%d', delimiter = '\t') c += 1 return name_tags def upsample(test_indices, training_set_cluster_IDs, data, method = 'k-means', usecols = None): N_samples = test_indices.size + training_set_cluster_IDs.size assert N_samples == data.shape[0] full_set_cluster_IDs = np.zeros(N_samples, dtype = int) training_indices = np.setdiff1d(np.arange(N_samples), test_indices, True) full_set_cluster_IDs[training_indices] = training_set_cluster_IDs if usecols is not None: usecols = list(usecols) data = np.take(data, usecols, 1) training_data = np.delete(data, test_indices, axis = 0) max_ID = np.amax(training_set_cluster_IDs) centroids = np.zeros((max_ID + 1, data.shape[1]), dtype = float) for cluster in 
xrange(max_ID + 1): samples_in_cluster = np.where(training_set_cluster_IDs == cluster)[0] if method == 'hierarchical': centroids[cluster] = np.median(training_data[samples_in_cluster], axis = 0) else: centroids[cluster] = training_data[samples_in_cluster].mean(axis = 0) test_data = np.take(data, test_indices, axis = 0) test_set_cluster_IDs, _ = pairwise_distances_argmin_min(test_data, centroids, metric = 'manhattan' if method == 'hierarchical' else 'euclidean') full_set_cluster_IDs[test_indices] = test_set_cluster_IDs return full_set_cluster_IDs def experiment_1(N_iter, data_flags, method = 'k-means', test_set_flag = True): """ Parameters: ----------- N_iter : int Number of replicate experiments to generate """ assert not reduce(operator.and_, data_flags) assert reduce(operator.xor, data_flags) assert isinstance(N_iter, int) and N_iter > 1 try: os.makedirs('./ECLAIR_performance') except OSError: if not os.path.isdir('./ECLAIR_performance'): print('\nERROR: ECLAIR: Robustness_analysis: experiment_1\n') raise start_t = time.time() ECLAIR_qPCR_flag, ECLAIR_CyTOF_flag, SPADE_CyTOF_flag = data_flags if ECLAIR_CyTOF_flag: output_directory = './ECLAIR_performance/ECLAIR_test_sets_CyTOF' try: os.makedirs(output_directory) except OSError: if not os.path.isdir(output_directory): print('\nERROR: ECLAIR: Robustness_analysis: experiment_1\n') raise # Access path to the CyTOF mouse bone marrow dataset compressed_data_path = pkg_resources.resource_filename(__name__, 'data/SPADE_data/nbt-SD2-Transformed.tsv.tar.gz') extract_file(compressed_data_path, './ECLAIR_performance') data_file = './ECLAIR_performance/nbt-SD2-Transformed.tsv' max_N_clusters = 50 name_tags = ECLAIR_generator(data_file, 0.5, 100, N_iter, method, max_N_clusters, output_directory) _ = robustness_metrics(max_N_clusters, output_directory + '/ECLAIR_ensemble_clustering_files', name_tags, output_directory, test_set_flag) _ = robustness_metrics(max_N_clusters, output_directory + '/ECLAIR_ensemble_clustering_files', 
name_tags, output_directory, test_set_flag, MST_flag = False) elif ECLAIR_qPCR_flag: data_file = pkg_resources.resource_filename(__name__, 'data/Guoji_data/qPCR.txt') output_directory = './ECLAIR_performance/ECLAIR_test_sets_qPCR' try: os.makedirs(output_directory) except OSError: if not os.path.isdir(output_directory): print('\nERROR: ECLAIR: Robustness_analysis: experiment_1\n') raise max_N_clusters = 13 name_tags = ECLAIR_generator(data_file, 0.2, 50, N_iter, method, max_N_clusters, output_directory, 'qPCR') _ = robustness_metrics(max_N_clusters, output_directory + '/ECLAIR_ensemble_clustering_files', name_tags, output_directory, test_set_flag) _ = robustness_metrics(max_N_clusters, output_directory + '/ECLAIR_ensemble_clustering_files', name_tags, output_directory, test_set_flag, MST_flag = False) elif SPADE_CyTOF_flag: max_N_clusters = 50 output_directory = './ECLAIR_performance/SPADE_test_sets_CyTOF' try: os.makedirs(output_directory) except OSError: if not os.path.isdir(output_directory): print('\nERROR: ECLAIR: Robustness_analysis: experiment_1\n') raise SPADE_files = pkg_resources.resource_filename(__name__, 'data/SPADE_test_sets') for i in xrange(1, 4): with open(SPADE_files + '/training_{0}.txt'.format(i), 'r') as f: training_set = np.loadtxt(f, dtype = int, delimiter = '\t') with open(output_directory + '/training_{0}.txt'.format(i), 'w') as f: np.savetxt(f, training_set, fmt = '%d', delimiter = '\t') name_tags = ['training_1', 'training_2', 'training_3'] _ = robustness_metrics(max_N_clusters, SPADE_files, name_tags, output_directory, test_set_flag) end_t = time.time() print('\n{}_robustness\t SUMMARY\t:\nthe whole process of comparing those minimum-spanning trees and the associated consensus clusterings took {} seconds.\n'.format('SPADE' if SPADE_CyTOF_flag else 'ECLAIR', round(end_t - start_t, 2))) def experiment_2(data_file_name, k, sampling_fraction = 0.2, N_runs = 50): output_directory = './ECLAIR_performance/ECLAIR_same_dataset' try: 
os.makedirs(output_directory) except OSError: raise with open(data_file_name, 'r') as f: data = np.loadtxt(f, dtype = float, skiprows = 1, delimiter = '\t') N_samples = data.shape[0] for i in xrange(1, 51): with open(output_directory + '/training_{}.txt'.format(i), 'w') as f: np.savetxt(f, np.arange(N_samples), fmt = '%d') KMEANS_parameters = namedtuple('KMEANS_parameters', 'clustering_method k') clustering_parameters = KMEANS_parameters('k-means', k) CC_parameters = namedtuple('CC_parameters', 'N_runs sampling_fraction N_cc') cc_parameters = CC_parameters(N_runs, sampling_fraction, k) Data_info = namedtuple('Data_info', 'data_file_name expected_N_samples skip_rows cell_IDs_column extra_excluded_columns time_info_column') data_info = Data_info(data_file_name, N_samples, 1, 0, None, -1) name_tags = [] for i in xrange(50): with NamedTemporaryFile('w', suffix = '.h5', delete = True, dir = './') as f: name_tag = ECLR.ECLAIR_processing(f.name, data_info, clustering_parameters, cc_parameters, output_directory) name_tags.append(name_tag) _ = robustness_metrics(k, output_directory + '/ECLAIR_ensemble_clustering_files', name_tags, output_directory, test_set_flag = False) _ = robustness_metrics(k, output_directory + '/ECLAIR_ensemble_clustering_files', name_tags, output_directory, test_set_flag = False, MST_flag = False) def experiment_3(): output_directory = './ECLAIR_performance/SPADE_same_dataset' try: os.makedirs(output_directory) except OSError: if not os.path.isdir(output_directory): print('\nERROR: ECLAIR: Robustness_analysis: experiment_3\n') raise max_N_clusters = 50 name_tags = ['training_{0}'.format(i) for i in xrange(1, 11)] SPADE_files = pkg_resources.resource_filename(__name__, 'data/SPADE_same_dataset') with open(SPADE_files + '/training.txt', 'r') as f: training_set = np.loadtxt(f, dtype = int, delimiter = '\t') for i in xrange(1, 11): with open(output_directory + '/training_{0}.txt'.format(i), 'w') as f: np.savetxt(f, training_set, fmt = '%d', delimiter = 
'\t') _ = robustness_metrics(max_N_clusters, SPADE_files, name_tags, output_directory, test_set_flag = False)
mit
pgaref/memcached_bench
Python_plots/plots/qjump_utils.py
3
4079
# Copyright (c) 2015, Malte Schwarzkopf # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of qjump-nsdi15-plotting nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from matplotlib import use, rc use('Agg') import matplotlib.pyplot as plt import numpy as np # plot saving utility function def writeout(filename_base, tight=True): for fmt in ['pdf']: if tight: plt.savefig("%s.%s" % (filename_base, fmt), format=fmt, bbox_inches='tight') else: plt.savefig("%s.%s" % (filename_base, fmt), format=fmt) def set_leg_fontsize(size): rc('legend', fontsize=size) def set_paper_rcs(): rc('font',**{'family':'sans-serif','sans-serif':['Helvetica'], 'serif':['Helvetica'],'size':9}) rc('text', usetex=True) rc('legend', fontsize=8) rc('figure', figsize=(3.33,2.22)) # rc('figure.subplot', left=0.10, top=0.90, bottom=0.12, right=0.95) rc('axes', linewidth=0.5) rc('lines', linewidth=0.5) def set_rcs(): rc('font',**{'family':'sans-serif','sans-serif':['Helvetica'], 'serif':['Times'],'size':12}) rc('text', usetex=True) rc('legend', fontsize=7) rc('figure', figsize=(6,4)) rc('figure.subplot', left=0.10, top=0.90, bottom=0.12, right=0.95) rc('axes', linewidth=0.5) rc('lines', linewidth=0.5) def append_or_create(d, i, e): if not i in d: d[i] = [e] else: d[i].append(e) def add_or_create(d, i, e): if not i in d: d[i] = e else: d[i] = d[i] + e # event log constants RESOURCE_UTILIZATION_SAMPLE = 0 TX_SUCCEEDED = 1 TX_FAILED = 2 COLLECTION_ENDING = 3 VMS_CHANGED_STATE = 4 SCHEDULING_OUTCOME = 5 COLLECTION_SUBMITTED = 6 SCHEDULING_TIME = 7 ZOMBIE_COLLECTION_DROPPED = 8 OVERLAP_COLLECTION_DROPPED = 9 COLLECTION_TRUNCATED = 10 CELL_STATE_SETUP = 11 END_ONLY_ENDS = 12 ARRIVAL_SAMPLE = 100 LEAVING_SAMPLE = 101 RES_LIMIT_SAMPLE = 102 ACTIVE_SAMPLE = 103 COLLECTION_ARRIVING_EVENT = 104 COLLECTION_LEAVING_EVENT = 105 MAPREDUCE_PREDICTION = 200 MAPREDUCE_ORIGINAL_RUNTIME = 201 MAPREDUCE_RESOURCE_ADJUSTMENT = 202 MAPREDUCE_BASE_RUNTIME = 203 paper_figsize_small = (1.1, 1.1) paper_figsize_small_square = (1.5, 1.5) paper_figsize_medium = (2, 1.33) paper_figsize_medium_square = (2, 2) #paper_figsize_medium = (1.66, 1.1) paper_figsize_large = (3.33, 2.22) 
paper_figsize_bigsim3 = (2.4, 1.7) #8e053b red #496ee2 blue #ef9708 orange paper_colors = ['#496ee2', '#8e053b', 'g', '#ef9708', '0', '#eeefff', '0.5', 'c', '0.7'] # ----------------------------------- def think_time_fn(x, y, s): return x + y * s # ----------------------------------- def get_mad(median, data): devs = [abs(x - median) for x in data] mad = np.median(devs) return mad # -----------------------------------
mit
ChrisEberl/Python_DIC
functions/CpCorr.py
1
14243
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Created on --/--/20-- @author: --- Revised by Charlie Bourigault @contact: bourigault.charlie@gmail.com Please report issues and request on the GitHub project from ChrisEberl (Python_DIC) More details regarding the project on the GitHub Wiki : https://github.com/ChrisEberl/Python_DIC/wiki Current File: This file has been translated, adapted and further developed from 'Digital Image Correlation and Tracking' for Matlab exchanged by Melanie Senn on Mathworks """ import numpy as np, cv2, scipy.interpolate def cpcorr(InputPoints,BasePoints,Input,Base, CORRSIZE): [xymoving_in,xyfixed_in,moving,fixed] = ParseInputs(InputPoints,BasePoints,Input,Base) CorrCoef=[] # get all rectangle coordinates rects_moving = np.array(calc_rects(xymoving_in,CORRSIZE,moving)).astype(np.int) rects_fixed = np.array(calc_rects(xyfixed_in,2*CORRSIZE,fixed)).astype(np.int) ncp = len(np.atleast_1d(xymoving_in)) xymoving = xymoving_in # initialize adjusted control points matrix CorrCoef=np.zeros((ncp,1)) StdX=np.zeros((ncp,1)) StdY=np.zeros((ncp,1)) errorInfos = np.zeros((ncp,1)) #### Error Type #### # 1 : Edge Area # 2 : Marker Out # 3 : Non finite number # 4 : No Std. Dev # 5 : SubPx outside limits # 6 : Div. by 0 # 7 : Low Corr. # 8 : Peak badly constrained #################### for icp in range(ncp): if (rects_moving[2][icp] == 0 and rects_moving[3][icp] == 0) or (rects_fixed[2][icp] == 0 and rects_moving[3][icp] == 0): #near edge, unable to adjust #print 'CpCorr : Edge area. No Adjustement.' 
errorInfos[icp] = 1 continue sub_moving = moving[rects_moving[1][icp]:rects_moving[1][icp]+rects_moving[3][icp],rects_moving[0][icp]:rects_moving[0][icp]+rects_moving[2][icp]] sub_fixed = fixed[rects_fixed[1][icp]:rects_fixed[1][icp]+rects_fixed[3][icp],rects_fixed[0][icp]:rects_fixed[0][icp]+rects_fixed[2][icp]] #make sure the image data exist if sub_moving.shape[0] == 0 or sub_moving.shape[1] == 0 or sub_fixed.shape[0] == 0 or sub_fixed.shape[1] == 0: #print 'CpCorr : Marker out of image.' errorInfos[icp] = 2 continue #make sure finite if np.logical_or(np.any(np.isfinite(sub_moving[:])==False),np.any(np.isfinite(sub_fixed[:]))==False): # NaN or Inf, unable to adjust #print 'CpCorr : Wrong Number. No Adjustement.' errorInfos[icp] = 3 continue # check that template rectangle moving has nonzero std if np.std(sub_moving[:])== 0: # zero standard deviation of template image, unable to adjust #print 'CpCorr : No Std Dev. No Adjustement.' errorInfos[icp] = 4 continue norm_cross_corr = cv2.matchTemplate(sub_moving,sub_fixed,cv2.TM_CCORR_NORMED) #norm_cross_corr=scipy.signal.correlate2d(sub_fixed, sub_moving) #norm_cross_corr=sklearn.preprocessing.normalize(norm_cross_corr, norm='l2', axis=1, copy=True) #norm_cross_corr=match_template(sub_fixed,sub_moving) # get subpixel resolution from cross correlation subpixel = True [xpeak, ypeak, stdx, stdy, corrcoef, info] = findpeak(norm_cross_corr,subpixel) CorrCoef[icp]=corrcoef StdX[icp]=stdx StdY[icp]=stdy xpeak = float(xpeak) ypeak = float(ypeak) if info == 1: errorInfos[icp] = 5 elif info == 2: errorInfos[icp] = 6 # eliminate any poor correlations THRESHOLD = 0.5 if (corrcoef < THRESHOLD): # low correlation, unable to adjust #print 'CpCorr : Low Correlation. Marker avoided.' 
errorInfos[icp] = 7 continue # offset found by cross correlation corroffset = [xpeak-CORRSIZE, ypeak-CORRSIZE] # eliminate any big changes in control points if corroffset[0] > (CORRSIZE-1) or corroffset[1] > (CORRSIZE-1): # peak of norxcorr2 not well constrained, unable to adjust #print 'CpCorr : Peak not well constrained. No adjustement' errorInfos[icp] = 8 continue movingfractionaloffset = np.array([xymoving[icp,:] - np.around(xymoving[icp,:])]) fixedfractionaloffset = np.array([xyfixed_in[icp,:] - np.around(xyfixed_in[icp,:])]) # adjust control point xymoving[icp,:] = xymoving[icp,:] - movingfractionaloffset - corroffset + fixedfractionaloffset #xymoving[icp,:] = xymoving[icp,:] - corroffset return xymoving,StdX,StdY,CorrCoef, errorInfos def calc_rects(xy,halfwidth,img): # Calculate rectangles so imcrop will return image with xy coordinate inside center pixel default_width = 2*halfwidth default_height = default_width [row, col] = img.shape # xy specifies center of rectangle, need upper left upperleft=np.around(xy)-halfwidth lowerright=np.around(xy)+halfwidth # need to modify for pixels near edge of images left = upperleft[:,0] upper = upperleft[:,1] right = lowerright[:,0] lower = lowerright[:,1] #lower = upper + default_height #right = left + default_width width = default_width * np.ones(np.shape(upper)) height = default_height * np.ones(np.shape(upper)) #check edges for coordinates outside image [upper, height] = adjust_lo_edge(upper,1,height) [lower, height] = adjust_hi_edge(lower,row,height) [left,width] = adjust_lo_edge(left,1,width) [right, width] = adjust_hi_edge(right,col,width) # set width and height to zero when less than default size #iw = find(width<default_width) #ih = find(height<default_height) #idx = unique([iw,ih]) #width[idx] = 0 #height[idx] = 0 rect = [left.astype(np.int), upper.astype(np.int), width.astype(np.int), height.astype(np.int)] return rect def adjust_lo_edge(coordinates,edge,breadth): for indx in range(0,len(coordinates)): if 
coordinates[indx] < edge: #breadth[indx] = breadth[indx] - np.absolute(coordinates[indx]-edge) breadth[indx] = 0 coordinates[indx] = edge return coordinates, breadth def adjust_hi_edge(coordinates,edge,breadth): for indx in range(0,len(coordinates)): if coordinates[indx] > edge: #breadth[indx] = breadth[indx] - np.absolute(coordinates[indx]-edge) breadth[indx] = 0 coordinates[indx] = edge return coordinates, breadth def ParseInputs(InputPoints,BasePoints,Input,Base): xymoving_in = InputPoints xyfixed_in = BasePoints moving = Input fixed = Base return xymoving_in,xyfixed_in,moving,fixed # sub pixel accuracy by 2D polynomial fit (quadratic) def findpeak(f,subpixel): stdx=1e-4 stdy=1e-4 # Get absolute peak pixel max_f = np.amax(f) [xpeak,ypeak] = np.unravel_index(f.argmax(), f.shape) #coordinates of the maximum value in f if subpixel == False or xpeak==0 or xpeak==np.shape(f)[0]-1 or ypeak==0 or ypeak==np.shape(f)[1]-1: # on edge #print 'CpCorr : No Subpixel Adjustement.' return ypeak, xpeak, stdx, stdy, max_f, 0# return absolute peak else: # fit a 2nd order polynomial to 9 points # using 9 pixels centered on irow,jcol u = f[xpeak-1:xpeak+2,ypeak-1:ypeak+2] u = np.reshape(np.transpose(u),(9,1)) x = np.array([-1, 0, 1, -1, 0, 1, -1, 0, 1]) y = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1]) x = np.reshape(x,(9,1)) y = np.reshape(y,(9,1)) # u(x,y) = A(0) + A(1)*x + A(2)*y + A(3)*x*y + A(4)*x^2 + A(5)*y^2 X = np.hstack((np.ones((9,1)), x, y, x*y, x**2, y**2)) # u = X*A #A = np.linalg.lstsq(X,u, rcond=1e-1) A = np.linalg.lstsq(X,u, rcond=1e-20) e = A[1] #residuals returned by Linalg Lstsq A=np.reshape(A[0],(6,1)) # A[0] array of least square solution to the u = AX equation # get absolute maximum, where du/dx = du/dy = 0 x_num = (-A[2]*A[3]+2*A[5]*A[1]) y_num = (-A[3]*A[1]+2*A[4]*A[2]) den = (A[3]**2-4*A[4]*A[5]) x_offset = x_num / den y_offset = y_num / den #print x_offset, y_offset if np.absolute(x_offset)>1 or np.absolute(y_offset)>1: #print 'CpCorr : Subpixel outside limit. 
No adjustement' # adjusted peak falls outside set of 9 points fit, return ypeak, xpeak, stdx, stdy, max_f, 1 # return absolute peak #x_offset = np.round(10000*x_offset)/10000 #y_offset = np.round(10000*y_offset)/10000 x_offset = np.around(x_offset, decimals=4) y_offset = np.around(y_offset, decimals=4) xpeak = xpeak + x_offset ypeak = ypeak + y_offset #print xpeak, ypeak # calculate residuals #e=u-np.dot(X,A) # calculate estimate of the noise variance n=9 # number of data points p=6 # number of fitted parameters var=np.sum(e**2)/(n-p) # calculate covariance matrix cov=np.linalg.inv(np.dot(np.transpose(X),X))*var # produce vector of std deviations on each term s=np.sqrt([cov[0,0],cov[1,1],cov[2,2],cov[3,3],cov[4,4],cov[5,5]]) # Calculate standard deviation of denominator, and numerators if A[1] == 0 or A[2] == 0 or A[3] == 0 or A[4] == 0 or A[5] == 0: #avoid divide by zero error and invalid value #print 'CpCorr : Div. by 0 error escaped.' return ypeak, xpeak, stdx, stdy, max_f, 2# return absolute peak else: x_num_std=np.sqrt(4*A[5]**2*A[1]**2*((s[5]/A[5])**2+(s[1]/A[1])**2)+A[2]**2*A[3]**2*((s[2]/A[2])**2+(s[3]/A[3])**2)) den_std=np.sqrt(16*A[4]**2*A[5]**2*((s[4]/A[4])**2+(s[5]/A[5])**2)+2*s[3]**2*A[3]**2) y_num_std=np.sqrt(4*A[4]**2*A[2]**2*((s[4]/A[4])**2+(s[2]/A[2])**2)+A[3]**2*A[1]**2*((s[3]/A[3])**2+(s[1]/A[1])**2)) # Calculate standard deviation of x and y positions stdx=np.sqrt(x_offset**2*((x_num_std/x_num)**2+(den_std/den)**2)) stdy=np.sqrt(y_offset**2*((den_std/den)**2+(y_num_std/y_num)**2)) # Calculate extremum of fitted function max_f = np.dot([1, x_offset, y_offset, x_offset*y_offset, x_offset**2, y_offset**2],A) max_f = np.absolute(max_f) return ypeak, xpeak, stdx, stdy, max_f, 0 # sub pixel accuracy by upsampling and interpolation def findpeak2(f,subpixel): stdx=1e-4 stdy=1e-4 kernelsize=3 # get absolute peak pixel max_f = np.amax(f) [xpeak,ypeak] = np.unravel_index(f.argmax(), f.shape) if subpixel==False or xpeak < kernelsize or xpeak > 
np.shape(f)[0]-kernelsize or ypeak < kernelsize or ypeak > np.shape(f)[1]-kernelsize: # on edge return xpeak, ypeak, stdx, stdy, max_f # return absolute peak else: # determine sub pixel accuracy by upsampling and interpolation fextracted=f[xpeak-kernelsize:xpeak+kernelsize+1,ypeak-kernelsize:ypeak+kernelsize+1] totalsize=2*kernelsize+1 upsampling=totalsize*10+1 #step=2/upsampling x=np.linspace(-kernelsize,kernelsize,totalsize) #[X,Y]=np.meshgrid(x,x) xq=np.linspace(-kernelsize,kernelsize,upsampling) #[Xq,Yq]=np.meshgrid(xq,xq) bilinterp = interpolate.interp2d(x, x, fextracted, kind='cubic') fq = bilinterp(xq, xq) #splineint = RectBivariateSpline(x, x, fextracted, kx=3, ky=3, s=0) #fq=splineint(xq,xq) #fq=griddata((x, x), fextracted, (Xq, Yq), method='cubic') max_f = np.amax(fq) [xpeaknew,ypeaknew] = np.unravel_index(fq.argmax(), fq.shape) #xoffset=Xq[0,xpeaknew] #yoffset=Yq[ypeaknew,0] xoffset=xq[xpeaknew] yoffset=xq[ypeaknew] # return only one-thousandths of a pixel precision xoffset = np.round(1000*xoffset)/1000 yoffset = np.round(1000*yoffset)/1000 xpeak=xpeak+xoffset ypeak=ypeak+yoffset # peak width (full width at half maximum) scalehalfwidth=1.1774; fextractedx=np.mean(fextracted,0) fextractedy=np.mean(fextracted,1) stdx=scalehalfwidth*np.std(fextractedx) stdy=scalehalfwidth*np.std(fextractedy) return xpeak, ypeak, stdx, stdy, max_f # sub pixel accuracy by centroid def findpeak3(f,subpixel): stdx=1e-4 stdy=1e-4 kernelsize=3 # get absolute peak pixel max_f = np.amax(f) [xpeak,ypeak] = np.unravel_index(f.argmax(), f.shape) if subpixel==False or xpeak < kernelsize or xpeak > np.shape(f)[0]-kernelsize or ypeak < kernelsize or ypeak > np.shape(f)[1]-kernelsize: # on edge return xpeak, ypeak, stdx, stdy, max_f # return absolute peak else: # determine sub pixel accuracy by centroid fextracted=f[xpeak-kernelsize:xpeak+kernelsize+1,ypeak-kernelsize:ypeak+kernelsize+1] fextractedx=np.mean(fextracted,0) fextractedy=np.mean(fextracted,1) 
x=np.arange(-kernelsize,kernelsize+1,1) y=np.transpose(x) xoffset=np.dot(x,fextractedx) yoffset=np.dot(y,fextractedy) # return only one-thousandths of a pixel precision xoffset = np.round(1000*xoffset)/1000 yoffset = np.round(1000*yoffset)/1000 xpeak=xpeak+xoffset ypeak=ypeak+yoffset # 2D linear interpolation bilinterp = interpolate.interp2d(x, x, fextracted, kind='linear') max_f = bilinterp(xoffset,yoffset) # peak width (full width at half maximum) scalehalfwidth=1.1774 stdx=scalehalfwidth*np.std(fextractedx) stdy=scalehalfwidth*np.std(fextractedy) return xpeak, ypeak, stdx, stdy, max_f
apache-2.0
yunfeilu/scikit-learn
examples/feature_selection/plot_rfe_with_cross_validation.py
226
1384
""" =================================================== Recursive feature elimination with cross-validation =================================================== A recursive feature elimination example with automatic tuning of the number of features selected with cross-validation. """ print(__doc__) import matplotlib.pyplot as plt from sklearn.svm import SVC from sklearn.cross_validation import StratifiedKFold from sklearn.feature_selection import RFECV from sklearn.datasets import make_classification # Build a classification task using 3 informative features X, y = make_classification(n_samples=1000, n_features=25, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, random_state=0) # Create the RFE object and compute a cross-validated score. svc = SVC(kernel="linear") # The "accuracy" scoring is proportional to the number of correct # classifications rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2), scoring='accuracy') rfecv.fit(X, y) print("Optimal number of features : %d" % rfecv.n_features_) # Plot number of features VS. cross-validation scores plt.figure() plt.xlabel("Number of features selected") plt.ylabel("Cross validation score (nb of correct classifications)") plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_) plt.show()
bsd-3-clause
mattions/TimeScales
branch_dist/import_digitezed_data.py
1
4254
# Author Michele Mattioni # Fri Oct 23 15:41:58 BST 2009 import pylab import numpy as np from numpy import sin, exp import matplotlib.pyplot as plt from helpers.loader import Loader class FitHandler(object): """Fit the data with a polynomial""" def fit(self, data, terms): polycoeffs = np.polyfit(data.x, data.y, terms) poly = np.poly1d(polycoeffs) return poly def plot_poly(self, pfit): plt.plot(data.x, pfit(data.x), label="poly %i" %pfit.order) def fit_and_plot(self, data, order): p = self.fit(data, order) self.plot_poly(p) return p def plot_data(self, data): plt.plot(data.x, data.y, 'k.', label="data") plt.xlabel("Distance from the soma [um]") plt.ylabel("Surface Area [um]/Dendritic Lenght [um^2]") def integrate_till_value(self, x0, value, poly, increment, scale_branch): """Integrate the polynomial from x0 to the value required :Params: x0: starting point for the integration value: objective value to reach poly: polynomial to use to calculate :return: x1: ending point of the integration """ delta = 0 x1 = x0 while value >= delta: x1 += increment delta = poly(x1)/scale_branch - poly(x0)/scale_branch return (x1, delta) def calc_spines_pos(self, cursor_list, x1_list): """Calculate the spines position, returning the mid point of the interval from the two list.""" mid_points = [] for i, el in enumerate(cursor_list): mid_point = cursor_list[i] + (x1_list[i] - cursor_list[i])/2 mid_points.append(mid_point) return mid_points if __name__ == "__main__": from scipy.optimize import leastsq data = pylab.csv2rec('spines_distribution_Wilson_1992.csv') pfh = FitHandler() pfh.plot_data(data) order = 17 pfit = pfh.fit_and_plot(data, order) plt.title("Fitting the data") plt.legend() plt.savefig("Fitted_data.png") # Integrating pInteg = pfit.integ() plt.figure() pfh.plot_poly(pInteg) plt.title("Integral area") # We get the area per number of branch (4): scale_branch = 4 area_per_branch = pInteg(data.x)/scale_branch plt.plot(data.x, area_per_branch, label='area branch') 
plt.legend(loc=0) plt.savefig('integral_area.png') # Calculating the spine dimension """ Procedure to get this right: - Compute the total surface from Wolf of all the spines # 1525 spines total, 381 per branch - Rescale the whole surface Wolf spines surface to the Wilson one - Compute the spine equivalent surface Wilson2Wolf - Integrate until the surface in the Wilson world match one spine surface - take the (x_end - x_start)/2 position - iterate """ spine_Wolf = 6.35 # um^2 total_number_spines = 1525 spines_per_branch = 381 total_Wolf = spines_per_branch * spine_Wolf total_Wilson = pInteg(220)/scale_branch #Value of the integral at the last bit # spine_Wolf : spine_Wilson = total_Wolf : total_Wilson spine_Wilson = (spine_Wolf * total_Wilson)/ total_Wolf increment =0.001 cursor = 0 cursor_list = [] x1_list = [] delta_list = [] print "Calculating the spines' position. It will take a bit." while cursor <= data.x[-1]: x1, delta = pfh.integrate_till_value(cursor, spine_Wilson, pInteg, increment, scale_branch) cursor_list.append(cursor) x1_list.append(x1) delta_list.append(delta) cursor = x1 # Resetting the cursor to the x1 spines_pos = pfh.calc_spines_pos(cursor_list, x1_list) plt.figure() plt.hist(spines_pos, bins=30) plt.title("spines distribution for branch") #plt.savefig('spines_distribution.png') #filename = 'spines_pos.pickle' #l = Loader() #l.save(spines_pos, '.', filename) plt.show()
bsd-3-clause
xzh86/scikit-learn
sklearn/metrics/cluster/tests/test_unsupervised.py
230
2823
"""Tests for unsupervised clustering metrics (silhouette coefficient)."""

import numpy as np
from scipy.sparse import csr_matrix

from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp


def test_silhouette():
    # Tests the Silhouette Coefficient.
    dataset = datasets.load_iris()
    X = dataset.data
    y = dataset.target
    D = pairwise_distances(X, metric='euclidean')
    # Given that the actual labels are used, we can assume that S would be
    # positive.
    silhouette = silhouette_score(D, y, metric='precomputed')
    assert silhouette > 0
    # Test without calculating D: precomputed and on-the-fly distances must
    # give the same score.
    silhouette_metric = silhouette_score(X, y, metric='euclidean')
    assert_almost_equal(silhouette, silhouette_metric)
    # Test with sampling
    silhouette = silhouette_score(D, y, metric='precomputed',
                                  sample_size=int(X.shape[0] / 2),
                                  random_state=0)
    silhouette_metric = silhouette_score(X, y, metric='euclidean',
                                         sample_size=int(X.shape[0] / 2),
                                         random_state=0)
    assert silhouette > 0
    assert silhouette_metric > 0
    assert_almost_equal(silhouette_metric, silhouette)
    # Test with sparse X
    X_sparse = csr_matrix(X)
    D = pairwise_distances(X_sparse, metric='euclidean')
    silhouette = silhouette_score(D, y, metric='precomputed')
    assert silhouette > 0


def test_no_nan():
    # Assert Silhouette Coefficient != nan when there is 1 sample in a class.
    # This tests for the condition that caused issue 960.
    # Note that there is only one sample in cluster 0. This used to cause the
    # silhouette_score to return nan (see bug #960).
    labels = np.array([1, 0, 1, 1, 1])
    # The distance matrix doesn't actually matter.
    D = np.random.RandomState(0).rand(len(labels), len(labels))
    silhouette = silhouette_score(D, labels, metric='precomputed')
    assert_false(np.isnan(silhouette))


def test_correct_labelsize():
    # Assert 1 < n_labels < n_samples
    dataset = datasets.load_iris()
    X = dataset.data

    # n_labels = n_samples
    y = np.arange(X.shape[0])
    # Fix: regex patterns are raw strings; the original plain strings
    # contained invalid escape sequences (`\.`, `\(`).
    assert_raises_regexp(ValueError,
                         r'Number of labels is %d\. Valid values are 2 '
                         r'to n_samples - 1 \(inclusive\)' %
                         len(np.unique(y)),
                         silhouette_score, X, y)

    # n_labels = 1
    y = np.zeros(X.shape[0])
    assert_raises_regexp(ValueError,
                         r'Number of labels is %d\. Valid values are 2 '
                         r'to n_samples - 1 \(inclusive\)' %
                         len(np.unique(y)),
                         silhouette_score, X, y)
bsd-3-clause
jrmyp/attelo
attelo/metrics/constituency.py
3
11012
"""Metrics for constituency trees. TODO ---- * [ ] factor out the report from the parseval function, see `sklearn.metrics.classification.classification_report` * [ ] refactor the selection functions that enable to break down evaluations, to avoid almost duplicates (as currently) """ from __future__ import print_function import numpy as np from .classification_structured import (precision_recall_fscore_support, unique_labels) from .util import get_spans # label extraction functions LBL_FNS = [ ('S', lambda span: 1), ('S+N', lambda span: span[1]), ('S+R', lambda span: span[2]), ('S+N+R', lambda span: '{}-{}'.format(span[2], span[1])), ] # PARSEVAL metrics adapted to the evaluation of discourse parsers, # with options to get meaningful variants in specific settings def discourse_parseval_scores(ctree_true, ctree_pred, labels=None, average=None): """Compute discourse PARSEVAL scores for ctree_pred wrt ctree_true. Parameters ---------- ctree_true : list of list of RSTTree or SimpleRstTree ctree_pred : list of list of RSTTree or SimpleRstTree labels : list of string, optional Corresponds to sklearn's target_names IMO Returns ------- precision : float (if average is not None) or array of float, shape =\ [n_unique_labels] Weighted average of the precision of each class. recall : float (if average is not None) or array of float, shape =\ [n_unique_labels] fbeta_score : float (if average is not None) or array of float, shape =\ [n_unique_labels] support : int (if average is not None) or array of int, shape =\ [n_unique_labels] The number of occurrences of each label in ``ctree_true``. References ---------- .. [1] `Daniel Marcu (2000). "The theory and practice of discourse parsing and summarization." MIT press. 
""" # extract descriptions of spans from the true and pred trees spans_true = [get_spans(ct_true) for ct_true in ctree_true] spans_pred = [get_spans(ct_pred) for ct_pred in ctree_pred] # use lbl_fn to define labels spans_true = [[(span[0], lbl_fn(span)) for span in spans] for spans in spans_true] spans_pred = [[(span[0], lbl_fn(span)) for span in spans] for spans in spans_pred] p, r, f, s = precision_recall_fscore_support(spans_true, spans_pred, labels=labels, average=average) return p, r, f, s def parseval_report(ctree_true, ctree_pred, metric_types=None, digits=4, stringent=False): """Build a text report showing the PARSEVAL discourse metrics. This is the simplest report we need to generate, it corresponds to the arrays of results from the literature. Metrics are calculated globally (average='micro'). Parameters ---------- metric_types: list of strings, optional Metrics that need to be included in the report ; if None is given, defaults to ['S', 'S+N', 'S+R', 'S+N+R']. """ if metric_types is None: metric_types = ['S', 'S+N', 'S+R', 'S+N+R'] if set(metric_types) - set(x[0] for x in LBL_FNS): raise ValueError('Unknown metric types in {}'.format(metric_types)) # FIXME refactor in tandem with discourse_parseval_scores, to # get a coherent and non-redundant API # extract descriptions of spans from the true and pred trees spans_true = [get_spans(ct_true) for ct_true in ctree_true] spans_pred = [get_spans(ct_pred) for ct_pred in ctree_pred] # prepare report width = max(len(str(x)) for x in metric_types) width = max(width, digits) headers = ["precision", "recall", "f1-score", "support"] fmt = '%% %ds' % width # first col: class name fmt += ' ' fmt += ' '.join(['% 9s' for _ in headers]) fmt += '\n' headers = [""] + headers report = fmt % tuple(headers) report += '\n' # end prepare report metric2lbl_fn = dict(LBL_FNS) for metric_type in metric_types: lbl_fn = metric2lbl_fn[metric_type] # possibly filter data sp_true = spans_true sp_pred = spans_pred if stringent: # 
stringent variant: if metric_type == 'S': # * S: exclude leaves sp_true = [[xi for xi in x if xi[0][1] != xi[0][0]] for x in sp_true] sp_pred = [[xi for xi in x if xi[0][1] != xi[0][0]] for x in sp_pred] elif False and metric_type in ['S+R', 'S+N+R']: # * S+R, S+N+R: exclude 'span' sp_true = [[xi for xi in x if xi[2] != 'span'] for x in sp_true] sp_pred = [[xi for xi in x if xi[2] != 'span'] for x in sp_pred] # end filter y_true = [[(span[0], lbl_fn(span)) for span in spans] for spans in sp_true] y_pred = [[(span[0], lbl_fn(span)) for span in spans] for spans in sp_pred] # calculate metric p, r, f1, s = precision_recall_fscore_support(y_true, y_pred, average='micro') # report values = [metric_type] for v in (p, r, f1): values += ["{0:0.{1}f}".format(v, digits)] values += ["{0}".format(s)] report += fmt % tuple(values) return report def parseval_detailed_report(ctree_true, ctree_pred, metric_type='S+R', labels=None, average=None, sort_by_support=True, digits=4): """Build a text report showing the PARSEVAL discourse metrics. FIXME model after sklearn.metrics.classification.classification_report Parameters ---------- ctree_true : list of RSTTree or SimpleRstTree Ground truth (correct) target structures. ctree_pred : list of RSTTree or SimpleRstTree Estimated target structures as predicted by a parser. labels : list of string, optional Relation labels to include in the evaluation. FIXME Corresponds more to target_names in sklearn IMHO. lbl_fn : function from tuple((int, int), string, string) to string Label extraction function digits : int Number of digits for formatting output floating point values. Returns ------- report : string Text summary of the precision, recall, F1 score, support for each class (or micro-averaged over all classes). References ---------- .. [1] `Daniel Marcu (2000). "The theory and practice of discourse parsing and summarization." MIT press. 
""" metric2lbl_fn = dict(LBL_FNS) lbl_fn = metric2lbl_fn[metric_type] # extract descriptions of spans from the true and pred trees spans_true = [get_spans(ct_true) for ct_true in ctree_true] spans_pred = [get_spans(ct_pred) for ct_pred in ctree_pred] # use lbl_fn to extract the label of interest y_true = [[(span[0], lbl_fn(span)) for span in spans] for spans in spans_true] y_pred = [[(span[0], lbl_fn(span)) for span in spans] for spans in spans_pred] present_labels = unique_labels(y_true, y_pred) if labels is None: labels = present_labels n_labels = None else: # currently not tested n_labels = len(labels) labels = np.hstack([labels, np.setdiff1d(present_labels, labels, assume_unique=True)]) last_line_heading = 'avg / total' width = max(len(str(lbl)) for lbl in labels) width = max(width, len(last_line_heading), digits) headers = ["precision", "recall", "f1-score", "support"] fmt = '%% %ds' % width # first col: class name fmt += ' ' fmt += ' '.join(['% 9s' for _ in headers]) fmt += '\n' headers = [""] + headers report = fmt % tuple(headers) report += '\n' # call with average=None to compute per-class scores, then # compute average here and print it p, r, f1, s = precision_recall_fscore_support(y_true, y_pred, labels=labels, average=average) sorted_ilbls = enumerate(labels) if sort_by_support: sorted_ilbls = sorted(sorted_ilbls, key=lambda x: s[x[0]], reverse=True) # one line per label for i, label in sorted_ilbls: values = [label] for v in (p[i], r[i], f1[i]): values += ["{0:0.{1}f}".format(v, digits)] values += ["{0}".format(s[i])] if average is None: # print per-class scores for average=None only report += fmt % tuple(values) # print only if per-class scores if average is None: report += '\n' # compute averages for the bottom line values = [last_line_heading] for v in (np.average(p, weights=s), np.average(r, weights=s), np.average(f1, weights=s)): values += ["{0:0.{1}f}".format(v, digits)] values += ['{0}'.format(np.sum(s))] report += fmt % tuple(values) return 
report def parseval_reports(ctree_true, ctree_pred, labels=None, average=None, digits=2): """Build a text report showing the PARSEVAL discourse metrics. FIXME model after sklearn.metrics.classification.classification_report Parameters ---------- ctree_true : list of RSTTree or SimpleRstTree Ground truth (correct) target structures. ctree_pred : list of RSTTree or SimpleRstTree Estimated target structures as predicted by a parser. labels : list of string, optional Relation labels to include in the evaluation. FIXME Corresponds more to target_names in sklearn IMHO. digits : int Number of digits for formatting output floating point values. Returns ------- report : string Text summary of the precision, recall, F1 score, support for each class (or micro-averaged over all classes). References ---------- .. [1] `Daniel Marcu (2000). "The theory and practice of discourse parsing and summarization." MIT press. """ # extract one report per type of metric reports = [] for metric_type, lbl_fn in LBL_FNS: lbls = labels if metric_type in ['S+R', 'S+N+R'] else None reports.append((metric_type, parseval_report(ctree_true, ctree_pred, lbl_fn, labels=lbls, average=average, digits=digits))) return reports
gpl-3.0
madjelan/scikit-learn
examples/feature_stacker.py
246
1906
""" ================================================= Concatenating multiple feature extraction methods ================================================= In many real-world examples, there are many ways to extract features from a dataset. Often it is beneficial to combine several methods to obtain good performance. This example shows how to use ``FeatureUnion`` to combine features obtained by PCA and univariate selection. Combining features using this transformer has the benefit that it allows cross validation and grid searches over the whole process. The combination used in this example is not particularly helpful on this dataset and is only used to illustrate the usage of FeatureUnion. """ # Author: Andreas Mueller <amueller@ais.uni-bonn.de> # # License: BSD 3 clause from sklearn.pipeline import Pipeline, FeatureUnion from sklearn.grid_search import GridSearchCV from sklearn.svm import SVC from sklearn.datasets import load_iris from sklearn.decomposition import PCA from sklearn.feature_selection import SelectKBest iris = load_iris() X, y = iris.data, iris.target # This dataset is way to high-dimensional. Better do PCA: pca = PCA(n_components=2) # Maybe some original features where good, too? selection = SelectKBest(k=1) # Build estimator from PCA and Univariate selection: combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)]) # Use combined features to transform dataset: X_features = combined_features.fit(X, y).transform(X) svm = SVC(kernel="linear") # Do grid search over k, n_components and C: pipeline = Pipeline([("features", combined_features), ("svm", svm)]) param_grid = dict(features__pca__n_components=[1, 2, 3], features__univ_select__k=[1, 2], svm__C=[0.1, 1, 10]) grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10) grid_search.fit(X, y) print(grid_search.best_estimator_)
bsd-3-clause
klusta-team/klustaviewa
klustaviewa/gui/mainwindow.py
1
49325
"""Main window.""" # ----------------------------------------------------------------------------- # Imports # ----------------------------------------------------------------------------- import pprint import time from StringIO import StringIO import os import sys import inspect import logging from collections import OrderedDict from functools import partial import webbrowser import pandas as pd import numpy as np import numpy.random as rnd from qtools import QtGui, QtCore from qtools import inprocess, inthread, QT_BINDING import klustaviewa.views as vw from klustaviewa.gui.icons import get_icon from klustaviewa.control.controller import Controller from klustaviewa.wizard.wizard import Wizard from kwiklib.dataio.tools import get_array from kwiklib.dataio import KlustersLoader, KwikLoader, read_clusters from klustaviewa.gui.buffer import Buffer from klustaviewa.gui.dock import ViewDockWidget, DockTitleBar from klustaviewa.stats.cache import StatsCache from klustaviewa.stats.correlograms import NCORRBINS_DEFAULT, CORRBIN_DEFAULT from klustaviewa.stats.correlations import normalize from kwiklib.utils import logger as log from kwiklib.utils.logger import FileLogger, register, unregister from kwiklib.utils.persistence import encode_bytearray, decode_bytearray from klustaviewa import USERPREF from klustaviewa import SETTINGS from klustaviewa import APPNAME, ABOUT, get_global_path from klustaviewa import get_global_path from klustaviewa.gui.threads import ThreadedTasks, OpenTask from klustaviewa.gui.taskgraph import TaskGraph import rcicons # ----------------------------------------------------------------------------- # Main Window # ----------------------------------------------------------------------------- class MainWindow(QtGui.QMainWindow): def __init__(self, parent=None, dolog=True, filename=None, shank=None): self.views = {} super(MainWindow, self).__init__(parent) self.views = {} # HACK: display the icon in Windows' taskbar. 
if os.name == 'nt': try: import ctypes myappid = 'klustateam.klustaviewa' ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) except: pass self.dolog = dolog if self.dolog: create_file_logger() self.initialize_view_logger() log.debug("Using {0:s}.".format(QT_BINDING)) # Main window options. self.move(50, 50) self.setWindowTitle('KlustaViewa') # Focus options. self.setFocusPolicy(QtCore.Qt.WheelFocus) self.setMouseTracking(True) # Dock widgets options. self.setDockNestingEnabled(True) self.setAnimated(False) self.setWindowIcon(get_icon('logo')) # Initialize some variables. self.statscache = None # self.loader = KlustersLoader() self.loader = KwikLoader(userpref=USERPREF) self.loader.progressReported.connect(self.open_progress_reported) self.loader.saveProgressReported.connect(self.save_progress_reported) self.wizard = Wizard() self.controller = None self.spikes_highlighted = [] self.spikes_selected = [] self._wizard = False self.is_file_open = False self.need_save = False self.taskgraph = TaskGraph(self) self.busy_cursor = QtGui.QCursor(QtCore.Qt.BusyCursor) self.normal_cursor = QtGui.QCursor(QtCore.Qt.ArrowCursor) self.is_busy = False self.override_color = False self.computing_correlograms = False self.computing_matrix = False # Create the main window. self.create_views() self.create_file_actions() self.create_edit_actions() self.create_view_actions() self.create_correlograms_actions() self.create_control_actions() self.create_wizard_actions() self.create_help_actions() self.create_menu() self.create_toolbar() self.create_open_progress_dialog() self.create_save_progress_dialog() self.create_threads() # Update action enabled/disabled property. self.update_action_enabled() # Show the main window. self.set_styles() self.restore_geometry() # Automatically load a file upon startup if requested. 
if filename: filename = os.path.realpath(filename) self.open_task.open(self.loader, filename, shank) self.show() def set_styles(self): # set stylesheet path = os.path.dirname(os.path.realpath(__file__)) path = os.path.join(path, "styles.css") with open(path, 'r') as f: stylesheet = f.read() stylesheet = stylesheet.replace('%ACCENT%', '#cdcdcd') stylesheet = stylesheet.replace('%ACCENT2%', '#a0a0a0') stylesheet = stylesheet.replace('%ACCENT3%', '#909090') stylesheet = stylesheet.replace('%ACCENT4%', '#cdcdcd') self.setStyleSheet(stylesheet) def set_busy_cursor(self): cursor = QtGui.QApplication.overrideCursor() if cursor is None or cursor.shape() != QtCore.Qt.BusyCursor: QtGui.QApplication.setOverrideCursor(self.busy_cursor) def set_normal_cursor(self): # QtGui.QApplication.setOverrideCursor(self.normal_cursor) QtGui.QApplication.restoreOverrideCursor() def set_busy(self, computing_correlograms=None, computing_matrix=None): if computing_correlograms is not None: self.computing_correlograms = computing_correlograms if computing_matrix is not None: self.computing_matrix = computing_matrix busy = self.computing_correlograms or self.computing_matrix if busy: self.set_busy_cursor() self.is_busy = True else: self.set_normal_cursor() self.is_busy = False def initialize_view_logger(self): # Initialize the view logger. viewlogger = vw.ViewLogger(name='viewlogger', fmt='%(message)s', level=USERPREF['loglevel'], print_caller=False) register(viewlogger) viewlogger.outlog.writeRequested.connect(self.log_view_write_callback) self.view_logger_text = StringIO() # Actions. 
# -------- def add_action(self, name, text, callback=None, shortcut=None, checkable=False, checked=False, icon=None): action = QtGui.QAction(text, self) if callback is None: callback = getattr(self, name + '_callback', None) if callback: action.triggered.connect(callback) if shortcut: action.setShortcut(shortcut) if icon: action.setIcon(get_icon(icon)) action.setCheckable(checkable) action.setChecked(checked) setattr(self, name + '_action', action) def create_file_actions(self): # Open actions. self.add_action('open', '&Open', shortcut='Ctrl+O', icon='open') # Open last file action path = SETTINGS['main_window.last_data_file'] if path: lastfile = os.path.basename(path) if len(lastfile) > 30: lastfile = '...' + lastfile[-30:] self.add_action('open_last', 'Open &last ({0:s})'.format( lastfile), shortcut='Ctrl+Alt+O') else: self.add_action('open_last', 'Open &last', shortcut='Ctrl+Alt+O') self.open_last_action.setEnabled(False) self.add_action('switch', 'S&witch shank') self.add_action('import', '&Import clustering') self.add_action('reset', '&Reset clustering') self.add_action('save', '&Save', shortcut='Ctrl+S', icon='save') # self.add_action('renumber', 'Save &renumbered') self.add_action('close', '&Close file') def create_edit_actions(self): # Undo/redo actions. self.add_action('undo', '&Undo', shortcut='Ctrl+Z', icon='undo') self.add_action('redo', '&Redo', shortcut='Ctrl+Y', icon='redo') # self.add_action('reset', 'Re&set') # Quit action. 
self.add_action('quit', '&Quit', shortcut='Ctrl+Q') def create_view_actions(self): self.add_action('add_feature_view', 'Add &FeatureView') self.add_action('add_waveform_view', 'Add &WaveformView') self.add_action('add_similarity_matrix_view', 'Add &SimilarityMatrixView') self.add_action('add_correlograms_view', 'Add &CorrelogramsView') self.add_action('add_ipython_view', 'Add &IPythonView') self.add_action('add_log_view', 'Add &LogView') # self.add_action('add_trace_view', 'Add &TraceView') self.add_action('reset_views', '&Reset views') self.add_action('toggle_fullscreen', 'Toggle fullscreen', shortcut='F') self.add_action('override_color', 'Override cluster &color', icon='override_color')#, shortcut='C') def create_control_actions(self): self.add_action('merge', '&Merge', shortcut='G', icon='merge') self.add_action('split', '&Split', shortcut='K', icon='split') self.add_action('recluster', '&Recluster', shortcut='CTRL+R') def create_correlograms_actions(self): self.add_action('change_ncorrbins', 'Change time &window') self.add_action('change_corrbin', 'Change &bin size') self.add_action('change_corr_normalization', 'Change &normalization') def create_wizard_actions(self): self.add_action('reset_navigation', '&Reinitialize wizard', shortcut='CTRL+ALT+Space') self.add_action('automatic_projection', '&Automatic projection', checkable=True, checked=True) self.add_action('change_candidate_color', 'Change &color of the closest match', shortcut='C') self.add_action('previous_candidate', '&Previous closest match', shortcut='SHIFT+Space') self.add_action('next_candidate', '&Skip closest match', shortcut='Space') self.add_action('skip_target', '&Skip best unsorted', # shortcut='Space' ) self.add_action('delete_candidate', 'Move closest match to &MUA', shortcut='CTRL+M') self.add_action('delete_candidate_noise', 'Move closest match to &noise', shortcut='CTRL+N') self.add_action('next_target', 'Move best unsorted to &good', shortcut='ALT+G') self.add_action('delete_target', 
'Move best unsorted to &MUA', shortcut='ALT+M') self.add_action('delete_target_noise', 'Move best unsorted to &noise', shortcut='ALT+N') self.add_action('delete_both', 'Move &both to MUA', shortcut='CTRL+ALT+M') self.add_action('delete_both_noise', 'Move both to noise', shortcut='CTRL+ALT+N') def create_help_actions(self): self.add_action('about', '&About') self.add_action('manual', 'Show &manual') self.add_action('shortcuts', 'Show &shortcuts') self.add_action('open_preferences', '&Open preferences') self.add_action('refresh_preferences', '&Refresh preferences') def create_menu(self): # File menu. file_menu = self.menuBar().addMenu("&File") file_menu.addAction(self.open_action) file_menu.addAction(self.open_last_action) file_menu.addSeparator() file_menu.addAction(self.reset_action) # file_menu.addAction(self.import_action) file_menu.addSeparator() file_menu.addAction(self.save_action) # file_menu.addAction(self.renumber_action) file_menu.addSeparator() file_menu.addAction(self.switch_action) file_menu.addSeparator() file_menu.addAction(self.close_action) file_menu.addAction(self.quit_action) # Edit menu. edit_menu = self.menuBar().addMenu("&Edit") edit_menu.addAction(self.undo_action) edit_menu.addAction(self.redo_action) # View menu. views_menu = self.menuBar().addMenu("&View") views_menu.addAction(self.add_feature_view_action) views_menu.addAction(self.add_waveform_view_action) views_menu.addAction(self.add_correlograms_view_action) views_menu.addAction(self.add_similarity_matrix_view_action) # views_menu.addAction(self.add_trace_view_action) views_menu.addSeparator() views_menu.addAction(self.add_log_view_action) if vw.IPYTHON: views_menu.addAction(self.add_ipython_view_action) views_menu.addSeparator() views_menu.addAction(self.override_color_action) views_menu.addSeparator() views_menu.addAction(self.reset_views_action) views_menu.addAction(self.toggle_fullscreen_action) # Correlograms menu. 
correlograms_menu = self.menuBar().addMenu("&Correlograms") correlograms_menu.addAction(self.change_ncorrbins_action) correlograms_menu.addAction(self.change_corrbin_action) correlograms_menu.addSeparator() correlograms_menu.addAction(self.change_corr_normalization_action) # Actions menu. actions_menu = self.menuBar().addMenu("&Actions") actions_menu.addSeparator() actions_menu.addAction(self.get_view('ClusterView').move_to_mua_action) actions_menu.addAction(self.get_view('ClusterView').move_to_noise_action) actions_menu.addSeparator() actions_menu.addAction(self.merge_action) actions_menu.addAction(self.split_action) # actions_menu.addSeparator() # actions_menu.addAction(self.recluster_action) # Wizard menu. wizard_menu = self.menuBar().addMenu("&Wizard") # Previous/skip candidate. wizard_menu.addAction(self.next_candidate_action) wizard_menu.addAction(self.previous_candidate_action) wizard_menu.addSeparator() wizard_menu.addAction(self.skip_target_action) wizard_menu.addSeparator() # Good group. # wizard_menu.addSeparator() # Delete. wizard_menu.addAction(self.delete_candidate_action) wizard_menu.addAction(self.delete_candidate_noise_action) wizard_menu.addSeparator() wizard_menu.addAction(self.next_target_action) wizard_menu.addAction(self.delete_target_action) wizard_menu.addAction(self.delete_target_noise_action) wizard_menu.addSeparator() wizard_menu.addAction(self.delete_both_action) wizard_menu.addAction(self.delete_both_noise_action) wizard_menu.addSeparator() # Misc. wizard_menu.addAction(self.change_candidate_color_action) wizard_menu.addAction(self.automatic_projection_action) wizard_menu.addAction(self.reset_navigation_action) # Help menu. 
help_menu = self.menuBar().addMenu("&Help") help_menu.addAction(self.open_preferences_action) help_menu.addAction(self.refresh_preferences_action) help_menu.addSeparator() help_menu.addAction(self.shortcuts_action) help_menu.addAction(self.manual_action) help_menu.addAction(self.about_action) def create_toolbar(self): self.toolbar = self.addToolBar("KlustaViewaToolbar") self.toolbar.setObjectName("KlustaViewaToolbar") self.toolbar.addAction(self.open_action) self.toolbar.addAction(self.save_action) # self.toolbar.addAction(self.saveas_action) self.toolbar.addSeparator() self.toolbar.addAction(self.merge_action) self.toolbar.addAction(self.split_action) self.toolbar.addSeparator() self.toolbar.addAction(self.get_view('ClusterView').move_to_mua_action) self.toolbar.addAction(self.get_view('ClusterView').move_to_noise_action) self.toolbar.addSeparator() self.toolbar.addAction(self.undo_action) self.toolbar.addAction(self.redo_action) # self.toolbar.addSeparator() # self.toolbar.addAction(self.override_color_action) self.addToolBar(QtCore.Qt.LeftToolBarArea, self.toolbar) def create_open_progress_dialog(self): self.open_progress = QtGui.QProgressDialog("Converting to Kwik...", "Cancel", 0, 0, self, QtCore.Qt.Tool) self.open_progress.setWindowModality(QtCore.Qt.WindowModal) self.open_progress.setValue(0) self.open_progress.setWindowTitle('Loading') self.open_progress.setCancelButton(None) self.open_progress.setMinimumDuration(0) def create_save_progress_dialog(self): self.save_progress = QtGui.QProgressDialog("Saving...", "Cancel", 0, 0, self, QtCore.Qt.Tool) self.save_progress.setWindowModality(QtCore.Qt.WindowModal) self.save_progress.setValue(0) self.save_progress.setWindowTitle('Saving') self.save_progress.setCancelButton(None) self.save_progress.setMinimumDuration(0) # Action enabled. 
# --------------- def update_action_enabled(self): self.undo_action.setEnabled(self.can_undo()) self.redo_action.setEnabled(self.can_redo()) self.merge_action.setEnabled(self.can_merge()) self.split_action.setEnabled(self.can_split()) def can_undo(self): if self.controller is None: return False return self.controller.can_undo() def can_redo(self): if self.controller is None: return False return self.controller.can_redo() def can_merge(self): cluster_view = self.get_view('ClusterView') clusters = cluster_view.selected_clusters() return len(clusters) >= 2 def can_split(self): cluster_view = self.get_view('ClusterView') clusters = cluster_view.selected_clusters() spikes_selected = self.spikes_selected return len(spikes_selected) >= 1 # View methods. # ------------- def create_view(self, view_class, position=None, closable=True, floatable=True, index=0, floating=None, title=None, **kwargs): """Add a widget to the main window.""" view = view_class(self, getfocus=False) view.set_data(**kwargs) # Create the dock widget. name = view_class.__name__ + '_' + str(index) dockwidget = ViewDockWidget(view_class.__name__) # dockwidget = ViewDockWidget(name) dockwidget.setObjectName(name) dockwidget.setWidget(view) dockwidget.closed.connect(self.dock_widget_closed) # Set dock widget options. options = QtGui.QDockWidget.DockWidgetMovable if closable: options = options | QtGui.QDockWidget.DockWidgetClosable if floatable: options = options | QtGui.QDockWidget.DockWidgetFloatable dockwidget.setFeatures(options) dockwidget.setAllowedAreas( QtCore.Qt.LeftDockWidgetArea | QtCore.Qt.RightDockWidgetArea | QtCore.Qt.TopDockWidgetArea | QtCore.Qt.BottomDockWidgetArea) dockwidget.visibilityChanged.connect(partial( self.dock_visibility_changed_callback, view)) if position is not None: # Add the dock widget to the main window. 
self.addDockWidget(position, dockwidget) if floating is not None: dockwidget.setFloating(floating) if title is None: title = view_class.__name__ dockwidget.setTitleBarWidget(DockTitleBar(dockwidget, title)) # Return the view widget. return view def add_cluster_view(self, do_update=None, floating=False): view = self.create_view(vw.ClusterView, position=QtCore.Qt.LeftDockWidgetArea, index=len(self.views['ClusterView']), closable=False, # floatable=False ) # Connect callback functions. view.clustersSelected.connect(self.clusters_selected_callback) view.clusterColorChanged.connect(self.cluster_color_changed_callback) view.groupColorChanged.connect(self.group_color_changed_callback) view.groupRenamed.connect(self.group_renamed_callback) view.clustersMoved.connect(self.clusters_moved_callback) view.groupAdded.connect(self.group_added_callback) view.groupRemoved.connect(self.group_removed_callback) self.views['ClusterView'].append(view) if do_update: self.taskgraph.update_cluster_view() def dock_visibility_changed_callback(self, view, visibility): # Register dock widget visibility. view.visibility = visibility def restore_last_view(self, name): """Return True if the last view was successfully restored, False if the view needs to be restored manually by creating a new view.""" # No existing view: need to create a new view. if not self.views[name]: return False view = self.views[name][-1] # A view exists and it is hidden: restore it. if getattr(view, 'visibility', None) is False: view.parent().toggleViewAction().activate(QtGui.QAction.Trigger) return True # A view exists but it is not hidden: just add a new view. else: return False def add_similarity_matrix_view(self, do_update=None, floating=False): # Try restoring the last view if it exists and it is hidden, and if # successfully restored, do nothing more. Otherwise, need to create # a new view. 
if self.restore_last_view('SimilarityMatrixView'): return view = self.create_view(vw.SimilarityMatrixView, index=len(self.views['SimilarityMatrixView']), position=QtCore.Qt.LeftDockWidgetArea, floating=floating) view.clustersSelected.connect(self.cluster_pair_selected_callback) self.views['SimilarityMatrixView'].append(view) if do_update and self.is_file_open: self.taskgraph.update_similarity_matrix_view() def add_waveform_view(self, do_update=None, floating=False): view = self.create_view(vw.WaveformView, index=len(self.views['WaveformView']), position=QtCore.Qt.RightDockWidgetArea, floating=floating) view.spikesHighlighted.connect( self.waveform_spikes_highlighted_callback) view.boxClicked.connect(self.waveform_box_clicked_callback) self.views['WaveformView'].append(view) if do_update and self.is_file_open and self.loader.has_selection(): self.taskgraph.update_waveform_view() def add_feature_view(self, do_update=None, floating=False): view = self.create_view(vw.FeatureProjectionView, index=len(self.views['FeatureView']), position=QtCore.Qt.RightDockWidgetArea, floating=floating, title='FeatureView') view.spikesHighlighted.connect( self.features_spikes_highlighted_callback) view.spikesSelected.connect( self.features_spikes_selected_callback) self.views['FeatureView'].append(view) if do_update and self.is_file_open and self.loader.has_selection(): self.taskgraph.update_feature_view() def add_ipython_view(self, floating=None): view = self.create_view(vw.IPythonView, index=len(self.views['IPythonView']), position=QtCore.Qt.BottomDockWidgetArea, floating=True) # Create namespace for the interactive session. namespace = dict( window=self, select=self.get_view('ClusterView').select, loader=self.loader, stats=self.statscache, wizard=self.wizard, ) view.set_data(**namespace) # Load all .py files in the code directory. 
paths = USERPREF['ipython_import_paths'] or [] if isinstance(paths, basestring): paths = [paths] for path in paths: path = os.path.realpath(os.path.expanduser(path)) if os.path.exists(path): files = [file for file in os.listdir(path) if file.endswith('.py')] for file in files: log.debug("Running {0:s}".format(file)) view.run_file(os.path.join(path, file)) self.views['IPythonView'].append(view) def add_log_view(self, floating=None): if len(self.views['LogView']) >= 1: return view = self.create_view(vw.LogView, text=self.view_logger_text.getvalue(), position=QtCore.Qt.BottomDockWidgetArea, floating=True) self.views['LogView'].append(view) def log_view_write_callback(self, message): view = self.get_view('LogView') if view: view.append(message) self.view_logger_text.write(message) def add_correlograms_view(self, do_update=None, floating=False): view = self.create_view(vw.CorrelogramsView, index=len(self.views['CorrelogramsView']), position=QtCore.Qt.RightDockWidgetArea, floating=floating) self.views['CorrelogramsView'].append(view) if do_update and self.is_file_open and self.loader.has_selection(): self.taskgraph.update_correlograms_view() # def add_trace_view(self, do_update=None, floating=None): # # if len(self.views['TraceView']) >= 1: # # return # view = self.create_view(vw.TraceView, # index=len(self.views['TraceView']), # position=QtCore.Qt.BottomDockWidgetArea, # floating=True) # self.views['TraceView'].append(view) # if do_update and self.is_file_open: # self.taskgraph.update_trace_view() def get_view(self, name, index=0): views = self.views.get(name, []) if not views: return None else: return views[index] def get_views(self, name): return self.views[name] def create_views(self): """Create all views at initialization.""" # Create the default layout. 
self.views = dict( ClusterView=[], SimilarityMatrixView=[], WaveformView=[], FeatureView=[], CorrelogramsView=[], IPythonView=[], TraceView=[], LogView=[], ) count = SETTINGS['main_window.views'] if count is None: self.create_default_views() else: self.create_custom_views(count) def create_default_views(self, do_update=None, floating=False): self.add_cluster_view(do_update=do_update, floating=floating) self.add_similarity_matrix_view(do_update=do_update, floating=floating) self.splitDockWidget( self.get_view('ClusterView').parentWidget(), self.get_view('SimilarityMatrixView').parentWidget(), QtCore.Qt.Vertical ) self.add_waveform_view(do_update=do_update, floating=floating) self.add_feature_view(do_update=do_update, floating=floating) self.splitDockWidget( self.get_view('WaveformView').parentWidget(), self.get_view('FeatureView').parentWidget(), QtCore.Qt.Horizontal ) self.add_correlograms_view(do_update=do_update, floating=floating) self.splitDockWidget( self.get_view('FeatureView').parentWidget(), self.get_view('CorrelogramsView').parentWidget(), QtCore.Qt.Vertical ) def create_custom_views(self, count): [self.add_cluster_view() for _ in xrange(count.get('ClusterView', 0))] [self.add_similarity_matrix_view() for _ in xrange(count.get('SimilarityMatrixView', 0))] [self.add_waveform_view() for _ in xrange(count.get('WaveformView', 0))] [self.add_feature_view() for _ in xrange(count.get('FeatureView', 0))] [self.add_log_view() for _ in xrange(count.get('LogView', 0))] [self.add_ipython_view() for _ in xrange(count.get('IPythonView', 0))] [self.add_correlograms_view() for _ in xrange(count.get('CorrelogramsView', 0))] #[self.add_trace_view() for _ in xrange(count.get('TraceView', 0))] def dock_widget_closed(self, dock): for key in self.views.keys(): self.views[key] = [view for view in self.views[key] if view.parent() != dock] # Threads. # -------- def create_threads(self): # Create the external threads. 
self.open_task = inthread(OpenTask)() self.open_task.dataOpened.connect(self.open_done) self.open_task.dataSaved.connect(self.save_done) self.open_task.dataOpenFailed.connect(self.open_failed) def join_threads(self): self.open_task.join() self.taskgraph.join() # File menu callbacks. # -------------------- def open_callback(self, checked=None): # HACK: Force release of Ctrl key. self.force_key_release() folder = SETTINGS['main_window.last_data_dir'] path = QtGui.QFileDialog.getOpenFileName(self, "Open a .kwik file", folder)[0] # If a file has been selected, open it. if path: # Launch the loading task in the background asynchronously. self._path = path self.open_task.open(self.loader, path) # Save the folder. folder = os.path.dirname(path) SETTINGS['main_window.last_data_dir'] = folder SETTINGS['main_window.last_data_file'] = path def import_callback(self, checked=None): folder = SETTINGS['main_window.last_data_dir'] path = QtGui.QFileDialog.getOpenFileName(self, "Open a .kwik file", folder)[0] # If a file has been selected, open it. if path and self.loader is not None: clu = read_clusters(path) # TODO self.open_done() def save_callback(self, checked=None): self.open_task.save(self.loader) def reset_callback(self, checked=None): # reply = QtGui.QMessageBox.question(self, 'Reset clustering', # "Do you *really* want to erase permanently your manual clustering and reset it to the original (automatic) clustering? You won't be able to undo this operation!", # ( # QtGui.QMessageBox.Yes | # QtGui.QMessageBox.Cancel # ), # QtGui.QMessageBox.Cancel) # if reply == QtGui.QMessageBox.Yes: clustering_name, ok = QtGui.QInputDialog.getText(self, "Clustering name", "Copy from (you'll lose the current clustering):", QtGui.QLineEdit.Normal, 'original') if ok: self.loader.copy_clustering(clustering_from=clustering_name, clustering_to='main') # Reload the file. 
self.loader.close() self.open_task.open(self.loader, self._path) # elif reply == QtGui.QMessageBox.Cancel: # return # def renumber_callback(self, checked=None): # # folder = SETTINGS.get('main_window.last_data_file') # self.loader.save(renumber=True) # # self.need_save = False # self.open_last_callback() def open_last_callback(self, checked=None): path = SETTINGS['main_window.last_data_file'] if path: self._path = path self.open_task.open(self.loader, path) def close_callback(self, checked=None): # clusters = self.get_view('ClusterView').selected_clusters() # if clusters: # self.get_view('ClusterView').unselect() # time.sleep(.25) # Clear the views. self.clear_view('ClusterView') self.clear_view('SimilarityMatrixView') self.clear_view('FeatureView') self.clear_view('WaveformView') self.clear_view('CorrelogramsView') self.clear_view('TraceView') self.loader.close() self.is_file_open = False def switch_callback(self, checked=None): shank, ok = QtGui.QInputDialog.getInt(self, "Shank number", "Shank number:", self.loader.shank, min(self.loader.shanks), max(self.loader.shanks), 1) if ok: if shank in self.loader.shanks: self.loader.set_shank(shank) self.open_done() else: QtGui.QMessageBox.warning(self, "Wrong shank number", ("The selected shank '{0:d}' is not in " "the list of shanks: {1:s}.").format(shank, str(self.loader.shanks)), QtGui.QMessageBox.Ok, QtGui.QMessageBox.Ok) def clear_view(self, view_name): for v in self.get_views(view_name): v.set_data() if hasattr(v, 'clear'): v.clear() def quit_callback(self, checked=None): self.close() # Open callbacks. # -------------- def open_done(self): self.is_file_open = True self.setWindowTitle('KlustaViewa: {0:s}'.format( os.path.basename(self.loader.filename) )) register(FileLogger(self.loader.log_filename, name='kwik', level=logging.INFO)) # Start the selection buffer. 
self.buffer = Buffer(self, # delay_timer=.1, delay_buffer=.2 delay_timer=USERPREF['delay_timer'], delay_buffer=USERPREF['delay_buffer'] ) self.buffer.start() self.buffer.accepted.connect(self.buffer_accepted_callback) # HACK: force release of Control key. self.force_key_release() clusters = self.get_view('ClusterView').selected_clusters() if clusters: self.get_view('ClusterView').unselect() # Create the Controller. self.controller = Controller(self.loader) # Create the cache for the cluster statistics that need to be # computed in the background. self.statscache = StatsCache(SETTINGS.get('correlograms.ncorrbins', NCORRBINS_DEFAULT)) # Update stats cache in IPython view. ipython = self.get_view('IPythonView') if ipython: ipython.set_data(stats=self.statscache) # Initialize the wizard. self.wizard = Wizard() # Update the task graph. self.taskgraph.set(self) # self.taskgraph.update_projection_view() self.taskgraph.update_cluster_view() self.taskgraph.compute_similarity_matrix() # self.taskgraph.update_trace_view() def open_failed(self, message): self.open_progress.setValue(0) QtGui.QMessageBox.warning(self, "Error while opening the file", "An error occurred: {0:s}".format(message), QtGui.QMessageBox.Ok, QtGui.QMessageBox.Ok) def open_progress_reported(self, progress, progress_max): self.open_progress.setMaximum(progress_max) self.open_progress.setValue(progress) def save_progress_reported(self, progress, progress_max): self.save_progress.setMaximum(progress_max) self.save_progress.setValue(progress) def save_done(self): self.need_save = False # Selection methods. # ------------------ def buffer_accepted_callback(self, (clusters, wizard)): self._wizard = wizard # The wizard boolean specifies whether the autozoom is activated or not. 
self.taskgraph.select(clusters, wizard and self.automatic_projection_action.isChecked(), ) def clusters_selected_callback(self, clusters, wizard=False): self.buffer.request((clusters, wizard)) def cluster_pair_selected_callback(self, clusters): """Callback when the user clicks on a pair in the SimilarityMatrixView.""" self.get_view('ClusterView').select(clusters,) # Views menu callbacks. # --------------------- def add_feature_view_callback(self, checked=None): self.add_feature_view(do_update=True, floating=True) def add_waveform_view_callback(self, checked=None): self.add_waveform_view(do_update=True, floating=True) def add_similarity_matrix_view_callback(self, checked=None): self.add_similarity_matrix_view(do_update=True, floating=True) def add_correlograms_view_callback(self, checked=None): self.add_correlograms_view(do_update=True, floating=True) # def add_trace_view_callback(self, checked=None): # self.add_trace_view(do_update=True, floating=True) def add_log_view_callback(self, checked=None): self.add_log_view() def add_ipython_view_callback(self, checked=None): self.add_ipython_view() def reset_views_callback(self, checked=None): # Delete all views. for key, views in self.views.iteritems(): for view in views: self.removeDockWidget(view.parent()) self.views[key] = [] # Re-create the default views. self.create_default_views(do_update=self.is_file_open, floating=False) def toggle_fullscreen_callback(self): if self.isFullScreen(): self.showNormal() else: self.showFullScreen() # Override color callback. # ------------------------ def override_color_callback(self, checked=None): self.override_color = not self.override_color self.taskgraph.override_color(self.override_color) # Correlograms callbacks. 
# ----------------------- def change_ncorrbins_callback(self, checked=None): if not self.loader: return corrbin = SETTINGS.get('correlograms.corrbin', CORRBIN_DEFAULT) ncorrbins = SETTINGS.get('correlograms.ncorrbins', NCORRBINS_DEFAULT) duration = corrbin * ncorrbins # duration = self.loader.get_correlogram_window() duration_new, ok = QtGui.QInputDialog.getDouble(self, "Correlograms time window", "Half width (ms):", duration / 2 * 1000, 1, 100000, 1) if ok: duration_new = duration_new * .001 * 2 ncorrbins_new = 2 * int(np.ceil(.5 * duration_new / corrbin)) # ncorrbins_new = int(duration_new / corrbin * .001) SETTINGS['correlograms.ncorrbins'] = ncorrbins_new self.taskgraph.change_correlograms_parameters(ncorrbins=ncorrbins_new) def recluster_callback(self, checked=None): self.taskgraph.recluster() def change_corrbin_callback(self, checked=None): if not self.loader: return # ncorrbins = self.loader.ncorrbins # corrbin = self.loader.corrbin # duration = self.loader.get_correlogram_window() corrbin = SETTINGS.get('correlograms.corrbin', CORRBIN_DEFAULT) ncorrbins = SETTINGS.get('correlograms.ncorrbins', NCORRBINS_DEFAULT) duration = corrbin * ncorrbins corrbin_new, ok = QtGui.QInputDialog.getDouble(self, "Correlograms bin size", "Bin size (ms):", corrbin * 1000, .01, 1000, 2) if ok: corrbin_new = corrbin_new * .001 ncorrbins_new = 2 * int(np.ceil(.5 * duration/ corrbin_new)) SETTINGS['correlograms.corrbin'] = corrbin_new SETTINGS['correlograms.ncorrbins'] = ncorrbins_new self.taskgraph.change_correlograms_parameters(corrbin=corrbin_new, ncorrbins=ncorrbins_new) def change_corr_normalization_callback(self, checked=None): [view.change_normalization() for view in self.get_views('CorrelogramsView')] # Actions callbacks. 
# ------------------
def merge_callback(self, checked=None):
    """Merge the clusters currently selected in the ClusterView.

    Marks the session as dirty (need_save) and refreshes action states.
    Ignored while a background task is running (is_busy).
    """
    if self.is_busy:
        return
    self.need_save = True
    cluster_view = self.get_view('ClusterView')
    clusters = cluster_view.selected_clusters()
    self.taskgraph.merge(clusters, self._wizard)
    self.update_action_enabled()

def split_callback(self, checked=None):
    """Split the selected spikes out of the selected clusters."""
    if self.is_busy:
        return
    self.need_save = True
    cluster_view = self.get_view('ClusterView')
    clusters = cluster_view.selected_clusters()
    spikes_selected = self.spikes_selected
    # Cancel the selection after the split.
    self.spikes_selected = []
    self.taskgraph.split(clusters, spikes_selected, self._wizard)
    self.update_action_enabled()

def undo_callback(self, checked=None):
    """Undo the last clustering action (no-op while busy)."""
    if self.is_busy:
        return
    self.taskgraph.undo(self._wizard)
    self.update_action_enabled()

def redo_callback(self, checked=None):
    """Redo the last undone clustering action (no-op while busy)."""
    if self.is_busy:
        return
    self.taskgraph.redo(self._wizard)
    self.update_action_enabled()

def cluster_color_changed_callback(self, cluster, color):
    """Propagate a cluster color change to the task graph."""
    self.taskgraph.cluster_color_changed(cluster, color, self._wizard)
    self.update_action_enabled()

def group_color_changed_callback(self, group, color):
    """Propagate a group color change to the task graph."""
    self.taskgraph.group_color_changed(group, color)
    self.update_action_enabled()

def group_renamed_callback(self, group, name):
    """Propagate a group rename to the task graph."""
    self.taskgraph.group_renamed(group, name)
    self.update_action_enabled()

def clusters_moved_callback(self, clusters, group):
    """Propagate a clusters-to-group move to the task graph."""
    self.taskgraph.clusters_moved(clusters, group)
    self.update_action_enabled()

def group_removed_callback(self, group):
    """Propagate a group removal to the task graph."""
    self.taskgraph.group_removed(group)
    self.update_action_enabled()

def group_added_callback(self, group, name, color):
    """Propagate a group creation to the task graph."""
    self.taskgraph.group_added(group, name, color)
    self.update_action_enabled()

# Wizard callbacks.
# -----------------
def reset_navigation_callback(self, checked=None):
    """Reset the wizard navigation state."""
    self.taskgraph.wizard_reset()

def previous_candidate_callback(self, checked=None):
    # Previous candidate.
    self.taskgraph.wizard_previous_candidate()

def next_candidate_callback(self, checked=None):
    if self.is_busy:
        return
    # Skip candidate.
    self.taskgraph.wizard_next_candidate()

def skip_target_callback(self, checked=None):
    if self.is_busy:
        return
    # Skip target.
    self.taskgraph.wizard_skip_target()

def next_target_callback(self, checked=None):
    if self.is_busy:
        return
    # Move target to Good group, and select next target.
    # NOTE(review): the integers passed to wizard_move_and_next below
    # appear to be group indices (0/1/2); only 2 is labeled ("Good")
    # in this chunk -- confirm the mapping in the wizard module.
    self.taskgraph.wizard_move_and_next('target', 2)

def delete_candidate_noise_callback(self, checked=None):
    self.taskgraph.wizard_move_and_next('candidate', 0)

def delete_candidate_callback(self, checked=None):
    self.taskgraph.wizard_move_and_next('candidate', 1)

def delete_target_noise_callback(self, checked=None):
    self.taskgraph.wizard_move_and_next('target', 0)

def delete_target_callback(self, checked=None):
    self.taskgraph.wizard_move_and_next('target', 1)

def delete_both_noise_callback(self, checked=None):
    self.taskgraph.wizard_move_and_next('both', 0)

def delete_both_callback(self, checked=None):
    self.taskgraph.wizard_move_and_next('both', 1)

def change_candidate_color_callback(self, checked=None):
    """Change the wizard candidate's color and refresh action states."""
    self.taskgraph.wizard_change_candidate_color()
    self.update_action_enabled()

# Views callbacks.
# ----------------
def waveform_spikes_highlighted_callback(self, spikes):
    """Mirror a waveform-view highlight into every FeatureView."""
    self.spikes_highlighted = spikes
    [view.highlight_spikes(get_array(spikes))
     for view in self.get_views('FeatureView')]

def features_spikes_highlighted_callback(self, spikes):
    """Mirror a feature-view highlight into every WaveformView."""
    self.spikes_highlighted = spikes
    [view.highlight_spikes(get_array(spikes))
     for view in self.get_views('WaveformView')]

def features_spikes_selected_callback(self, spikes):
    """Record a feature-view spike selection and mirror the highlight."""
    self.spikes_selected = spikes
    self.update_action_enabled()
    [view.highlight_spikes(get_array(spikes))
     for view in self.get_views('WaveformView')]

def waveform_box_clicked_callback(self, coord, cluster, channel):
    """Changed in waveform ==> change in feature"""
    [view.set_projection(coord, channel, -1)
     for view in self.get_views('FeatureView')]

# Help callbacks.
# --------------- def manual_callback(self, checked=None): url = "https://github.com/klusta-team/klustaviewa/tree/master/docs/manual.md" webbrowser.open(url) def about_callback(self, checked=None): QtGui.QMessageBox.about(self, "KlustaViewa", ABOUT) def shortcuts_callback(self, checked=None): e = QtGui.QKeyEvent(QtCore.QEvent.KeyPress, QtCore.Qt.Key_H, QtCore.Qt.NoModifier,) self.keyPressEvent(e) self.keyReleaseEvent(e) def open_preferences_callback(self, checked=None): url = USERPREF.filepath log.debug("Opening preferences file at '{0:s}'".format(url)) QtGui.QDesktopServices.openUrl(QtCore.QUrl('file:///' + url)) def refresh_preferences_callback(self, checked=None): log.debug("Refreshing user preferences.") USERPREF.refresh() # Geometry. # --------- def save_geometry(self): """Save the arrangement of the whole window.""" SETTINGS['main_window.views'] = {name: len(self.get_views(name)) for name in self.views.keys()} SETTINGS['main_window.geometry'] = encode_bytearray( self.saveGeometry()) SETTINGS['main_window.state'] = encode_bytearray(self.saveState()) def restore_geometry(self): """Restore the arrangement of the whole window.""" g = SETTINGS['main_window.geometry'] s = SETTINGS['main_window.state'] if s: self.restoreState(decode_bytearray(s)) if g: self.restoreGeometry(decode_bytearray(g)) # Event handlers. 
# --------------- def force_key_release(self): """HACK: force release of Ctrl, Shift and Alt when focus out.""" self.keyReleaseEvent(QtGui.QKeyEvent(QtCore.QEvent.KeyRelease, QtCore.Qt.Key_Control, QtCore.Qt.NoModifier)) self.keyReleaseEvent(QtGui.QKeyEvent(QtCore.QEvent.KeyRelease, QtCore.Qt.Key_Shift, QtCore.Qt.NoModifier)) self.keyReleaseEvent(QtGui.QKeyEvent(QtCore.QEvent.KeyRelease, QtCore.Qt.Key_Alt, QtCore.Qt.NoModifier)) def event(self, e): if e.type() == QtCore.QEvent.WindowActivate: pass elif e.type() == QtCore.QEvent.WindowDeactivate: self.force_key_release() return super(MainWindow, self).event(e) def contextMenuEvent(self, e): """Disable the context menu in the main window.""" return def keyPressEvent(self, e): super(MainWindow, self).keyPressEvent(e) for views in self.views.values(): [view.keyPressEvent(e) for view in views] def keyReleaseEvent(self, e): super(MainWindow, self).keyReleaseEvent(e) for views in self.views.values(): [view.keyReleaseEvent(e) for view in views] def closeEvent(self, e): prompt_save_on_exit = USERPREF['prompt_save_on_exit'] if prompt_save_on_exit is None: prompt_save_on_exit = True if self.need_save and prompt_save_on_exit: reply = QtGui.QMessageBox.question(self, 'Save', "Do you want to save?", ( QtGui.QMessageBox.Save | QtGui.QMessageBox.Close | QtGui.QMessageBox.Cancel ), QtGui.QMessageBox.Save) if reply == QtGui.QMessageBox.Save: folder = SETTINGS.get('main_window.last_data_file') self.loader.save() elif reply == QtGui.QMessageBox.Cancel: e.ignore() return elif reply == QtGui.QMessageBox.Close: pass # Save the window geometry when closing the software. self.save_geometry() # End the threads. self.join_threads() # Close the loader. self.loader.close() # Close all views. for views in self.views.values(): for view in views: if hasattr(view, 'closeEvent'): view.closeEvent(e) # Close the logger file. if self.dolog: close_file_logger() # Close the main window. 
    return super(MainWindow, self).closeEvent(e)

def sizeHint(self):
    """Preferred initial window size."""
    return QtCore.QSize(1200, 800)

# -----------------------------------------------------------------------------
# File logger
# -----------------------------------------------------------------------------
def create_file_logger():
    """Create and register the global on-disk logger (module-level state)."""
    global LOGGER_FILE
    LOGFILENAME = get_global_path('logfile.txt')
    LOGGER_FILE = FileLogger(LOGFILENAME, name='file',
        level=USERPREF['loglevel_file'])
    register(LOGGER_FILE)

def close_file_logger():
    """Unregister the global on-disk logger created by create_file_logger."""
    unregister(LOGGER_FILE)
bsd-3-clause
tmhm/scikit-learn
examples/applications/plot_stock_market.py
227
8284
""" ======================================= Visualizing the stock market structure ======================================= This example employs several unsupervised learning techniques to extract the stock market structure from variations in historical quotes. The quantity that we use is the daily variation in quote price: quotes that are linked tend to cofluctuate during a day. .. _stock_market: Learning a graph structure -------------------------- We use sparse inverse covariance estimation to find which quotes are correlated conditionally on the others. Specifically, sparse inverse covariance gives us a graph, that is a list of connection. For each symbol, the symbols that it is connected too are those useful to explain its fluctuations. Clustering ---------- We use clustering to group together quotes that behave similarly. Here, amongst the :ref:`various clustering techniques <clustering>` available in the scikit-learn, we use :ref:`affinity_propagation` as it does not enforce equal-size clusters, and it can choose automatically the number of clusters from the data. Note that this gives us a different indication than the graph, as the graph reflects conditional relations between variables, while the clustering reflects marginal properties: variables clustered together can be considered as having a similar impact at the level of the full stock market. Embedding in 2D space --------------------- For visualization purposes, we need to lay out the different symbols on a 2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D embedding. 
Visualization ------------- The output of the 3 models are combined in a 2D graph where nodes represents the stocks and edges the: - cluster labels are used to define the color of the nodes - the sparse covariance model is used to display the strength of the edges - the 2D embedding is used to position the nodes in the plan This example has a fair amount of visualization-related code, as visualization is crucial here to display the graph. One of the challenge is to position the labels minimizing overlap. For this we use an heuristic based on the direction of the nearest neighbor along each axis. """ print(__doc__) # Author: Gael Varoquaux gael.varoquaux@normalesup.org # License: BSD 3 clause import datetime import numpy as np import matplotlib.pyplot as plt from matplotlib import finance from matplotlib.collections import LineCollection from sklearn import cluster, covariance, manifold ############################################################################### # Retrieve the data from Internet # Choose a time period reasonnably calm (not too long ago so that we get # high-tech firms, and before the 2008 crash) d1 = datetime.datetime(2003, 1, 1) d2 = datetime.datetime(2008, 1, 1) # kraft symbol has now changed from KFT to MDLZ in yahoo symbol_dict = { 'TOT': 'Total', 'XOM': 'Exxon', 'CVX': 'Chevron', 'COP': 'ConocoPhillips', 'VLO': 'Valero Energy', 'MSFT': 'Microsoft', 'IBM': 'IBM', 'TWX': 'Time Warner', 'CMCSA': 'Comcast', 'CVC': 'Cablevision', 'YHOO': 'Yahoo', 'DELL': 'Dell', 'HPQ': 'HP', 'AMZN': 'Amazon', 'TM': 'Toyota', 'CAJ': 'Canon', 'MTU': 'Mitsubishi', 'SNE': 'Sony', 'F': 'Ford', 'HMC': 'Honda', 'NAV': 'Navistar', 'NOC': 'Northrop Grumman', 'BA': 'Boeing', 'KO': 'Coca Cola', 'MMM': '3M', 'MCD': 'Mc Donalds', 'PEP': 'Pepsi', 'MDLZ': 'Kraft Foods', 'K': 'Kellogg', 'UN': 'Unilever', 'MAR': 'Marriott', 'PG': 'Procter Gamble', 'CL': 'Colgate-Palmolive', 'GE': 'General Electrics', 'WFC': 'Wells Fargo', 'JPM': 'JPMorgan Chase', 'AIG': 'AIG', 'AXP': 'American 
express', 'BAC': 'Bank of America', 'GS': 'Goldman Sachs', 'AAPL': 'Apple', 'SAP': 'SAP', 'CSCO': 'Cisco', 'TXN': 'Texas instruments', 'XRX': 'Xerox', 'LMT': 'Lookheed Martin', 'WMT': 'Wal-Mart', 'WBA': 'Walgreen', 'HD': 'Home Depot', 'GSK': 'GlaxoSmithKline', 'PFE': 'Pfizer', 'SNY': 'Sanofi-Aventis', 'NVS': 'Novartis', 'KMB': 'Kimberly-Clark', 'R': 'Ryder', 'GD': 'General Dynamics', 'RTN': 'Raytheon', 'CVS': 'CVS', 'CAT': 'Caterpillar', 'DD': 'DuPont de Nemours'} symbols, names = np.array(list(symbol_dict.items())).T quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True) for symbol in symbols] open = np.array([q.open for q in quotes]).astype(np.float) close = np.array([q.close for q in quotes]).astype(np.float) # The daily variations of the quotes are what carry most information variation = close - open ############################################################################### # Learn a graphical structure from the correlations edge_model = covariance.GraphLassoCV() # standardize the time series: using correlations rather than covariance # is more efficient for structure recovery X = variation.copy().T X /= X.std(axis=0) edge_model.fit(X) ############################################################################### # Cluster using affinity propagation _, labels = cluster.affinity_propagation(edge_model.covariance_) n_labels = labels.max() for i in range(n_labels + 1): print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i]))) ############################################################################### # Find a low-dimension embedding for visualization: find the best position of # the nodes (the stocks) on a 2D plane # We use a dense eigen_solver to achieve reproducibility (arpack is # initiated with random vectors that we don't control). In addition, we # use a large number of neighbors to capture the large-scale structure. 
node_position_model = manifold.LocallyLinearEmbedding( n_components=2, eigen_solver='dense', n_neighbors=6) embedding = node_position_model.fit_transform(X.T).T ############################################################################### # Visualization plt.figure(1, facecolor='w', figsize=(10, 8)) plt.clf() ax = plt.axes([0., 0., 1., 1.]) plt.axis('off') # Display a graph of the partial correlations partial_correlations = edge_model.precision_.copy() d = 1 / np.sqrt(np.diag(partial_correlations)) partial_correlations *= d partial_correlations *= d[:, np.newaxis] non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02) # Plot the nodes using the coordinates of our embedding plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels, cmap=plt.cm.spectral) # Plot the edges start_idx, end_idx = np.where(non_zero) #a sequence of (*line0*, *line1*, *line2*), where:: # linen = (x0, y0), (x1, y1), ... (xm, ym) segments = [[embedding[:, start], embedding[:, stop]] for start, stop in zip(start_idx, end_idx)] values = np.abs(partial_correlations[non_zero]) lc = LineCollection(segments, zorder=0, cmap=plt.cm.hot_r, norm=plt.Normalize(0, .7 * values.max())) lc.set_array(values) lc.set_linewidths(15 * values) ax.add_collection(lc) # Add a label to each node. 
The challenge here is that we want to # position the labels to avoid overlap with other labels for index, (name, label, (x, y)) in enumerate( zip(names, labels, embedding.T)): dx = x - embedding[0] dx[index] = 1 dy = y - embedding[1] dy[index] = 1 this_dx = dx[np.argmin(np.abs(dy))] this_dy = dy[np.argmin(np.abs(dx))] if this_dx > 0: horizontalalignment = 'left' x = x + .002 else: horizontalalignment = 'right' x = x - .002 if this_dy > 0: verticalalignment = 'bottom' y = y + .002 else: verticalalignment = 'top' y = y - .002 plt.text(x, y, name, size=10, horizontalalignment=horizontalalignment, verticalalignment=verticalalignment, bbox=dict(facecolor='w', edgecolor=plt.cm.spectral(label / float(n_labels)), alpha=.6)) plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(), embedding[0].max() + .10 * embedding[0].ptp(),) plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(), embedding[1].max() + .03 * embedding[1].ptp()) plt.show()
bsd-3-clause
quheng/scikit-learn
sklearn/pipeline.py
61
21271
""" The :mod:`sklearn.pipeline` module implements utilities to build a composite estimator, as a chain of transforms and estimators. """ # Author: Edouard Duchesnay # Gael Varoquaux # Virgile Fritsch # Alexandre Gramfort # Lars Buitinck # Licence: BSD from collections import defaultdict from warnings import warn import numpy as np from scipy import sparse from .base import BaseEstimator, TransformerMixin from .externals.joblib import Parallel, delayed from .externals import six from .utils import tosequence from .utils.metaestimators import if_delegate_has_method from .externals.six import iteritems __all__ = ['Pipeline', 'FeatureUnion'] class Pipeline(BaseEstimator): """Pipeline of transforms with a final estimator. Sequentially apply a list of transforms and a final estimator. Intermediate steps of the pipeline must be 'transforms', that is, they must implement fit and transform methods. The final estimator only needs to implement fit. The purpose of the pipeline is to assemble several steps that can be cross-validated together while setting different parameters. For this, it enables setting parameters of the various steps using their names and the parameter name separated by a '__', as in the example below. Read more in the :ref:`User Guide <pipeline>`. Parameters ---------- steps : list List of (name, transform) tuples (implementing fit/transform) that are chained, in the order in which they are chained, with the last object an estimator. Attributes ---------- named_steps : dict Read-only attribute to access any step parameter by user given name. Keys are step names and values are steps parameters. Examples -------- >>> from sklearn import svm >>> from sklearn.datasets import samples_generator >>> from sklearn.feature_selection import SelectKBest >>> from sklearn.feature_selection import f_regression >>> from sklearn.pipeline import Pipeline >>> # generate some data to play with >>> X, y = samples_generator.make_classification( ... 
n_informative=5, n_redundant=0, random_state=42) >>> # ANOVA SVM-C >>> anova_filter = SelectKBest(f_regression, k=5) >>> clf = svm.SVC(kernel='linear') >>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)]) >>> # You can set the parameters using the names issued >>> # For instance, fit using a k of 10 in the SelectKBest >>> # and a parameter 'C' of the svm >>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y) ... # doctest: +ELLIPSIS Pipeline(steps=[...]) >>> prediction = anova_svm.predict(X) >>> anova_svm.score(X, y) # doctest: +ELLIPSIS 0.77... >>> # getting the selected features chosen by anova_filter >>> anova_svm.named_steps['anova'].get_support() ... # doctest: +NORMALIZE_WHITESPACE array([ True, True, True, False, False, True, False, True, True, True, False, False, True, False, True, False, False, False, False, True], dtype=bool) """ # BaseEstimator interface def __init__(self, steps): names, estimators = zip(*steps) if len(dict(steps)) != len(steps): raise ValueError("Provided step names are not unique: %s" % (names,)) # shallow copy of steps self.steps = tosequence(steps) transforms = estimators[:-1] estimator = estimators[-1] for t in transforms: if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(t, "transform")): raise TypeError("All intermediate steps of the chain should " "be transforms and implement fit and transform" " '%s' (type %s) doesn't)" % (t, type(t))) if not hasattr(estimator, "fit"): raise TypeError("Last step of chain should implement fit " "'%s' (type %s) doesn't)" % (estimator, type(estimator))) @property def _estimator_type(self): return self.steps[-1][1]._estimator_type def get_params(self, deep=True): if not deep: return super(Pipeline, self).get_params(deep=False) else: out = self.named_steps for name, step in six.iteritems(self.named_steps): for key, value in six.iteritems(step.get_params(deep=True)): out['%s__%s' % (name, key)] = value out.update(super(Pipeline, self).get_params(deep=False)) 
return out @property def named_steps(self): return dict(self.steps) @property def _final_estimator(self): return self.steps[-1][1] # Estimator interface def _pre_transform(self, X, y=None, **fit_params): fit_params_steps = dict((step, {}) for step, _ in self.steps) for pname, pval in six.iteritems(fit_params): step, param = pname.split('__', 1) fit_params_steps[step][param] = pval Xt = X for name, transform in self.steps[:-1]: if hasattr(transform, "fit_transform"): Xt = transform.fit_transform(Xt, y, **fit_params_steps[name]) else: Xt = transform.fit(Xt, y, **fit_params_steps[name]) \ .transform(Xt) return Xt, fit_params_steps[self.steps[-1][0]] def fit(self, X, y=None, **fit_params): """Fit all the transforms one after the other and transform the data, then fit the transformed data using the final estimator. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. """ Xt, fit_params = self._pre_transform(X, y, **fit_params) self.steps[-1][-1].fit(Xt, y, **fit_params) return self def fit_transform(self, X, y=None, **fit_params): """Fit all the transforms one after the other and transform the data, then use fit_transform on transformed data using the final estimator. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. """ Xt, fit_params = self._pre_transform(X, y, **fit_params) if hasattr(self.steps[-1][-1], 'fit_transform'): return self.steps[-1][-1].fit_transform(Xt, y, **fit_params) else: return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt) @if_delegate_has_method(delegate='_final_estimator') def predict(self, X): """Applies transforms to the data, and the predict method of the final estimator. 
Valid only if the final estimator implements predict. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. """ Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].predict(Xt) @if_delegate_has_method(delegate='_final_estimator') def fit_predict(self, X, y=None, **fit_params): """Applies fit_predict of last step in pipeline after transforms. Applies fit_transforms of a pipeline to the data, followed by the fit_predict method of the final estimator in the pipeline. Valid only if the final estimator implements fit_predict. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. """ Xt, fit_params = self._pre_transform(X, y, **fit_params) return self.steps[-1][-1].fit_predict(Xt, y, **fit_params) @if_delegate_has_method(delegate='_final_estimator') def predict_proba(self, X): """Applies transforms to the data, and the predict_proba method of the final estimator. Valid only if the final estimator implements predict_proba. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. """ Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].predict_proba(Xt) @if_delegate_has_method(delegate='_final_estimator') def decision_function(self, X): """Applies transforms to the data, and the decision_function method of the final estimator. Valid only if the final estimator implements decision_function. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. 
""" Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].decision_function(Xt) @if_delegate_has_method(delegate='_final_estimator') def predict_log_proba(self, X): """Applies transforms to the data, and the predict_log_proba method of the final estimator. Valid only if the final estimator implements predict_log_proba. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. """ Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].predict_log_proba(Xt) @if_delegate_has_method(delegate='_final_estimator') def transform(self, X): """Applies transforms to the data, and the transform method of the final estimator. Valid only if the final estimator implements transform. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. """ Xt = X for name, transform in self.steps: Xt = transform.transform(Xt) return Xt @if_delegate_has_method(delegate='_final_estimator') def inverse_transform(self, X): """Applies inverse transform to the data. Starts with the last step of the pipeline and applies ``inverse_transform`` in inverse order of the pipeline steps. Valid only if all steps of the pipeline implement inverse_transform. Parameters ---------- X : iterable Data to inverse transform. Must fulfill output requirements of the last step of the pipeline. """ if X.ndim == 1: warn("From version 0.19, a 1d X will not be reshaped in" " pipeline.inverse_transform any more.", FutureWarning) X = X[None, :] Xt = X for name, step in self.steps[::-1]: Xt = step.inverse_transform(Xt) return Xt @if_delegate_has_method(delegate='_final_estimator') def score(self, X, y=None): """Applies transforms to the data, and the score method of the final estimator. Valid only if the final estimator implements score. Parameters ---------- X : iterable Data to score. 
Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Targets used for scoring. Must fulfill label requirements for all steps of the pipeline. """ Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].score(Xt, y) @property def classes_(self): return self.steps[-1][-1].classes_ @property def _pairwise(self): # check if first estimator expects pairwise input return getattr(self.steps[0][1], '_pairwise', False) def _name_estimators(estimators): """Generate names for estimators.""" names = [type(estimator).__name__.lower() for estimator in estimators] namecount = defaultdict(int) for est, name in zip(estimators, names): namecount[name] += 1 for k, v in list(six.iteritems(namecount)): if v == 1: del namecount[k] for i in reversed(range(len(estimators))): name = names[i] if name in namecount: names[i] += "-%d" % namecount[name] namecount[name] -= 1 return list(zip(names, estimators)) def make_pipeline(*steps): """Construct a Pipeline from the given estimators. This is a shorthand for the Pipeline constructor; it does not require, and does not permit, naming the estimators. Instead, they will be given names automatically based on their types. 
Examples -------- >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.preprocessing import StandardScaler >>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE Pipeline(steps=[('standardscaler', StandardScaler(copy=True, with_mean=True, with_std=True)), ('gaussiannb', GaussianNB())]) Returns ------- p : Pipeline """ return Pipeline(_name_estimators(steps)) def _fit_one_transformer(transformer, X, y): return transformer.fit(X, y) def _transform_one(transformer, name, X, transformer_weights): if transformer_weights is not None and name in transformer_weights: # if we have a weight for this transformer, muliply output return transformer.transform(X) * transformer_weights[name] return transformer.transform(X) def _fit_transform_one(transformer, name, X, y, transformer_weights, **fit_params): if transformer_weights is not None and name in transformer_weights: # if we have a weight for this transformer, muliply output if hasattr(transformer, 'fit_transform'): X_transformed = transformer.fit_transform(X, y, **fit_params) return X_transformed * transformer_weights[name], transformer else: X_transformed = transformer.fit(X, y, **fit_params).transform(X) return X_transformed * transformer_weights[name], transformer if hasattr(transformer, 'fit_transform'): X_transformed = transformer.fit_transform(X, y, **fit_params) return X_transformed, transformer else: X_transformed = transformer.fit(X, y, **fit_params).transform(X) return X_transformed, transformer class FeatureUnion(BaseEstimator, TransformerMixin): """Concatenates results of multiple transformer objects. This estimator applies a list of transformer objects in parallel to the input data, then concatenates the results. This is useful to combine several feature extraction mechanisms into a single transformer. Read more in the :ref:`User Guide <feature_union>`. 
Parameters ---------- transformer_list: list of (string, transformer) tuples List of transformer objects to be applied to the data. The first half of each tuple is the name of the transformer. n_jobs: int, optional Number of jobs to run in parallel (default 1). transformer_weights: dict, optional Multiplicative weights for features per transformer. Keys are transformer names, values the weights. """ def __init__(self, transformer_list, n_jobs=1, transformer_weights=None): self.transformer_list = transformer_list self.n_jobs = n_jobs self.transformer_weights = transformer_weights def get_feature_names(self): """Get feature names from all transformers. Returns ------- feature_names : list of strings Names of the features produced by transform. """ feature_names = [] for name, trans in self.transformer_list: if not hasattr(trans, 'get_feature_names'): raise AttributeError("Transformer %s does not provide" " get_feature_names." % str(name)) feature_names.extend([name + "__" + f for f in trans.get_feature_names()]) return feature_names def fit(self, X, y=None): """Fit all transformers using X. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Input data, used to fit transformers. """ transformers = Parallel(n_jobs=self.n_jobs)( delayed(_fit_one_transformer)(trans, X, y) for name, trans in self.transformer_list) self._update_transformer_list(transformers) return self def fit_transform(self, X, y=None, **fit_params): """Fit all transformers using X, transform the data and concatenate results. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Input data to be transformed. Returns ------- X_t : array-like or sparse matrix, shape (n_samples, sum_n_components) hstack of results of transformers. sum_n_components is the sum of n_components (output dimension) over transformers. 
""" result = Parallel(n_jobs=self.n_jobs)( delayed(_fit_transform_one)(trans, name, X, y, self.transformer_weights, **fit_params) for name, trans in self.transformer_list) Xs, transformers = zip(*result) self._update_transformer_list(transformers) if any(sparse.issparse(f) for f in Xs): Xs = sparse.hstack(Xs).tocsr() else: Xs = np.hstack(Xs) return Xs def transform(self, X): """Transform X separately by each transformer, concatenate results. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Input data to be transformed. Returns ------- X_t : array-like or sparse matrix, shape (n_samples, sum_n_components) hstack of results of transformers. sum_n_components is the sum of n_components (output dimension) over transformers. """ Xs = Parallel(n_jobs=self.n_jobs)( delayed(_transform_one)(trans, name, X, self.transformer_weights) for name, trans in self.transformer_list) if any(sparse.issparse(f) for f in Xs): Xs = sparse.hstack(Xs).tocsr() else: Xs = np.hstack(Xs) return Xs def get_params(self, deep=True): if not deep: return super(FeatureUnion, self).get_params(deep=False) else: out = dict(self.transformer_list) for name, trans in self.transformer_list: for key, value in iteritems(trans.get_params(deep=True)): out['%s__%s' % (name, key)] = value out.update(super(FeatureUnion, self).get_params(deep=False)) return out def _update_transformer_list(self, transformers): self.transformer_list[:] = [ (name, new) for ((name, old), new) in zip(self.transformer_list, transformers) ] # XXX it would be nice to have a keyword-only n_jobs argument to this function, # but that's not allowed in Python 2.x. def make_union(*transformers): """Construct a FeatureUnion from the given transformers. This is a shorthand for the FeatureUnion constructor; it does not require, and does not permit, naming the transformers. Instead, they will be given names automatically based on their types. It also does not allow weighting. 
Examples -------- >>> from sklearn.decomposition import PCA, TruncatedSVD >>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE FeatureUnion(n_jobs=1, transformer_list=[('pca', PCA(copy=True, n_components=None, whiten=False)), ('truncatedsvd', TruncatedSVD(algorithm='randomized', n_components=2, n_iter=5, random_state=None, tol=0.0))], transformer_weights=None) Returns ------- f : FeatureUnion """ return FeatureUnion(_name_estimators(transformers))
bsd-3-clause
mojoboss/scikit-learn
examples/ensemble/plot_gradient_boosting_quantile.py
392
2114
""" ===================================================== Prediction Intervals for Gradient Boosting Regression ===================================================== This example shows how quantile regression can be used to create prediction intervals. """ import numpy as np import matplotlib.pyplot as plt from sklearn.ensemble import GradientBoostingRegressor np.random.seed(1) def f(x): """The function to predict.""" return x * np.sin(x) #---------------------------------------------------------------------- # First the noiseless case X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T X = X.astype(np.float32) # Observations y = f(X).ravel() dy = 1.5 + 1.0 * np.random.random(y.shape) noise = np.random.normal(0, dy) y += noise y = y.astype(np.float32) # Mesh the input space for evaluations of the real function, the prediction and # its MSE xx = np.atleast_2d(np.linspace(0, 10, 1000)).T xx = xx.astype(np.float32) alpha = 0.95 clf = GradientBoostingRegressor(loss='quantile', alpha=alpha, n_estimators=250, max_depth=3, learning_rate=.1, min_samples_leaf=9, min_samples_split=9) clf.fit(X, y) # Make the prediction on the meshed x-axis y_upper = clf.predict(xx) clf.set_params(alpha=1.0 - alpha) clf.fit(X, y) # Make the prediction on the meshed x-axis y_lower = clf.predict(xx) clf.set_params(loss='ls') clf.fit(X, y) # Make the prediction on the meshed x-axis y_pred = clf.predict(xx) # Plot the function, the prediction and the 90% confidence interval based on # the MSE fig = plt.figure() plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$') plt.plot(X, y, 'b.', markersize=10, label=u'Observations') plt.plot(xx, y_pred, 'r-', label=u'Prediction') plt.plot(xx, y_upper, 'k-') plt.plot(xx, y_lower, 'k-') plt.fill(np.concatenate([xx, xx[::-1]]), np.concatenate([y_upper, y_lower[::-1]]), alpha=.5, fc='b', ec='None', label='90% prediction interval') plt.xlabel('$x$') plt.ylabel('$f(x)$') plt.ylim(-10, 20) plt.legend(loc='upper left') plt.show()
bsd-3-clause
JPFrancoia/scikit-learn
sklearn/metrics/tests/test_pairwise.py
13
26241
import numpy as np from numpy import linalg from scipy.sparse import dok_matrix, csr_matrix, issparse from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regexp from sklearn.utils.testing import assert_true from sklearn.utils.testing import ignore_warnings from sklearn.externals.six import iteritems from sklearn.metrics.pairwise import euclidean_distances from sklearn.metrics.pairwise import manhattan_distances from sklearn.metrics.pairwise import linear_kernel from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel from sklearn.metrics.pairwise import polynomial_kernel from sklearn.metrics.pairwise import rbf_kernel from sklearn.metrics.pairwise import laplacian_kernel from sklearn.metrics.pairwise import sigmoid_kernel from sklearn.metrics.pairwise import cosine_similarity from sklearn.metrics.pairwise import cosine_distances from sklearn.metrics.pairwise import pairwise_distances from sklearn.metrics.pairwise import pairwise_distances_argmin_min from sklearn.metrics.pairwise import pairwise_distances_argmin from sklearn.metrics.pairwise import pairwise_kernels from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS from sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS from sklearn.metrics.pairwise import PAIRED_DISTANCES from sklearn.metrics.pairwise import check_pairwise_arrays from sklearn.metrics.pairwise import check_paired_arrays from sklearn.metrics.pairwise import paired_distances from sklearn.metrics.pairwise import paired_euclidean_distances from sklearn.metrics.pairwise import 
paired_manhattan_distances from sklearn.preprocessing import normalize from sklearn.exceptions import DataConversionWarning def test_pairwise_distances(): # Test the pairwise_distance helper function. rng = np.random.RandomState(0) # Euclidean distance should be equivalent to calling the function. X = rng.random_sample((5, 4)) S = pairwise_distances(X, metric="euclidean") S2 = euclidean_distances(X) assert_array_almost_equal(S, S2) # Euclidean distance, with Y != X. Y = rng.random_sample((2, 4)) S = pairwise_distances(X, Y, metric="euclidean") S2 = euclidean_distances(X, Y) assert_array_almost_equal(S, S2) # Test with tuples as X and Y X_tuples = tuple([tuple([v for v in row]) for row in X]) Y_tuples = tuple([tuple([v for v in row]) for row in Y]) S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean") assert_array_almost_equal(S, S2) # "cityblock" uses scikit-learn metric, cityblock (function) is # scipy.spatial. S = pairwise_distances(X, metric="cityblock") S2 = pairwise_distances(X, metric=cityblock) assert_equal(S.shape[0], S.shape[1]) assert_equal(S.shape[0], X.shape[0]) assert_array_almost_equal(S, S2) # The manhattan metric should be equivalent to cityblock. 
S = pairwise_distances(X, Y, metric="manhattan") S2 = pairwise_distances(X, Y, metric=cityblock) assert_equal(S.shape[0], X.shape[0]) assert_equal(S.shape[1], Y.shape[0]) assert_array_almost_equal(S, S2) # Low-level function for manhattan can divide in blocks to avoid # using too much memory during the broadcasting S3 = manhattan_distances(X, Y, size_threshold=10) assert_array_almost_equal(S, S3) # Test cosine as a string metric versus cosine callable # The string "cosine" uses sklearn.metric, # while the function cosine is scipy.spatial S = pairwise_distances(X, Y, metric="cosine") S2 = pairwise_distances(X, Y, metric=cosine) assert_equal(S.shape[0], X.shape[0]) assert_equal(S.shape[1], Y.shape[0]) assert_array_almost_equal(S, S2) # Test with sparse X and Y, # currently only supported for Euclidean, L1 and cosine. X_sparse = csr_matrix(X) Y_sparse = csr_matrix(Y) S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean") S2 = euclidean_distances(X_sparse, Y_sparse) assert_array_almost_equal(S, S2) S = pairwise_distances(X_sparse, Y_sparse, metric="cosine") S2 = cosine_distances(X_sparse, Y_sparse) assert_array_almost_equal(S, S2) S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan") S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo()) assert_array_almost_equal(S, S2) S2 = manhattan_distances(X, Y) assert_array_almost_equal(S, S2) # Test with scipy.spatial.distance metric, with a kwd kwds = {"p": 2.0} S = pairwise_distances(X, Y, metric="minkowski", **kwds) S2 = pairwise_distances(X, Y, metric=minkowski, **kwds) assert_array_almost_equal(S, S2) # same with Y = None kwds = {"p": 2.0} S = pairwise_distances(X, metric="minkowski", **kwds) S2 = pairwise_distances(X, metric=minkowski, **kwds) assert_array_almost_equal(S, S2) # Test that scipy distance metrics throw an error if sparse matrix given assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski") assert_raises(TypeError, pairwise_distances, X, Y_sparse, 
metric="minkowski") # Test that a value error is raised if the metric is unknown assert_raises(ValueError, pairwise_distances, X, Y, metric="blah") # ignore conversion to boolean in pairwise_distances @ignore_warnings(category=DataConversionWarning) def test_pairwise_boolean_distance(): # test that we convert to boolean arrays for boolean distances rng = np.random.RandomState(0) X = rng.randn(5, 4) Y = X.copy() Y[0, 0] = 1 - Y[0, 0] for metric in PAIRWISE_BOOLEAN_FUNCTIONS: for Z in [Y, None]: res = pairwise_distances(X, Z, metric=metric) res[np.isnan(res)] = 0 assert_true(np.sum(res != 0) == 0) def test_pairwise_precomputed(): for func in [pairwise_distances, pairwise_kernels]: # Test correct shape assert_raises_regexp(ValueError, '.* shape .*', func, np.zeros((5, 3)), metric='precomputed') # with two args assert_raises_regexp(ValueError, '.* shape .*', func, np.zeros((5, 3)), np.zeros((4, 4)), metric='precomputed') # even if shape[1] agrees (although thus second arg is spurious) assert_raises_regexp(ValueError, '.* shape .*', func, np.zeros((5, 3)), np.zeros((4, 3)), metric='precomputed') # Test not copied (if appropriate dtype) S = np.zeros((5, 5)) S2 = func(S, metric="precomputed") assert_true(S is S2) # with two args S = np.zeros((5, 3)) S2 = func(S, np.zeros((3, 3)), metric="precomputed") assert_true(S is S2) # Test always returns float dtype S = func(np.array([[1]], dtype='int'), metric='precomputed') assert_equal('f', S.dtype.kind) # Test converts list to array-like S = func([[1.]], metric='precomputed') assert_true(isinstance(S, np.ndarray)) def check_pairwise_parallel(func, metric, kwds): rng = np.random.RandomState(0) for make_data in (np.array, csr_matrix): X = make_data(rng.random_sample((5, 4))) Y = make_data(rng.random_sample((3, 4))) try: S = func(X, metric=metric, n_jobs=1, **kwds) except (TypeError, ValueError) as exc: # Not all metrics support sparse input # ValueError may be triggered by bad callable if make_data is csr_matrix: 
assert_raises(type(exc), func, X, metric=metric, n_jobs=2, **kwds) continue else: raise S2 = func(X, metric=metric, n_jobs=2, **kwds) assert_array_almost_equal(S, S2) S = func(X, Y, metric=metric, n_jobs=1, **kwds) S2 = func(X, Y, metric=metric, n_jobs=2, **kwds) assert_array_almost_equal(S, S2) def test_pairwise_parallel(): wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1} metrics = [(pairwise_distances, 'euclidean', {}), (pairwise_distances, wminkowski, wminkowski_kwds), (pairwise_distances, 'wminkowski', wminkowski_kwds), (pairwise_kernels, 'polynomial', {'degree': 1}), (pairwise_kernels, callable_rbf_kernel, {'gamma': .1}), ] for func, metric, kwds in metrics: yield check_pairwise_parallel, func, metric, kwds def test_pairwise_callable_nonstrict_metric(): # paired_distances should allow callable metric where metric(x, x) != 0 # Knowing that the callable is a strict metric would allow the diagonal to # be left uncalculated and set to 0. assert_equal(pairwise_distances([[1.]], metric=lambda x, y: 5)[0, 0], 5) def callable_rbf_kernel(x, y, **kwds): # Callable version of pairwise.rbf_kernel. K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds) return K def test_pairwise_kernels(): # Test the pairwise_kernels helper function. rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) Y = rng.random_sample((2, 4)) # Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS. 
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear", "chi2", "additive_chi2"] for metric in test_metrics: function = PAIRWISE_KERNEL_FUNCTIONS[metric] # Test with Y=None K1 = pairwise_kernels(X, metric=metric) K2 = function(X) assert_array_almost_equal(K1, K2) # Test with Y=Y K1 = pairwise_kernels(X, Y=Y, metric=metric) K2 = function(X, Y=Y) assert_array_almost_equal(K1, K2) # Test with tuples as X and Y X_tuples = tuple([tuple([v for v in row]) for row in X]) Y_tuples = tuple([tuple([v for v in row]) for row in Y]) K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric) assert_array_almost_equal(K1, K2) # Test with sparse X and Y X_sparse = csr_matrix(X) Y_sparse = csr_matrix(Y) if metric in ["chi2", "additive_chi2"]: # these don't support sparse matrices yet assert_raises(ValueError, pairwise_kernels, X_sparse, Y=Y_sparse, metric=metric) continue K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric) assert_array_almost_equal(K1, K2) # Test with a callable function, with given keywords. metric = callable_rbf_kernel kwds = {'gamma': 0.1} K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds) K2 = rbf_kernel(X, Y=Y, **kwds) assert_array_almost_equal(K1, K2) # callable function, X=Y K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds) K2 = rbf_kernel(X, Y=X, **kwds) assert_array_almost_equal(K1, K2) def test_pairwise_kernels_filter_param(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) Y = rng.random_sample((2, 4)) K = rbf_kernel(X, Y, gamma=0.1) params = {"gamma": 0.1, "blabla": ":)"} K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params) assert_array_almost_equal(K, K2) assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params) def test_paired_distances(): # Test the pairwise_distance helper function. rng = np.random.RandomState(0) # Euclidean distance should be equivalent to calling the function. X = rng.random_sample((5, 4)) # Euclidean distance, with Y != X. 
Y = rng.random_sample((5, 4)) for metric, func in iteritems(PAIRED_DISTANCES): S = paired_distances(X, Y, metric=metric) S2 = func(X, Y) assert_array_almost_equal(S, S2) S3 = func(csr_matrix(X), csr_matrix(Y)) assert_array_almost_equal(S, S3) if metric in PAIRWISE_DISTANCE_FUNCTIONS: # Check the pairwise_distances implementation # gives the same value distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y) distances = np.diag(distances) assert_array_almost_equal(distances, S) # Check the callable implementation S = paired_distances(X, Y, metric='manhattan') S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0)) assert_array_almost_equal(S, S2) # Test that a value error is raised when the lengths of X and Y should not # differ Y = rng.random_sample((3, 4)) assert_raises(ValueError, paired_distances, X, Y) def test_pairwise_distances_argmin_min(): # Check pairwise minimum distances computation for any metric X = [[0], [1]] Y = [[-1], [2]] Xsp = dok_matrix(X) Ysp = csr_matrix(Y, dtype=np.float32) # euclidean metric D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean") D2 = pairwise_distances_argmin(X, Y, metric="euclidean") assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(D2, [0, 1]) assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(E, [1., 1.]) # sparse matrix case Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean") assert_array_equal(Dsp, D) assert_array_equal(Esp, E) # We don't want np.matrix here assert_equal(type(Dsp), np.ndarray) assert_equal(type(Esp), np.ndarray) # Non-euclidean scikit-learn metric D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan") D2 = pairwise_distances_argmin(X, Y, metric="manhattan") assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(D2, [0, 1]) assert_array_almost_equal(E, [1., 1.]) D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan") D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan") assert_array_almost_equal(D, [0, 
1]) assert_array_almost_equal(E, [1., 1.]) # Non-euclidean Scipy distance (callable) D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski, metric_kwargs={"p": 2}) assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(E, [1., 1.]) # Non-euclidean Scipy distance (string) D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski", metric_kwargs={"p": 2}) assert_array_almost_equal(D, [0, 1]) assert_array_almost_equal(E, [1., 1.]) # Compare with naive implementation rng = np.random.RandomState(0) X = rng.randn(97, 149) Y = rng.randn(111, 149) dist = pairwise_distances(X, Y, metric="manhattan") dist_orig_ind = dist.argmin(axis=0) dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))] dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min( X, Y, axis=0, metric="manhattan", batch_size=50) np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7) np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7) def test_euclidean_distances(): # Check the pairwise Euclidean distances computation X = [[0]] Y = [[1], [2]] D = euclidean_distances(X, Y) assert_array_almost_equal(D, [[1., 2.]]) X = csr_matrix(X) Y = csr_matrix(Y) D = euclidean_distances(X, Y) assert_array_almost_equal(D, [[1., 2.]]) rng = np.random.RandomState(0) X = rng.random_sample((10, 4)) Y = rng.random_sample((20, 4)) X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1) Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1) # check that we still get the right answers with {X,Y}_norm_squared D1 = euclidean_distances(X, Y) D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq) D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq) D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq, Y_norm_squared=Y_norm_sq) assert_array_almost_equal(D2, D1) assert_array_almost_equal(D3, D1) assert_array_almost_equal(D4, D1) # check we get the wrong answer with wrong {X,Y}_norm_squared X_norm_sq *= 0.5 Y_norm_sq *= 0.5 wrong_D = euclidean_distances(X, Y, 
X_norm_squared=np.zeros_like(X_norm_sq), Y_norm_squared=np.zeros_like(Y_norm_sq)) assert_greater(np.max(np.abs(wrong_D - D1)), .01) # Paired distances def test_paired_euclidean_distances(): # Check the paired Euclidean distances computation X = [[0], [0]] Y = [[1], [2]] D = paired_euclidean_distances(X, Y) assert_array_almost_equal(D, [1., 2.]) def test_paired_manhattan_distances(): # Check the paired manhattan distances computation X = [[0], [0]] Y = [[1], [2]] D = paired_manhattan_distances(X, Y) assert_array_almost_equal(D, [1., 2.]) def test_chi_square_kernel(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) Y = rng.random_sample((10, 4)) K_add = additive_chi2_kernel(X, Y) gamma = 0.1 K = chi2_kernel(X, Y, gamma=gamma) assert_equal(K.dtype, np.float) for i, x in enumerate(X): for j, y in enumerate(Y): chi2 = -np.sum((x - y) ** 2 / (x + y)) chi2_exp = np.exp(gamma * chi2) assert_almost_equal(K_add[i, j], chi2) assert_almost_equal(K[i, j], chi2_exp) # check diagonal is ones for data with itself K = chi2_kernel(Y) assert_array_equal(np.diag(K), 1) # check off-diagonal is < 1 but > 0: assert_true(np.all(K > 0)) assert_true(np.all(K - np.diag(np.diag(K)) < 1)) # check that float32 is preserved X = rng.random_sample((5, 4)).astype(np.float32) Y = rng.random_sample((10, 4)).astype(np.float32) K = chi2_kernel(X, Y) assert_equal(K.dtype, np.float32) # check integer type gets converted, # check that zeros are handled X = rng.random_sample((10, 4)).astype(np.int32) K = chi2_kernel(X, X) assert_true(np.isfinite(K).all()) assert_equal(K.dtype, np.float) # check that kernel of similar things is greater than dissimilar ones X = [[.3, .7], [1., 0]] Y = [[0, 1], [.9, .1]] K = chi2_kernel(X, Y) assert_greater(K[0, 0], K[0, 1]) assert_greater(K[1, 1], K[1, 0]) # test negative input assert_raises(ValueError, chi2_kernel, [[0, -1]]) assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]]) assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]]) # different 
n_features in X and Y assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]]) # sparse matrices assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y)) assert_raises(ValueError, additive_chi2_kernel, csr_matrix(X), csr_matrix(Y)) def test_kernel_symmetry(): # Valid kernels should be symmetric rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) for kernel in (linear_kernel, polynomial_kernel, rbf_kernel, laplacian_kernel, sigmoid_kernel, cosine_similarity): K = kernel(X, X) assert_array_almost_equal(K, K.T, 15) def test_kernel_sparse(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) X_sparse = csr_matrix(X) for kernel in (linear_kernel, polynomial_kernel, rbf_kernel, laplacian_kernel, sigmoid_kernel, cosine_similarity): K = kernel(X, X) K2 = kernel(X_sparse, X_sparse) assert_array_almost_equal(K, K2) def test_linear_kernel(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) K = linear_kernel(X, X) # the diagonal elements of a linear kernel are their squared norm assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X]) def test_rbf_kernel(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) K = rbf_kernel(X, X) # the diagonal elements of a rbf kernel are 1 assert_array_almost_equal(K.flat[::6], np.ones(5)) def test_laplacian_kernel(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) K = laplacian_kernel(X, X) # the diagonal elements of a laplacian kernel are 1 assert_array_almost_equal(np.diag(K), np.ones(5)) # off-diagonal elements are < 1 but > 0: assert_true(np.all(K > 0)) assert_true(np.all(K - np.diag(np.diag(K)) < 1)) def test_cosine_similarity_sparse_output(): # Test if cosine_similarity correctly produces sparse output. 
rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) Y = rng.random_sample((3, 4)) Xcsr = csr_matrix(X) Ycsr = csr_matrix(Y) K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False) assert_true(issparse(K1)) K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine") assert_array_almost_equal(K1.todense(), K2) def test_cosine_similarity(): # Test the cosine_similarity. rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) Y = rng.random_sample((3, 4)) Xcsr = csr_matrix(X) Ycsr = csr_matrix(Y) for X_, Y_ in ((X, None), (X, Y), (Xcsr, None), (Xcsr, Ycsr)): # Test that the cosine is kernel is equal to a linear kernel when data # has been previously normalized by L2-norm. K1 = pairwise_kernels(X_, Y=Y_, metric="cosine") X_ = normalize(X_) if Y_ is not None: Y_ = normalize(Y_) K2 = pairwise_kernels(X_, Y=Y_, metric="linear") assert_array_almost_equal(K1, K2) def test_check_dense_matrices(): # Ensure that pairwise array check works for dense matrices. # Check that if XB is None, XB is returned as reference to XA XA = np.resize(np.arange(40), (5, 8)) XA_checked, XB_checked = check_pairwise_arrays(XA, None) assert_true(XA_checked is XB_checked) assert_array_equal(XA, XA_checked) def test_check_XB_returned(): # Ensure that if XA and XB are given correctly, they return as equal. # Check that if XB is not None, it is returned equal. # Note that the second dimension of XB is the same as XA. XA = np.resize(np.arange(40), (5, 8)) XB = np.resize(np.arange(32), (4, 8)) XA_checked, XB_checked = check_pairwise_arrays(XA, XB) assert_array_equal(XA, XA_checked) assert_array_equal(XB, XB_checked) XB = np.resize(np.arange(40), (5, 8)) XA_checked, XB_checked = check_paired_arrays(XA, XB) assert_array_equal(XA, XA_checked) assert_array_equal(XB, XB_checked) def test_check_different_dimensions(): # Ensure an error is raised if the dimensions are different. 
XA = np.resize(np.arange(45), (5, 9)) XB = np.resize(np.arange(32), (4, 8)) assert_raises(ValueError, check_pairwise_arrays, XA, XB) XB = np.resize(np.arange(4 * 9), (4, 9)) assert_raises(ValueError, check_paired_arrays, XA, XB) def test_check_invalid_dimensions(): # Ensure an error is raised on 1D input arrays. # The modified tests are not 1D. In the old test, the array was internally # converted to 2D anyways XA = np.arange(45).reshape(9, 5) XB = np.arange(32).reshape(4, 8) assert_raises(ValueError, check_pairwise_arrays, XA, XB) XA = np.arange(45).reshape(9, 5) XB = np.arange(32).reshape(4, 8) assert_raises(ValueError, check_pairwise_arrays, XA, XB) def test_check_sparse_arrays(): # Ensures that checks return valid sparse matrices. rng = np.random.RandomState(0) XA = rng.random_sample((5, 4)) XA_sparse = csr_matrix(XA) XB = rng.random_sample((5, 4)) XB_sparse = csr_matrix(XB) XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse) # compare their difference because testing csr matrices for # equality with '==' does not work as expected. assert_true(issparse(XA_checked)) assert_equal(abs(XA_sparse - XA_checked).sum(), 0) assert_true(issparse(XB_checked)) assert_equal(abs(XB_sparse - XB_checked).sum(), 0) XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse) assert_true(issparse(XA_checked)) assert_equal(abs(XA_sparse - XA_checked).sum(), 0) assert_true(issparse(XA_2_checked)) assert_equal(abs(XA_2_checked - XA_checked).sum(), 0) def tuplify(X): # Turns a numpy matrix (any n-dimensional array) into tuples. s = X.shape if len(s) > 1: # Tuplify each sub-array in the input. return tuple(tuplify(row) for row in X) else: # Single dimension input, just return tuple of contents. return tuple(r for r in X) def test_check_tuple_input(): # Ensures that checks return valid tuples. 
rng = np.random.RandomState(0) XA = rng.random_sample((5, 4)) XA_tuples = tuplify(XA) XB = rng.random_sample((5, 4)) XB_tuples = tuplify(XB) XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples) assert_array_equal(XA_tuples, XA_checked) assert_array_equal(XB_tuples, XB_checked) def test_check_preserve_type(): # Ensures that type float32 is preserved. XA = np.resize(np.arange(40), (5, 8)).astype(np.float32) XB = np.resize(np.arange(40), (5, 8)).astype(np.float32) XA_checked, XB_checked = check_pairwise_arrays(XA, None) assert_equal(XA_checked.dtype, np.float32) # both float32 XA_checked, XB_checked = check_pairwise_arrays(XA, XB) assert_equal(XA_checked.dtype, np.float32) assert_equal(XB_checked.dtype, np.float32) # mismatched A XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float), XB) assert_equal(XA_checked.dtype, np.float) assert_equal(XB_checked.dtype, np.float) # mismatched B XA_checked, XB_checked = check_pairwise_arrays(XA, XB.astype(np.float)) assert_equal(XA_checked.dtype, np.float) assert_equal(XB_checked.dtype, np.float)
bsd-3-clause
avmarchenko/exatomic
exatomic/qchem/output.py
2
2088
# -*- coding: utf-8 -*- # Copyright (c) 2015-2018, Exa Analytics Development Team # Distributed under the terms of the Apache License 2.0 """ Q-Chem Ouput Editor ####################### Editor classes for simple Q-Chem output files """ import six import numpy as np import pandas as pd from exa import TypedMeta from exa.util.units import Length#, Energy from .editor import Editor from exatomic.base import sym2z from exatomic.core.atom import Atom, Frequency from exatomic.core.frame import Frame#, compute_frame_from_atom from exatomic.core.basis import (BasisSet, BasisSetOrder, Overlap)#, deduplicate_basis_sets) from exatomic.core.orbital import Orbital, MOMatrix, Excitation #from exatomic.algorithms.basis import lmap, lorder class QMeta(TypedMeta): atom = Atom basis_set = BasisSet orbital = Orbital momatrix = MOMatrix basis_set_order = BasisSetOrder frame = Frame excitation = Excitation frequency = Frequency overlap = Overlap multipole = pd.DataFrame class Output(six.with_metaclass(QMeta, Editor)): def parse_atom(self): # Atom flags _regeom01 = "Standard Nuclear Orientation (Angstroms)" _regeom02 = "Coordinates (Angstroms)" # Find Data found = self.find(_regeom01, keys_only=True) starts = np.array(found) + 3 stop = starts[0] while '-------' not in self[stop]: stop += 1 stops = starts + (stop - starts[0]) dfs = [] for i, (start, stop) in enumerate(zip(starts, stops)): atom = self.pandas_dataframe(start, stop, 5) atom['frame'] = i dfs.append(atom) atom = pd.concat(dfs).reset_index(drop=True) atom.columns = ['set', 'symbol', 'x', 'y', 'z', 'frame'] atom['set'] -= 1 atom['x'] *= Length['Angstrom', 'au'] atom['y'] *= Length['Angstrom', 'au'] atom['z'] *= Length['Angstrom', 'au'] atom['Z'] = atom['symbol'].map(sym2z) self.atom = atom def __init__(self, *args, **kwargs): super(Output, self).__init__(*args,**kwargs)
apache-2.0
linan7788626/pandas_tutorial
exercises/pandas_wind_statistics/pandas_wind_statistics_solution.py
4
5511
# Copyright 2015 Enthought, Inc. All Rights Reserved """ Wind Statistics ---------------- This exercise is an alternative version of the Numpy exercise but this time we will be using pandas for all tasks. The data have been modified to contain some missing values, identified by NaN. Using pandas should make this exercise easier, in particular for the bonus question. Of course, you should be able to perform all of these operations without using a for loop or other looping construct. Topics: Pandas, time-series 1. The data in 'wind.data' has the following format:: Yr Mo Dy RPT VAL ROS KIL SHA BIR DUB CLA MUL CLO BEL MAL 61 1 1 15.04 14.96 13.17 9.29 NaN 9.87 13.67 10.25 10.83 12.58 18.50 15.04 61 1 2 14.71 NaN 10.83 6.50 12.62 7.67 11.50 10.04 9.79 9.67 17.54 13.83 61 1 3 18.50 16.88 12.33 10.13 11.17 6.17 11.25 NaN 8.50 7.67 12.75 12.71 The first three columns are year, month and day. The remaining 12 columns are average windspeeds in knots at 12 locations in Ireland on that day. Use the 'read_table' function from pandas to read the data into a DataFrame. 2. Replace the first 3 columns by a proper datetime index. 3. Compute how many values are missing for each location over the entire record. They should be ignored in all calculations below. Compute how many non-missing values there are in total. 4. Calculate the mean windspeeds of the windspeeds over all the locations and all the times (a single number for the entire dataset). 5. Calculate the min, max and mean windspeeds and standard deviations of the windspeeds at each location over all the days (a different set of numbers for each location) 6. Calculate the min, max and mean windspeed and standard deviations of the windspeeds across all the locations at each day (a different set of numbers for each day) 7. Find the average windspeed in January for each location. Treat January 1961 and January 1962 both as January. 8. Downsample the record to a yearly, monthly and weekly frequency for each location. 9. 
Plot the time series and a box plot of the monthly data for each location. Bonus ~~~~~ 10. Calculate the mean windspeed for each month in the dataset. Treat January 1961 and January 1962 as *different* months. 11. Calculate the min, max and mean windspeeds and standard deviations of the windspeeds across all locations for each week (assume that the first week starts on January 1 1961) for the first 52 weeks. Notes ~~~~~ This solution has been tested with Pandas version 0.14.1. The original data from which these were derived were analyzed in detail in the following article: Haslett, J. and Raftery, A. E. (1989). Space-time Modelling with Long-memory Dependence: Assessing Ireland's Wind Power Resource (with Discussion). Applied Statistics 38, 1-50. """ from matplotlib import pyplot as plt from pandas import read_table, set_option, Period # Part 1 & 2 - read the file and make dates def custom_date_parser(year_str, month_str, day_str): """ Return a pandas 'Period' representing the day A Period represents the whole day, while a datetime or a timestamp represents a particular moment in time. Period is better for this data, particularly when we resample. """ full_date_str = '19{0}-{1}-{2}'.format(year_str, month_str, day_str) return Period(full_date_str, freq='D') wind_data = read_table('wind.data', sep='\s+', index_col=0, parse_dates=[[0, 1, 2]], date_parser=custom_date_parser) # Non-missing values at each location print "3. Number of non-missing values for each location:" print wind_data.count() non_null_count = wind_data.count().sum() print "There are {0} non-missing values in the entire dataset".format( non_null_count) print print '4. Mean over all values' total = wind_data.sum().sum() print ' mean:', total/non_null_count print print '5. Statistics over all days at each location' print ' min:', wind_data.min() print ' max:', wind_data.max() print ' mean:', wind_data.mean() print ' standard deviation:', wind_data.std() print print '6. 
Statistics over all locations for each day' print ' min:', wind_data.min(axis=1) print ' max:', wind_data.max(axis=1) print ' mean:', wind_data.mean(axis=1) print ' standard deviation:', wind_data.std(axis=1) print # Part 7 - January data january_data = wind_data[wind_data.index.month == 1] print "January windspeeds:" print january_data.mean() print # Downsample the data to yearly, monthly and weekly data print "8. Downsampled data:" print "Yearly:", wind_data.resample('A', how='mean') print "Monthly:", wind_data.resample('M', how='mean') print "Weekly:", wind_data.resample('W', how='mean') print # 9. Plots monthly_data = wind_data.resample('M', how='mean') monthly_data.plot() # Force this plot to happen in a separate figure plt.figure() monthly_data.boxplot() plt.show() # 10. This is just another way to group records: unique_monthly_grouped = wind_data.groupby(lambda d: (d.month, d.year)) print '10. Mean wind speed for each month in each location' print unique_monthly_grouped.mean() print # 11. Weekly stats over the first year first_year = wind_data[wind_data.index.year == 1961] stats = wind_data.resample('W', how='mean').apply(lambda x: x.describe()) set_option('display.max_rows', 999) set_option('display.max_columns', 15) set_option('display.notebook_repr_html', False) print stats
mit
boomsbloom/dtm-fmri
DTM/for_gensim/lib/python2.7/site-packages/pandas/tests/series/test_sorting.py
7
4805
# coding=utf-8 import numpy as np import random from pandas import (DataFrame, Series, MultiIndex) from pandas.util.testing import (assert_series_equal, assert_almost_equal) import pandas.util.testing as tm from .common import TestData class TestSeriesSorting(TestData, tm.TestCase): _multiprocess_can_split_ = True def test_sort(self): ts = self.ts.copy() # 9816 deprecated with tm.assert_produces_warning(FutureWarning): ts.sort() # sorts inplace self.assert_series_equal(ts, self.ts.sort_values()) def test_order(self): # 9816 deprecated with tm.assert_produces_warning(FutureWarning): result = self.ts.order() self.assert_series_equal(result, self.ts.sort_values()) def test_sort_values(self): # check indexes are reordered corresponding with the values ser = Series([3, 2, 4, 1], ['A', 'B', 'C', 'D']) expected = Series([1, 2, 3, 4], ['D', 'B', 'A', 'C']) result = ser.sort_values() self.assert_series_equal(expected, result) ts = self.ts.copy() ts[:5] = np.NaN vals = ts.values result = ts.sort_values() self.assertTrue(np.isnan(result[-5:]).all()) self.assert_numpy_array_equal(result[:-5].values, np.sort(vals[5:])) # na_position result = ts.sort_values(na_position='first') self.assertTrue(np.isnan(result[:5]).all()) self.assert_numpy_array_equal(result[5:].values, np.sort(vals[5:])) # something object-type ser = Series(['A', 'B'], [1, 2]) # no failure ser.sort_values() # ascending=False ordered = ts.sort_values(ascending=False) expected = np.sort(ts.valid().values)[::-1] assert_almost_equal(expected, ordered.valid().values) ordered = ts.sort_values(ascending=False, na_position='first') assert_almost_equal(expected, ordered.valid().values) # inplace=True ts = self.ts.copy() ts.sort_values(ascending=False, inplace=True) self.assert_series_equal(ts, self.ts.sort_values(ascending=False)) self.assert_index_equal(ts.index, self.ts.sort_values(ascending=False).index) # GH 5856/5853 # Series.sort_values operating on a view df = DataFrame(np.random.randn(10, 4)) s = df.iloc[:, 0] 
def f(): s.sort_values(inplace=True) self.assertRaises(ValueError, f) def test_sort_index(self): rindex = list(self.ts.index) random.shuffle(rindex) random_order = self.ts.reindex(rindex) sorted_series = random_order.sort_index() assert_series_equal(sorted_series, self.ts) # descending sorted_series = random_order.sort_index(ascending=False) assert_series_equal(sorted_series, self.ts.reindex(self.ts.index[::-1])) # compat on level sorted_series = random_order.sort_index(level=0) assert_series_equal(sorted_series, self.ts) # compat on axis sorted_series = random_order.sort_index(axis=0) assert_series_equal(sorted_series, self.ts) self.assertRaises(ValueError, lambda: random_order.sort_values(axis=1)) sorted_series = random_order.sort_index(level=0, axis=0) assert_series_equal(sorted_series, self.ts) self.assertRaises(ValueError, lambda: random_order.sort_index(level=0, axis=1)) def test_sort_index_inplace(self): # For #11402 rindex = list(self.ts.index) random.shuffle(rindex) # descending random_order = self.ts.reindex(rindex) result = random_order.sort_index(ascending=False, inplace=True) self.assertIs(result, None, msg='sort_index() inplace should return None') assert_series_equal(random_order, self.ts.reindex(self.ts.index[::-1])) # ascending random_order = self.ts.reindex(rindex) result = random_order.sort_index(ascending=True, inplace=True) self.assertIs(result, None, msg='sort_index() inplace should return None') assert_series_equal(random_order, self.ts) def test_sort_index_multiindex(self): mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC')) s = Series([1, 2], mi) backwards = s.iloc[[1, 0]] # implicit sort_remaining=True res = s.sort_index(level='A') assert_series_equal(backwards, res) # GH13496 # rows share same level='A': sort has no effect without remaining lvls res = s.sort_index(level='A', sort_remaining=False) assert_series_equal(s, res)
mit
pletisan/python-data-viz-cookbook
3367OS_Code/3367OS_08_Code/ch08_rec06_textfont.py
1
1382
import matplotlib.pyplot as plt from matplotlib.font_manager import FontProperties # properties: families = ['serif', 'sans-serif', 'cursive', 'fantasy', 'monospace'] sizes = ['xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'] styles = ['normal', 'italic', 'oblique'] weights = ['light', 'normal', 'medium', 'semibold', 'bold', 'heavy', 'black'] variants = ['normal', 'small-caps'] fig = plt.figure(figsize=(9,17)) ax = fig.add_subplot(111) ax.set_xlim(0,9) ax.set_ylim(0,17) # VAR: FAMILY, SIZE y = 0 size = sizes[0] style = styles[0] weight = weights[0] variant = variants[0] for family in families: x = 0 y = y + .5 for size in sizes: y = y + .4 sample = family + " " + size ax.text(x, y, sample, family=family, size=size, style=style, weight=weight, variant=variant) # VAR: STYLE, WEIGHT y = 0 family = families[0] size = sizes[4] variant = variants[0] for weight in weights: x = 5 y = y + .5 for style in styles: y = y + .4 print x, y sample = weight + " " + style ax.text(x, y, sample, family=family, size=size, style=style, weight=weight, variant=variant) ax.set_axis_off() plt.show()
mit
Akshay0724/scikit-learn
examples/model_selection/plot_confusion_matrix.py
63
3231
""" ================ Confusion matrix ================ Example of confusion matrix usage to evaluate the quality of the output of a classifier on the iris data set. The diagonal elements represent the number of points for which the predicted label is equal to the true label, while off-diagonal elements are those that are mislabeled by the classifier. The higher the diagonal values of the confusion matrix the better, indicating many correct predictions. The figures show the confusion matrix with and without normalization by class support size (number of elements in each class). This kind of normalization can be interesting in case of class imbalance to have a more visual interpretation of which class is being misclassified. Here the results are not as good as they could be as our choice for the regularization parameter C was not the best. In real life applications this parameter is usually chosen using :ref:`grid_search`. """ print(__doc__) import itertools import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix # import some data to play with iris = datasets.load_iris() X = iris.data y = iris.target class_names = iris.target_names # Split the data into a training set and a test set X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) # Run classifier, using a model that is too regularized (C too low) to see # the impact on the results classifier = svm.SVC(kernel='linear', C=0.01) y_pred = classifier.fit(X_train, y_train).predict(X_test) def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. 
""" if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') # Compute confusion matrix cnf_matrix = confusion_matrix(y_test, y_pred) np.set_printoptions(precision=2) # Plot non-normalized confusion matrix plt.figure() plot_confusion_matrix(cnf_matrix, classes=class_names, title='Confusion matrix, without normalization') # Plot normalized confusion matrix plt.figure() plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True, title='Normalized confusion matrix') plt.show()
bsd-3-clause
probcomp/cgpm
tests/test_factor_analysis.py
1
8977
# -*- coding: utf-8 -*- # Copyright (c) 2015-2016 MIT Probabilistic Computing Project # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import json import matplotlib.cm import matplotlib.colors import matplotlib.pyplot as plt import numpy as np import pytest import sklearn.datasets import sklearn.decomposition from cgpm.factor.factor import FactorAnalysis from cgpm.utils import general as gu from cgpm.utils import mvnormal as multivariate_normal def scatter_classes(x, classes, ax=None): """Scatter the data points coloring by the classes.""" if ax is None: _fig, ax = plt.subplots() ax = plt.gca() if ax is None else ax cmap = matplotlib.cm.jet norm = matplotlib.colors.Normalize( vmin=np.min(classes), vmax=np.max(classes)) mapper = matplotlib.cm.ScalarMappable(cmap=cmap, norm=norm) colors = mapper.to_rgba(classes) ax.scatter(x[:,0], x[:,1], color=colors) return ax def fillna(X, p, rng): """Population proportion p of entries in X with nan values.""" X = np.copy(X) a, b = X.shape n_entries = a*b n_missing = int(a*b*p) i_missing_flat = rng.choice(range(n_entries), size=n_missing, replace=False) i_missing_cell = np.unravel_index(i_missing_flat, (a,b)) for i, j in zip(*i_missing_cell): X[i,j] = np.nan return X def test_invalid_initialize(): # No inputs. with pytest.raises(ValueError): FactorAnalysis([1,2,6], [0], L=1) # Missing L with pytest.raises(ValueError): FactorAnalysis([1,2,6], None, L=None) # Wrong dimensionality: no observables. 
with pytest.raises(ValueError): FactorAnalysis([1,2], None, L=2) # Wrong dimensionality: latent space too big. with pytest.raises(ValueError): FactorAnalysis([1,2,3], None, L=2) # Wrong dimensionality: latent space too small. with pytest.raises(ValueError): FactorAnalysis([1,2,3], None, L=0) # Wrong dimensionality: not enough outputs. with pytest.raises(ValueError): FactorAnalysis([2], None, L=1) # Duplicate outputs. with pytest.raises(ValueError): FactorAnalysis([2,2], None, L=1) def test_valid_initialize(): # One latent dimension. fa = FactorAnalysis([4,2], None, L=1) assert fa.D == 1 assert fa.L == 1 # Four latent dimensions. fa = FactorAnalysis(range(12), None, L=4) assert fa.D == 8 assert fa.L == 4 # Latent dimension equal to observable dimensions. fa = FactorAnalysis([4,2,1,0,6,7], None, L=3) assert fa.D == 3 assert fa.L == 3 def test_incorporate(): fa = FactorAnalysis([4,5,9,2], None, L=1) # Cannot incorporate a latent variable. with pytest.raises(ValueError): fa.incorporate(0, {4:1, 5:1, 9:1, 2:0}) # Cannot incorporate with inputs. with pytest.raises(ValueError): fa.incorporate(0, {4:1, 5:1, 9:1}, {2:0}) # Need a query variable. with pytest.raises(ValueError): fa.incorporate(0, {}) # Unknown variable. with pytest.raises(ValueError): fa.incorporate(0, {1:0}) # Incorporate a full row. fa.incorporate(0, {4:1, 5:1, 9:1}) assert fa.data[0] == [1,1,1] # Incorporate rows with missing data. fa.incorporate(2, {5:1, 9:1}) assert fa.data[2] == [np.nan,1,1] # And another one. fa.incorporate(4, {9:1}) assert fa.data[4] == [np.nan,np.nan,1] # And another one. 
fa.incorporate(6, {4:-1}) assert fa.data[6] == [-1,np.nan,np.nan] for rowid in [0, 2, 4, 6]: fa.unincorporate(rowid) assert fa.N == 0 assert fa.data == {} with pytest.raises(ValueError): fa.unincorporate(23) outputs = [ [5,8,10,12,-1], [5,8,10,12,-1,-2], [5,8,10,12,-1,-2,-3], [5,8,10,12,-1,-2,-3,-4]] L = [1,2,3,4] @pytest.mark.parametrize('outputs, L', zip(outputs, L)) def test_logpdf_simulate_rigorous(outputs, L): # Direct factor anaysis rng = gu.gen_rng(12) iris = sklearn.datasets.load_iris() fact = FactorAnalysis(outputs, None, L=L, rng=rng) for i, row in enumerate(iris.data): fact.incorporate(i, {q:v for q,v in zip(fact.outputs, row)}) fact.transition() for rowid, row in enumerate(iris.data): # TEST 1: Posterior mean of the latent variable. dot, inv = np.dot, np.linalg.inv L, D = fact.fa.components_.shape mu = fact.fa.mean_ assert mu.shape == (D,) Phi = np.diag(fact.fa.noise_variance_) assert Phi.shape == (D, D) W = fact.fa.components_.T assert W.shape == (D, L) I = np.eye(L) # Compute using Murphy explicitly. S1 = inv((I + dot(W.T, dot(inv(Phi), W)))) m1 = dot(S1, dot(W.T, dot(inv(Phi), (row-mu)))) # Compute using the Schur complement explicitly. S2 = I - dot(dot(W.T, inv(dot(W,W.T) + Phi)), W) m2 = dot(dot(W.T, inv(dot(W, W.T)+Phi)), (row-mu)) # Compute the mean using the factor analyzer. m3 = fact.fa.transform([row]) # Compute using the marginalize features of fact.fa. mG, covG = FactorAnalysis.mvn_condition( fact.mu, fact.cov, fact.reindex(outputs[-L:]), { fact.reindex([outputs[0]])[0]: row[0], fact.reindex([outputs[1]])[0]: row[1], fact.reindex([outputs[2]])[0]: row[2], fact.reindex([outputs[3]])[0]: row[3], }) assert np.allclose(m1, m2) assert np.allclose(m2, m3) assert np.allclose(m3, mG) assert np.allclose(S1, S2) assert np.allclose(S2, covG) # TEST 2: Log density of observation. # Compute using the factor analyzer. logp1 = fact.fa.score(np.asarray([row])) # Compute manually. 
logp2 = multivariate_normal.logpdf(row, mu, Phi + np.dot(W, W.T)) # Compute using fact with rowid=-1. logp3 = fact.logpdf(-1, {o: row[i] for i,o in enumerate(outputs[:-L])}) # Compute using fact with rowid=r. logp4 = fact.logpdf(rowid, {o: row[i] for i,o in enumerate(outputs[:-L])}) assert np.allclose(logp1, logp2) assert np.allclose(logp2, logp3) assert np.allclose(logp3, logp4) # TEST 3: Posterior simulation of latent variables. # For each sampled dimension check mean and variance match. def check_mean_covariance_match(samples): X = np.zeros((2000, len(outputs[-L:]))) # Build the matrix of samples. for i, s in enumerate(samples): X[i] = [s[o] for o in outputs[-L:]] # Check mean of each variable. assert np.allclose(np.mean(X, axis=0), mG, atol=.1) # Check the sample covariance. assert np.allclose(np.cov(X.T), covG, atol=.1) # Using a hypothetical rowid. samples_a = fact.simulate( rowid=-1, targets=outputs[-L:], constraints={ outputs[0]: row[0], outputs[1]: row[1], outputs[2]: row[2], outputs[3]: row[3]}, N=2000 ) check_mean_covariance_match(samples_a) # Using observed rowid. samples_b = fact.simulate( rowid=rowid, targets=outputs[-L:], N=2000 ) check_mean_covariance_match(samples_b) def test_serialize(): # Direct factor anaysis rng = gu.gen_rng(12) iris = sklearn.datasets.load_iris() fact = FactorAnalysis([1,2,3,4,-5,47], None, L=2, rng=rng) for i, row in enumerate(iris.data): fact.incorporate(i, {q:v for q,v in zip(fact.outputs, row)}) metadata = json.dumps(fact.to_metadata()) metadata = json.loads(metadata) modname = importlib.import_module(metadata['factory'][0]) builder = getattr(modname, metadata['factory'][1]) fact2 = builder.from_metadata(metadata, rng=rng) assert fact2.L == fact.L assert fact2.D == fact.D # Varible indexes. assert fact2.outputs == fact.outputs assert fact2.latents == fact.latents # Dataset. assert fact2.data == fact.data assert fact2.N == fact.N # Parameters of Factor Analysis. 
assert np.allclose(fact2.mux, fact.mux) assert np.allclose(fact2.Psi, fact.Psi) assert np.allclose(fact2.W, fact.W) # Parameters of joint distribution [x,z]. assert np.allclose(fact2.mu, fact.mu) assert np.allclose(fact2.cov, fact.cov)
apache-2.0
GenericMappingTools/gmt-python
examples/tutorials/plot.py
1
3481
""" Plotting data points -------------------- GMT shines when it comes to plotting data on a map. We can use some sample data that is packaged with GMT to try this out. PyGMT provides access to these datasets through the :mod:`pygmt.datasets` package. If you don't have the data files already, they are automatically downloaded and saved to a cache directory the first time you use them (usually ``~/.gmt/cache``). """ # sphinx_gallery_thumbnail_number = 3 import pygmt ######################################################################################## # For example, let's load the sample dataset of tsunami generating earthquakes around # Japan (:func:`pygmt.datasets.load_japan_quakes`). The data is loaded as a # :class:`pandas.DataFrame`. data = pygmt.datasets.load_japan_quakes() # Set the region for the plot to be slightly larger than the data bounds. region = [ data.longitude.min() - 1, data.longitude.max() + 1, data.latitude.min() - 1, data.latitude.max() + 1, ] print(region) print(data.head()) ######################################################################################## # We'll use the :meth:`pygmt.Figure.plot` method to plot circles on the earthquake epicenters. fig = pygmt.Figure() fig.basemap(region=region, projection="M15c", frame=True) fig.coast(land="black", water="skyblue") fig.plot(x=data.longitude, y=data.latitude, style="c0.3c", color="white", pen="black") fig.show() ######################################################################################## # We used the style ``c0.3c`` which means "circles of 0.3 centimeter size". The ``pen`` # parameter controls the outline of the symbols and the ``color`` parameter controls the fill. # # We can map the size of the circles to the earthquake magnitude by passing an array to # the ``size`` parameter. Because the magnitude is on a logarithmic scale, it helps to # show the differences by scaling the values using a power law. 
fig = pygmt.Figure() fig.basemap(region=region, projection="M15c", frame=True) fig.coast(land="black", water="skyblue") fig.plot( x=data.longitude, y=data.latitude, size=0.02 * (2 ** data.magnitude), style="cc", color="white", pen="black", ) fig.show() ######################################################################################## # Notice that we didn't include the size in the ``style`` parameter this time, just the # symbol ``c`` (circles) and the unit ``c`` (centimeter). So in this case, the size # will be interpreted as being in centimeters. # # We can also map the colors of the markers to the depths by passing an array to the # ``color`` parameter and providing a colormap name (``cmap``). We can even use the new # matplotlib colormap "viridis". Here, we first create a continuous colormap # ranging from the minimum depth to the maximum depth of the earthquakes # using :func:`pygmt.makecpt`, then set ``cmap=True`` in :func:`pygmt.Figure.plot` # to use the colormap. At the end of the plot, we also plot a colorbar showing # the colormap used in the plot. # fig = pygmt.Figure() fig.basemap(region=region, projection="M15c", frame=True) fig.coast(land="black", water="skyblue") pygmt.makecpt(cmap="viridis", series=[data.depth_km.min(), data.depth_km.max()]) fig.plot( x=data.longitude, y=data.latitude, size=0.02 * 2 ** data.magnitude, color=data.depth_km, cmap=True, style="cc", pen="black", ) fig.colorbar(frame='af+l"Depth (km)"') fig.show()
bsd-3-clause
sergiohr/NeuroDB
test/test9.py
1
1313
''' Created on Feb 25, 2015 @author: sergio ''' import numpy as np import ctypes import numpy.ctypeslib as npct import matplotlib.pyplot as plt import psycopg2 import time from math import e, pow from scipy.optimize import leastsq if __name__ == '__main__': # username = 'postgres' # password = 'postgres' # host = '192.168.2.2' # dbname = 'demo' # url = 'postgresql://%s:%s@%s/%s'%(username, password, host, dbname) # # dbconn = psycopg2.connect('dbname=%s user=%s password=%s host=%s'%(dbname, username, password, host)) # cursor = dbconn.cursor() fi = open("/home/sergio/Downloads/fig2_panelB.dat", "r") fo = open("/home/sergio/iibm/cluster_dp/examples.dat", "w") a = fi.readline() p = [] while(a != ""): b= a.split(' ') # query = "INSERT INTO spike (id_segment, id_recordingchannel,p1,p2) VALUES (%s,%s,%s,%s)"%(136, 81, float(b[0]), float(b[1])) # cursor.execute(query) p.append([float(b[0]),float(b[1])]) a = fi.readline() # dbconn.commit() for i in range(len(p)): for j in range(i+1, len(p)): d = ((p[i][0]-p[j][0])**2 + (p[i][1]-p[j][1])**2)**0.5 fo.write("%s %s %s\n"%(i+1, j+1, d)) fo.close() fi.close() pass
gpl-3.0
fredhusser/scikit-learn
benchmarks/bench_multilabel_metrics.py
276
7138
#!/usr/bin/env python """ A comparison of multilabel target formats and metrics over them """ from __future__ import division from __future__ import print_function from timeit import timeit from functools import partial import itertools import argparse import sys import matplotlib.pyplot as plt import scipy.sparse as sp import numpy as np from sklearn.datasets import make_multilabel_classification from sklearn.metrics import (f1_score, accuracy_score, hamming_loss, jaccard_similarity_score) from sklearn.utils.testing import ignore_warnings METRICS = { 'f1': partial(f1_score, average='micro'), 'f1-by-sample': partial(f1_score, average='samples'), 'accuracy': accuracy_score, 'hamming': hamming_loss, 'jaccard': jaccard_similarity_score, } FORMATS = { 'sequences': lambda y: [list(np.flatnonzero(s)) for s in y], 'dense': lambda y: y, 'csr': lambda y: sp.csr_matrix(y), 'csc': lambda y: sp.csc_matrix(y), } @ignore_warnings def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())), formats=tuple(v for k, v in sorted(FORMATS.items())), samples=1000, classes=4, density=.2, n_times=5): """Times metric calculations for a number of inputs Parameters ---------- metrics : array-like of callables (1d or 0d) The metric functions to time. formats : array-like of callables (1d or 0d) These may transform a dense indicator matrix into multilabel representation. samples : array-like of ints (1d or 0d) The number of samples to generate as input. classes : array-like of ints (1d or 0d) The number of classes in the input. density : array-like of ints (1d or 0d) The density of positive labels in the input. n_times : int Time calling the metric n_times times. Returns ------- array of floats shaped like (metrics, formats, samples, classes, density) Time in seconds. 
""" metrics = np.atleast_1d(metrics) samples = np.atleast_1d(samples) classes = np.atleast_1d(classes) density = np.atleast_1d(density) formats = np.atleast_1d(formats) out = np.zeros((len(metrics), len(formats), len(samples), len(classes), len(density)), dtype=float) it = itertools.product(samples, classes, density) for i, (s, c, d) in enumerate(it): _, y_true = make_multilabel_classification(n_samples=s, n_features=1, n_classes=c, n_labels=d * c, random_state=42) _, y_pred = make_multilabel_classification(n_samples=s, n_features=1, n_classes=c, n_labels=d * c, random_state=84) for j, f in enumerate(formats): f_true = f(y_true) f_pred = f(y_pred) for k, metric in enumerate(metrics): t = timeit(partial(metric, f_true, f_pred), number=n_times) out[k, j].flat[i] = t return out def _tabulate(results, metrics, formats): """Prints results by metric and format Uses the last ([-1]) value of other fields """ column_width = max(max(len(k) for k in formats) + 1, 8) first_width = max(len(k) for k in metrics) head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats)) row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats)) print(head_fmt.format('Metric', *formats, cw=column_width, fw=first_width)) for metric, row in zip(metrics, results[:, :, -1, -1, -1]): print(row_fmt.format(metric, *row, cw=column_width, fw=first_width)) def _plot(results, metrics, formats, title, x_ticks, x_label, format_markers=('x', '|', 'o', '+'), metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')): """ Plot the results by metric, format and some other variable given by x_label """ fig = plt.figure('scikit-learn multilabel metrics benchmarks') plt.title(title) ax = fig.add_subplot(111) for i, metric in enumerate(metrics): for j, format in enumerate(formats): ax.plot(x_ticks, results[i, j].flat, label='{}, {}'.format(metric, format), marker=format_markers[j], color=metric_colors[i % len(metric_colors)]) ax.set_xlabel(x_label) ax.set_ylabel('Time (s)') ax.legend() plt.show() if __name__ == "__main__": ap = 
argparse.ArgumentParser() ap.add_argument('metrics', nargs='*', default=sorted(METRICS), help='Specifies metrics to benchmark, defaults to all. ' 'Choices are: {}'.format(sorted(METRICS))) ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS), help='Specifies multilabel formats to benchmark ' '(defaults to all).') ap.add_argument('--samples', type=int, default=1000, help='The number of samples to generate') ap.add_argument('--classes', type=int, default=10, help='The number of classes') ap.add_argument('--density', type=float, default=.2, help='The average density of labels per sample') ap.add_argument('--plot', choices=['classes', 'density', 'samples'], default=None, help='Plot time with respect to this parameter varying ' 'up to the specified value') ap.add_argument('--n-steps', default=10, type=int, help='Plot this many points for each metric') ap.add_argument('--n-times', default=5, type=int, help="Time performance over n_times trials") args = ap.parse_args() if args.plot is not None: max_val = getattr(args, args.plot) if args.plot in ('classes', 'samples'): min_val = 2 else: min_val = 0 steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:] if args.plot in ('classes', 'samples'): steps = np.unique(np.round(steps).astype(int)) setattr(args, args.plot, steps) if args.metrics is None: args.metrics = sorted(METRICS) if args.formats is None: args.formats = sorted(FORMATS) results = benchmark([METRICS[k] for k in args.metrics], [FORMATS[k] for k in args.formats], args.samples, args.classes, args.density, args.n_times) _tabulate(results, args.metrics, args.formats) if args.plot is not None: print('Displaying plot', file=sys.stderr) title = ('Multilabel metrics with %s' % ', '.join('{0}={1}'.format(field, getattr(args, field)) for field in ['samples', 'classes', 'density'] if args.plot != field)) _plot(results, args.metrics, args.formats, title, steps, args.plot)
bsd-3-clause
ZENGXH/scikit-learn
sklearn/utils/tests/test_sparsefuncs.py
57
13752
import numpy as np import scipy.sparse as sp from scipy import linalg from numpy.testing import assert_array_almost_equal, assert_array_equal from sklearn.datasets import make_classification from sklearn.utils.sparsefuncs import (mean_variance_axis, inplace_column_scale, inplace_row_scale, inplace_swap_row, inplace_swap_column, min_max_axis, count_nonzero, csc_median_axis_0) from sklearn.utils.sparsefuncs_fast import assign_rows_csr from sklearn.utils.testing import assert_raises def test_mean_variance_axis0(): X, _ = make_classification(5, 4, random_state=0) # Sparsify the array a little bit X[0, 0] = 0 X[2, 1] = 0 X[4, 3] = 0 X_lil = sp.lil_matrix(X) X_lil[1, 0] = 0 X[1, 0] = 0 X_csr = sp.csr_matrix(X_lil) X_means, X_vars = mean_variance_axis(X_csr, axis=0) assert_array_almost_equal(X_means, np.mean(X, axis=0)) assert_array_almost_equal(X_vars, np.var(X, axis=0)) X_csc = sp.csc_matrix(X_lil) X_means, X_vars = mean_variance_axis(X_csc, axis=0) assert_array_almost_equal(X_means, np.mean(X, axis=0)) assert_array_almost_equal(X_vars, np.var(X, axis=0)) assert_raises(TypeError, mean_variance_axis, X_lil, axis=0) X = X.astype(np.float32) X_csr = X_csr.astype(np.float32) X_csc = X_csr.astype(np.float32) X_means, X_vars = mean_variance_axis(X_csr, axis=0) assert_array_almost_equal(X_means, np.mean(X, axis=0)) assert_array_almost_equal(X_vars, np.var(X, axis=0)) X_means, X_vars = mean_variance_axis(X_csc, axis=0) assert_array_almost_equal(X_means, np.mean(X, axis=0)) assert_array_almost_equal(X_vars, np.var(X, axis=0)) assert_raises(TypeError, mean_variance_axis, X_lil, axis=0) def test_mean_variance_illegal_axis(): X, _ = make_classification(5, 4, random_state=0) # Sparsify the array a little bit X[0, 0] = 0 X[2, 1] = 0 X[4, 3] = 0 X_csr = sp.csr_matrix(X) assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3) assert_raises(ValueError, mean_variance_axis, X_csr, axis=2) assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1) def test_mean_variance_axis1(): 
X, _ = make_classification(5, 4, random_state=0) # Sparsify the array a little bit X[0, 0] = 0 X[2, 1] = 0 X[4, 3] = 0 X_lil = sp.lil_matrix(X) X_lil[1, 0] = 0 X[1, 0] = 0 X_csr = sp.csr_matrix(X_lil) X_means, X_vars = mean_variance_axis(X_csr, axis=1) assert_array_almost_equal(X_means, np.mean(X, axis=1)) assert_array_almost_equal(X_vars, np.var(X, axis=1)) X_csc = sp.csc_matrix(X_lil) X_means, X_vars = mean_variance_axis(X_csc, axis=1) assert_array_almost_equal(X_means, np.mean(X, axis=1)) assert_array_almost_equal(X_vars, np.var(X, axis=1)) assert_raises(TypeError, mean_variance_axis, X_lil, axis=1) X = X.astype(np.float32) X_csr = X_csr.astype(np.float32) X_csc = X_csr.astype(np.float32) X_means, X_vars = mean_variance_axis(X_csr, axis=1) assert_array_almost_equal(X_means, np.mean(X, axis=1)) assert_array_almost_equal(X_vars, np.var(X, axis=1)) X_means, X_vars = mean_variance_axis(X_csc, axis=1) assert_array_almost_equal(X_means, np.mean(X, axis=1)) assert_array_almost_equal(X_vars, np.var(X, axis=1)) assert_raises(TypeError, mean_variance_axis, X_lil, axis=1) def test_densify_rows(): X = sp.csr_matrix([[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) rows = np.array([0, 2, 3], dtype=np.intp) out = np.ones((rows.shape[0], X.shape[1]), dtype=np.float64) assign_rows_csr(X, rows, np.arange(out.shape[0], dtype=np.intp)[::-1], out) assert_array_equal(out, X[rows].toarray()[::-1]) def test_inplace_column_scale(): rng = np.random.RandomState(0) X = sp.rand(100, 200, 0.05) Xr = X.tocsr() Xc = X.tocsc() XA = X.toarray() scale = rng.rand(200) XA *= scale inplace_column_scale(Xc, scale) inplace_column_scale(Xr, scale) assert_array_almost_equal(Xr.toarray(), Xc.toarray()) assert_array_almost_equal(XA, Xc.toarray()) assert_array_almost_equal(XA, Xr.toarray()) assert_raises(TypeError, inplace_column_scale, X.tolil(), scale) X = X.astype(np.float32) scale = scale.astype(np.float32) Xr = X.tocsr() Xc = X.tocsc() XA = X.toarray() XA *= scale 
inplace_column_scale(Xc, scale) inplace_column_scale(Xr, scale) assert_array_almost_equal(Xr.toarray(), Xc.toarray()) assert_array_almost_equal(XA, Xc.toarray()) assert_array_almost_equal(XA, Xr.toarray()) assert_raises(TypeError, inplace_column_scale, X.tolil(), scale) def test_inplace_row_scale(): rng = np.random.RandomState(0) X = sp.rand(100, 200, 0.05) Xr = X.tocsr() Xc = X.tocsc() XA = X.toarray() scale = rng.rand(100) XA *= scale.reshape(-1, 1) inplace_row_scale(Xc, scale) inplace_row_scale(Xr, scale) assert_array_almost_equal(Xr.toarray(), Xc.toarray()) assert_array_almost_equal(XA, Xc.toarray()) assert_array_almost_equal(XA, Xr.toarray()) assert_raises(TypeError, inplace_column_scale, X.tolil(), scale) X = X.astype(np.float32) scale = scale.astype(np.float32) Xr = X.tocsr() Xc = X.tocsc() XA = X.toarray() XA *= scale.reshape(-1, 1) inplace_row_scale(Xc, scale) inplace_row_scale(Xr, scale) assert_array_almost_equal(Xr.toarray(), Xc.toarray()) assert_array_almost_equal(XA, Xc.toarray()) assert_array_almost_equal(XA, Xr.toarray()) assert_raises(TypeError, inplace_column_scale, X.tolil(), scale) def test_inplace_swap_row(): X = np.array([[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) swap = linalg.get_blas_funcs(('swap',), (X,)) swap = swap[0] X[0], X[-1] = swap(X[0], X[-1]) inplace_swap_row(X_csr, 0, -1) inplace_swap_row(X_csc, 0, -1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) X[2], X[3] = swap(X[2], X[3]) inplace_swap_row(X_csr, 2, 3) inplace_swap_row(X_csc, 2, 3) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) assert_raises(TypeError, inplace_swap_row, X_csr.tolil()) X = np.array([[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float32) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) swap = 
linalg.get_blas_funcs(('swap',), (X,)) swap = swap[0] X[0], X[-1] = swap(X[0], X[-1]) inplace_swap_row(X_csr, 0, -1) inplace_swap_row(X_csc, 0, -1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) X[2], X[3] = swap(X[2], X[3]) inplace_swap_row(X_csr, 2, 3) inplace_swap_row(X_csc, 2, 3) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) assert_raises(TypeError, inplace_swap_row, X_csr.tolil()) def test_inplace_swap_column(): X = np.array([[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) swap = linalg.get_blas_funcs(('swap',), (X,)) swap = swap[0] X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1]) inplace_swap_column(X_csr, 0, -1) inplace_swap_column(X_csc, 0, -1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1]) inplace_swap_column(X_csr, 0, 1) inplace_swap_column(X_csc, 0, 1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) assert_raises(TypeError, inplace_swap_column, X_csr.tolil()) X = np.array([[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float32) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) swap = linalg.get_blas_funcs(('swap',), (X,)) swap = swap[0] X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1]) inplace_swap_column(X_csr, 0, -1) inplace_swap_column(X_csc, 0, -1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1]) inplace_swap_column(X_csr, 0, 1) inplace_swap_column(X_csc, 0, 1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, 
X_csr.toarray()) assert_raises(TypeError, inplace_swap_column, X_csr.tolil()) def test_min_max_axis0(): X = np.array([[0, 3, 0], [2, -1, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) mins_csr, maxs_csr = min_max_axis(X_csr, axis=0) assert_array_equal(mins_csr, X.min(axis=0)) assert_array_equal(maxs_csr, X.max(axis=0)) mins_csc, maxs_csc = min_max_axis(X_csc, axis=0) assert_array_equal(mins_csc, X.min(axis=0)) assert_array_equal(maxs_csc, X.max(axis=0)) X = X.astype(np.float32) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) mins_csr, maxs_csr = min_max_axis(X_csr, axis=0) assert_array_equal(mins_csr, X.min(axis=0)) assert_array_equal(maxs_csr, X.max(axis=0)) mins_csc, maxs_csc = min_max_axis(X_csc, axis=0) assert_array_equal(mins_csc, X.min(axis=0)) assert_array_equal(maxs_csc, X.max(axis=0)) def test_min_max_axis1(): X = np.array([[0, 3, 0], [2, -1, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) mins_csr, maxs_csr = min_max_axis(X_csr, axis=1) assert_array_equal(mins_csr, X.min(axis=1)) assert_array_equal(maxs_csr, X.max(axis=1)) mins_csc, maxs_csc = min_max_axis(X_csc, axis=1) assert_array_equal(mins_csc, X.min(axis=1)) assert_array_equal(maxs_csc, X.max(axis=1)) X = X.astype(np.float32) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) mins_csr, maxs_csr = min_max_axis(X_csr, axis=1) assert_array_equal(mins_csr, X.min(axis=1)) assert_array_equal(maxs_csr, X.max(axis=1)) mins_csc, maxs_csc = min_max_axis(X_csc, axis=1) assert_array_equal(mins_csc, X.min(axis=1)) assert_array_equal(maxs_csc, X.max(axis=1)) def test_min_max_axis_errors(): X = np.array([[0, 3, 0], [2, -1, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0) assert_raises(ValueError, min_max_axis, X_csr, axis=2) assert_raises(ValueError, min_max_axis, X_csc, axis=-3) 
def test_count_nonzero(): X = np.array([[0, 3, 0], [2, -1, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) X_nonzero = X != 0 sample_weight = [.5, .2, .3, .1, .1] X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None] for axis in [0, 1, -1, -2, None]: assert_array_almost_equal(count_nonzero(X_csr, axis=axis), X_nonzero.sum(axis=axis)) assert_array_almost_equal(count_nonzero(X_csr, axis=axis, sample_weight=sample_weight), X_nonzero_weighted.sum(axis=axis)) assert_raises(TypeError, count_nonzero, X_csc) assert_raises(ValueError, count_nonzero, X_csr, axis=2) def test_csc_row_median(): # Test csc_row_median actually calculates the median. # Test that it gives the same output when X is dense. rng = np.random.RandomState(0) X = rng.rand(100, 50) dense_median = np.median(X, axis=0) csc = sp.csc_matrix(X) sparse_median = csc_median_axis_0(csc) assert_array_equal(sparse_median, dense_median) # Test that it gives the same output when X is sparse X = rng.rand(51, 100) X[X < 0.7] = 0.0 ind = rng.randint(0, 50, 10) X[ind] = -X[ind] csc = sp.csc_matrix(X) dense_median = np.median(X, axis=0) sparse_median = csc_median_axis_0(csc) assert_array_equal(sparse_median, dense_median) # Test for toy data. X = [[0, -2], [-1, -1], [1, 0], [2, 1]] csc = sp.csc_matrix(X) assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5])) X = [[0, -2], [-1, -5], [1, -3]] csc = sp.csc_matrix(X) assert_array_equal(csc_median_axis_0(csc), np.array([0., -3])) # Test that it raises an Error for non-csc matrices. assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
bsd-3-clause
annayqho/TheCannon
code/lamost/li_giants/plot_residual.py
1
3418
""" Create some model spectra vs. data """ import numpy as np import matplotlib.pyplot as plt from math import log10, floor from matplotlib import rc import matplotlib.gridspec as gridspec from matplotlib.colors import LogNorm plt.rc('text', usetex=True) # rc('text.latex', preamble = ','.join('''\usepackage{txfonts}'''.split())) plt.rc('font', family='serif') #from TheCannon import train_model from TheCannon import dataset from TheCannon import model from matplotlib.ticker import MaxNLocator def plot(ii, wl, flux, ivar, model_all, coeffs, scatters, chisqs, pivots, start_wl, end_wl, highlights, figname): xmin = start_wl xmax = end_wl f = flux[ii,:] iv = ivar[ii,:] model_spec = model_all[ii,:] choose = np.logical_and(wl > xmin, wl < xmax) ymin = min(f[choose])-0.05 ymax = max(f[choose])+0.05 # err = scat ^2 + uncertainty^2 m = model.CannonModel(2, useErrors = False) m.coeffs = coeffs m.scatters = scatters m.chisqs = chisqs m.pivots = pivots scat = m.scatters iv_tot = (iv/(scat**2 * iv + 1)) err = np.ones(len(iv_tot))*1000 err[iv_tot>0] = 1/iv_tot[iv_tot>0]**0.5 #print("X2 is: " + str(sum((f - model_spec)**2 * iv_tot))) # Cinv = ivars / (1 + ivars*scatter**2) # lTCinvl = np.dot(lvec.T, Cinv[:, None] * lvec) # lTCinvf = np.dot(lvec.T, Cinv * fluxes) # Thanks to David Hogg / Andy Casey for this... # I stole it from the Annie's Lasso Github. 
gs = gridspec.GridSpec(2, 1, height_ratios=[1,4]) fig = plt.figure(figsize=(13.3, 4)) ax_residual = plt.subplot(gs[0]) ax_spectrum = plt.subplot(gs[1]) ax_spectrum.plot( wl, f, c='k', alpha=0.7, drawstyle='steps-mid', label="Data") #ax_spectrum.scatter(wl, f, c='k') ax_spectrum.plot( wl, model_spec, c='r', alpha=0.7, label="The Cannon Model") ax_spectrum.fill_between( wl, model_spec+err, model_spec-err, alpha=0.1, color='r') ax_spectrum.set_ylim(ymin, ymax) ax_spectrum.set_xlim(xmin, xmax) ax_spectrum.axhline(1, c="k", linestyle=":", zorder=-1) ax_spectrum.legend(loc="lower right") resid = f-model_spec r_ymin = min(resid[choose])-0.01 r_ymax = max(resid[choose])+0.01 ax_residual.plot(wl, resid, c='k', alpha=0.8, drawstyle='steps-mid') ax_residual.fill_between(wl, resid+err, resid-err, alpha=0.1, color='k') ax_residual.set_ylim(r_ymin,r_ymax) ax_residual.set_xlim(ax_spectrum.get_xlim()) ax_residual.axhline(0, c="k", linestyle=":", zorder=-1) for highlight in highlights: ax_residual.axvline(x=highlight, c='r', linewidth=2, linestyle='--') ax_residual.set_xticklabels([]) ax_residual.yaxis.set_major_locator(MaxNLocator(3)) ax_residual.xaxis.set_major_locator(MaxNLocator(6)) ax_spectrum.xaxis.set_major_locator(MaxNLocator(6)) ax_spectrum.yaxis.set_major_locator(MaxNLocator(4)) ax_spectrum.set_xlabel(r"Wavelength $\lambda (\AA)$", fontsize=18) ax_spectrum.set_ylabel("Normalized flux", fontsize=18) ax_spectrum.tick_params(axis="both", labelsize=18) ax_residual.tick_params(axis="both", labelsize=18) fig.tight_layout() for highlight in highlights: plt.axvline(x=highlight, c='r', linewidth=2, linestyle='--') #plt.show() plt.savefig(figname) plt.close() #plt.savefig("model_spectrum_full.png") #plt.savefig("model_spectrum.png")
mit
Insight-book/data-science-from-scratch
first-edition-ko/code/ch20_natural_language_processing.py
12
10007
from __future__ import division import math, random, re from collections import defaultdict, Counter from bs4 import BeautifulSoup import requests def plot_resumes(plt): data = [ ("big data", 100, 15), ("Hadoop", 95, 25), ("Python", 75, 50), ("R", 50, 40), ("machine learning", 80, 20), ("statistics", 20, 60), ("data science", 60, 70), ("analytics", 90, 3), ("team player", 85, 85), ("dynamic", 2, 90), ("synergies", 70, 0), ("actionable insights", 40, 30), ("think out of the box", 45, 10), ("self-starter", 30, 50), ("customer focus", 65, 15), ("thought leadership", 35, 35)] def text_size(total): """equals 8 if total is 0, 28 if total is 200""" return 8 + total / 200 * 20 for word, job_popularity, resume_popularity in data: plt.text(job_popularity, resume_popularity, word, ha='center', va='center', size=text_size(job_popularity + resume_popularity)) plt.xlabel("Popularity on Job Postings") plt.ylabel("Popularity on Resumes") plt.axis([0, 100, 0, 100]) plt.show() # # n-gram models # def fix_unicode(text): return text.replace(u"\u2019", "'") def get_document(): url = "http://radar.oreilly.com/2010/06/what-is-data-science.html" html = requests.get(url).text soup = BeautifulSoup(html, 'html5lib') content = soup.find("div", "article-body") # find article-body div regex = r"[\w']+|[\.]" # matches a word or a period document = [] for paragraph in content("p"): words = re.findall(regex, fix_unicode(paragraph.text)) document.extend(words) return document def generate_using_bigrams(transitions): current = "." # this means the next word will start a sentence result = [] while True: next_word_candidates = transitions[current] # bigrams (current, _) current = random.choice(next_word_candidates) # choose one at random result.append(current) # append it to results if current == ".": return " ".join(result) # if "." we're done def generate_using_trigrams(starts, trigram_transitions): current = random.choice(starts) # choose a random starting word prev = "." 
# and precede it with a '.' result = [current] while True: next_word_candidates = trigram_transitions[(prev, current)] next = random.choice(next_word_candidates) prev, current = current, next result.append(current) if current == ".": return " ".join(result) def is_terminal(token): return token[0] != "_" def expand(grammar, tokens): for i, token in enumerate(tokens): # ignore terminals if is_terminal(token): continue # choose a replacement at random replacement = random.choice(grammar[token]) if is_terminal(replacement): tokens[i] = replacement else: tokens = tokens[:i] + replacement.split() + tokens[(i+1):] return expand(grammar, tokens) # if we get here we had all terminals and are done return tokens def generate_sentence(grammar): return expand(grammar, ["_S"]) # # Gibbs Sampling # def roll_a_die(): return random.choice([1,2,3,4,5,6]) def direct_sample(): d1 = roll_a_die() d2 = roll_a_die() return d1, d1 + d2 def random_y_given_x(x): """equally likely to be x + 1, x + 2, ... , x + 6""" return x + roll_a_die() def random_x_given_y(y): if y <= 7: # if the total is 7 or less, the first die is equally likely to be # 1, 2, ..., (total - 1) return random.randrange(1, y) else: # if the total is 7 or more, the first die is equally likely to be # (total - 6), (total - 5), ..., 6 return random.randrange(y - 6, 7) def gibbs_sample(num_iters=100): x, y = 1, 2 # doesn't really matter for _ in range(num_iters): x = random_x_given_y(y) y = random_y_given_x(x) return x, y def compare_distributions(num_samples=1000): counts = defaultdict(lambda: [0, 0]) for _ in range(num_samples): counts[gibbs_sample()][0] += 1 counts[direct_sample()][1] += 1 return counts # # TOPIC MODELING # def sample_from(weights): total = sum(weights) rnd = total * random.random() # uniform between 0 and total for i, w in enumerate(weights): rnd -= w # return the smallest i such that if rnd <= 0: return i # sum(weights[:(i+1)]) >= rnd documents = [ ["Hadoop", "Big Data", "HBase", "Java", "Spark", "Storm", 
"Cassandra"], ["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"], ["Python", "scikit-learn", "scipy", "numpy", "statsmodels", "pandas"], ["R", "Python", "statistics", "regression", "probability"], ["machine learning", "regression", "decision trees", "libsvm"], ["Python", "R", "Java", "C++", "Haskell", "programming languages"], ["statistics", "probability", "mathematics", "theory"], ["machine learning", "scikit-learn", "Mahout", "neural networks"], ["neural networks", "deep learning", "Big Data", "artificial intelligence"], ["Hadoop", "Java", "MapReduce", "Big Data"], ["statistics", "R", "statsmodels"], ["C++", "deep learning", "artificial intelligence", "probability"], ["pandas", "R", "Python"], ["databases", "HBase", "Postgres", "MySQL", "MongoDB"], ["libsvm", "regression", "support vector machines"] ] K = 4 document_topic_counts = [Counter() for _ in documents] topic_word_counts = [Counter() for _ in range(K)] topic_counts = [0 for _ in range(K)] document_lengths = map(len, documents) distinct_words = set(word for document in documents for word in document) W = len(distinct_words) D = len(documents) def p_topic_given_document(topic, d, alpha=0.1): """the fraction of words in document _d_ that are assigned to _topic_ (plus some smoothing)""" return ((document_topic_counts[d][topic] + alpha) / (document_lengths[d] + K * alpha)) def p_word_given_topic(word, topic, beta=0.1): """the fraction of words assigned to _topic_ that equal _word_ (plus some smoothing)""" return ((topic_word_counts[topic][word] + beta) / (topic_counts[topic] + W * beta)) def topic_weight(d, word, k): """given a document and a word in that document, return the weight for the k-th topic""" return p_word_given_topic(word, k) * p_topic_given_document(k, d) def choose_new_topic(d, word): return sample_from([topic_weight(d, word, k) for k in range(K)]) random.seed(0) document_topics = [[random.randrange(K) for word in document] for document in documents] for d in range(D): for word, topic in 
zip(documents[d], document_topics[d]): document_topic_counts[d][topic] += 1 topic_word_counts[topic][word] += 1 topic_counts[topic] += 1 for iter in range(1000): for d in range(D): for i, (word, topic) in enumerate(zip(documents[d], document_topics[d])): # remove this word / topic from the counts # so that it doesn't influence the weights document_topic_counts[d][topic] -= 1 topic_word_counts[topic][word] -= 1 topic_counts[topic] -= 1 document_lengths[d] -= 1 # choose a new topic based on the weights new_topic = choose_new_topic(d, word) document_topics[d][i] = new_topic # and now add it back to the counts document_topic_counts[d][new_topic] += 1 topic_word_counts[new_topic][word] += 1 topic_counts[new_topic] += 1 document_lengths[d] += 1 if __name__ == "__main__": document = get_document() bigrams = zip(document, document[1:]) transitions = defaultdict(list) for prev, current in bigrams: transitions[prev].append(current) random.seed(0) print "bigram sentences" for i in range(10): print i, generate_using_bigrams(transitions) print # trigrams trigrams = zip(document, document[1:], document[2:]) trigram_transitions = defaultdict(list) starts = [] for prev, current, next in trigrams: if prev == ".": # if the previous "word" was a period starts.append(current) # then this is a start word trigram_transitions[(prev, current)].append(next) print "trigram sentences" for i in range(10): print i, generate_using_trigrams(starts, trigram_transitions) print grammar = { "_S" : ["_NP _VP"], "_NP" : ["_N", "_A _NP _P _A _N"], "_VP" : ["_V", "_V _NP"], "_N" : ["data science", "Python", "regression"], "_A" : ["big", "linear", "logistic"], "_P" : ["about", "near"], "_V" : ["learns", "trains", "tests", "is"] } print "grammar sentences" for i in range(10): print i, " ".join(generate_sentence(grammar)) print print "gibbs sampling" comparison = compare_distributions() for roll, (gibbs, direct) in comparison.iteritems(): print roll, gibbs, direct # topic MODELING for k, word_counts in 
enumerate(topic_word_counts): for word, count in word_counts.most_common(): if count > 0: print k, word, count topic_names = ["Big Data and programming languages", "Python and statistics", "databases", "machine learning"] for document, topic_counts in zip(documents, document_topic_counts): print document for topic, count in topic_counts.most_common(): if count > 0: print topic_names[topic], count, print
unlicense
lmarkely/enron_fraud
poi_id_modified.py
1
28009
#!/usr/bin/python import sys import pickle sys.path.append("../tools/") from feature_format import featureFormat, targetFeatureSplit from tester import dump_classifier_and_data ### Task 1: Select what features you'll use. ### features_list is a list of strings, each of which is a feature name. ### The first feature must be "poi". ### Include all quantitative features. In addition, 'std_from_poi' and ### 'std_to_poi' are standardized feature (see details below). features_list = ['poi','salary', 'bonus', 'expenses', 'exercised_stock_options', 'other', 'restricted_stock', 'shared_receipt_with_poi', 'std_from_poi','std_to_poi'] ### Load the dictionary containing the dataset with open("final_project_dataset.pkl", "r") as data_file: data_dict = pickle.load(data_file) ### Task 2: Remove outliers ### Task 3: Create new feature(s) ### Store to my_dataset for easy export below. # Add new features: std_from_poi and std_to_poi by dividing the message # to/from poi by the total sent or received messages, respectively. data_dict.pop('TOTAL') data_dict.pop('THE TRAVEL AGENCY IN THE PARK') data_dict.pop('LOCKHART EUGENE E') for key in data_dict: if (type(data_dict[key]['from_poi_to_this_person']) == int and type(data_dict[key]['from_messages']) == int): data_dict[key]['std_from_poi'] = \ (data_dict[key]['from_poi_to_this_person']/ data_dict[key]['from_messages']) else: data_dict[key]['std_from_poi'] = 0 if (type(data_dict[key]['from_this_person_to_poi']) == int and type(data_dict[key]['to_messages']) == int): data_dict[key]['std_to_poi'] = \ (data_dict[key]['from_this_person_to_poi']/ data_dict[key]['to_messages']) else: data_dict[key]['std_to_poi'] = 0 my_dataset = data_dict ### Extract features and labels from dataset for local testing data = featureFormat(my_dataset, features_list, sort_keys = True) labels, features = targetFeatureSplit(data) ### Task 4: Try a varity of classifiers ### Please name your classifier clf for easy export below. 
### Note that if you want to do PCA or other multi-stage operations, ### you'll need to use Pipelines. For more info: ### http://scikit-learn.org/stable/modules/pipeline.html # Provided to give you a starting point. Try a variety of classifiers. # The followings are the major steps in the analysis: # A. Visualize the data using dimensionality reduction PCA and LDA to gain # further insight into the data # B. Algorithm selection using repeated nested cross validation to choose # the algorithm that has highest accuracy # C. Model selection using repeated cross validation to identify the best # hyperparameter values # The following classification algorithms are used: # 1. Logistic Regression # 2. Random Forest Classifier # 3. KNN Classifier # 4. Support Vector Classifier # 5. Neural Network: Multi-layer Perceptron Classifier from IPython.core.display import display from __future__ import division import numpy as np import seaborn as sns import pandas as pd import matplotlib.pyplot as plt from __future__ import division from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.naive_bayes import GaussianNB from sklearn.neural_network import MLPClassifier from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import GridSearchCV from sklearn.model_selection import cross_val_score from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import chi2 from sklearn.pipeline import Pipeline from sklearn.preprocessing import MinMaxScaler from sklearn.decomposition import PCA from time import time # For simplicity, rename features as X and labels as y X = features y = labels ### First, explore the dataset. ### Identify the total number of data points. 
print('Total number of data points: %d' % np.shape(X)[0])
print('Total number of features: %d' % np.shape(X)[1])

# Scale features to [0, 1]: chi2 requires non-negative inputs and
# PCA / KNN / SVC are scale sensitive.
X_std = MinMaxScaler().fit_transform(X)

# PCA purely for exploration: how much variance each component explains.
pca = PCA()
X_pca = pca.fit_transform(X_std)
print('PCA explained_variance_ratio_ %s' % pca.explained_variance_ratio_)

# Univariate feature selection.  This section is adapted from the Udacity
# Forum thread 'What are the testing features when using SelectKBest?'.
K_best = SelectKBest(chi2, k=9)
features_kbest = K_best.fit_transform(X_std, y)
print(features_kbest.shape)
feature_scores = ['%.3f' % elem for elem in K_best.scores_]
feature_scores_pvalues = ['%.3f' % elem for elem in K_best.pvalues_]
# (feature name, chi2 score, p-value), highest score first.
# features_list[0] is the label 'poi', hence the i + 1 offset.
features_selected_tuple = [(features_list[i + 1], feature_scores[i],
                            feature_scores_pvalues[i])
                           for i in K_best.get_support(indices=True)]
features_selected_tuple = sorted(features_selected_tuple,
                                 key=lambda feature: float(feature[1]),
                                 reverse=True)
for feature_tuple in features_selected_tuple:
    print(feature_tuple)
df = pd.DataFrame(features_selected_tuple).set_index(0)

# Pairwise scatter plots of the scaled features for visual inspection.
pg = sns.PairGrid(pd.DataFrame(X_std))
pg.map_diag(plt.hist)
pg.map_offdiag(plt.scatter)
plt.show()

### Task 5: Tune your classifier to achieve better than .3 precision and recall
### using our testing script. Check the tester.py script in the final project
### folder for details on the evaluation method, especially the test_classifier
### function. Because of the small size of the dataset, the script uses
### stratified shuffle split cross validation. For more info:
### http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.StratifiedShuffleSplit.html

clf_labels = \
    ['Logistic Regression', 'KNN', 'Random Forest', 'SVC', 'Kernel SVC', 'MLP']

# Number of repeats of the (nested) cross validation.
N_OUTER = 5
N_INNER = 5
# Metrics reported for every algorithm.
METRICS = ('f1', 'precision', 'recall')


def make_clf_pipeline(clf):
    """Scale to [0, 1], keep the 2 best chi2 features, then classify."""
    return Pipeline([('sc', MinMaxScaler()),
                     ('kbest', SelectKBest(chi2, k=2)),
                     ('clf', clf)])


def nested_cv_report(label, estimator, param_grid, scoring,
                     n_outer=N_OUTER, n_inner=N_INNER):
    """Repeated nested cross validation for one estimator and one metric.

    Inner folds tune hyper-parameters via GridSearchCV; outer folds estimate
    generalization.  Prints mean +/- std of `scoring` and the elapsed time.

    Fixes over the original copy-pasted loops: the score list and the timer
    are reset per metric (the original accumulated f1 scores into the
    precision/recall means), and the printed label always matches `scoring`
    (the original AdaBoost section printed 'F1 Score' for all metrics).
    """
    scores = []
    t0 = time()
    for i in range(n_outer):
        outer = StratifiedKFold(n_splits=5, shuffle=True, random_state=i)
        for j in range(n_inner):
            inner = StratifiedKFold(n_splits=5, shuffle=True, random_state=j)
            gs = GridSearchCV(estimator=estimator, param_grid=param_grid,
                              cv=inner, scoring=scoring)
            scores.append(cross_val_score(gs, X, y, cv=outer, scoring=scoring))
    print('CV %s Score of %s: %.3f +/- %.3f'
          % (scoring.capitalize(), label, np.mean(scores), np.std(scores)))
    print('Complete in %.1f sec' % (time() - t0))


def cv_report(label, estimator, scoring, n_outer=N_OUTER):
    """Repeated (non-nested) CV for estimators without hyper-parameters."""
    scores = []
    t0 = time()
    for i in range(n_outer):
        outer = StratifiedKFold(n_splits=5, shuffle=True, random_state=i)
        scores.append(cross_val_score(estimator, X, y, cv=outer,
                                      scoring=scoring))
    print('CV %s Score of %s: %.3f +/- %.3f'
          % (scoring.capitalize(), label, np.mean(scores), np.std(scores)))
    print('Complete in %.1f sec' % (time() - t0))


# --- Algorithm selection: repeated nested CV per classifier and metric ---

# Logistic Regression
pipe_lr = make_clf_pipeline(LogisticRegression(penalty='l2'))
params_lr = {'clf__C': 10.0 ** np.arange(-4, 4)}
for metric in METRICS:
    nested_cv_report('Logistic Regression', pipe_lr, params_lr, metric)

# Random Forest Classifier
pipe_rf = make_clf_pipeline(RandomForestClassifier(random_state=42))
params_rf = {'clf__n_estimators': np.arange(1, 11)}
for metric in METRICS:
    nested_cv_report('Random Forest Classifier', pipe_rf, params_rf, metric)

# KNN Classifier
pipe_knn = make_clf_pipeline(KNeighborsClassifier())
params_knn = {'clf__n_neighbors': np.arange(1, 6)}
for metric in METRICS:
    nested_cv_report('KNN Classifier', pipe_knn, params_knn, metric)

# SVC -- NOTE(review): the original labelled this 'Linear SVC', but SVC()
# defaults to an RBF kernel; the label is kept for report continuity.
pipe_svc = make_clf_pipeline(SVC())
params_svc = {'clf__C': 10.0 ** np.arange(-4, 4)}
for metric in METRICS:
    nested_cv_report('Linear SVC', pipe_svc, params_svc, metric)

# Kernel SVC (RBF with tuned gamma)
pipe_ksvc = make_clf_pipeline(SVC(kernel='rbf'))
params_ksvc = {'clf__C': 10.0 ** np.arange(-4, 4),
               'clf__gamma': 10.0 ** np.arange(-4, 4)}
for metric in METRICS:
    nested_cv_report('Kernel SVC', pipe_ksvc, params_ksvc, metric)

# Naive Bayes has no hyper-parameters: plain repeated CV, no inner loop.
pipe_nb = make_clf_pipeline(GaussianNB())
for metric in METRICS:
    cv_report('Naive Bayes', pipe_nb, metric)

# Multi-layer Perceptron
pipe_mlp = make_clf_pipeline(MLPClassifier(solver='lbfgs'))
params_mlp = {'clf__activation': ['logistic', 'relu'],
              'clf__alpha': 10.0 ** np.arange(-4, 4)}
for metric in METRICS:
    nested_cv_report('MLP', pipe_mlp, params_mlp, metric)

# AdaBoost
pipe_ada = make_clf_pipeline(AdaBoostClassifier(random_state=42))
params_ada = {'clf__n_estimators': np.arange(1, 11) * 10}
for metric in METRICS:
    nested_cv_report('AdaBoost', pipe_ada, params_ada, metric)


# --- Model selection for KNN (the chosen algorithm) ---

def knn_model_selection(scoring, n_reps=1000):
    """Repeat a 5-fold grid search `n_reps` times and summarize, per
    n_neighbors value, how often it wins and its mean best `scoring`."""
    best_params = []
    pipe = make_clf_pipeline(KNeighborsClassifier())
    params = {'clf__n_neighbors': np.arange(1, 6)}
    t0 = time()
    for rep in range(n_reps):
        k_fold = StratifiedKFold(n_splits=5, shuffle=True, random_state=rep)
        gs = GridSearchCV(estimator=pipe, param_grid=params, cv=k_fold,
                          scoring=scoring)
        gs = gs.fit(X, y)
        best_param = dict(gs.best_params_)
        best_param['Best Score'] = gs.best_score_
        best_params.append(best_param)
    # DataFrame summarizing average of best scores and the frequency with
    # which each parameter value was selected, best mean first.
    summary = pd.DataFrame(best_params).rename(
        columns={'clf__n_neighbors': 'N Neighbors'})
    summary = summary.groupby('N Neighbors')['Best Score'].describe()
    summary = np.round(summary, decimals=2).sort_values(
        ['mean', 'count'], axis=0, ascending=[False, False])
    display(summary)
    print(time() - t0)


for metric in METRICS:
    knn_model_selection(metric)

# --- Dummy (uniform random) classifier as a sanity baseline ---
from sklearn.dummy import DummyClassifier

clf_dm = DummyClassifier(strategy='uniform')
for metric in METRICS:
    cv_report('Dummy Classifier', clf_dm, metric)

### Task 6: Dump your classifier, dataset, and features_list so anyone can
### check your results. You do not need to change anything below, but make sure
### that the version of poi_id.py that you submit can be run on its own and
### generates the necessary .pkl files for validating your results.
# NOTE(review): the original script dumped `clf` without ever defining it,
# which raises NameError.  Export the tuned KNN pipeline selected above;
# set n_neighbors to the winner reported by knn_model_selection -- TODO
# confirm the value against the printed summary.
clf = make_clf_pipeline(KNeighborsClassifier(n_neighbors=3))
dump_classifier_and_data(clf, my_dataset, features_list)
mit
researchstudio-sat/wonpreprocessing
python-processing/scripts/evaluation_algorithms.py
1
20879
import numpy as np import sklearn.metrics as m from tools.cosine_link_prediction import cosinus_link_prediciton from tools.evaluation_utils import EvaluationReport, NeedEvaluationDetailDict, get_optimal_threshold, \ write_ROC_curve_file, write_precision_recall_curve_file from tools.graph_utils import create_gexf_graph from tools.tensor_utils import SparseTensor, matrix_to_array, execute_rescal, predict_rescal_connections_by_threshold, \ read_input_tensor, extend_next_hop_transitive_connections, predict_rescal_connections_array, \ predict_rescal_connections_by_need_similarity, similarity_ranking __author__ = 'hfriedrich' # ======================================================================================== # Abstract class that serves as a base class for the implementation of the evaluation # of one algorithm during one cross fold validation. # ======================================================================================== class EvaluationAlgorithm: def __init__(self, args, output_folder, logger, input_tensor, start_time): self.init(args, output_folder, logger, input_tensor, start_time) def init(self, args, output_folder, logger, input_tensor, start_time): self.args = args self.logger = logger self.output_folder = output_folder self.report = EvaluationReport(logger, args.fbeta) self.ground_truth = input_tensor.copy() self.start_time = start_time # call this method in the loop at each fold def evaluate_fold(self, test_tensor, test_needs, idx_test): raise NotImplementedError("not implemented") # call this method at the end of the evaluation def finish_evaluation(self): raise NotImplementedError("not implemented") # ======================================================================================== # Implementation of evaluation of RESCAL algorithm # ======================================================================================== # Notes: # - changing the rank parameter influences the amount of internal latent "clusters" of the # algorithm and 
thus the quality of the matching as well as performance (memory and # execution time). # - higher threshold for RESCAL algorithm need similarity means higher recall # ======================================================================================== class RescalEvaluation(EvaluationAlgorithm): def __init__(self, args, output_folder, logger, ground_truth, start_time): self.init(args, output_folder, logger, ground_truth, start_time) self.rank = int(args.rescal[0]) self.threshold = float(args.rescal[1]) self.evalDetails = NeedEvaluationDetailDict() self.AUC_test = [] self.foldNumber = 0 self.offers = ground_truth.getOfferIndices() self.wants = ground_truth.getWantIndices() def log1(self): self.logger.info('For RESCAL prediction with threshold %f:' % self.threshold) def evaluate_fold(self, test_tensor, test_needs, idx_test): # set transitive connections before execution if (self.args.rescal[3] == 'True'): self.logger.info('extend connections transitively to the next need for RESCAL learning') test_tensor = extend_next_hop_transitive_connections(test_tensor) # execute the rescal algorithm useNeedTypeSlice = (self.args.rescal[2] == 'True') A, R = execute_rescal( test_tensor, self.rank, useNeedTypeSlice, init=self.args.rescal[4], conv=float(self.args.rescal[5]), lambda_A=float(self.args.rescal[6]), lambda_R=float(self.args.rescal[7]), lambda_V=float(self.args.rescal[8])) # evaluate the predictions self.logger.info('start predict connections ...') prediction = np.round_(predict_rescal_connections_array(A, R, idx_test), decimals=5) self.logger.info('stop predict connections') precision, recall, threshold = m.precision_recall_curve( self.ground_truth.getArrayFromSliceMatrix(SparseTensor.CONNECTION_SLICE, idx_test), prediction) optimal_threshold = get_optimal_threshold(recall, precision, threshold, self.args.fbeta) self.logger.info('optimal RESCAL threshold would be ' + str(optimal_threshold) + ' (for maximum F' + str(self.args.fbeta) + '-score)') auc = m.auc(recall, 
precision) self.AUC_test.append(auc) self.logger.info('AUC test: ' + str(auc)) # use a fixed threshold to compute several measures self.log1() P_bin = predict_rescal_connections_by_threshold(A, R, self.threshold, self.offers, self.wants, test_needs) binary_pred = matrix_to_array(P_bin, idx_test) self.report.add_evaluation_data(self.ground_truth.getArrayFromSliceMatrix( SparseTensor.CONNECTION_SLICE, idx_test), binary_pred) if self.args.statistics: write_precision_recall_curve_file( self.output_folder + "/statistics/rescal_" + self.start_time, "precision_recall_curve_fold%d.csv" % self.foldNumber, precision, recall, threshold) TP, FP, threshold = m.roc_curve(self.ground_truth.getArrayFromSliceMatrix( SparseTensor.CONNECTION_SLICE, idx_test), prediction) write_ROC_curve_file(self.output_folder + "/statistics/rescal_" + self.start_time, "ROC_curve_fold%d.csv" % self.foldNumber, TP, FP, threshold) self.evalDetails.add_statistic_details(self.ground_truth.getSliceMatrix( SparseTensor.CONNECTION_SLICE), P_bin, idx_test, prediction) self.foldNumber += 1 def finish_evaluation(self): self.AUC_test = np.array(self.AUC_test) self.logger.info('AUC-PR Test Mean / Std: %f / %f' % (self.AUC_test.mean(), self.AUC_test.std())) self.logger.info('----------------------------------------------------') self.log1() self.report.summary() if self.args.statistics: self.evalDetails.output_statistic_details( self.output_folder + "/statistics/rescal_" + self.start_time, self.ground_truth.getHeaders(), self.args.fbeta, True) gexf = create_gexf_graph(self.ground_truth, self.evalDetails) output_file = open(self.output_folder + "/statistics/rescal_" + self.start_time + "/graph.gexf", "w") gexf.write(output_file) output_file.close() # ======================================================================================== # Implementation of evaluation of RESCAL similarity algorithm # ======================================================================================== # Notes: # - changing the 
rank parameter influences the amount of internal latent "clusters" of the # algorithm and thus the quality of the matching as well as performance (memory and # execution time). # - higher threshold for RESCAL algorithm need similarity means higher recall # ======================================================================================== class RescalSimilarityEvaluation(EvaluationAlgorithm): def __init__(self, args, output_folder, logger, ground_truth, start_time): self.init(args, output_folder, logger, ground_truth, start_time) self.rank = int(args.rescalsim[0]) self.threshold = float(args.rescalsim[1]) self.evalDetails = NeedEvaluationDetailDict() self.offers = ground_truth.getOfferIndices() self.wants = ground_truth.getWantIndices() self.foldNumber = 0 def log1(self): self.logger.info('For RESCAL prediction based on need similarity with threshold: %f' % self.threshold) def evaluate_fold(self, test_tensor, test_needs, idx_test): # execute the rescal algorithm useNeedTypeSlice = (self.args.rescalsim[2] == 'True') useConnectionSlice = (self.args.rescalsim[3] == 'True') A, R = execute_rescal(test_tensor, self.rank, useNeedTypeSlice, useConnectionSlice) # use the most similar needs per need to predict connections self.log1() P_bin = predict_rescal_connections_by_need_similarity(A, self.threshold, self.offers, self.wants, test_needs) binary_pred = matrix_to_array(P_bin, idx_test) self.report.add_evaluation_data(self.ground_truth.getArrayFromSliceMatrix( SparseTensor.CONNECTION_SLICE, idx_test), binary_pred) if self.args.statistics: S = similarity_ranking(A) y_prop = [1.0 - i for i in np.nan_to_num(S[idx_test])] precision, recall, threshold = m.precision_recall_curve( self.ground_truth.getArrayFromSliceMatrix(SparseTensor.CONNECTION_SLICE, idx_test), y_prop) write_precision_recall_curve_file( self.output_folder + "/statistics/rescalsim_" + self.start_time, "precision_recall_curve_fold%d.csv" % self.foldNumber, precision, recall, threshold) TP, FP, threshold = 
m.roc_curve(self.ground_truth.getArrayFromSliceMatrix( SparseTensor.CONNECTION_SLICE, idx_test), y_prop) write_ROC_curve_file(self.output_folder + "/statistics/rescalsim_" + self.start_time, "ROC_curve_fold%d.csv" % self.foldNumber, TP, FP, threshold) self.evalDetails.add_statistic_details(self.ground_truth.getSliceMatrix( SparseTensor.CONNECTION_SLICE), P_bin, idx_test) def finish_evaluation(self): self.log1() self.report.summary() if self.args.statistics: self.evalDetails.output_statistic_details( self.output_folder + "/statistics/rescalsim_" + self.start_time, self.ground_truth.getHeaders(), self.args.fbeta) gexf = create_gexf_graph(self.ground_truth, self.evalDetails) output_file = open(self.output_folder + "/statistics/rescalsim_" + self.start_time + "/graph.gexf", "w") gexf.write(output_file) output_file.close() # ======================================================================================== # Implementation of evaluation of cosine similarity algorithm # ======================================================================================== # Notes: # higher threshold for cosine similarity link prediction means higher recall. # set transitive threshold < threshold to avoid transitive predictions. 
# ======================================================================================== class CosineEvaluation(EvaluationAlgorithm): def __init__(self, args, output_folder, logger, ground_truth, start_time, weighted): self.init(args, output_folder, logger, ground_truth, start_time) self.weighted = weighted self.threshold = float(args.cosine_weigthed[0]) if weighted else float(args.cosine[0]) self.transitive_threshold = float(args.cosine_weigthed[1]) if weighted else float(args.cosine[1]) self.evalDetails = NeedEvaluationDetailDict() def logEvaluationLine(self): str = "" if self.weighted: str = " weighted" self.logger.info('For prediction of%s cosine similarity between needs with thresholds %f, %f:' % (str, self.threshold, self.transitive_threshold)) def evaluate_fold(self, test_tensor, test_needs, idx_test): self.logEvaluationLine() binary_pred = cosinus_link_prediciton(test_tensor, test_needs, self.threshold, self.transitive_threshold, self.weighted) self.report.add_evaluation_data(self.ground_truth.getArrayFromSliceMatrix( SparseTensor.CONNECTION_SLICE, idx_test), matrix_to_array(binary_pred, idx_test)) if self.args.statistics: self.evalDetails.add_statistic_details( self.ground_truth.getSliceMatrix(SparseTensor.CONNECTION_SLICE), binary_pred, idx_test) def finish_evaluation(self): self.logEvaluationLine() self.report.summary() if self.args.statistics: folder = "/statistics/cosine_" if self.weighted: folder = "/statistics/wcosine_" self.evalDetails.output_statistic_details( self.output_folder + folder + self.start_time, self.ground_truth.getHeaders(), self.args.fbeta) gexf = create_gexf_graph(self.ground_truth, self.evalDetails) output_file = open(self.output_folder + folder + self.start_time + "/graph.gexf", "w") gexf.write(output_file) output_file.close() # ======================================================================================== # Implementation of evaluation of loading an external matrix file with predictions # 
======================================================================================== # Notes: # Matrix connection file has the same file format as connection slice of tensor # ======================================================================================== class PredictionMatrixFileEvaluation(EvaluationAlgorithm): def __init__(self, args, output_folder, logger, ground_truth, start_time): self.init(args, output_folder, logger, ground_truth, start_time) header_input = args.inputfolder + "/" + args.headers self.file_prediction_tensor = read_input_tensor( header_input, [args.prediction_matrix_file], [SparseTensor.CONNECTION_SLICE], True) def logEvaluationLine(self): self.logger.info('External file (' + self.args.prediction_matrix_file + ') predictions: ') def evaluate_fold(self, test_tensor, test_needs, idx_test): file_pred = self.file_prediction_tensor.getArrayFromSliceMatrix( SparseTensor.CONNECTION_SLICE, idx_test); self.report.add_evaluation_data(self.ground_truth.getArrayFromSliceMatrix( SparseTensor.CONNECTION_SLICE, idx_test), file_pred) def finish_evaluation(self): self.logEvaluationLine() self.report.summary() # ====================================================================================== # Implementation of evaluation of combination of algorithms RESCAL and Cosine similarity. # ====================================================================================== # Notes: # predict connections by combining the execution of algorithms. 
First execute the cosine # similarity algorithm (preferably choosing a threshold to get a high precision) and with # this predicted matches execute the RESCAL algorithm afterwards (to increase the recall) # ====================================================================================== class CombineCosineRescalEvaluation(EvaluationAlgorithm): def __init__(self, args, output_folder, logger, ground_truth, start_time): self.init(args, output_folder, logger, ground_truth, start_time) self.report2 = EvaluationReport(logger, args.fbeta) def log1(self): self.logger.info('First step for prediction of cosine similarity with threshold: %f:' % float(self.args.cosine_rescal[2])) def log2(self): self.logger.info('And second step for combined RESCAL prediction with parameters: %d, %f:' % (int(self.args.cosine_rescal[0]), float(self.args.cosine_rescal[1]))) def evaluate_fold(self, test_tensor, test_needs, idx_test): cosine_pred, rescal_pred = self.predict_combine_cosine_rescal( test_tensor, test_needs, idx_test, int(self.args.cosine_rescal[0]), float(self.args.cosine_rescal[1]), float(self.args.cosine_rescal[2]), bool(self.args.cosine_rescal[3])) self.log1() self.report.add_evaluation_data(self.ground_truth.getArrayFromSliceMatrix( SparseTensor.CONNECTION_SLICE, idx_test), cosine_pred) self.log2() self.report2.add_evaluation_data(self.ground_truth.getArrayFromSliceMatrix( SparseTensor.CONNECTION_SLICE,idx_test), rescal_pred) def finish_evaluation(self): self.log1() self.report.summary() self.log2() self.report2.summary() # predict connections by combining the execution of algorithms def predict_combine_cosine_rescal(self, input_tensor, test_needs, idx_test, rank, rescal_threshold, cosine_threshold, useNeedTypeSlice): wants = input_tensor.getWantIndices() offers = input_tensor.getOfferIndices() # execute the cosine algorithm first binary_pred_cosine = cosinus_link_prediciton(input_tensor, test_needs, cosine_threshold, 0.0, False) # use the connection prediction of the 
cosine algorithm as input for rescal temp_tensor = input_tensor.copy() temp_tensor.addSliceMatrix(binary_pred_cosine, SparseTensor.CONNECTION_SLICE) A,R = execute_rescal(temp_tensor, rank) P_bin = predict_rescal_connections_by_threshold(A, R, rescal_threshold, offers, wants, test_needs) # return both predictions the earlier cosine and the combined rescal binary_pred_cosine = binary_pred_cosine[idx_test] binary_pred_rescal = matrix_to_array(P_bin, idx_test) return binary_pred_cosine, binary_pred_rescal # ====================================================================================== # Implementation of evaluation of intersection of algorithms RESCAL and Cosine similarity. # ====================================================================================== # Notes: # predict connections by combining the execution of algorithms. Compute the predictions # of connections for both cosine similarity and rescal algorithm. Then return the # intersection of the predictions. # ====================================================================================== class IntersectionCosineRescalEvaluation(EvaluationAlgorithm): def __init__(self, args, output_folder, logger, ground_truth, start_time): self.init(args, output_folder, logger, ground_truth, start_time) self.report2 = EvaluationReport(logger, args.fbeta) self.report3 = EvaluationReport(logger, args.fbeta) def log1(self): self.logger.info('Intersection of predictions of cosine similarity and rescal algorithms: ') def log2(self): self.logger.info('For RESCAL prediction with threshold %f:' % float(self.args.intersection[1])) def log3(self): self.logger.info('For prediction of cosine similarity between needs with thresholds: %f:' % float(self.args.intersection[2])) def evaluate_fold(self, test_tensor, test_needs, idx_test): inter_pred, cosine_pred, rescal_pred = self.predict_intersect_cosine_rescal( test_tensor, test_needs, idx_test, int(self.args.intersection[0]), float(self.args.intersection[1]), 
float(self.args.intersection[2]), bool(self.args.intersection[3])) self.log1() self.report.add_evaluation_data(self.ground_truth.getArrayFromSliceMatrix( SparseTensor.CONNECTION_SLICE, idx_test), inter_pred) self.log2() self.report2.add_evaluation_data(self.ground_truth.getArrayFromSliceMatrix( SparseTensor.CONNECTION_SLICE,idx_test), rescal_pred) self.log3() self.report3.add_evaluation_data(self.ground_truth.getArrayFromSliceMatrix( SparseTensor.CONNECTION_SLICE,idx_test), cosine_pred) def finish_evaluation(self): self.log1() self.report.summary() self.log2() self.report2.summary() self.log3() self.report3.summary() # predict connections by intersection of RESCAL and cosine results def predict_intersect_cosine_rescal(self, input_tensor, test_needs, idx_test, rank, rescal_threshold, cosine_threshold, useNeedTypeSlice): wants = input_tensor.getWantIndices() offers = input_tensor.getOfferIndices() # execute the cosine algorithm binary_pred_cosine = cosinus_link_prediciton(input_tensor, test_needs, cosine_threshold, 0.0, False) # execute the rescal algorithm A,R = execute_rescal(input_tensor, rank) P_bin = predict_rescal_connections_by_threshold(A, R, rescal_threshold, offers, wants, test_needs) # return the intersection of the prediction of both algorithms binary_pred_cosine = matrix_to_array(binary_pred_cosine, idx_test) binary_pred_rescal = matrix_to_array(P_bin, idx_test) binary_pred = [min(binary_pred_cosine[i], binary_pred_rescal[i]) for i in range(len(binary_pred_cosine))] return binary_pred, binary_pred_cosine, binary_pred_rescal
apache-2.0
Eric89GXL/scikit-learn
examples/gaussian_process/plot_gp_regression.py
253
4054
#!/usr/bin/python # -*- coding: utf-8 -*- r""" ========================================================= Gaussian Processes regression: basic introductory example ========================================================= A simple one-dimensional regression exercise computed in two different ways: 1. A noise-free case with a cubic correlation model 2. A noisy case with a squared Euclidean correlation model In both cases, the model parameters are estimated using the maximum likelihood principle. The figures illustrate the interpolating property of the Gaussian Process model as well as its probabilistic nature in the form of a pointwise 95% confidence interval. Note that the parameter ``nugget`` is applied as a Tikhonov regularization of the assumed covariance between the training points. In the special case of the squared euclidean correlation model, nugget is mathematically equivalent to a normalized variance: That is .. math:: \mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2 """ print(__doc__) # Author: Vincent Dubourg <vincent.dubourg@gmail.com> # Jake Vanderplas <vanderplas@astro.washington.edu> # Licence: BSD 3 clause import numpy as np from sklearn.gaussian_process import GaussianProcess from matplotlib import pyplot as pl np.random.seed(1) def f(x): """The function to predict.""" return x * np.sin(x) #---------------------------------------------------------------------- # First the noiseless case X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T # Observations y = f(X).ravel() # Mesh the input space for evaluations of the real function, the prediction and # its MSE x = np.atleast_2d(np.linspace(0, 10, 1000)).T # Instanciate a Gaussian Process model gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1, random_start=100) # Fit to data using Maximum Likelihood Estimation of the parameters gp.fit(X, y) # Make the prediction on the meshed x-axis (ask for MSE as well) y_pred, MSE = gp.predict(x, eval_MSE=True) sigma = np.sqrt(MSE) # Plot the 
function, the prediction and the 95% confidence interval based on # the MSE fig = pl.figure() pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$') pl.plot(X, y, 'r.', markersize=10, label=u'Observations') pl.plot(x, y_pred, 'b-', label=u'Prediction') pl.fill(np.concatenate([x, x[::-1]]), np.concatenate([y_pred - 1.9600 * sigma, (y_pred + 1.9600 * sigma)[::-1]]), alpha=.5, fc='b', ec='None', label='95% confidence interval') pl.xlabel('$x$') pl.ylabel('$f(x)$') pl.ylim(-10, 20) pl.legend(loc='upper left') #---------------------------------------------------------------------- # now the noisy case X = np.linspace(0.1, 9.9, 20) X = np.atleast_2d(X).T # Observations and noise y = f(X).ravel() dy = 0.5 + 1.0 * np.random.random(y.shape) noise = np.random.normal(0, dy) y += noise # Mesh the input space for evaluations of the real function, the prediction and # its MSE x = np.atleast_2d(np.linspace(0, 10, 1000)).T # Instanciate a Gaussian Process model gp = GaussianProcess(corr='squared_exponential', theta0=1e-1, thetaL=1e-3, thetaU=1, nugget=(dy / y) ** 2, random_start=100) # Fit to data using Maximum Likelihood Estimation of the parameters gp.fit(X, y) # Make the prediction on the meshed x-axis (ask for MSE as well) y_pred, MSE = gp.predict(x, eval_MSE=True) sigma = np.sqrt(MSE) # Plot the function, the prediction and the 95% confidence interval based on # the MSE fig = pl.figure() pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$') pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations') pl.plot(x, y_pred, 'b-', label=u'Prediction') pl.fill(np.concatenate([x, x[::-1]]), np.concatenate([y_pred - 1.9600 * sigma, (y_pred + 1.9600 * sigma)[::-1]]), alpha=.5, fc='b', ec='None', label='95% confidence interval') pl.xlabel('$x$') pl.ylabel('$f(x)$') pl.ylim(-10, 20) pl.legend(loc='upper left') pl.show()
bsd-3-clause
ShownX/incubator-mxnet
example/rcnn/rcnn/pycocotools/coco.py
41
19083
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. __author__ = 'tylin' __version__ = '2.0' # Interface for accessing the Microsoft COCO dataset. # Microsoft COCO is a large image dataset designed for object detection, # segmentation, and caption generation. pycocotools is a Python API that # assists in loading, parsing and visualizing the annotations in COCO. # Please visit http://mscoco.org/ for more information on COCO, including # for the data, paper, and tutorials. The exact format of the annotations # is also described on the COCO website. For example usage of the pycocotools # please see pycocotools_demo.ipynb. In addition to this API, please download both # the COCO images and annotations in order to run the demo. # An alternative to using the API is to load the annotations directly # into Python dictionary # Using the API provides additional utility functions. Note that this API # supports both *instance* and *caption* annotations. In the case of # captions not all functions are defined (e.g. categories are undefined). # The following API functions are defined: # COCO - COCO api class that loads COCO annotation file and prepare data structures. # decodeMask - Decode binary mask M encoded via run-length encoding. 
# encodeMask - Encode binary mask M using run-length encoding. # getAnnIds - Get ann ids that satisfy given filter conditions. # getCatIds - Get cat ids that satisfy given filter conditions. # getImgIds - Get img ids that satisfy given filter conditions. # loadAnns - Load anns with the specified ids. # loadCats - Load cats with the specified ids. # loadImgs - Load imgs with the specified ids. # annToMask - Convert segmentation in an annotation to binary mask. # showAnns - Display the specified annotations. # loadRes - Load algorithm results and create API for accessing them. # download - Download COCO images from mscoco.org server. # Throughout the API "ann"=annotation, "cat"=category, and "img"=image. # Help on each functions can be accessed by: "help COCO>function". # See also COCO>decodeMask, # COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds, # COCO>getImgIds, COCO>loadAnns, COCO>loadCats, # COCO>loadImgs, COCO>annToMask, COCO>showAnns # Microsoft COCO Toolbox. version 2.0 # Data, paper, and tutorials available at: http://mscoco.org/ # Code written by Piotr Dollar and Tsung-Yi Lin, 2014. # Licensed under the Simplified BSD License [see bsd.txt] import json import time import matplotlib.pyplot as plt from matplotlib.collections import PatchCollection from matplotlib.patches import Polygon import numpy as np import copy import itertools from . import mask as maskUtils import os from collections import defaultdict import sys PYTHON_VERSION = sys.version_info[0] if PYTHON_VERSION == 2: from urllib import urlretrieve elif PYTHON_VERSION == 3: from urllib.request import urlretrieve class COCO: def __init__(self, annotation_file=None): """ Constructor of Microsoft COCO helper class for reading and visualizing annotations. :param annotation_file (str): location of annotation file :param image_folder (str): location to the folder that hosts images. 
:return: """ # load dataset self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict() self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list) if not annotation_file == None: print('loading annotations into memory...') tic = time.time() dataset = json.load(open(annotation_file, 'r')) assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset)) print('Done (t={:0.2f}s)'.format(time.time()- tic)) self.dataset = dataset self.createIndex() def createIndex(self): # create index print('creating index...') anns, cats, imgs = {}, {}, {} imgToAnns,catToImgs = defaultdict(list),defaultdict(list) if 'annotations' in self.dataset: for ann in self.dataset['annotations']: imgToAnns[ann['image_id']].append(ann) anns[ann['id']] = ann if 'images' in self.dataset: for img in self.dataset['images']: imgs[img['id']] = img if 'categories' in self.dataset: for cat in self.dataset['categories']: cats[cat['id']] = cat if 'annotations' in self.dataset and 'categories' in self.dataset: for ann in self.dataset['annotations']: catToImgs[ann['category_id']].append(ann['image_id']) print('index created!') # create class members self.anns = anns self.imgToAnns = imgToAnns self.catToImgs = catToImgs self.imgs = imgs self.cats = cats def info(self): """ Print information about the annotation file. :return: """ for key, value in self.dataset['info'].items(): print('{}: {}'.format(key, value)) def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None): """ Get ann ids that satisfy given filter conditions. default skips that filter :param imgIds (int array) : get anns for given imgs catIds (int array) : get anns for given cats areaRng (float array) : get anns for given area range (e.g. 
[0 inf]) iscrowd (boolean) : get anns for given crowd label (False or True) :return: ids (int array) : integer array of ann ids """ imgIds = imgIds if type(imgIds) == list else [imgIds] catIds = catIds if type(catIds) == list else [catIds] if len(imgIds) == len(catIds) == len(areaRng) == 0: anns = self.dataset['annotations'] else: if not len(imgIds) == 0: lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns] anns = list(itertools.chain.from_iterable(lists)) else: anns = self.dataset['annotations'] anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds] anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]] if not iscrowd == None: ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd] else: ids = [ann['id'] for ann in anns] return ids def getCatIds(self, catNms=[], supNms=[], catIds=[]): """ filtering parameters. default skips that filter. :param catNms (str array) : get cats for given cat names :param supNms (str array) : get cats for given supercategory names :param catIds (int array) : get cats for given cat ids :return: ids (int array) : integer array of cat ids """ catNms = catNms if type(catNms) == list else [catNms] supNms = supNms if type(supNms) == list else [supNms] catIds = catIds if type(catIds) == list else [catIds] if len(catNms) == len(supNms) == len(catIds) == 0: cats = self.dataset['categories'] else: cats = self.dataset['categories'] cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms] cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms] cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds] ids = [cat['id'] for cat in cats] return ids def getImgIds(self, imgIds=[], catIds=[]): ''' Get img ids that satisfy given filter conditions. 
:param imgIds (int array) : get imgs for given ids :param catIds (int array) : get imgs with all given cats :return: ids (int array) : integer array of img ids ''' imgIds = imgIds if type(imgIds) == list else [imgIds] catIds = catIds if type(catIds) == list else [catIds] if len(imgIds) == len(catIds) == 0: ids = self.imgs.keys() else: ids = set(imgIds) for i, catId in enumerate(catIds): if i == 0 and len(ids) == 0: ids = set(self.catToImgs[catId]) else: ids &= set(self.catToImgs[catId]) return list(ids) def loadAnns(self, ids=[]): """ Load anns with the specified ids. :param ids (int array) : integer ids specifying anns :return: anns (object array) : loaded ann objects """ if type(ids) == list: return [self.anns[id] for id in ids] elif type(ids) == int: return [self.anns[ids]] def loadCats(self, ids=[]): """ Load cats with the specified ids. :param ids (int array) : integer ids specifying cats :return: cats (object array) : loaded cat objects """ if type(ids) == list: return [self.cats[id] for id in ids] elif type(ids) == int: return [self.cats[ids]] def loadImgs(self, ids=[]): """ Load anns with the specified ids. :param ids (int array) : integer ids specifying img :return: imgs (object array) : loaded img objects """ if type(ids) == list: return [self.imgs[id] for id in ids] elif type(ids) == int: return [self.imgs[ids]] def showAnns(self, anns): """ Display the specified annotations. 
        :param anns (array of object): annotations to display
        :return: None
        """
        if len(anns) == 0:
            # NOTE(review): returns 0 (not None) for an empty list — callers
            # probably ignore the return value, but confirm before relying on it.
            return 0
        # Decide how to render: instance-style anns carry 'segmentation' or
        # 'keypoints'; caption-style anns carry 'caption'.
        if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
            datasetType = 'instances'
        elif 'caption' in anns[0]:
            datasetType = 'captions'
        else:
            raise Exception('datasetType not supported')
        if datasetType == 'instances':
            # Draw onto the current matplotlib axes; autoscale is disabled so
            # the overlay keeps the already-displayed image's extent.
            ax = plt.gca()
            ax.set_autoscale_on(False)
            polygons = []
            color = []
            for ann in anns:
                # Random bright color per annotation (components in [0.4, 1.0]).
                c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
                if 'segmentation' in ann:
                    if type(ann['segmentation']) == list:
                        # polygon: list of flat [x0, y0, x1, y1, ...] rings
                        for seg in ann['segmentation']:
                            poly = np.array(seg).reshape((int(len(seg)/2), 2))
                            polygons.append(Polygon(poly))
                            color.append(c)
                    else:
                        # mask: RLE segmentation; decode to a binary mask image
                        t = self.imgs[ann['image_id']]
                        if type(ann['segmentation']['counts']) == list:
                            # uncompressed RLE — compress it first
                            rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
                        else:
                            rle = [ann['segmentation']]
                        m = maskUtils.decode(rle)
                        img = np.ones( (m.shape[0], m.shape[1], 3) )
                        # NOTE(review): if ann['iscrowd'] is absent this raises
                        # KeyError, and any value other than 0/1 leaves
                        # color_mask unbound — confirm upstream guarantees.
                        if ann['iscrowd'] == 1:
                            # fixed greenish color for crowd regions
                            color_mask = np.array([2.0,166.0,101.0])/255
                        if ann['iscrowd'] == 0:
                            color_mask = np.random.random((1, 3)).tolist()[0]
                        for i in range(3):
                            img[:,:,i] = color_mask[i]
                        # overlay mask at 50% alpha (mask itself is the alpha channel)
                        ax.imshow(np.dstack( (img, m*0.5) ))
                if 'keypoints' in ann and type(ann['keypoints']) == list:
                    # turn skeleton into zero-based index
                    sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
                    # keypoints are stored as flat [x, y, visibility] triples
                    kp = np.array(ann['keypoints'])
                    x = kp[0::3]
                    y = kp[1::3]
                    v = kp[2::3]
                    for sk in sks:
                        # draw a limb only when both endpoints are labeled (v > 0)
                        if np.all(v[sk]>0):
                            plt.plot(x[sk],y[sk], linewidth=3, color=c)
                    # labeled-but-occluded points: black edge; visible points: colored edge
                    plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
                    plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
            # Filled polygons at low alpha plus a solid outline pass.
            p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
            ax.add_collection(p)
            p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
            ax.add_collection(p)
        elif datasetType == 'captions':
            for ann in anns:
                print(ann['caption'])

    def loadRes(self, resFile):
        """
        Load result file and return a result api object.

        Builds a new COCO object that shares this object's image list and
        takes its annotations from the detection/caption/keypoint results.
        Result annotations are mutated in place (ids, areas, boxes added).
        :param   resFile (str)     : file name of result file
        :return: res (obj)         : result api object
        """
        res = COCO()
        res.dataset['images'] = [img for img in self.dataset['images']]

        print('Loading and preparing results...')
        tic = time.time()
        # NOTE(review): 'unicode' exists only on Python 2 — on Python 3 a
        # non-str resFile makes this comparison raise NameError. Confirm the
        # target interpreter; upstream pycocotools guards this with a version check.
        if type(resFile) == str or type(resFile) == unicode:
            anns = json.load(open(resFile))
        elif type(resFile) == np.ndarray:
            anns = self.loadNumpyAnnotations(resFile)
        else:
            # already a list of annotation dicts
            anns = resFile
        # NOTE(review): message typo — "in" should read "is".
        assert type(anns) == list, 'results in not an array of objects'
        annsImgIds = [ann['image_id'] for ann in anns]
        # every result must refer to an image present in the current dataset
        assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
            'Results do not correspond to current coco set'
        if 'caption' in anns[0]:
            # caption results: keep only images that actually have results
            imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
            res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
            for id, ann in enumerate(anns):
                ann['id'] = id+1
        elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
            # box results: synthesize a rectangular segmentation and area
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                bb = ann['bbox']
                x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
                if not 'segmentation' in ann:
                    ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
                ann['area'] = bb[2]*bb[3]
                ann['id'] = id+1
                ann['iscrowd'] = 0
        elif 'segmentation' in anns[0]:
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                # now only support compressed RLE format as segmentation results
                ann['area'] = maskUtils.area(ann['segmentation'])
                if not 'bbox' in ann:
                    ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
                ann['id'] = id+1
                ann['iscrowd'] = 0
        elif 'keypoints' in anns[0]:
            # keypoint results: area/bbox derived from the keypoint extent
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                s = ann['keypoints']
                x = s[0::3]
                y = s[1::3]
                x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
                ann['area'] = (x1-x0)*(y1-y0)
                ann['id'] = id + 1
                ann['bbox'] = [x0,y0,x1-x0,y1-y0]
        print('DONE (t={:0.2f}s)'.format(time.time()- tic))

        res.dataset['annotations'] = anns
        res.createIndex()
        return res

    def download(self, tarDir = None, imgIds = [] ):
        '''
        Download COCO images from mscoco.org server.

        Skips files that already exist in tarDir; creates tarDir if missing.
        NOTE(review): the mutable default imgIds=[] is safe only because it is
        never mutated here; the progress counter prints i (0-based), so the
        last line reads "downloaded N-1/N".
        :param tarDir (str): COCO results directory name
               imgIds (list): images to be downloaded
        :return:
        '''
        if tarDir is None:
            print('Please specify target directory')
            # -1 signals the error to the caller (no exception raised)
            return -1
        if len(imgIds) == 0:
            # no filter given: download every image in the dataset
            imgs = self.imgs.values()
        else:
            imgs = self.loadImgs(imgIds)
        N = len(imgs)
        if not os.path.exists(tarDir):
            os.makedirs(tarDir)
        for i, img in enumerate(imgs):
            tic = time.time()
            fname = os.path.join(tarDir, img['file_name'])
            if not os.path.exists(fname):
                urlretrieve(img['coco_url'], fname)
            print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))

    def loadNumpyAnnotations(self, data):
        """
        Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
        :param  data (numpy.ndarray)
        :return: annotations (python nested list)
        """
        print('Converting ndarray to lists...')
        assert(type(data) == np.ndarray)
        print(data.shape)
        # exactly the 7 columns listed above
        assert(data.shape[1] == 7)
        N = data.shape[0]
        ann = []
        for i in range(N):
            # progress marker once per million rows
            if i % 1000000 == 0:
                print('{}/{}'.format(i,N))
            ann += [{
                'image_id'  : int(data[i, 0]),
                'bbox'  : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
                'score' : data[i, 5],
                'category_id': int(data[i, 6]),
                }]
        return ann

    def annToRLE(self, ann):
        """
        Convert annotation which can be polygons, uncompressed RLE to RLE.
        :return: rle : run-length encoding of the annotation's mask
                 (note: an RLE dict, not a decoded binary mask — use
                 annToMask for the numpy 2D array)
        """
        # image dimensions are needed to rasterize polygons / expand RLE
        t = self.imgs[ann['image_id']]
        h, w = t['height'], t['width']
        segm = ann['segmentation']
        if type(segm) == list:
            # polygon -- a single object might consist of multiple parts
            # we merge all parts into one mask rle code
            rles = maskUtils.frPyObjects(segm, h, w)
            rle = maskUtils.merge(rles)
        elif type(segm['counts']) == list:
            # uncompressed RLE
            rle = maskUtils.frPyObjects(segm, h, w)
        else:
            # rle
            rle = ann['segmentation']
        return rle

    def annToMask(self, ann):
        """
        Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
        :return: binary mask (numpy 2D array)
        """
        rle = self.annToRLE(ann)
        m = maskUtils.decode(rle)
        return m
# SPDX-License-Identifier: Apache-2.0